Diffstat (limited to 'modules')
-rw-r--r--  modules/api/api.py               18
-rw-r--r--  modules/cmd_args.py               2
-rw-r--r--  modules/images.py                15
-rw-r--r--  modules/img2img.py                8
-rw-r--r--  modules/launch_utils.py           2
-rwxr-xr-x  modules/processing.py            42
-rw-r--r--  modules/prompt_parser.py          2
-rw-r--r--  modules/rng.py                    2
-rw-r--r--  modules/scripts.py                4
-rw-r--r--  modules/sd_samplers_common.py     8
-rw-r--r--  modules/sd_vae.py                 3
-rw-r--r--  modules/shared_options.py         2
-rw-r--r--  modules/ui.py                     6
-rw-r--r--  modules/ui_common.py              2
14 files changed, 72 insertions(+), 44 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index fb2c2ce9..6e8d21a3 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -23,8 +23,7 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_
from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image
-from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights, checkpoint_aliases
-from modules.sd_vae import vae_dict
+from modules.sd_models import unload_model_weights, reload_model_weights, checkpoint_aliases
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
@@ -57,6 +56,15 @@ def setUpscalers(req: dict):
def decode_base64_to_image(encoding):
+ if encoding.startswith("http://") or encoding.startswith("https://"):
+ import requests
+ response = requests.get(encoding, timeout=30, headers={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'})
+ try:
+ image = Image.open(BytesIO(response.content))
+ return image
+ except Exception as e:
+ raise HTTPException(status_code=500, detail="Invalid image url") from e
+
if encoding.startswith("data:image/"):
encoding = encoding.split(";")[1].split(",")[1]
try:
@@ -567,10 +575,12 @@ class Api:
]
def get_sd_models(self):
- return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
+ import modules.sd_models as sd_models
+ return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in sd_models.checkpoints_list.values()]
def get_sd_vaes(self):
- return [{"model_name": x, "filename": vae_dict[x]} for x in vae_dict.keys()]
+ import modules.sd_vae as sd_vae
+ return [{"model_name": x, "filename": sd_vae.vae_dict[x]} for x in sd_vae.vae_dict.keys()]
def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index b0a11538..f360f484 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -81,7 +81,7 @@ parser.add_argument("--gradio-auth", type=str, help='set gradio authentication l
parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
-parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it")
+parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it", default=[data_path])
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
diff --git a/modules/images.py b/modules/images.py
index 019c1d60..a6b4fb1e 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -355,7 +355,9 @@ class FilenameGenerator:
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
- 'prompt_hash': lambda self: hashlib.sha256(self.prompt.encode()).hexdigest()[0:8],
+ 'prompt_hash': lambda self, *args: self.string_hash(self.prompt, *args),
+ 'negative_prompt_hash': lambda self, *args: self.string_hash(self.p.negative_prompt, *args),
+ 'full_prompt_hash': lambda self, *args: self.string_hash(f"{self.p.prompt} {self.p.negative_prompt}", *args), # a space in between to create a unique string
'prompt': lambda self: sanitize_filename_part(self.prompt),
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
@@ -368,7 +370,8 @@ class FilenameGenerator:
'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
'user': lambda self: self.p.user,
'vae_filename': lambda self: self.get_vae_filename(),
- 'none': lambda self: '', # Overrides the default so you can get just the sequence number
+ 'none': lambda self: '', # Overrides the default, so you can get just the sequence number
+ 'image_hash': lambda self, *args: self.image_hash(*args) # accepts formats: [image_hash<length>] default full hash
}
default_time_format = '%Y%m%d%H%M%S'
@@ -448,6 +451,14 @@ class FilenameGenerator:
return sanitize_filename_part(formatted_time, replace_spaces=False)
+ def image_hash(self, *args):
+ length = int(args[0]) if (args and args[0] != "") else None
+ return hashlib.sha256(self.image.tobytes()).hexdigest()[0:length]
+
+ def string_hash(self, text, *args):
+ length = int(args[0]) if (args and args[0] != "") else 8
+ return hashlib.sha256(text.encode()).hexdigest()[0:length]
+
def apply(self, x):
res = ''
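
The new placeholders accept an optional length argument in the filename pattern, e.g. [prompt_hash<12>], [negative_prompt_hash], [full_prompt_hash] and [image_hash<8>] (image_hash defaults to the full digest). Below is a standalone sketch of what string_hash evaluates to, reimplemented here purely for illustration.

import hashlib

def string_hash(text, length=8):
    # same computation as FilenameGenerator.string_hash above
    return hashlib.sha256(text.encode()).hexdigest()[:length]

print(string_hash("a red fox in the snow"))      # default 8-character prefix, as [prompt_hash]
print(string_hash("a red fox in the snow", 12))  # as [prompt_hash<12>]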
diff --git a/modules/img2img.py b/modules/img2img.py
index ac9fd3f8..1519e132 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -122,15 +122,14 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
is_batch = mode == 5
if mode == 0: # img2img
- image = init_img.convert("RGB")
+ image = init_img
mask = None
elif mode == 1: # img2img sketch
- image = sketch.convert("RGB")
+ image = sketch
mask = None
elif mode == 2: # inpaint
image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
- mask = mask.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0)
- image = image.convert("RGB")
+ mask = processing.create_binary_mask(mask)
elif mode == 3: # inpaint sketch
image = inpaint_color_sketch
orig = inpaint_color_sketch_orig or inpaint_color_sketch
@@ -139,7 +138,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
blur = ImageFilter.GaussianBlur(mask_blur)
image = Image.composite(image.filter(blur), orig, mask.filter(blur))
- image = image.convert("RGB")
elif mode == 4: # inpaint upload mask
image = init_img_inpaint
mask = init_mask_inpaint
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 444f16fa..7e4d5a61 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -246,7 +246,7 @@ def list_extensions(settings_file):
disabled_extensions = set(settings.get('disabled_extensions', []))
disable_all_extensions = settings.get('disable_all_extensions', 'none')
- if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions:
+ if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions or not os.path.isdir(extensions_dir):
return []
return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]
diff --git a/modules/processing.py b/modules/processing.py
index 1d098302..d4926524 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -81,6 +81,12 @@ def apply_overlay(image, paste_loc, index, overlays):
return image
+def create_binary_mask(image):
+ if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255):
+ image = image.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0)
+ else:
+ image = image.convert('L')
+ return image
def txt2img_image_conditioning(sd_model, x, width, height):
if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models
@@ -380,14 +386,14 @@ class StableDiffusionProcessing:
return self.token_merging_ratio or opts.token_merging_ratio
def setup_prompts(self):
- if type(self.prompt) == list:
+ if isinstance(self.prompt, list):
self.all_prompts = self.prompt
- elif type(self.negative_prompt) == list:
+ elif isinstance(self.negative_prompt, list):
self.all_prompts = [self.prompt] * len(self.negative_prompt)
else:
self.all_prompts = self.batch_size * self.n_iter * [self.prompt]
- if type(self.negative_prompt) == list:
+ if isinstance(self.negative_prompt, list):
self.all_negative_prompts = self.negative_prompt
else:
self.all_negative_prompts = [self.negative_prompt] * len(self.all_prompts)
@@ -506,10 +512,10 @@ class Processed:
self.s_noise = p.s_noise
self.s_min_uncond = p.s_min_uncond
self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
- self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
- self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
- self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
- self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
+ self.prompt = self.prompt if not isinstance(self.prompt, list) else self.prompt[0]
+ self.negative_prompt = self.negative_prompt if not isinstance(self.negative_prompt, list) else self.negative_prompt[0]
+ self.seed = int(self.seed if not isinstance(self.seed, list) else self.seed[0]) if self.seed is not None else -1
+ self.subseed = int(self.subseed if not isinstance(self.subseed, list) else self.subseed[0]) if self.subseed is not None else -1
self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning
self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
@@ -696,11 +702,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}
try:
- # after running refiner, the refiner model is not unloaded - webui swaps back to main model here
- if shared.sd_model.sd_checkpoint_info.title != opts.sd_model_checkpoint:
- sd_models.reload_model_weights()
-
# if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint
+ # and if after running refiner, the refiner model is not unloaded - webui swaps back to the main model here; if a model override is present it will be reloaded afterwards
if sd_models.checkpoint_aliases.get(p.override_settings.get('sd_model_checkpoint')) is None:
p.override_settings.pop('sd_model_checkpoint', None)
sd_models.reload_model_weights()
@@ -735,7 +738,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
def process_images_inner(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
- if type(p.prompt) == list:
+ if isinstance(p.prompt, list):
assert(len(p.prompt) > 0)
else:
assert p.prompt is not None
@@ -766,12 +769,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
p.setup_prompts()
- if type(seed) == list:
+ if isinstance(seed, list):
p.all_seeds = seed
else:
p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]
- if type(subseed) == list:
+ if isinstance(subseed, list):
p.all_subseeds = subseed
else:
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
@@ -1149,6 +1152,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
devices.torch_gc()
def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
+ if shared.state.interrupted:
+ return samples
+
self.is_hr_pass = True
target_width = self.hr_upscale_to_x
@@ -1262,12 +1268,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if self.hr_negative_prompt == '':
self.hr_negative_prompt = self.negative_prompt
- if type(self.hr_prompt) == list:
+ if isinstance(self.hr_prompt, list):
self.all_hr_prompts = self.hr_prompt
else:
self.all_hr_prompts = self.batch_size * self.n_iter * [self.hr_prompt]
- if type(self.hr_negative_prompt) == list:
+ if isinstance(self.hr_negative_prompt, list):
self.all_hr_negative_prompts = self.hr_negative_prompt
else:
self.all_hr_negative_prompts = self.batch_size * self.n_iter * [self.hr_negative_prompt]
@@ -1385,7 +1391,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image_mask = self.image_mask
if image_mask is not None:
- image_mask = image_mask.convert('L')
+ # image_mask is passed in as RGBA by Gradio to support alpha masks,
+ # but we still want to support binary masks.
+ image_mask = create_binary_mask(image_mask)
if self.inpainting_mask_invert:
image_mask = ImageOps.invert(image_mask)
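
A self-contained illustration of the new create_binary_mask helper (Pillow only; the function body is copied from the hunk above so the snippet runs on its own): an RGBA mask whose alpha channel carries information is thresholded on alpha, while anything else falls back to a plain luminance conversion.

from PIL import Image

def create_binary_mask(image):
    # use the alpha channel only when it is not uniformly opaque
    if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255):
        image = image.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0)
    else:
        image = image.convert('L')
    return image

rgba_mask = Image.new("RGBA", (4, 4), (255, 255, 255, 200))  # mask painted into the alpha channel
plain_mask = Image.new("RGB", (4, 4), (255, 255, 255))       # old-style opaque mask

print(create_binary_mask(rgba_mask).getpixel((0, 0)))   # 255 - alpha 200 passes the >128 threshold
print(create_binary_mask(plain_mask).getpixel((0, 0)))  # 255 - plain conversion to 'L'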
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 32d214e3..e811ae99 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -86,7 +86,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
yield args[(step - 1) % len(args)]
def start(self, args):
def flatten(x):
- if type(x) == str:
+ if isinstance(x, str):
yield x
else:
for gen in x:
diff --git a/modules/rng.py b/modules/rng.py
index f927a318..9e8ba2ee 100644
--- a/modules/rng.py
+++ b/modules/rng.py
@@ -98,7 +98,7 @@ def slerp(val, low, high):
class ImageRNG:
def __init__(self, shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0):
- self.shape = shape
+ self.shape = tuple(map(int, shape))
self.seeds = seeds
self.subseeds = subseeds
self.subseed_strength = subseed_strength
diff --git a/modules/scripts.py b/modules/scripts.py
index fcab5d3a..e8518ad0 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -269,7 +269,7 @@ class Script:
"""helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id"""
need_tabname = self.show(True) == self.show(False)
- tabkind = 'img2img' if self.is_img2img else 'txt2txt'
+ tabkind = 'img2img' if self.is_img2img else 'txt2img'
tabname = f"{tabkind}_" if need_tabname else ""
title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower()))
@@ -289,7 +289,7 @@ class ScriptBuiltinUI(Script):
"""helper function to generate id for a HTML element, constructs final id out of tab and user-supplied item_id"""
need_tabname = self.show(True) == self.show(False)
- tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else ""
+ tabname = ('img2img' if self.is_img2img else 'txt2img') + "_" if need_tabname else ""
return f'{tabname}{item_id}'
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 8886de7e..feb1a9db 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -36,7 +36,7 @@ approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD":
def samples_to_images_tensor(sample, approximation=None, model=None):
'''latents -> images [-1, 1]'''
- if approximation is None:
+ if approximation is None or (shared.state.interrupted and opts.live_preview_fast_interrupt):
approximation = approximation_indexes.get(opts.show_progress_type, 0)
if approximation == 2:
@@ -44,13 +44,13 @@ def samples_to_images_tensor(sample, approximation=None, model=None):
elif approximation == 1:
x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype)).detach()
elif approximation == 3:
- x_sample = sample * 1.5
- x_sample = sd_vae_taesd.decoder_model()(x_sample.to(devices.device, devices.dtype)).detach()
+ x_sample = sd_vae_taesd.decoder_model()(sample.to(devices.device, devices.dtype)).detach()
x_sample = x_sample * 2 - 1
else:
if model is None:
model = shared.sd_model
- x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype))
+ with devices.without_autocast(): # fixes an issue with unstable VAEs that are flaky even in fp32
+ x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype))
return x_sample
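
devices.without_autocast() is a webui-internal helper; a rough standalone equivalent of the "decode outside autocast" idea in plain PyTorch could look like the sketch below, where vae_decode and device_type are placeholders rather than webui APIs.

import torch

def decode_first_stage_stable(vae_decode, latents, device_type="cuda"):
    # run the VAE decode with autocast disabled so an unstable VAE keeps its own precision
    with torch.autocast(device_type=device_type, enabled=False):
        return vae_decode(latents)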
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 4b98b3c2..dbade067 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -70,7 +70,6 @@ def get_filename(filepath):
def refresh_vae_list():
- global vae_dict
vae_dict.clear()
paths = [
@@ -104,7 +103,7 @@ def refresh_vae_list():
name = get_filename(filepath)
vae_dict[name] = filepath
- vae_dict = dict(sorted(vae_dict.items(), key=lambda item: shared.natural_sort_key(item[0])))
+ vae_dict.update(dict(sorted(vae_dict.items(), key=lambda item: shared.natural_sort_key(item[0]))))
def find_vae_near_checkpoint(checkpoint_file):
diff --git a/modules/shared_options.py b/modules/shared_options.py
index 79cbb92e..8630d474 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -232,6 +232,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
"gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the <a href='https://huggingface.co/spaces/gradio/theme-gallery'>gallery</a>.").needs_reload_ui(),
"gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"),
+ "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("an be any valid CSS value").needs_reload_ui(),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
@@ -281,6 +282,7 @@ options_templates.update(options_section(('ui', "Live previews"), {
"show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"),
"live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
"live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
+ "live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
diff --git a/modules/ui.py b/modules/ui.py
index a6b1f964..01f77849 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -575,7 +575,7 @@ def create_ui():
add_copy_image_controls('img2img', init_img)
with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
- sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
+ sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
add_copy_image_controls('sketch', sketch)
with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
@@ -583,7 +583,7 @@ def create_ui():
add_copy_image_controls('inpaint', init_img_with_mask)
with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
- inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
+ inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
inpaint_color_sketch_orig = gr.State(None)
add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
@@ -598,7 +598,7 @@ def create_ui():
with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
- init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask")
+ init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask")
with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
diff --git a/modules/ui_common.py b/modules/ui_common.py
index 4c035f2a..eddc4bc8 100644
--- a/modules/ui_common.py
+++ b/modules/ui_common.py
@@ -132,7 +132,7 @@ Requested path was: {f}
with gr.Column(variant='panel', elem_id=f"{tabname}_results"):
with gr.Group(elem_id=f"{tabname}_gallery_container"):
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4)
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4, preview=True, height=shared.opts.gallery_height or None)
generation_info = None
with gr.Column():