-rw-r--r--  javascript/ui.js                             22
-rw-r--r--  modules/generation_parameters_copypaste.py    3
-rw-r--r--  modules/img2img.py                            4
-rw-r--r--  modules/processing.py                        37
-rw-r--r--  modules/ui.py                                73
-rw-r--r--  scripts/xyz_grid.py                           1
-rw-r--r--  style.css                                     4
7 files changed, 131 insertions, 13 deletions
diff --git a/javascript/ui.js b/javascript/ui.js
index 4a440193..a73eeaa2 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -132,7 +132,14 @@ function create_tab_index_args(tabId, args){
function get_img2img_tab_index() {
let res = args_to_array(arguments)
- res.splice(-2)
+ res.splice(-2) // gradio also sends outputs to the arguments, pop them off
+ res[0] = get_tab_index('mode_img2img')
+ return res
+}
+
+function get_img2img_tab_index_for_res_preview() {
+ let res = args_to_array(arguments)
+ res.splice(-1) // gradio also sends outputs to the arguments, pop them off
res[0] = get_tab_index('mode_img2img')
return res
}
@@ -361,3 +368,16 @@ function selectCheckpoint(name){
desiredCheckpointName = name;
gradioApp().getElementById('change_checkpoint').click()
}
+
+
+function onCalcResolutionImg2Img(mode, scale, width, height, resize_mode, init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint){
+ i2iScale = gradioApp().getElementById('img2img_scale')
+ i2iWidth = gradioApp().getElementById('img2img_width')
+ i2iHeight = gradioApp().getElementById('img2img_height')
+
+ setInactive(i2iScale, scale == 1)
+ setInactive(i2iWidth, scale > 1)
+ setInactive(i2iHeight, scale > 1)
+
+ return [];
+}
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 6df76858..0ad2ad4f 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -282,6 +282,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
res["Hires resize-1"] = 0
res["Hires resize-2"] = 0
+ if "Img2Img upscale" not in res:
+ res["Img2Img upscale"] = 1
+
restore_old_hires_fix_params(res)
return res
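
A minimal sketch of what the new default buys: an infotext written before this change has no "Img2Img upscale" key, and the hunk above (inside what appears to be parse_generation_parameters, going by the surrounding file) backfills it as 1 so pasting old parameters leaves the upscale factor at "off". This assumes a working webui environment; the import path comes from this diff and is not verified here.

from modules import generation_parameters_copypaste as gpc

# Old-style infotext: no "Img2Img upscale" entry present.
res = gpc.parse_generation_parameters(
    "Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512"
)
# The new fallback fills the missing key with 1 (no upscale).
assert res["Img2Img upscale"] == 1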
diff --git a/modules/img2img.py b/modules/img2img.py
index c973b770..959dd96e 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, upscaler: str, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5
@@ -149,6 +149,8 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
override_settings=override_settings,
+ scale=scale,
+ upscaler=upscaler,
)
p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/processing.py b/modules/processing.py
index 6d9c6a8d..509b80b9 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -946,7 +946,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler = None
- def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
+ def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
self.init_images = init_images
@@ -966,11 +966,37 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.mask = None
self.nmask = None
self.image_conditioning = None
+ self.scale = scale
+ self.upscaler = upscaler
+
+ def get_final_size(self):
+ if self.scale > 1:
+ img = self.init_images[0]
+ width = int(img.width * self.scale)
+ height = int(img.height * self.scale)
+ return width, height
+ else:
+ return self.width, self.height
+
def init(self, all_prompts, all_seeds, all_subseeds):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
crop_region = None
+ if self.scale > 1:
+ self.extra_generation_params["Img2Img upscale"] = self.scale
+
+ # Non-latent upscalers are run before sampling
+ # Latent upscalers are run during sampling
+ init_upscaler = None
+ if self.upscaler is not None:
+ self.extra_generation_params["Img2Img upscaler"] = self.upscaler
+ if self.upscaler not in shared.latent_upscale_modes:
+ assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}"
+ init_upscaler = self.upscaler
+
+ self.width, self.height = self.get_final_size()
+
image_mask = self.image_mask
if image_mask is not None:
@@ -993,7 +1019,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image_mask = images.resize_image(2, mask, self.width, self.height)
self.paste_to = (x1, y1, x2-x1, y2-y1)
else:
- image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
+ image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height, init_upscaler)
np_mask = np.array(image_mask)
np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
self.mask_for_overlay = Image.fromarray(np_mask)
@@ -1010,7 +1036,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image = images.flatten(img, opts.img2img_background_color)
if crop_region is None and self.resize_mode != 3:
- image = images.resize_image(self.resize_mode, image, self.width, self.height)
+ image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler)
if image_mask is not None:
image_masked = Image.new('RGBa', (image.width, image.height))
@@ -1055,8 +1081,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
- if self.resize_mode == 3:
- self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+ latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
+ if latent_scale_mode is not None:
+ self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
if image_mask is not None:
init_mask = latent_mask
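
To make the processing.py changes concrete, a rough usage sketch of the two new fields, assuming a webui environment where modules.processing imports cleanly and `img` is a 640x448 PIL image (the size is only an illustration; every other kwarg shown already existed before this diff):

from modules.processing import StableDiffusionProcessingImg2Img

p = StableDiffusionProcessingImg2Img(
    init_images=[img],
    width=512, height=512,     # ignored once scale > 1, see get_final_size()
    scale=2.0,                 # new: upscale factor relative to the init image size
    upscaler="Lanczos",        # new: non-latent upscalers run before sampling; names
                               # from shared.latent_upscale_modes are applied to
                               # init_latent via torch.nn.functional.interpolate instead
    denoising_strength=0.75,
)
print(p.get_final_size())      # (1280, 896): int(640 * 2.0), int(448 * 2.0)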
diff --git a/modules/ui.py b/modules/ui.py
index eb5fcd3f..f22da16a 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -15,6 +15,7 @@ import warnings
import gradio as gr
import gradio.routes
import gradio.utils
+from gradio.events import Releaseable
import numpy as np
from PIL import Image, PngImagePlugin
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
@@ -127,6 +128,26 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz
return f"resize: from <span class='resolution'>{p.width}x{p.height}</span> to <span class='resolution'>{p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}</span>"
+def calc_resolution_img2img(mode, scale, resize_x, resize_y, resize_mode, *i2i_images):
+ init_img = None
+ if mode in {0, 1, 3, 4}:
+ init_img = i2i_images[mode]
+ elif mode == 2:
+ init_img = i2i_images[mode]["image"]
+
+ if not init_img:
+ return ""
+
+ if scale > 1:
+ width = int(init_img.width * scale)
+ height = int(init_img.height * scale)
+ else:
+ width = resize_x
+ height = resize_y
+
+ return f"resize: from <span class='resolution'>{init_img.width}x{init_img.height}</span> to <span class='resolution'>{width}x{height}</span>"
+
+
def apply_styles(prompt, prompt_neg, styles):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles)
@@ -735,7 +756,7 @@ def create_ui():
)
with FormRow():
- resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
+ resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
for category in ordered_ui_categories():
if category == "sampler":
@@ -744,8 +765,13 @@ def create_ui():
elif category == "dimensions":
with FormRow():
with gr.Column(elem_id="img2img_column_size", scale=4):
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
+ with FormRow(variant="compact"):
+ final_resolution = FormHTML(value="", elem_id="img2img_finalres", label="Upscaled resolution", interactive=False)
+ with FormRow(variant="compact"):
+ scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=1.0, elem_id="img2img_scale")
+ with FormRow(variant="compact"):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
@@ -760,7 +786,9 @@ def create_ui():
with FormRow():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
+ with FormRow():
+ upscaler = gr.Dropdown(label="Upscaler", elem_id="img2img_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
elif category == "seed":
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
@@ -813,6 +841,39 @@ def create_ui():
outputs=[inpaint_controls, mask_alpha],
)
+ img2img_resolution_preview_inputs = [dummy_component, # filled in by selected img2img tab index in _js
+ scale, width, height, resize_mode,
+ init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint]
+ for input in img2img_resolution_preview_inputs[1:]:
+ if isinstance(input, Releaseable):
+ input.release(
+ fn=calc_resolution_img2img,
+ _js="get_img2img_tab_index_for_res_preview",
+ inputs=img2img_resolution_preview_inputs,
+ outputs=[final_resolution],
+ show_progress=False,
+ ).success(
+ None,
+ _js="onCalcResolutionImg2Img",
+ inputs=img2img_resolution_preview_inputs,
+ outputs=[],
+ show_progress=False,
+ )
+ else:
+ input.change(
+ fn=calc_resolution_img2img,
+ _js="get_img2img_tab_index_for_res_preview",
+ inputs=img2img_resolution_preview_inputs,
+ outputs=[final_resolution],
+ show_progress=False,
+ ).success(
+ None,
+ _js="onCalcResolutionImg2Img",
+ inputs=img2img_resolution_preview_inputs,
+ outputs=[],
+ show_progress=False,
+ )
+
img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
@@ -861,6 +922,8 @@ def create_ui():
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
+ scale,
+ upscaler,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
@@ -946,6 +1009,8 @@ def create_ui():
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
+ (scale, "Img2Img upscale"),
+ (upscaler, "Img2Img upscaler"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 3895a795..3f6c1997 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -220,6 +220,7 @@ axis_options = [
AxisOption("Clip skip", int, apply_clip_skip),
AxisOption("Denoising", float, apply_field("denoising_strength")),
AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
+ AxisOptionImg2Img("Upscaler", str, apply_field("upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)),
AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),
diff --git a/style.css b/style.css
index de16a7f2..379a89dc 100644
--- a/style.css
+++ b/style.css
@@ -287,13 +287,13 @@ button.custom-button{
border-radius: 0 0.5rem 0.5rem 0;
}
-#txtimg_hr_finalres{
+#txtimg_hr_finalres, #img2img_finalres {
min-height: 0 !important;
padding: .625rem .75rem;
margin-left: -0.75em
}
-#txtimg_hr_finalres .resolution{
+#txtimg_hr_finalres .resolution, #img2img_finalres .resolution{
font-weight: bold;
}