author    Buckzor <bucklogos@yahoo.co.uk>        2022-10-13 20:04:22 +0100
committer AUTOMATIC1111 <16777216c@gmail.com>    2022-10-14 16:47:16 +0300
commit    40d1c6e423b4dc52b3bdae43d9e2442960760ced (patch)
tree      31f6ca889a9e67242600c79c149b74095425e1f9
parent    b2261b53ae4ad01b3713bc73ff62ab7b6f479e26 (diff)
Option between stretch and crop for Highres. fix
-rw-r--r--  modules/processing.py | 34
-rw-r--r--  modules/txt2img.py    |  7
-rw-r--r--  modules/ui.py         | 25
3 files changed, 42 insertions(+), 24 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index abbfdf98..0246f5dd 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -506,13 +506,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
     firstphase_width_truncated = 0
     firstphase_height_truncated = 0
-    def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, first_pass_width=512, first_pass_height=512, **kwargs):
+    def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, firstphase_width=512, firstphase_height=512, crop_scale=False, **kwargs):
         super().__init__(**kwargs)
         self.enable_hr = enable_hr
         self.scale_latent = scale_latent
         self.denoising_strength = denoising_strength
-        self.first_pass_width = first_pass_width
-        self.first_pass_height = first_pass_height
+        self.firstphase_width = firstphase_width
+        self.firstphase_height = firstphase_height
+        self.crop_scale = crop_scale
     def init(self, all_prompts, all_seeds, all_subseeds):
         if self.enable_hr:
@@ -521,14 +522,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             else:
                 state.job_count = state.job_count * 2
-            desired_pixel_count = self.first_pass_width * self.first_pass_height
-            actual_pixel_count = self.width * self.height
-            scale = math.sqrt(desired_pixel_count / actual_pixel_count)
+            #desired_pixel_count = self.firstphase_width * self.firstphase_height
+            #actual_pixel_count = self.width * self.height
+            #scale = math.sqrt(desired_pixel_count / actual_pixel_count)
-            self.firstphase_width = math.ceil(scale * self.width / 64) * 64
-            self.firstphase_height = math.ceil(scale * self.height / 64) * 64
-            self.firstphase_width_truncated = int(scale * self.width)
-            self.firstphase_height_truncated = int(scale * self.height)
+            #self.firstphase_width = math.ceil(scale * self.width / 64) * 64
+            #self.firstphase_height = math.ceil(scale * self.height / 64) * 64
+            #self.firstphase_width_truncated = int(scale * self.width)
+            #self.firstphase_height_truncated = int(scale * self.height)
     def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
         self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
@@ -541,8 +542,17 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
         samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning)
-        truncate_x = (self.firstphase_width - self.firstphase_width_truncated) // opt_f
-        truncate_y = (self.firstphase_height - self.firstphase_height_truncated) // opt_f
+        truncate_x = 0
+        truncate_y = 0
+
+        if self.crop_scale:
+            if self.width/self.firstphase_width > self.height/self.firstphase_height:
+                #Crop to landscape
+                truncate_y = (self.width - self.firstphase_width)//2 // opt_f
+
+            elif self.width/self.firstphase_width < self.height/self.firstphase_height:
+                #Crop to portrait
+                truncate_x = (self.height - self.firstphase_height)//2 // opt_f
         samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2]
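The core of the change is the block above: instead of always deriving a truncated first-pass size from a pixel-count ratio, the first pass now renders at exactly firstphase_width x firstphase_height, and latent cropping only happens when the new crop_scale flag is set. Below is a minimal, self-contained sketch of that arithmetic; the function name, the example sizes, and the default opt_f=8 latent downscale factor are illustrative assumptions, not repository code.

# Minimal sketch of the crop-vs-stretch decision introduced above.
# highres_truncation() is a hypothetical helper for illustration only;
# in the patch these values live on the processing object and opt_f is
# the latent downscale factor (assumed to be 8 here).
def highres_truncation(width, height, firstphase_width, firstphase_height, crop_scale, opt_f=8):
    truncate_x = 0
    truncate_y = 0
    if crop_scale:
        if width / firstphase_width > height / firstphase_height:
            # Target is proportionally wider than the first pass: crop rows ("crop to landscape").
            truncate_y = (width - firstphase_width) // 2 // opt_f
        elif width / firstphase_width < height / firstphase_height:
            # Target is proportionally taller than the first pass: crop columns ("crop to portrait").
            truncate_x = (height - firstphase_height) // 2 // opt_f
    return truncate_x, truncate_y

# Stretch (crop_scale=False) leaves the first-pass latent untouched;
# crop removes truncate//2 latent rows/columns from each side before upscaling.
print(highres_truncation(1024, 512, 512, 512, crop_scale=False))  # (0, 0)
print(highres_truncation(1024, 512, 512, 512, crop_scale=True))   # (0, 32)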
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 85cbece4..447ec3d3 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -6,7 +6,7 @@ import modules.processing as processing
 from modules.ui import plaintext_to_html
-def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, scale_latent: bool, denoising_strength: float, first_pass_width: int, first_pass_height: int, *args):
+def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, scale_latent: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, crop_scale: bool, *args):
     p = StableDiffusionProcessingTxt2Img(
         sd_model=shared.sd_model,
         outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
@@ -32,8 +32,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
enable_hr=enable_hr,
scale_latent=scale_latent if enable_hr else None,
denoising_strength=denoising_strength if enable_hr else None,
- first_pass_width=first_pass_width if enable_hr else None,
- first_pass_height=first_pass_height if enable_hr else None,
+ firstphase_width=firstphase_width if enable_hr else None,
+ firstphase_height=firstphase_height if enable_hr else None,
+ crop_scale=crop_scale if enable_hr else None,
)
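In modules/txt2img.py the renamed first-pass parameters and the new crop_scale value are forwarded to the processing object only while the highres pass is enabled, collapsing to None otherwise, exactly like the existing arguments. A small self-contained sketch of that forwarding pattern; build_hr_kwargs is a hypothetical helper, and the real call constructs StableDiffusionProcessingTxt2Img with many more arguments.

def build_hr_kwargs(enable_hr, firstphase_width, firstphase_height, crop_scale):
    # Mirrors the conditional forwarding in the patched txt2img():
    # every highres-only parameter is passed as None when the fix is off.
    return dict(
        enable_hr=enable_hr,
        firstphase_width=firstphase_width if enable_hr else None,
        firstphase_height=firstphase_height if enable_hr else None,
        crop_scale=crop_scale if enable_hr else None,
    )

print(build_hr_kwargs(True, 512, 512, True))
# {'enable_hr': True, 'firstphase_width': 512, 'firstphase_height': 512, 'crop_scale': True}
print(build_hr_kwargs(False, 512, 512, True))
# {'enable_hr': False, 'firstphase_width': None, 'firstphase_height': None, 'crop_scale': None}

Because crop_scale defaults to False in the new __init__, callers that never pass it keep the previous stretch behaviour.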
diff --git a/modules/ui.py b/modules/ui.py
index 544419b2..f2d81f68 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -540,12 +540,18 @@ def create_ui(wrap_gradio_gpu_call):
                     enable_hr = gr.Checkbox(label='Highres. fix', value=False)
                 with gr.Row(visible=False) as hr_options:
-                    first_pass_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512)
-                    first_pass_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512)
-                    scale_latent = gr.Checkbox(label='Scale latent', value=False)
-                    denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
+                    with gr.Column(scale=1.0):
+                        firstphase_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512)
+                        firstphase_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512)
+
+                    with gr.Column(scale=1.0):
+                        with gr.Row():
+                            crop_scale = gr.Checkbox(label='Crop when scaling', value=False)
+                            scale_latent = gr.Checkbox(label='Scale latent', value=False)
+                        with gr.Row():
+                            denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
-                with gr.Row():
+                with gr.Row(equal_height=True):
                     batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
                     batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
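The UI hunk above splits the hidden "Highres. fix" options row into two columns: the first-pass size sliders on the left, and the new "Crop when scaling" checkbox next to "Scale latent" plus the denoising slider on the right. The following is a standalone sketch of just that nesting with gradio; only the layout is reproduced, integer scale values are an illustrative choice (the patch passes 1.0), and none of the webui event wiring is included.

import gradio as gr

# Standalone layout sketch of the reworked hr_options row; not webui code.
with gr.Blocks() as demo:
    enable_hr = gr.Checkbox(label='Highres. fix', value=False)
    with gr.Row(visible=False) as hr_options:
        with gr.Column(scale=1):  # the patch uses scale=1.0
            firstphase_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512)
            firstphase_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512)
        with gr.Column(scale=1):
            with gr.Row():
                crop_scale = gr.Checkbox(label='Crop when scaling', value=False)
                scale_latent = gr.Checkbox(label='Scale latent', value=False)
            with gr.Row():
                denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)

# demo.launch()  # uncomment to preview the layout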
@@ -606,8 +612,9 @@ def create_ui(wrap_gradio_gpu_call):
                 enable_hr,
                 scale_latent,
                 denoising_strength,
-                first_pass_width,
-                first_pass_height,
+                firstphase_width,
+                firstphase_height,
+                crop_scale,
             ] + custom_inputs,
             outputs=[
                 txt2img_gallery,
@@ -672,8 +679,8 @@ def create_ui(wrap_gradio_gpu_call):
             (denoising_strength, "Denoising strength"),
             (enable_hr, lambda d: "Denoising strength" in d),
             (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
-            (first_pass_width, "First pass width"),
-            (first_pass_height, "First pass height"),
+            (firstphase_width, "First pass width"),
+            (firstphase_height, "First pass height"),
         ]
         modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
         token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
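The paste-field entries renamed in the last hunk pair a UI component with either an infotext label or a callable: a string is looked up in the parsed generation parameters, while a callable receives the whole parameter dict (used here to decide whether the highres options should be shown). A self-contained miniature of that lookup follows; apply_paste_fields is a hypothetical stand-in, not the actual connect_paste implementation.

def apply_paste_fields(params, paste_fields):
    # params: dict parsed from a generation-info text, e.g.
    # {"First pass width": "512", "Denoising strength": "0.7"}
    results = {}
    for name, key in paste_fields:
        # A callable gets the whole dict; a string is a direct lookup.
        results[name] = key(params) if callable(key) else params.get(key)
    return results

params = {"First pass width": "512", "First pass height": "512", "Denoising strength": "0.7"}
fields = [
    ("firstphase_width", "First pass width"),
    ("firstphase_height", "First pass height"),
    ("enable_hr", lambda d: "Denoising strength" in d),
]
print(apply_paste_fields(params, fields))
# {'firstphase_width': '512', 'firstphase_height': '512', 'enable_hr': True}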