author     AUTOMATIC <16777216c@gmail.com>  2022-09-16 13:38:02 +0300
committer  AUTOMATIC <16777216c@gmail.com>  2022-09-16 13:38:02 +0300
commit     e49b1c5d73ede818adb624590934f051b94493ac (patch)
tree       d4c318e9fd8f1dc1c5d26d01d4ad546ff136cc9a
parent     be0f82df12b07d559e18eeabb5c5eef951e6a911 (diff)
an option to do exactly the amount of specified steps in img2img
-rw-r--r--  modules/sd_samplers.py  26
-rw-r--r--  modules/shared.py        1
2 files changed, 20 insertions, 7 deletions
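
What the patch does, in numbers: below is a minimal standalone sketch of the arithmetic behind the new setup_img2img_steps helper, re-implemented here for illustration only. The real helper reads p.steps, p.denoising_strength and opts.img2img_fix_steps from the webui; the parameter names in the sketch are made up for the example.

def setup_img2img_steps_sketch(requested_steps, denoising_strength, fix_steps):
    # Clamp strength below 1.0 so the division and the encode index stay valid.
    strength = min(denoising_strength, 0.999)
    if fix_steps:
        # New behaviour: stretch the schedule so the denoised tail of it
        # is exactly as long as the slider value.
        schedule_steps = int(requested_steps / strength)
        t_enc = requested_steps - 1
    else:
        # Old behaviour: the schedule length is the slider value and only a
        # denoising_strength fraction of it is actually sampled.
        schedule_steps = requested_steps
        t_enc = int(strength * schedule_steps)
    return schedule_steps, t_enc

print(setup_img2img_steps_sketch(20, 0.5, fix_steps=False))  # (20, 10): only ~10 denoising steps run
print(setup_img2img_steps_sketch(20, 0.5, fix_steps=True))   # (40, 19): ~20 denoising steps, as requested
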
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 02ffce0e..1b3dc302 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -38,6 +38,17 @@ samplers = [
samplers_for_img2img = [x for x in samplers if x.name != 'PLMS']
+def setup_img2img_steps(p):
+ if opts.img2img_fix_steps:
+ steps = int(p.steps / min(p.denoising_strength, 0.999))
+ t_enc = p.steps - 1
+ else:
+ steps = p.steps
+ t_enc = int(min(p.denoising_strength, 0.999) * steps)
+
+ return steps, t_enc
+
+
def sample_to_image(samples):
x_sample = shared.sd_model.decode_first_stage(samples[0:1].type(shared.sd_model.dtype))[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
@@ -105,13 +116,13 @@ class VanillaStableDiffusionSampler:
return res
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning):
- t_enc = int(min(p.denoising_strength, 0.999) * p.steps)
+ steps, t_enc = setup_img2img_steps(p)
# existing code fails with certain step counts, like 9
try:
- self.sampler.make_schedule(ddim_num_steps=p.steps, verbose=False)
+ self.sampler.make_schedule(ddim_num_steps=steps, verbose=False)
except Exception:
- self.sampler.make_schedule(ddim_num_steps=p.steps+1, verbose=False)
+ self.sampler.make_schedule(ddim_num_steps=steps+1, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
@@ -230,14 +241,15 @@ class KDiffusionSampler:
return res
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning):
- t_enc = int(min(p.denoising_strength, 0.999) * p.steps)
- sigmas = self.model_wrap.get_sigmas(p.steps)
+ steps, t_enc = setup_img2img_steps(p)
+
+ sigmas = self.model_wrap.get_sigmas(steps)
- noise = noise * sigmas[p.steps - t_enc - 1]
+ noise = noise * sigmas[steps - t_enc - 1]
xi = x + noise
- sigma_sched = sigmas[p.steps - t_enc - 1:]
+ sigma_sched = sigmas[steps - t_enc - 1:]
self.model_wrap_cfg.mask = p.mask
self.model_wrap_cfg.nmask = p.nmask
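
To illustrate how the KDiffusionSampler hunk above consumes (steps, t_enc): the initial noise is scaled by the sigma at the encode point and only the tail of the schedule is sampled. The sketch below substitutes a made-up linear schedule for model_wrap.get_sigmas, assuming (as in k-diffusion) that get_sigmas(n) returns n + 1 values ending in 0.

steps, t_enc = 40, 19                                   # values from setup_img2img_steps with the fix enabled
sigmas = [float(steps - i) for i in range(steps + 1)]   # stand-in schedule: 40.0, 39.0, ..., 0.0

start = steps - t_enc - 1        # index 20: where stochastic encoding drops the image into the schedule
noise_scale = sigmas[start]      # the patch scales the added noise by this sigma
sigma_sched = sigmas[start:]     # 21 values -> 20 denoising transitions actually run

print(noise_scale, len(sigma_sched) - 1)  # 20.0 20
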
diff --git a/modules/shared.py b/modules/shared.py
index fa6a0e99..da56b6ae 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -125,6 +125,7 @@ class Options:
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
+ "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"font": OptionInfo("", "Font for image grids that have text"),
"enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"),