From 42082e8a3239c1c32cd9e2a03a20b610af857b51 Mon Sep 17 00:00:00 2001
From: devdn
Date: Tue, 28 Mar 2023 18:18:28 -0400
Subject: performance increase

---
 modules/sd_samplers_kdiffusion.py | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index e9f08518..6a54ce32 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -76,7 +76,7 @@ class CFGDenoiser(torch.nn.Module):
 
         return denoised
 
-    def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
+    def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond):
         if state.interrupted or state.skipped:
             raise sd_samplers_common.InterruptedException
 
@@ -116,6 +116,12 @@ class CFGDenoiser(torch.nn.Module):
             tensor = denoiser_params.text_cond
             uncond = denoiser_params.text_uncond
 
+        sigma_thresh = s_min_uncond
+        if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model):
+            uncond = torch.zeros([0,0,uncond.shape[2]])
+            x_in=x_in[:x_in.shape[0]//2]
+            sigma_in=sigma_in[:sigma_in.shape[0]//2]
+
         if tensor.shape[1] == uncond.shape[1]:
             if not is_edit_model:
                 cond_in = torch.cat([tensor, uncond])
@@ -144,7 +150,8 @@ class CFGDenoiser(torch.nn.Module):
 
                 x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
 
-            x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:]))
+            if uncond.shape[0]:
+                x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:]))
 
         denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps)
         cfg_denoised_callback(denoised_params)
@@ -157,7 +164,10 @@ class CFGDenoiser(torch.nn.Module):
             sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
 
         if not is_edit_model:
-            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+            if uncond.shape[0]:
+                denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+            else:
+                denoised = x_out
         else:
             denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
 
@@ -165,7 +175,6 @@ class CFGDenoiser(torch.nn.Module):
             denoised = self.init_latent * self.mask + self.nmask * denoised
 
         self.step += 1
-
         return denoised
 
 
@@ -244,6 +253,7 @@ class KDiffusionSampler:
         self.model_wrap_cfg.step = 0
         self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
         self.eta = p.eta if p.eta is not None else opts.eta_ancestral
+        self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
 
         k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
 
@@ -326,6 +336,7 @@ class KDiffusionSampler:
             'image_cond': image_conditioning,
             'uncond': unconditional_conditioning,
             'cond_scale': p.cfg_scale,
+            's_min_uncond': self.s_min_uncond
         }
 
         samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
@@ -359,7 +370,8 @@ class KDiffusionSampler:
             'cond': conditioning,
             'image_cond': image_conditioning,
             'uncond': unconditional_conditioning,
-            'cond_scale': p.cfg_scale
+            'cond_scale': p.cfg_scale,
+            's_min_uncond': self.s_min_uncond
         }, disable=False, callback=self.callback_state, **extra_params_kwargs))
 
         return samples
--
cgit v1.2.1
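The patch above skips the unconditional (negative-prompt) UNet pass whenever the current noise level falls below the new s_min_uncond threshold, saving one of the two model evaluations per step late in sampling. A minimal standalone sketch of that gate, with a stand-in denoiser (all names except s_min_uncond are hypothetical, not webui code):

    import torch

    def cfg_step(model, x, sigma, cond, uncond, cond_scale, s_min_uncond=0.0):
        # Once the RMS of sigma drops below s_min_uncond, the negative prompt
        # barely moves the result, so one cond-only call replaces the usual pair.
        if s_min_uncond > 0 and torch.dot(sigma, sigma) < sigma.shape[0] * s_min_uncond ** 2:
            return model(x, sigma, cond)  # one UNet call instead of two
        d_cond = model(x, sigma, cond)
        d_uncond = model(x, sigma, uncond)
        return d_uncond + (d_cond - d_uncond) * cond_scale

The dot-product test is the same one the patch uses: dot(sigma, sigma) < n * threshold**2 is equivalent to requiring the root-mean-square of the batch's sigmas to be below the threshold.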
From 44e8e9c36807d4a71c2fc84129ebcf5ba4f77f21 Mon Sep 17 00:00:00 2001
From: devdn
Date: Thu, 30 Mar 2023 00:54:28 -0400
Subject: fix live preview & alternate uncond guidance for better quality

---
 modules/sd_samplers_kdiffusion.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 6a54ce32..17d24df4 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -116,11 +116,13 @@ class CFGDenoiser(torch.nn.Module):
             tensor = denoiser_params.text_cond
             uncond = denoiser_params.text_uncond
 
-        sigma_thresh = s_min_uncond
-        if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model):
-            uncond = torch.zeros([0,0,uncond.shape[2]])
-            x_in=x_in[:x_in.shape[0]//2]
-            sigma_in=sigma_in[:sigma_in.shape[0]//2]
+        if self.step % 2 and s_min_uncond > 0 and not is_edit_model:
+            # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
+            sigma_threshold = s_min_uncond
+            if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_threshold*sigma_threshold) ):
+                uncond = torch.zeros([0,0,uncond.shape[2]])
+                x_in=x_in[:x_in.shape[0]//2]
+                sigma_in=sigma_in[:sigma_in.shape[0]//2]
 
         if tensor.shape[1] == uncond.shape[1]:
             if not is_edit_model:
@@ -159,7 +161,7 @@ class CFGDenoiser(torch.nn.Module):
         devices.test_for_nans(x_out, "unet")
 
         if opts.live_preview_content == "Prompt":
-            sd_samplers_common.store_latent(x_out[0:uncond.shape[0]])
+            sd_samplers_common.store_latent(x_out[0:x_out.shape[0]-uncond.shape[0]])
         elif opts.live_preview_content == "Negative prompt":
             sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
--
cgit v1.2.1
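This revision only permits the skip on every other step (self.step % 2), so each skipped uncond pass is followed by a full CFG step; that tolerates a noticeably higher threshold before quality degrades. It also fixes the live preview to slice off the uncond tail rather than assuming the cond and uncond row counts match. A toy sketch of the alternating gate (the class and method names are illustrative, not webui API):

    class AlternatingUncondSkip:
        """Decides, step by step, whether the uncond pass may be dropped."""
        def __init__(self, s_min_uncond):
            self.s_min_uncond = s_min_uncond
            self.step = 0

        def skip(self, sigma_value):
            # odd steps are eligible; even steps always run full cond+uncond CFG
            eligible = self.step % 2 == 1 and self.s_min_uncond > 0
            self.step += 1
            return eligible and sigma_value < self.s_min_uncond

    gate = AlternatingUncondSkip(s_min_uncond=2.0)
    decisions = [gate.skip(1.5) for _ in range(4)]
    assert decisions == [False, True, False, True]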
From d40e44ade479f7bba30d5317381cbc58c861775b Mon Sep 17 00:00:00 2001
From: Deciare <1689220+deciare@users.noreply.github.com>
Date: Tue, 18 Apr 2023 23:18:58 -0400
Subject: Option to use CPU for random number generation.

Makes a given manual seed generate the same images across different
platforms, independently of the GPU architecture in use.

Fixes #9613.
---
 modules/sd_samplers_kdiffusion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index e9f08518..13f4567a 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -190,7 +190,7 @@ class TorchHijack:
         if noise.shape == x.shape:
             return noise
 
-        if x.device.type == 'mps':
+        if opts.use_cpu_randn or x.device.type == 'mps':
            return torch.randn_like(x, device=devices.cpu).to(x.device)
         else:
            return torch.randn_like(x)
--
cgit v1.2.1

From 5fe0dd79beaa5ef737ff85254ee9870f60ae9464 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 29 Apr 2023 11:29:37 +0300
Subject: rename CPU RNG to RNG source in settings, add infotext and parameters
 copypaste support to RNG source

---
 modules/sd_samplers_kdiffusion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 13f4567a..a547d1b5 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -190,7 +190,7 @@ class TorchHijack:
         if noise.shape == x.shape:
             return noise
 
-        if opts.use_cpu_randn or x.device.type == 'mps':
+        if opts.randn_source == "CPU" or x.device.type == 'mps':
            return torch.randn_like(x, device=devices.cpu).to(x.device)
         else:
            return torch.randn_like(x)
--
cgit v1.2.1
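GPU random number generation is architecture-dependent, so the same manual seed produces different sampling noise on different cards; drawing the noise on the CPU and copying it to the device, which this pair of patches makes optional (first as opts.use_cpu_randn, then renamed to opts.randn_source), makes seeds portable at the cost of a small host-to-device copy. A self-contained demonstration of the technique itself (the helper name is hypothetical):

    import torch

    def portable_randn_like(x: torch.Tensor, cpu_rng: bool = True) -> torch.Tensor:
        # CPU generation yields the same sequence for a given seed on any platform;
        # sampling directly on the GPU ties the sequence to the card's architecture
        if cpu_rng or x.device.type == 'mps':
            return torch.randn_like(x, device='cpu').to(x.device)
        return torch.randn_like(x)

    torch.manual_seed(1234)
    a = portable_randn_like(torch.empty(4))
    torch.manual_seed(1234)
    b = portable_randn_like(torch.empty(4))
    assert torch.equal(a, b)  # identical seed, identical noise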
From 1d11e896984c883f6a0debb3abaef945595cbc70 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 29 Apr 2023 15:57:09 +0300
Subject: rework Negative Guidance minimum sigma to work with AND, add infotext
 and copypaste parameters support

---
 modules/sd_samplers_kdiffusion.py | 43 +++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 18 deletions(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index d42d5fcf..f8aaac59 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -115,20 +115,21 @@ class CFGDenoiser(torch.nn.Module):
         sigma_in = denoiser_params.sigma
         tensor = denoiser_params.text_cond
         uncond = denoiser_params.text_uncond
+        skip_uncond = False
 
-        if self.step % 2 and s_min_uncond > 0 and not is_edit_model:
-            # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
-            sigma_threshold = s_min_uncond
-            if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_threshold*sigma_threshold) ):
-                uncond = torch.zeros([0,0,uncond.shape[2]])
-                x_in=x_in[:x_in.shape[0]//2]
-                sigma_in=sigma_in[:sigma_in.shape[0]//2]
+        # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
+        if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
+            skip_uncond = True
+            x_in = x_in[:-batch_size]
+            sigma_in = sigma_in[:-batch_size]
 
-        if tensor.shape[1] == uncond.shape[1]:
-            if not is_edit_model:
-                cond_in = torch.cat([tensor, uncond])
-            else:
+        if tensor.shape[1] == uncond.shape[1] or skip_uncond:
+            if is_edit_model:
                 cond_in = torch.cat([tensor, uncond, uncond])
+            elif skip_uncond:
+                cond_in = tensor
+            else:
+                cond_in = torch.cat([tensor, uncond])
 
             if shared.batch_cond_uncond:
                 x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict([cond_in], image_cond_in))
@@ -152,9 +153,15 @@ class CFGDenoiser(torch.nn.Module):
 
                 x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
 
-            if uncond.shape[0]:
+            if not skip_uncond:
                 x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:]))
 
+        if skip_uncond:
+            #x_out = torch.cat([x_out, x_out[0:batch_size]]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
+            denoised_image_indexes = [x[0][0] for x in conds_list]
+            fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
+            x_out = torch.cat([x_out, fake_uncond])
+
         denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps)
         cfg_denoised_callback(denoised_params)
 
@@ -165,13 +172,12 @@ class CFGDenoiser(torch.nn.Module):
         elif opts.live_preview_content == "Negative prompt":
             sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
 
-        if not is_edit_model:
-            if uncond.shape[0]:
-                denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
-            else:
-                denoised = x_out
-        else:
+        if is_edit_model:
             denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
+        elif skip_uncond:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
+        else:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
 
         if self.mask is not None:
             denoised = self.init_latent * self.mask + self.nmask * denoised
@@ -221,6 +227,7 @@ class KDiffusionSampler:
         self.eta = None
         self.config = None
         self.last_latent = None
+        self.s_min_uncond = None
 
         self.conditioning_key = sd_model.model.conditioning_key
--
cgit v1.2.1

From 737b73a820584b8035fcc37fe35993bec867f326 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 29 Apr 2023 16:05:20 +0300
Subject: some extra lines I forgot to add for previous commit

---
 modules/sd_samplers_kdiffusion.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index f8aaac59..136aa8e5 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -156,11 +156,10 @@ class CFGDenoiser(torch.nn.Module):
             if not skip_uncond:
                 x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:]))
 
+        denoised_image_indexes = [x[0][0] for x in conds_list]
         if skip_uncond:
-            #x_out = torch.cat([x_out, x_out[0:batch_size]]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
-            denoised_image_indexes = [x[0][0] for x in conds_list]
             fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
-            x_out = torch.cat([x_out, fake_uncond])
+            x_out = torch.cat([x_out, fake_uncond])  # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
 
         denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps)
         cfg_denoised_callback(denoised_params)
--
cgit v1.2.1
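With AND-composed prompts, conds_list maps each output image to its (cond slot, weight) pairs, and combine_denoised expects an uncond block at the tail of x_out. When the uncond pass is skipped, these two patches append each image's first cond-denoised slot as a stand-in tail, then call combine_denoised with a CFG scale of 1.0 so the fake uncond contributes nothing. A reduced sketch with synthetic shapes (the two variable names follow the patch; everything else is illustrative):

    import torch

    # image 0 is "a AND b" (slots 0 and 1); image 1 is a single prompt (slot 2)
    conds_list = [[(0, 1.0), (1, 1.0)], [(2, 1.0)]]
    x_out = torch.randn(3, 4, 8, 8)  # cond-denoised slots only; uncond was skipped

    denoised_image_indexes = [conds[0][0] for conds in conds_list]
    fake_uncond = torch.cat([x_out[i:i + 1] for i in denoised_image_indexes])
    x_out = torch.cat([x_out, fake_uncond])  # tail now stands in for the uncond block

    assert x_out.shape[0] == 3 + len(conds_list)  # one fake uncond row per image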
From 8863b31d83b527d041ca45e23b9af99e2346081a Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 29 Apr 2023 16:06:20 +0300
Subject: use correct images for previews when using AND (see #9491)

---
 modules/sd_samplers_kdiffusion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 136aa8e5..eb98e599 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -167,7 +167,7 @@ class CFGDenoiser(torch.nn.Module):
         devices.test_for_nans(x_out, "unet")
 
         if opts.live_preview_content == "Prompt":
-            sd_samplers_common.store_latent(x_out[0:x_out.shape[0]-uncond.shape[0]])
+            sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes]))
         elif opts.live_preview_content == "Negative prompt":
             sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
--
cgit v1.2.1
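Under AND, x_out holds one row per cond slot plus the uncond rows, so the earlier prefix slice previewed one image per slot; gathering the first slot of each image instead previews exactly one latent per output image, whether or not the uncond pass ran. A sketch of that selection (the function name is illustrative):

    import torch

    def preview_rows(x_out: torch.Tensor, conds_list) -> torch.Tensor:
        # one preview row per output image: its first AND slot
        denoised_image_indexes = [conds[0][0] for conds in conds_list]
        return torch.cat([x_out[i:i + 1] for i in denoised_image_indexes])

    conds_list = [[(0, 1.0), (1, 1.0)], [(2, 1.0)]]  # 2 images across 3 cond slots
    x_out = torch.randn(4, 4, 8, 8)                  # 3 cond rows + 1 uncond row
    assert preview_rows(x_out, conds_list).shape[0] == 2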
From 3ba6c3c83c0983a025c7bddc08bb7f49481b3cbb Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Tue, 9 May 2023 22:17:58 +0300
Subject: Fix up string formatting/concatenation to f-strings where feasible

---
 modules/sd_samplers_kdiffusion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index eb98e599..0fc9f456 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -198,7 +198,7 @@ class TorchHijack:
         if hasattr(torch, item):
             return getattr(torch, item)
 
-        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
 
     def randn_like(self, x):
         if self.sampler_noises:
--
cgit v1.2.1

From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 08:43:42 +0300
Subject: imports cleanup for ruff

---
 modules/sd_samplers_kdiffusion.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 0fc9f456..3b8e9622 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -1,7 +1,6 @@
 from collections import deque
 import torch
 import inspect
-import einops
 import k_diffusion.sampling
 from modules import prompt_parser, devices, sd_samplers_common
 
--
cgit v1.2.1

From 028d3f6425d85f122027c127fba8bcbf4f66ee75 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 11:05:02 +0300
Subject: ruff auto fixes

---
 modules/sd_samplers_kdiffusion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 3b8e9622..2f733cf5 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -86,7 +86,7 @@ class CFGDenoiser(torch.nn.Module):
         conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
         uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
 
-        assert not is_edit_model or all([len(conds) == 1 for conds in conds_list]), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
+        assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
 
         batch_size = len(conds_list)
         repeats = [len(conds_list[i]) for i in range(batch_size)]
--
cgit v1.2.1

From 49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Thu, 11 May 2023 18:28:15 +0300
Subject: Autofix Ruff W (not W605) (mostly whitespace)

---
 modules/sd_samplers_kdiffusion.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 2f733cf5..e9e41818 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -93,10 +93,10 @@ class CFGDenoiser(torch.nn.Module):
 
         if shared.sd_model.model.conditioning_key == "crossattn-adm":
             image_uncond = torch.zeros_like(image_cond)
-            make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm} 
+            make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm}
         else:
             image_uncond = image_cond
-            make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]} 
+            make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]}
 
         if not is_edit_model:
             x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
@@ -316,7 +316,7 @@ class KDiffusionSampler:
         sigma_sched = sigmas[steps - t_enc - 1:]
 
         xi = x + noise * sigma_sched[0]
-        
+
         extra_params_kwargs = self.initialize(p)
         parameters = inspect.signature(self.func).parameters
 
@@ -339,9 +339,9 @@ class KDiffusionSampler:
         self.model_wrap_cfg.init_latent = x
         self.last_latent = x
         extra_args={
-            'cond': conditioning, 
-            'image_cond': image_conditioning, 
-            'uncond': unconditional_conditioning, 
+            'cond': conditioning,
+            'image_cond': image_conditioning,
+            'uncond': unconditional_conditioning,
             'cond_scale': p.cfg_scale,
             's_min_uncond': self.s_min_uncond
         }
@@ -374,9 +374,9 @@ class KDiffusionSampler:
 
         self.last_latent = x
         samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
-            'cond': conditioning, 
-            'image_cond': image_conditioning, 
-            'uncond': unconditional_conditioning, 
+            'cond': conditioning,
+            'image_cond': image_conditioning,
+            'uncond': unconditional_conditioning,
             'cond_scale': p.cfg_scale,
             's_min_uncond': self.s_min_uncond
         }, disable=False, callback=self.callback_state, **extra_params_kwargs))
--
cgit v1.2.1
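Among the mechanical cleanups above, the all([...]) to all(...) change is more than cosmetic: a generator expression lets all() short-circuit at the first failing element instead of building the whole list first. A minimal illustration:

    conds_list = [[(0, 1.0)], [(1, 1.0), (2, 1.0)], [(3, 1.0)]]
    ok_list = all([len(conds) == 1 for conds in conds_list])  # builds all three booleans
    ok_gen = all(len(conds) == 1 for conds in conds_list)     # stops at the second image
    assert not ok_list and not ok_gen  # same result, less intermediate work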
From 3078001439d25b66ef5627c9e3d431aa23bbed73 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Sun, 14 May 2023 01:49:41 +0000
Subject: Add/modify CFG callbacks

Required by self-attn guidance extension
https://github.com/ashen-sensored/sd_webui_SAG
---
 modules/sd_samplers_kdiffusion.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index e9e41818..55f0d3a3 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -8,6 +8,7 @@ from modules.shared import opts, state
 import modules.shared as shared
 from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
 from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
+from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
 
 samplers_k_diffusion = [
     ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}),
@@ -160,7 +161,7 @@ class CFGDenoiser(torch.nn.Module):
             fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
             x_out = torch.cat([x_out, fake_uncond])  # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
 
-        denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps)
+        denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
         cfg_denoised_callback(denoised_params)
 
         devices.test_for_nans(x_out, "unet")
@@ -180,6 +181,11 @@ class CFGDenoiser(torch.nn.Module):
         if self.mask is not None:
             denoised = self.init_latent * self.mask + self.nmask * denoised
 
+        after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
+        cfg_after_cfg_callback(after_cfg_callback_params)
+        if after_cfg_callback_params.output_altered:
+            denoised = after_cfg_callback_params.x
+
         self.step += 1
 
         return denoised
--
cgit v1.2.1

From 005849331e82cded96f6f3e5ff828037c672c38d Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 14 May 2023 08:15:22 +0300
Subject: remove output_altered flag from AfterCFGCallbackParams

---
 modules/sd_samplers_kdiffusion.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 55f0d3a3..61f23ad7 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -183,8 +183,7 @@ class CFGDenoiser(torch.nn.Module):
         after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
         cfg_after_cfg_callback(after_cfg_callback_params)
-        if after_cfg_callback_params.output_altered:
-            denoised = after_cfg_callback_params.x
+        denoised = after_cfg_callback_params.x
 
         self.step += 1
 
         return denoised
--
cgit v1.2.1
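These two commits give extensions a hook that runs after CFG combination on every step: the callback receives the combined latent, and once the output_altered flag was removed, whatever the callback leaves in params.x is taken back unconditionally, so a callback that changes nothing must simply leave x alone. A sketch of the shape of such a callback, using a stand-in dataclass rather than webui's real AfterCFGCallbackParams:

    from dataclasses import dataclass
    import torch

    @dataclass
    class AfterCFGParams:      # stand-in for AfterCFGCallbackParams
        x: torch.Tensor        # denoised latent after CFG combination
        sampling_step: int
        total_sampling_steps: int

    def my_after_cfg(params: AfterCFGParams) -> None:
        # real extensions (e.g. the self-attention guidance one linked above)
        # rewrite params.x here; the sampler continues from whatever is left in it
        params.x = params.x * 1.05  # toy modification

    params = AfterCFGParams(torch.randn(1, 4, 8, 8), 0, 20)
    my_after_cfg(params)
    denoised = params.x  # mirrors `denoised = after_cfg_callback_params.x`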
From cdac5ace1456ba779d5a0171ff8757f31955bfee Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 16 May 2023 11:54:02 +0300
Subject: suppress ENSD infotext for samplers that don't use it

---
 modules/sd_samplers_kdiffusion.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 61f23ad7..5455561a 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -11,21 +11,21 @@ from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
 from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
 
 samplers_k_diffusion = [
-    ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}),
+    ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}),
     ('Euler', 'sample_euler', ['k_euler'], {}),
     ('LMS', 'sample_lms', ['k_lms'], {}),
     ('Heun', 'sample_heun', ['k_heun'], {}),
     ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
-    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}),
-    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
+    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}),
+    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True}),
     ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
     ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}),
-    ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
-    ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
+    ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}),
+    ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}),
     ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
-    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
-    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
-    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
+    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}),
+    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}),
+    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True}),
     ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
     ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}),
 ]
--
cgit v1.2.1
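Eta noise seed delta (ENSD) shifts the seed used for ancestral/extra sampling noise, so it only changes output for the samplers flagged uses_ensd here; tagging them lets the infotext writer omit the setting everywhere else. A simplified sketch of such a lookup (the table and function are illustrative, not webui's actual infotext code):

    sampler_options = {
        'Euler a': {"uses_ensd": True},
        'Euler': {},
        'DPM++ 2M Karras': {'scheduler': 'karras'},
    }

    def infotext(sampler_name: str, ensd: int) -> dict:
        pairs = {"Sampler": sampler_name}
        if ensd and sampler_options[sampler_name].get("uses_ensd"):
            pairs["ENSD"] = ensd  # only reported where it can matter
        return pairs

    assert "ENSD" in infotext('Euler a', 31337)
    assert "ENSD" not in infotext('Euler', 31337)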
From a61cbef02c7652f96d333b28e01f5230e225224e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 16 May 2023 12:36:15 +0300
Subject: add second_order field to sampler config

---
 modules/sd_samplers_kdiffusion.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'modules/sd_samplers_kdiffusion.py')

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 5455561a..552c6c64 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -14,20 +14,20 @@ samplers_k_diffusion = [
     ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}),
     ('Euler', 'sample_euler', ['k_euler'], {}),
     ('LMS', 'sample_lms', ['k_lms'], {}),
-    ('Heun', 'sample_heun', ['k_heun'], {}),
+    ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}),
     ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
     ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}),
-    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True}),
+    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}),
     ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
-    ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}),
+    ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True}),
     ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}),
     ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}),
     ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
-    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}),
-    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}),
-    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True}),
+    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
+    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
+    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}),
     ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
-    ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}),
+    ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True}),
 ]
 
--
cgit v1.2.1
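Second-order methods (Heun, DPM2, DPM++ 2S a, DPM++ SDE and their Karras variants) evaluate the model roughly twice per sampler step, and a config flag like this lets step-accounting code compensate. A hedged sketch of one such use (the helper is hypothetical; this commit only adds the flag itself):

    sampler_options = {
        'Euler': {},
        'Heun': {"second_order": True},
        'DPM++ SDE Karras': {'scheduler': 'karras', "second_order": True},
    }

    def unet_evaluations(sampler_name: str, steps: int) -> int:
        # two model calls per step for second-order samplers, one otherwise
        per_step = 2 if sampler_options[sampler_name].get("second_order") else 1
        return steps * per_step

    assert unet_evaluations('Heun', 20) == 40
    assert unet_evaluations('Euler', 20) == 20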