From aa744cadc8e357e696a608c8d0c77a7bfc1c9f39 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Sat, 5 Aug 2023 12:35:40 +0800
Subject: add infotext

---
 modules/generation_parameters_copypaste.py | 8 ++++++++
 modules/processing.py                      | 3 +++
 2 files changed, 11 insertions(+)

diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index a3448be9..0713dbf0 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -304,6 +304,12 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
     if "Schedule rho" not in res:
         res["Schedule rho"] = 0
 
+    if "VAE Encoder" not in res:
+        res["VAE Encoder"] = "Full"
+
+    if "VAE Decoder" not in res:
+        res["VAE Decoder"] = "Full"
+
     return res
 
 
@@ -329,6 +335,8 @@ infotext_to_setting_name_mapping = [
     ('RNG', 'randn_source'),
     ('NGMS', 's_min_uncond'),
     ('Pad conds', 'pad_cond_uncond'),
+    ('VAE Encoder', 'sd_vae_encode_method'),
+    ('VAE Decoder', 'sd_vae_decode_method'),
 ]
 
 
diff --git a/modules/processing.py b/modules/processing.py
index aa6d4d2a..a9ee7507 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -788,6 +788,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
                 samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
 
+            p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
             x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
             x_samples_ddim = torch.stack(x_samples_ddim).float()
             x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
@@ -1100,6 +1101,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             decoded_samples = torch.from_numpy(np.array(batch_images))
             decoded_samples = decoded_samples.to(shared.device)
 
+            self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
             samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method))
 
             image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)
@@ -1338,6 +1340,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
             raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
 
         image = torch.from_numpy(batch_images)
+        self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
         self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
 
         devices.torch_gc()
-- 
cgit v1.2.1