Diffstat (limited to 'modules')
-rw-r--r--  modules/processing.py   | 24
-rw-r--r--  modules/sd_models_xl.py |  5
2 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 159548db..c05e608a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -108,17 +108,18 @@ def txt2img_image_conditioning(sd_model, x, width, height):
     else:
         sd = sd_model.model.state_dict()
         diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input.shape[1] == 9:
-            # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
-            image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
-            image_conditioning = images_tensor_to_samples(image_conditioning,
-                                                          approximation_indexes.get(opts.sd_vae_encode_method))
+        if diffusion_model_input is not None:
+            if diffusion_model_input.shape[1] == 9:
+                # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+                image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+                image_conditioning = images_tensor_to_samples(image_conditioning,
+                                                              approximation_indexes.get(opts.sd_vae_encode_method))
 
-            # Add the fake full 1s mask to the first dimension.
-            image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
-            image_conditioning = image_conditioning.to(x.dtype)
+                # Add the fake full 1s mask to the first dimension.
+                image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+                image_conditioning = image_conditioning.to(x.dtype)
 
-            return image_conditioning
+                return image_conditioning
 
         # Dummy zero conditioning if we're not using inpainting or unclip models.
         # Still takes up a bit of memory, but no encoder call.
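Note on the hunk above: sd.get('diffusion_model.input_blocks.0.0.weight', None) returns None when the state dict does not contain that key, so dereferencing .shape unconditionally raised an AttributeError for such checkpoints; the new outer check lets them fall through to the dummy conditioning instead. A minimal sketch of the guard as a standalone predicate, with a hypothetical is_inpainting_unet helper used purely for illustration (not part of the patch):

    # Hypothetical helper mirroring the guard added above.
    def is_inpainting_unet(state_dict):
        # Inpainting UNets take 9 input channels: 4 noisy-latent + 4 masked-image latent + 1 mask.
        w = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
        return w is not None and w.shape[1] == 9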
@@ -378,8 +379,9 @@ class StableDiffusionProcessing:
         sd = self.sampler.model_wrap.inner_model.model.state_dict()
         diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input.shape[1] == 9:
-            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+        if diffusion_model_input is not None:
+            if diffusion_model_input.shape[1] == 9:
+                return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
 
         # Dummy zero conditioning if we're not using inpainting or depth model.
         return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
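The same guard is applied in the img2img conditioning path: when the key is missing or the UNet is not a 9-channel inpainting model, the method falls through to the dummy 5-channel 1x1 conditioning. A rough, illustrative check of that fallback shape (the tensor sizes here are assumptions, not taken from the patch):

    import torch

    latent_image = torch.zeros(2, 4, 64, 64)                        # assumed batch of 2 latents
    dummy = latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)  # matches the fallthrough above
    print(dummy.shape)                                              # torch.Size([2, 5, 1, 1])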
diff --git a/modules/sd_models_xl.py b/modules/sd_models_xl.py
index d8a9a73b..162d0fee 100644
--- a/modules/sd_models_xl.py
+++ b/modules/sd_models_xl.py
@@ -36,8 +36,9 @@ def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch:
 def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond):
     sd = self.model.state_dict()
     diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-    if diffusion_model_input.shape[1] == 9:
-        x = torch.cat([x] + cond['c_concat'], dim=1)
+    if diffusion_model_input is not None:
+        if diffusion_model_input.shape[1] == 9:
+            x = torch.cat([x] + cond['c_concat'], dim=1)
 
     return self.model(x, t, cond)
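In the SDXL path, the concatenation of cond['c_concat'] onto the latent is likewise only attempted when the input-block weight exists and reports 9 input channels; otherwise x is passed through unchanged. A small sketch of the shapes involved (sizes are assumptions for illustration only):

    import torch

    x = torch.zeros(1, 4, 128, 128)           # assumed noisy latent
    c_concat = [torch.zeros(1, 5, 128, 128)]  # assumed mask + masked-image latent conditioning
    x_in = torch.cat([x] + c_concat, dim=1)   # -> (1, 9, 128, 128), matching a 9-channel UNet input
    print(x_in.shape)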