author     AUTOMATIC <16777216c@gmail.com>    2023-01-25 23:25:25 +0300
committer  AUTOMATIC <16777216c@gmail.com>    2023-01-25 23:25:25 +0300
commit     d1d6ce29831d1b067801c3206f314258de88f683 (patch)
tree       e066615c8222291538a995da224b804b8f1e0734
parent     3cead6983e2618838696bbf9f0b9dbbbbdd07e30 (diff)
add edit_image_conditioning from my earlier edits in case there's an attempt to integrate pix2pix properly
this allows using the pix2pix model in img2img, though it won't work well this way
-rw-r--r--  modules/processing.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index 9e5a2f38..cb41288a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -185,7 +185,12 @@ class StableDiffusionProcessing:
         conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
         return conditioning
 
-    def inpainting_image_conditioning(self, source_image, latent_image, image_mask = None):
+    def edit_image_conditioning(self, source_image):
+        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
+
+        return conditioning_image
+
+    def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
         self.is_using_inpainting_conditioning = True
 
         # Handle the different mask inputs
@@ -228,6 +233,9 @@ class StableDiffusionProcessing:
         if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
             return self.depth2img_image_conditioning(source_image.float() if devices.unet_needs_upcast else source_image)
 
+        if self.sd_model.cond_stage_key == "edit":
+            return self.edit_image_conditioning(source_image)
+
         if self.sampler.conditioning_key in {'hybrid', 'concat'}:
             return self.inpainting_image_conditioning(source_image.float() if devices.unet_needs_upcast else source_image, latent_image, image_mask=image_mask)
 
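Usage sketch (not part of the commit): a hedged illustration of how the new branch is reached from img2img. Instruct-pix2pix checkpoints are configured with cond_stage_key == "edit", so img2img_image_conditioning() now routes them to the new edit_image_conditioning(), which returns the plain first-stage (VAE) encoding of the source image, with no mask. The names `p` and `image` below are illustrative stand-ins for a StableDiffusionProcessingImg2Img instance and its preprocessed source tensor.

import torch

with torch.no_grad():
    # For a pix2pix ("edit") checkpoint this now yields the VAE latent of the
    # source image; other models still fall through to the depth2img,
    # inpainting, or dummy conditioning branches as before.
    p.image_conditioning = p.img2img_image_conditioning(image, p.init_latent)

As the commit message notes, routing a pix2pix checkpoint through img2img this way is only a stopgap: img2img still builds its starting latent and sampling from its own pipeline, so results will be rough until pix2pix is integrated properly.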