Diffstat (limited to 'modules/processing.py')
 modules/processing.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index f44c3f26..d27d86e9 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -20,6 +20,7 @@ import modules.shared as shared
import modules.face_restoration
import modules.images as images
import modules.styles
+import logging
# some of those options should not be changed at all because they would break the model, so I removed them from options.
@@ -28,11 +29,13 @@ opt_f = 8
def setup_color_correction(image):
+ logging.info("Calibrating color correction.")
correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
return correction_target
def apply_color_correction(correction, image):
+ logging.info("Applying color correction.")
image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
cv2.cvtColor(
np.asarray(image),
@@ -357,7 +360,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if p.restore_faces:
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
- images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
+ images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
devices.torch_gc()
@@ -366,6 +369,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
image = Image.fromarray(x_sample)
if p.color_corrections is not None and i < len(p.color_corrections):
+ if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
+ images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
image = apply_color_correction(p.color_corrections[i], image)
if p.overlay_images is not None and i < len(p.overlay_images):
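
For context, here is a minimal standalone sketch of the color-correction round trip these hunks touch. It assumes Pillow, NumPy, OpenCV (cv2) and scikit-image >= 0.19 (for the channel_axis argument). The tail of apply_color_correction is not visible in the hunk above, so the histogram-matching and LAB-to-RGB step below is a reconstruction based on the visible call, not a copy of the committed code, and the file names in the usage block are placeholders.

import logging

import cv2
import numpy as np
from PIL import Image
from skimage import exposure


def setup_color_correction(image):
    # Store the LAB-space version of the original image as the correction target.
    logging.info("Calibrating color correction.")
    return cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)


def apply_color_correction(correction, image):
    # Match the processed image's LAB histogram to the stored target, then
    # convert back to RGB for saving (reconstructed; not shown in the hunk).
    logging.info("Applying color correction.")
    matched = exposure.match_histograms(
        cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2LAB),
        correction,
        channel_axis=2,
    ).astype("uint8")
    return Image.fromarray(cv2.cvtColor(matched, cv2.COLOR_LAB2RGB))


if __name__ == "__main__":
    # Hypothetical usage: keep the palette of an init image after img2img.
    logging.basicConfig(level=logging.INFO)
    target = setup_color_correction(Image.open("init.png").convert("RGB"))
    corrected = apply_color_correction(target, Image.open("result.png").convert("RGB"))
    corrected.save("result-color-corrected.png")

Matching histograms in LAB rather than RGB keeps luminance and chroma adjustments largely independent, which is presumably why the code converts with COLOR_RGB2LAB before calling match_histograms and only converts back to RGB at the end.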