Diffstat (limited to 'modules')
-rw-r--r--  modules/import_hook.py                  11
-rw-r--r--  modules/launch_utils.py                 14
-rw-r--r--  modules/models/diffusion/ddpm_edit.py    7
-rw-r--r--  modules/processing.py                   29
-rw-r--r--  modules/shared_options.py                1
5 files changed, 42 insertions, 20 deletions
diff --git a/modules/import_hook.py b/modules/import_hook.py
index 28c67dfa..eba9a372 100644
--- a/modules/import_hook.py
+++ b/modules/import_hook.py
@@ -3,3 +3,14 @@ import sys
 # this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it
 if "--xformers" not in "".join(sys.argv):
     sys.modules["xformers"] = None
+
+# Hack to fix a changed import in torchvision 0.17+, which otherwise breaks
+# basicsr; see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13985
+try:
+    import torchvision.transforms.functional_tensor  # noqa: F401
+except ImportError:
+    try:
+        import torchvision.transforms.functional as functional
+        sys.modules["torchvision.transforms.functional_tensor"] = functional
+    except ImportError:
+        pass  # shrug...
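
The shim above relies on the removed module's contents still being available under torchvision.transforms.functional. A minimal standalone sketch of the idea, assuming (as reported in the linked issue) that basicsr only needs rgb_to_grayscale from the old path:

    import sys
    import torchvision.transforms.functional as functional

    # Register the new module under the old name before basicsr is imported, so
    # "from torchvision.transforms.functional_tensor import rgb_to_grayscale"
    # resolves against torchvision.transforms.functional instead of failing.
    sys.modules["torchvision.transforms.functional_tensor"] = functional

    from torchvision.transforms.functional_tensor import rgb_to_grayscale  # noqa: E402
    print(callable(rgb_to_grayscale))  # True: the alias satisfies the old import path
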
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 031488a0..2c54e2a0 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -6,6 +6,7 @@ import os
 import shutil
 import sys
 import importlib.util
+import importlib.metadata
 import platform
 import json
 from functools import lru_cache
@@ -119,11 +120,16 @@ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_
 
 def is_installed(package):
     try:
-        spec = importlib.util.find_spec(package)
-    except ModuleNotFoundError:
-        return False
+        dist = importlib.metadata.distribution(package)
+    except importlib.metadata.PackageNotFoundError:
+        try:
+            spec = importlib.util.find_spec(package)
+        except ModuleNotFoundError:
+            return False
+
+        return spec is not None
 
-    return spec is not None
+    return dist is not None
 
 
 def repo_dir(name):
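
For context on the is_installed() change: importlib.metadata looks up the installed distribution by its project name, while importlib.util.find_spec looks up an importable module name, so packages whose distribution and module names differ behave differently under each check. A small sketch of the distinction (the package names are only examples):

    import importlib.metadata
    import importlib.util

    # Distribution lookup uses the name the project was installed under
    # (e.g. "opencv-python") and raises PackageNotFoundError when absent.
    try:
        print(importlib.metadata.distribution("opencv-python").version)
    except importlib.metadata.PackageNotFoundError:
        print("opencv-python is not installed")

    # find_spec() uses the importable module name (e.g. "cv2") and
    # returns None when the module cannot be located.
    print(importlib.util.find_spec("cv2") is not None)
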
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py
index b892d5fc..6db340da 100644
--- a/modules/models/diffusion/ddpm_edit.py
+++ b/modules/models/diffusion/ddpm_edit.py
@@ -24,10 +24,15 @@ from pytorch_lightning.utilities.distributed import rank_zero_only
 from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
 from ldm.modules.ema import LitEma
 from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
-from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
+from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
 from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
 from ldm.models.diffusion.ddim import DDIMSampler
 
+try:
+    from ldm.models.autoencoder import VQModelInterface
+except Exception:
+    class VQModelInterface:
+        pass
 
 __conditioning_keys__ = {'concat': 'c_concat',
                          'crossattn': 'c_crossattn',
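
The guarded import above presumably keeps later isinstance(..., VQModelInterface) checks in this module from raising NameError on newer ldm releases that dropped the class; the stub simply never matches anything. A tiny sketch of the same pattern with an illustrative, made-up module name:

    # Fall back to a stub when an optional class has been removed upstream,
    # so isinstance() checks quietly return False instead of raising NameError.
    try:
        from some_optional_lib import OptionalCodec  # hypothetical import
    except Exception:
        class OptionalCodec:
            pass

    def decode(model):
        if isinstance(model, OptionalCodec):
            return "optional decode path"
        return "default decode path"

    print(decode(object()))  # -> "default decode path" whether or not the import worked
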
diff --git a/modules/processing.py b/modules/processing.py
index 5ab6ddde..6f01c95f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -938,21 +938,20 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                 if opts.enable_pnginfo:
                     image.info["parameters"] = text
                 output_images.append(image)
 
-                if save_samples and hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]):
-                    image_mask = p.mask_for_overlay.convert('RGB')
-                    image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
-
-                    if opts.save_mask:
-                        images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
-
-                    if opts.save_mask_composite:
-                        images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
-
-                    if opts.return_mask:
-                        output_images.append(image_mask)
-
-                    if opts.return_mask_composite:
-                        output_images.append(image_mask_composite)
+                if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay:
+                    if opts.return_mask or opts.save_mask:
+                        image_mask = p.mask_for_overlay.convert('RGB')
+                        if save_samples and opts.save_mask:
+                            images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
+                        if opts.return_mask:
+                            output_images.append(image_mask)
+
+                    if opts.return_mask_composite or opts.save_mask_composite:
+                        image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
+                        if save_samples and opts.save_mask_composite:
+                            images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
+                        if opts.return_mask_composite:
+                            output_images.append(image_mask_composite)
 
             del x_samples_ddim
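
The reshuffled block above only builds image_mask_composite when one of the composite options is enabled. For reference, Image.composite(a, b, mask) takes pixels from a where the mask is opaque and from b elsewhere; a self-contained sketch with throwaway images (sizes and colors are arbitrary):

    from PIL import Image

    base = Image.new("RGBA", (64, 64), "red")
    backdrop = Image.new("RGBa", (64, 64))      # fully transparent backdrop
    mask = Image.new("L", (64, 64), 0)
    mask.paste(255, (0, 0, 32, 64))             # select the left half

    # Where the mask is 255 the pixel comes from the first image,
    # everywhere else it comes from the second.
    composite = Image.composite(base.convert("RGBa"), backdrop, mask).convert("RGBA")
    composite.save("composite-demo.png")
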
diff --git a/modules/shared_options.py b/modules/shared_options.py
index a0a68f99..a860e355 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -257,6 +257,7 @@ options_templates.update(options_section(('ui_prompt_editing', "Prompt editing",
"keyedit_precision_attention": OptionInfo(0.1, "Precision for (attention:1.1) when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"keyedit_precision_extra": OptionInfo(0.05, "Precision for <extra networks:0.9> when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Word delimiters when editing the prompt with Ctrl+up/down"),
+ "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}),
"disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
}))