Diffstat (limited to 'modules')
 modules/api/api.py                              |  15
 modules/api/models.py                           |   3
 modules/cache.py                                |   2
 modules/cmd_args.py                             |   1
 modules/devices.py                              |  15
 modules/errors.py                               |  18
 modules/extensions.py                           |  96
 modules/generation_parameters_copypaste.py      |  13
 modules/gradio_extensons.py                     |  10
 modules/img2img.py                              |  24
 modules/import_hook.py                          |  11
 modules/launch_utils.py                         |  38
 modules/logging_config.py                       |  27
 modules/mac_specific.py                         |  15
 modules/models/diffusion/ddpm_edit.py           |   7
 modules/options.py                              |  81
 modules/postprocessing.py                       |  94
 modules/processing.py                           |  40
 modules/rng.py                                  |   2
 modules/scripts.py                              | 131
 modules/scripts_postprocessing.py               |  86
 modules/sd_disable_initialization.py            |   2
 modules/sd_hijack.py                            |  10
 modules/sd_models.py                            |  17
 modules/sd_samplers_extra.py                    |   2
 modules/sd_samplers_timesteps_impl.py           |   4
 modules/sd_unet.py                              |  14
 modules/shared_items.py                         |  16
 modules/shared_options.py                       | 134
 modules/styles.py                               | 169
 modules/sysinfo.py                              |  18
 modules/textual_inversion/autocrop.py           | 239
 modules/textual_inversion/preprocess.py         | 232
 modules/textual_inversion/ui.py                 |   7
 modules/ui.py                                   | 134
 modules/ui_extensions.py                        |  17
 modules/ui_extra_networks.py                    |  17
 modules/ui_extra_networks_checkpoints.py        |   8
 modules/ui_extra_networks_hypernets.py          |  13
 modules/ui_extra_networks_textual_inversion.py  |  10
 modules/ui_extra_networks_user_metadata.py      |   2
 modules/ui_loadsave.py                          |   2
 modules/ui_postprocessing.py                    |  18
 modules/ui_prompt_styles.py                     |   4
 modules/ui_toprow.py                            |   6
 modules/upscaler.py                             |   6
 modules/xpu_specific.py                         |  59
47 files changed, 1139 insertions, 750 deletions
diff --git a/modules/api/api.py b/modules/api/api.py
index 09083874..b3d74e51 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -22,7 +22,6 @@ from modules.api import models
from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
-from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin, Image
from modules.sd_models_config import find_checkpoint_config_near_filename
@@ -235,7 +234,6 @@ class Api:
self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"])
self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
- self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
@@ -675,19 +673,6 @@ class Api:
finally:
shared.state.end()
- def preprocess(self, args: dict):
- try:
- shared.state.begin(job="preprocess")
- preprocess(**args) # quick operation unless blip/booru interrogation is enabled
- shared.state.end()
- return models.PreprocessResponse(info='preprocess complete')
- except KeyError as e:
- return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
- except Exception as e:
- return models.PreprocessResponse(info=f"preprocess error: {e}")
- finally:
- shared.state.end()
-
def train_embedding(self, args: dict):
try:
shared.state.begin(job="train_embedding")
diff --git a/modules/api/models.py b/modules/api/models.py
index a0d80af8..33894b3e 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -202,9 +202,6 @@ class TrainResponse(BaseModel):
class CreateResponse(BaseModel):
info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.")
-class PreprocessResponse(BaseModel):
- info: str = Field(title="Preprocess info", description="Response string from preprocessing task.")
-
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
diff --git a/modules/cache.py b/modules/cache.py
index ff26a213..2d37e7b9 100644
--- a/modules/cache.py
+++ b/modules/cache.py
@@ -32,7 +32,7 @@ def dump_cache():
with cache_lock:
cache_filename_tmp = cache_filename + "-"
with open(cache_filename_tmp, "w", encoding="utf8") as file:
- json.dump(cache_data, file, indent=4)
+ json.dump(cache_data, file, indent=4, ensure_ascii=False)
os.replace(cache_filename_tmp, cache_filename)
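With ensure_ascii=False, json.dump writes non-ASCII characters (for example checkpoint names or paths containing CJK text) as-is instead of \uXXXX escapes. A minimal sketch of the difference:

    import json

    data = {"model": "café-v1"}
    print(json.dumps(data))                      # {"model": "caf\u00e9-v1"}
    print(json.dumps(data, ensure_ascii=False))  # {"model": "café-v1"}
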
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index a9fb9bfa..da93eb26 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -70,6 +70,7 @@ parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="pre
parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
+parser.add_argument("--use-ipex", action="store_true", help="use Intel XPU as torch device")
parser.add_argument("--disable-model-loading-ram-optimization", action='store_true', help="disable an optimization that reduces RAM use when loading a model")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
diff --git a/modules/devices.py b/modules/devices.py
index 1d4eb563..ea1f712f 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -8,6 +8,13 @@ from modules import errors, shared
if sys.platform == "darwin":
from modules import mac_specific
+if shared.cmd_opts.use_ipex:
+ from modules import xpu_specific
+
+
+def has_xpu() -> bool:
+ return shared.cmd_opts.use_ipex and xpu_specific.has_xpu
+
def has_mps() -> bool:
if sys.platform != "darwin":
@@ -30,6 +37,9 @@ def get_optimal_device_name():
if has_mps():
return "mps"
+ if has_xpu():
+ return xpu_specific.get_xpu_device_string()
+
return "cpu"
@@ -38,7 +48,7 @@ def get_optimal_device():
def get_device_for(task):
- if task in shared.cmd_opts.use_cpu:
+ if task in shared.cmd_opts.use_cpu or "all" in shared.cmd_opts.use_cpu:
return cpu
return get_optimal_device()
@@ -54,6 +64,9 @@ def torch_gc():
if has_mps():
mac_specific.torch_mps_gc()
+ if has_xpu():
+ xpu_specific.torch_xpu_gc()
+
def enable_tf32():
if torch.cuda.is_available():
diff --git a/modules/errors.py b/modules/errors.py
index 8c339464..eb234a83 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -6,6 +6,21 @@ import traceback
exception_records = []
+def format_traceback(tb):
+ return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]
+
+
+def format_exception(e, tb):
+ return {"exception": str(e), "traceback": format_traceback(tb)}
+
+
+def get_exceptions():
+ try:
+ return list(reversed(exception_records))
+ except Exception as e:
+ return str(e)
+
+
def record_exception():
_, e, tb = sys.exc_info()
if e is None:
@@ -14,8 +29,7 @@ def record_exception():
if exception_records and exception_records[-1] == e:
return
- from modules import sysinfo
- exception_records.append(sysinfo.format_exception(e, tb))
+ exception_records.append(format_exception(e, tb))
if len(exception_records) > 5:
exception_records.pop(0)
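The new format_exception helper produces a JSON-friendly record instead of the sysinfo-formatted string used before. A self-contained sketch of the shape it returns (filename and line number in the output depend on where it runs):

    import sys
    import traceback

    def format_traceback(tb):
        return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]

    try:
        1 / 0
    except Exception:
        _, e, tb = sys.exc_info()
        print({"exception": str(e), "traceback": format_traceback(tb)})
        # {'exception': 'division by zero', 'traceback': [['demo.py, line 9, <module>', '1 / 0']]}
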
diff --git a/modules/extensions.py b/modules/extensions.py
index bf9a1878..1899cd52 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -1,11 +1,14 @@
+from __future__ import annotations
+
+import configparser
import os
import threading
+import re
from modules import shared, errors, cache, scripts
from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
-extensions = []
os.makedirs(extensions_dir, exist_ok=True)
@@ -19,11 +22,55 @@ def active():
return [x for x in extensions if x.enabled]
+class ExtensionMetadata:
+ filename = "metadata.ini"
+ config: configparser.ConfigParser
+ canonical_name: str
+ requires: list
+
+ def __init__(self, path, canonical_name):
+ self.config = configparser.ConfigParser()
+
+ filepath = os.path.join(path, self.filename)
+ if os.path.isfile(filepath):
+ try:
+ self.config.read(filepath)
+ except Exception:
+ errors.report(f"Error reading {self.filename} for extension {canonical_name}.", exc_info=True)
+
+ self.canonical_name = self.config.get("Extension", "Name", fallback=canonical_name)
+ self.canonical_name = self.canonical_name.lower().strip()
+
+ self.requires = self.get_script_requirements("Requires", "Extension")
+
+ def get_script_requirements(self, field, section, extra_section=None):
+ """reads a list of requirements from the config; field is the name of the field in the ini file,
+ like Requires or Before, and section is the name of the [section] in the ini file; additionally,
+ reads more requirements from [extra_section] if specified."""
+
+ x = self.config.get(section, field, fallback='')
+
+ if extra_section:
+ x = x + ', ' + self.config.get(extra_section, field, fallback='')
+
+ return self.parse_list(x.lower())
+
+ def parse_list(self, text):
+ """converts a line from config ("ext1 ext2, ext3 ") into a python list (["ext1", "ext2", "ext3"])"""
+
+ if not text:
+ return []
+
+ # both "," and " " are accepted as separator
+ return [x for x in re.split(r"[,\s]+", text.strip()) if x]
+
+
class Extension:
lock = threading.Lock()
cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version']
+ metadata: ExtensionMetadata
- def __init__(self, name, path, enabled=True, is_builtin=False):
+ def __init__(self, name, path, enabled=True, is_builtin=False, metadata=None):
self.name = name
self.path = path
self.enabled = enabled
@@ -36,6 +83,8 @@ class Extension:
self.branch = None
self.remote = None
self.have_info_from_repo = False
+ self.metadata = metadata if metadata else ExtensionMetadata(self.path, name.lower())
+ self.canonical_name = self.metadata.canonical_name
def to_dict(self):
return {x: getattr(self, x) for x in self.cached_fields}
@@ -56,6 +105,7 @@ class Extension:
self.do_read_info_from_repo()
return self.to_dict()
+
try:
d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
self.from_dict(d)
@@ -136,9 +186,6 @@ class Extension:
def list_extensions():
extensions.clear()
- if not os.path.isdir(extensions_dir):
- return
-
if shared.cmd_opts.disable_all_extensions:
print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***")
elif shared.opts.disable_all_extensions == "all":
@@ -148,18 +195,43 @@ def list_extensions():
elif shared.opts.disable_all_extensions == "extra":
print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***")
- extension_paths = []
- for dirname in [extensions_dir, extensions_builtin_dir]:
+ loaded_extensions = {}
+
+ # scan through extensions directory and load metadata
+ for dirname in [extensions_builtin_dir, extensions_dir]:
if not os.path.isdir(dirname):
- return
+ continue
for extension_dirname in sorted(os.listdir(dirname)):
path = os.path.join(dirname, extension_dirname)
if not os.path.isdir(path):
continue
- extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir))
+ canonical_name = extension_dirname
+ metadata = ExtensionMetadata(path, canonical_name)
+
+ # check for duplicated canonical names
+ already_loaded_extension = loaded_extensions.get(metadata.canonical_name)
+ if already_loaded_extension is not None:
+ errors.report(f'Duplicate canonical name "{canonical_name}" found in extensions "{extension_dirname}" and "{already_loaded_extension.name}". Former will be discarded.', exc_info=False)
+ continue
+
+ is_builtin = dirname == extensions_builtin_dir
+ extension = Extension(name=extension_dirname, path=path, enabled=extension_dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin, metadata=metadata)
+ extensions.append(extension)
+ loaded_extensions[canonical_name] = extension
+
+ # check for requirements
+ for extension in extensions:
+ for req in extension.metadata.requires:
+ required_extension = loaded_extensions.get(req)
+ if required_extension is None:
+ errors.report(f'Extension "{extension.name}" requires "{req}" which is not installed.', exc_info=False)
+ continue
+
+ if not required_extension.enabled:
+ errors.report(f'Extension "{extension.name}" requires "{required_extension.name}" which is disabled.', exc_info=False)
+ continue
+
- for dirname, path, is_builtin in extension_paths:
- extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin)
- extensions.append(extension)
+extensions: list[Extension] = []
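The metadata.ini file in an extension's root drives both the canonical name and the requirement checks above. A hypothetical example, parsed the same way ExtensionMetadata does (the extension names are made up):

    import configparser

    # section and field names ("Extension", "Name", "Requires") are the ones
    # ExtensionMetadata reads; the values are illustrative
    ini_text = "[Extension]\nName = My-Extension\nRequires = sd-webui-controlnet, some-other-ext\n"

    config = configparser.ConfigParser()
    config.read_string(ini_text)

    canonical_name = config.get("Extension", "Name", fallback="dirname").lower().strip()
    requires = config.get("Extension", "Requires", fallback="")
    print(canonical_name)                                   # my-extension
    print([x for x in requires.replace(",", " ").split()])  # ['sd-webui-controlnet', 'some-other-ext']
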
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 0a606515..4efe53e0 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -1,3 +1,4 @@
+from __future__ import annotations
import base64
import io
import json
@@ -15,9 +16,6 @@ re_imagesize = re.compile(r"^(\d+)x(\d+)$")
re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$")
type_of_gr_update = type(gr.update())
-paste_fields = {}
-registered_param_bindings = []
-
class ParamBinding:
def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None):
@@ -30,6 +28,10 @@ class ParamBinding:
self.paste_field_names = paste_field_names or []
+paste_fields: dict[str, dict] = {}
+registered_param_bindings: list[ParamBinding] = []
+
+
def reset():
paste_fields.clear()
registered_param_bindings.clear()
@@ -113,7 +115,6 @@ def register_paste_params_button(binding: ParamBinding):
def connect_paste_params_buttons():
- binding: ParamBinding
for binding in registered_param_bindings:
destination_image_component = paste_fields[binding.tabname]["init_img"]
fields = paste_fields[binding.tabname]["fields"]
@@ -313,6 +314,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "VAE Decoder" not in res:
res["VAE Decoder"] = "Full"
+ skip = set(shared.opts.infotext_skip_pasting)
+ res = {k: v for k, v in res.items() if k not in skip}
+
return res
@@ -443,3 +447,4 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
outputs=[],
show_progress=False,
)
+
diff --git a/modules/gradio_extensons.py b/modules/gradio_extensons.py
index e6b6835a..7d88dc98 100644
--- a/modules/gradio_extensons.py
+++ b/modules/gradio_extensons.py
@@ -47,10 +47,20 @@ def Block_get_config(self):
def BlockContext_init(self, *args, **kwargs):
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.before_component(self, **kwargs)
+
+ scripts.script_callbacks.before_component_callback(self, **kwargs)
+
res = original_BlockContext_init(self, *args, **kwargs)
add_classes_to_gradio_component(self)
+ scripts.script_callbacks.after_component_callback(self, **kwargs)
+
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.after_component(self, **kwargs)
+
return res
diff --git a/modules/img2img.py b/modules/img2img.py
index 52cb577a..c583290a 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -44,6 +44,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
steps = p.steps
override_settings = p.override_settings
sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None))
+ batch_results = None
+ discard_further_results = False
for i, image in enumerate(images):
state.job = f"{i+1} out of {len(images)}"
if state.skipped:
@@ -127,7 +129,21 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
if proc is None:
p.override_settings.pop('save_images_replace_action', None)
- process_images(p)
+ proc = process_images(p)
+
+ if not discard_further_results and proc:
+ if batch_results:
+ batch_results.images.extend(proc.images)
+ batch_results.infotexts.extend(proc.infotexts)
+ else:
+ batch_results = proc
+
+ if 0 <= shared.opts.img2img_batch_show_results_limit < len(batch_results.images):
+ discard_further_results = True
+ batch_results.images = batch_results.images[:int(shared.opts.img2img_batch_show_results_limit)]
+ batch_results.infotexts = batch_results.infotexts[:int(shared.opts.img2img_batch_show_results_limit)]
+
+ return batch_results
def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
@@ -212,10 +228,10 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
with closing(p):
if is_batch:
assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
+ processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
- process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
-
- processed = Processed(p, [], p.seed, "")
+ if processed is None:
+ processed = Processed(p, [], p.seed, "")
else:
processed = modules.scripts.scripts_img2img.run(p, *args)
if processed is None:
diff --git a/modules/import_hook.py b/modules/import_hook.py
index 28c67dfa..eba9a372 100644
--- a/modules/import_hook.py
+++ b/modules/import_hook.py
@@ -3,3 +3,14 @@ import sys
# this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it
if "--xformers" not in "".join(sys.argv):
sys.modules["xformers"] = None
+
+# Hack to fix a changed import in torchvision 0.17+, which otherwise breaks
+# basicsr; see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13985
+try:
+ import torchvision.transforms.functional_tensor # noqa: F401
+except ImportError:
+ try:
+ import torchvision.transforms.functional as functional
+ sys.modules["torchvision.transforms.functional_tensor"] = functional
+ except ImportError:
+ pass # shrug...
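The aliasing works because Python consults sys.modules before running the import machinery, so a module registered under another name satisfies later imports of that name. The same technique in isolation:

    import sys
    import json

    # register an alias: any later "import json_alias" resolves to json
    sys.modules["json_alias"] = json

    import json_alias  # no ImportError
    assert json_alias is json
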
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 8cdbafa5..29506f24 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -6,6 +6,7 @@ import os
import shutil
import sys
import importlib.util
+import importlib.metadata
import platform
import json
from functools import lru_cache
@@ -119,11 +120,16 @@ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_
def is_installed(package):
try:
- spec = importlib.util.find_spec(package)
- except ModuleNotFoundError:
- return False
+ dist = importlib.metadata.distribution(package)
+ except importlib.metadata.PackageNotFoundError:
+ try:
+ spec = importlib.util.find_spec(package)
+ except ModuleNotFoundError:
+ return False
+
+ return spec is not None
- return spec is not None
+ return dist is not None
def repo_dir(name):
@@ -310,6 +316,26 @@ def requirements_met(requirements_file):
def prepare_environment():
torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
+ if args.use_ipex:
+ if platform.system() == "Windows":
+ # The "Nuullll/intel-extension-for-pytorch" wheels were built from IPEX source for Intel Arc GPU: https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main
+ # This is NOT an Intel official release so please use it at your own risk!!
+ # See https://github.com/Nuullll/intel-extension-for-pytorch/releases/tag/v2.0.110%2Bxpu-master%2Bdll-bundle for details.
+ #
+ # Strengths (over official IPEX 2.0.110 windows release):
+ # - AOT build (for Arc GPU only) to eliminate JIT compilation overhead: https://github.com/intel/intel-extension-for-pytorch/issues/399
+ # - Bundles minimal oneAPI 2023.2 dependencies into the python wheels, so users don't need to install oneAPI for the whole system.
+ # - Provides a compatible torchvision wheel: https://github.com/intel/intel-extension-for-pytorch/issues/465
+ # Limitation:
+ # - Only works for python 3.10
+ url_prefix = "https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%2Bxpu-master%2Bdll-bundle"
+ torch_command = os.environ.get('TORCH_COMMAND', f"pip install {url_prefix}/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl {url_prefix}/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl {url_prefix}/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl")
+ else:
+ # Using official IPEX release for linux since it's already an AOT build.
+ # However, users still have to install oneAPI toolkit and activate oneAPI environment manually.
+ # See https://intel.github.io/intel-extension-for-pytorch/index.html#installation for details.
+ torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://pytorch-extension.intel.com/release-whl/stable/xpu/us/")
+ torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.0a0 intel-extension-for-pytorch==2.0.110+gitba7f6c1 --extra-index-url {torch_index_url}")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
@@ -352,6 +378,8 @@ def prepare_environment():
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
startup_timer.record("install torch")
+ if args.use_ipex:
+ args.skip_torch_cuda_test = True
if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"):
raise RuntimeError(
'Torch is not able to use GPU; '
@@ -441,7 +469,7 @@ def dump_sysinfo():
import datetime
text = sysinfo.get()
- filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt"
+ filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json"
with open(filename, "w", encoding="utf8") as file:
file.write(text)
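The reworked is_installed consults installed-package metadata first and only falls back to module lookup, which matters when a distribution's name differs from its importable module. A sketch of the distinction, assuming Pillow is installed:

    import importlib.metadata
    import importlib.util

    print(importlib.metadata.distribution("Pillow") is not None)  # True: metadata knows "Pillow"
    print(importlib.util.find_spec("Pillow"))                     # None: the module is named "PIL"
    print(importlib.util.find_spec("PIL") is not None)            # True
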
diff --git a/modules/logging_config.py b/modules/logging_config.py
index 7db23d4b..79269875 100644
--- a/modules/logging_config.py
+++ b/modules/logging_config.py
@@ -1,16 +1,41 @@
import os
import logging
+try:
+ from tqdm.auto import tqdm
+
+ class TqdmLoggingHandler(logging.Handler):
+ def __init__(self, level=logging.INFO):
+ super().__init__(level)
+
+ def emit(self, record):
+ try:
+ msg = self.format(record)
+ tqdm.write(msg)
+ self.flush()
+ except Exception:
+ self.handleError(record)
+
+ TQDM_IMPORTED = True
+except ImportError:
+ # tqdm does not exist before first launch
+ # It will be imported once the UI finishes setting up the environment and reloads.
+ TQDM_IMPORTED = False
def setup_logging(loglevel):
if loglevel is None:
loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL")
+ loghandlers = []
+
+ if TQDM_IMPORTED:
+ loghandlers.append(TqdmLoggingHandler())
+
if loglevel:
log_level = getattr(logging, loglevel.upper(), None) or logging.INFO
logging.basicConfig(
level=log_level,
format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
+ handlers=loghandlers
)
-
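Routing records through tqdm.write keeps log output from breaking an active progress bar: the handler prints the message above the bar and the bar is redrawn. A standalone usage sketch with the handler reproduced in minimal form:

    import logging
    import time
    from tqdm.auto import tqdm

    class TqdmLoggingHandler(logging.Handler):
        def emit(self, record):
            try:
                tqdm.write(self.format(record))  # printed above the bar
                self.flush()
            except Exception:
                self.handleError(record)

    logging.basicConfig(level=logging.INFO, handlers=[TqdmLoggingHandler()])
    for i in tqdm(range(3)):
        logging.info("step %d", i)  # does not mangle the bar
        time.sleep(0.1)
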
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 89256c5b..d96d86d7 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -1,6 +1,7 @@
import logging
import torch
+from torch import Tensor
import platform
from modules.sd_hijack_utils import CondFunc
from packaging import version
@@ -51,6 +52,17 @@ def cumsum_fix(input, cumsum_func, *args, **kwargs):
return cumsum_func(input, *args, **kwargs)
+# MPS workaround for https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046
+def interpolate_with_fp32_fallback(orig_func, *args, **kwargs) -> Tensor:
+ try:
+ return orig_func(*args, **kwargs)
+ except RuntimeError as e:
+ if "not implemented for" in str(e) and "Half" in str(e):
+ input_tensor = args[0]
+ return orig_func(input_tensor.to(torch.float32), *args[1:], **kwargs).to(input_tensor.dtype)
+ else:
+ print(f"An unexpected RuntimeError occurred: {str(e)}")
+
if has_mps:
if platform.mac_ver()[0].startswith("13.2."):
# MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
@@ -77,6 +89,9 @@ if has_mps:
# MPS workaround for https://github.com/pytorch/pytorch/issues/96113
CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps')
+ # MPS workaround for https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046
+ CondFunc('torch.nn.functional.interpolate', interpolate_with_fp32_fallback, None)
+
# MPS workaround for https://github.com/pytorch/pytorch/issues/92311
if platform.processor() == 'i386':
for funcName in ['torch.argmax', 'torch.Tensor.argmax']:
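The fallback simply retries the failing op in float32 and casts the result back. The same pattern can be sketched without CondFunc by wrapping torch.nn.functional.interpolate directly (shapes here are illustrative):

    import torch
    import torch.nn.functional as F

    def interpolate_fp32_fallback(x, **kwargs):
        try:
            return F.interpolate(x, **kwargs)
        except RuntimeError as e:
            if "not implemented for" in str(e) and "Half" in str(e):
                # upcast, run, and cast back to the original dtype
                return F.interpolate(x.to(torch.float32), **kwargs).to(x.dtype)
            raise

    x = torch.randn(1, 3, 8, 8, dtype=torch.float16)
    y = interpolate_fp32_fallback(x, scale_factor=2, mode="bicubic")
    print(y.shape, y.dtype)  # torch.Size([1, 3, 16, 16]) torch.float16
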
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py
index b892d5fc..6db340da 100644
--- a/modules/models/diffusion/ddpm_edit.py
+++ b/modules/models/diffusion/ddpm_edit.py
@@ -24,10 +24,15 @@ from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
-from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
+from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
+try:
+ from ldm.models.autoencoder import VQModelInterface
+except Exception:
+ class VQModelInterface:
+ pass
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
diff --git a/modules/options.py b/modules/options.py
index ab40aff7..4fead690 100644
--- a/modules/options.py
+++ b/modules/options.py
@@ -1,5 +1,6 @@
import json
import sys
+from dataclasses import dataclass
import gradio as gr
@@ -8,13 +9,14 @@ from modules.shared_cmd_options import cmd_opts
class OptionInfo:
- def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False):
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False, category_id=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = section
+ self.category_id = category_id
self.refresh = refresh
self.do_not_save = False
@@ -63,7 +65,11 @@ class OptionHTML(OptionInfo):
def options_section(section_identifier, options_dict):
for v in options_dict.values():
- v.section = section_identifier
+ if len(section_identifier) == 2:
+ v.section = section_identifier
+ elif len(section_identifier) == 3:
+ v.section = section_identifier[0:2]
+ v.category_id = section_identifier[2]
return options_dict
@@ -76,7 +82,7 @@ class Options:
def __init__(self, data_labels: dict[str, OptionInfo], restricted_opts):
self.data_labels = data_labels
- self.data = {k: v.default for k, v in self.data_labels.items()}
+ self.data = {k: v.default for k, v in self.data_labels.items() if not v.do_not_save}
self.restricted_opts = restricted_opts
def __setattr__(self, key, value):
@@ -158,7 +164,7 @@ class Options:
assert not cmd_opts.freeze_settings, "saving settings is disabled"
with open(filename, "w", encoding="utf8") as file:
- json.dump(self.data, file, indent=4)
+ json.dump(self.data, file, indent=4, ensure_ascii=False)
def same_type(self, x, y):
if x is None or y is None:
@@ -206,23 +212,59 @@ class Options:
d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
+
+ item_categories = {}
+ for item in self.data_labels.values():
+ category = categories.mapping.get(item.category_id)
+ category = "Uncategorized" if category is None else category.label
+ if category not in item_categories:
+ item_categories[category] = item.section[1]
+
+ # _categories is a list of pairs: [section, category]. Each section (a setting page) will get a special heading above it with the category as text.
+ d["_categories"] = [[v, k] for k, v in item_categories.items()] + [["Defaults", "Other"]]
+
return json.dumps(d)
def add_option(self, key, info):
self.data_labels[key] = info
- if key not in self.data:
+ if key not in self.data and not info.do_not_save:
self.data[key] = info.default
def reorder(self):
- """reorder settings so that all items related to section always go together"""
+ """Reorder settings so that:
+ - all items related to section always go together
+ - all sections belonging to a category go together
+ - sections inside a category are ordered alphabetically
+ - categories are ordered by creation order
+
+ Category is a superset of sections: for category "postprocessing" there could be multiple sections: "face restoration", "upscaling".
+
+ This function also changes items' category_id so that all items belonging to a section have the same category_id.
+ """
+
+ category_ids = {}
+ section_categories = {}
- section_ids = {}
settings_items = self.data_labels.items()
for _, item in settings_items:
- if item.section not in section_ids:
- section_ids[item.section] = len(section_ids)
+ if item.section not in section_categories:
+ section_categories[item.section] = item.category_id
+
+ for _, item in settings_items:
+ item.category_id = section_categories.get(item.section)
+
+ for category_id in categories.mapping:
+ if category_id not in category_ids:
+ category_ids[category_id] = len(category_ids)
- self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
+ def sort_key(x):
+ item: OptionInfo = x[1]
+ category_order = category_ids.get(item.category_id, len(category_ids))
+ section_order = item.section[1]
+
+ return category_order, section_order
+
+ self.data_labels = dict(sorted(settings_items, key=sort_key))
def cast_value(self, key, value):
"""casts an arbitrary to the same type as this setting's value with key
@@ -245,3 +287,22 @@ class Options:
value = expected_type(value)
return value
+
+
+@dataclass
+class OptionsCategory:
+ id: str
+ label: str
+
+class OptionsCategories:
+ def __init__(self):
+ self.mapping = {}
+
+ def register_category(self, category_id, label):
+ if category_id in self.mapping:
+ return category_id
+
+ self.mapping[category_id] = OptionsCategory(category_id, label)
+
+
+categories = OptionsCategories()
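With the three-element section identifier, a settings section can attach itself to a registered category. A hypothetical sketch (the category id, labels, and option name are made up):

    from modules.options import OptionInfo, options_section, categories

    categories.register_category("postprocessing", "Postprocessing")

    # first two elements are the usual (section id, section label);
    # the third becomes category_id on every option in the dict
    section = options_section(("upscaling", "Upscaling", "postprocessing"), {
        "example_tile_size": OptionInfo(192, "Tile size for upscalers"),
    })
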
diff --git a/modules/postprocessing.py b/modules/postprocessing.py
index cf04d38b..0c59fad4 100644
--- a/modules/postprocessing.py
+++ b/modules/postprocessing.py
@@ -29,11 +29,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
image_list = shared.listfiles(input_dir)
for filename in image_list:
- try:
- image = Image.open(filename)
- except Exception:
- continue
- yield image, filename
+ yield filename, filename
else:
assert image, 'image not selected'
yield image, None
@@ -45,43 +41,97 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
infotext = ''
- for image_data, name in get_images(extras_mode, image, image_folder, input_dir):
+ data_to_process = list(get_images(extras_mode, image, image_folder, input_dir))
+ shared.state.job_count = len(data_to_process)
+
+ for image_placeholder, name in data_to_process:
image_data: Image.Image
+ shared.state.nextjob()
shared.state.textinfo = name
+ shared.state.skipped = False
+
+ if shared.state.interrupted:
+ break
+
+ if isinstance(image_placeholder, str):
+ try:
+ image_data = Image.open(image_placeholder)
+ except Exception:
+ continue
+ else:
+ image_data = image_placeholder
+
+ shared.state.assign_current_image(image_data)
parameters, existing_pnginfo = images.read_info_from_image(image_data)
if parameters:
existing_pnginfo["parameters"] = parameters
- pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB"))
+ initial_pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB"))
- scripts.scripts_postproc.run(pp, args)
+ scripts.scripts_postproc.run(initial_pp, args)
- if opts.use_original_name_batch and name is not None:
- basename = os.path.splitext(os.path.basename(name))[0]
- else:
- basename = ''
+ if shared.state.skipped:
+ continue
+
+ used_suffixes = {}
+ for pp in [initial_pp, *initial_pp.extra_images]:
+ suffix = pp.get_suffix(used_suffixes)
- infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None])
+ if opts.use_original_name_batch and name is not None:
+ basename = os.path.splitext(os.path.basename(name))[0]
+ forced_filename = basename + suffix
+ else:
+ basename = ''
+ forced_filename = None
- if opts.enable_pnginfo:
- pp.image.info = existing_pnginfo
- pp.image.info["postprocessing"] = infotext
+ infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None])
- if save_output:
- images.save_image(pp.image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None)
+ if opts.enable_pnginfo:
+ pp.image.info = existing_pnginfo
+ pp.image.info["postprocessing"] = infotext
- if extras_mode != 2 or show_extras_results:
- outputs.append(pp.image)
+ if save_output:
+ fullfn, _ = images.save_image(pp.image, path=outpath, basename=basename, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=forced_filename, suffix=suffix)
+
+ if pp.caption:
+ caption_filename = os.path.splitext(fullfn)[0] + ".txt"
+ if os.path.isfile(caption_filename):
+ with open(caption_filename, encoding="utf8") as file:
+ existing_caption = file.read().strip()
+ else:
+ existing_caption = ""
+
+ action = shared.opts.postprocessing_existing_caption_action
+ if action == 'Prepend' and existing_caption:
+ caption = f"{existing_caption} {pp.caption}"
+ elif action == 'Append' and existing_caption:
+ caption = f"{pp.caption} {existing_caption}"
+ elif action == 'Keep' and existing_caption:
+ caption = existing_caption
+ else:
+ caption = pp.caption
+
+ caption = caption.strip()
+ if caption:
+ with open(caption_filename, "w", encoding="utf8") as file:
+ file.write(caption)
+
+ if extras_mode != 2 or show_extras_results:
+ outputs.append(pp.image)
image_data.close()
devices.torch_gc()
-
+ shared.state.end()
return outputs, ui_common.plaintext_to_html(infotext), ''
+def run_postprocessing_webui(id_task, *args, **kwargs):
+ return run_postprocessing(*args, **kwargs)
+
+
def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True):
"""old handler for API"""
@@ -97,9 +147,11 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
"upscaler_2_visibility": extras_upscaler_2_visibility,
},
"GFPGAN": {
+ "enable": True,
"gfpgan_visibility": gfpgan_visibility,
},
"CodeFormer": {
+ "enable": True,
"codeformer_visibility": codeformer_visibility,
"codeformer_weight": codeformer_weight,
},
diff --git a/modules/processing.py b/modules/processing.py
index b0e240a4..6f01c95f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -679,8 +679,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Size": f"{p.width}x{p.height}",
"Model hash": p.sd_model_hash if opts.add_model_hash_to_info else None,
"Model": p.sd_model_name if opts.add_model_name_to_info else None,
- "VAE hash": p.sd_vae_hash if opts.add_model_hash_to_info else None,
- "VAE": p.sd_vae_name if opts.add_model_name_to_info else None,
+ "VAE hash": p.sd_vae_hash if opts.add_vae_hash_to_info else None,
+ "VAE": p.sd_vae_name if opts.add_vae_name_to_info else None,
"Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
@@ -799,7 +799,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
infotexts = []
output_images = []
-
with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
@@ -873,7 +872,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
else:
if opts.sd_vae_decode_method != 'Full':
p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
-
x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
x_samples_ddim = torch.stack(x_samples_ddim).float()
@@ -940,21 +938,20 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if opts.enable_pnginfo:
image.info["parameters"] = text
output_images.append(image)
- if save_samples and hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]):
- image_mask = p.mask_for_overlay.convert('RGB')
- image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
-
- if opts.save_mask:
- images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
-
- if opts.save_mask_composite:
- images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
-
- if opts.return_mask:
- output_images.append(image_mask)
-
- if opts.return_mask_composite:
- output_images.append(image_mask_composite)
+ if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay:
+ if opts.return_mask or opts.save_mask:
+ image_mask = p.mask_for_overlay.convert('RGB')
+ if save_samples and opts.save_mask:
+ images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
+ if opts.return_mask:
+ output_images.append(image_mask)
+
+ if opts.return_mask_composite or opts.save_mask_composite:
+ image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
+ if save_samples and opts.save_mask_composite:
+ images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
+ if opts.return_mask_composite:
+ output_images.append(image_mask_composite)
del x_samples_ddim
@@ -1147,6 +1144,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if not self.enable_hr:
return samples
+ devices.torch_gc()
if self.latent_scale_mode is None:
decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32)
@@ -1156,8 +1154,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
with sd_models.SkipWritingToConfig():
sd_models.reload_model_weights(info=self.hr_checkpoint_info)
- devices.torch_gc()
-
return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
@@ -1165,7 +1161,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
return samples
self.is_hr_pass = True
-
target_width = self.hr_upscale_to_x
target_height = self.hr_upscale_to_y
@@ -1254,7 +1249,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)
self.is_hr_pass = False
-
return decoded_samples
def close(self):
diff --git a/modules/rng.py b/modules/rng.py
index 9e8ba2ee..8934d39b 100644
--- a/modules/rng.py
+++ b/modules/rng.py
@@ -110,7 +110,7 @@ class ImageRNG:
self.is_first = True
def first(self):
- noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], self.seed_resize_from_h // 8, self.seed_resize_from_w // 8)
+ noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], int(self.seed_resize_from_h) // 8, int(self.seed_resize_from_w) // 8)
xs = []
diff --git a/modules/scripts.py b/modules/scripts.py
index 5c6e0226..7f9454eb 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -311,20 +311,113 @@ scripts_data = []
postprocessing_scripts_data = []
ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"])
+def topological_sort(dependencies):
+ """Accepts a dictionary mapping name to its dependencies, returns a list of names ordered according to dependencies.
+ Ignores errors relating to missing dependencies or circular dependencies
+ """
+
+ visited = {}
+ result = []
+
+ def inner(name):
+ visited[name] = True
+
+ for dep in dependencies.get(name, []):
+ if dep in dependencies and dep not in visited:
+ inner(dep)
+
+ result.append(name)
+
+ for depname in dependencies:
+ if depname not in visited:
+ inner(depname)
+
+ return result
+
+
+@dataclass
+class ScriptWithDependencies:
+ script_canonical_name: str
+ file: ScriptFile
+ requires: list
+ load_before: list
+ load_after: list
+
def list_scripts(scriptdirname, extension, *, include_extensions=True):
- scripts_list = []
+ scripts = {}
+
+ loaded_extensions = {ext.canonical_name: ext for ext in extensions.active()}
+ loaded_extensions_scripts = {ext.canonical_name: [] for ext in extensions.active()}
- basedir = os.path.join(paths.script_path, scriptdirname)
- if os.path.exists(basedir):
- for filename in sorted(os.listdir(basedir)):
- scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename)))
+ # build script dependency map
+ root_script_basedir = os.path.join(paths.script_path, scriptdirname)
+ if os.path.exists(root_script_basedir):
+ for filename in sorted(os.listdir(root_script_basedir)):
+ if not os.path.isfile(os.path.join(root_script_basedir, filename)):
+ continue
+
+ if os.path.splitext(filename)[1].lower() != extension:
+ continue
+
+ script_file = ScriptFile(paths.script_path, filename, os.path.join(root_script_basedir, filename))
+ scripts[filename] = ScriptWithDependencies(filename, script_file, [], [], [])
if include_extensions:
for ext in extensions.active():
- scripts_list += ext.list_files(scriptdirname, extension)
-
- scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
+ extension_scripts_list = ext.list_files(scriptdirname, extension)
+ for extension_script in extension_scripts_list:
+ if not os.path.isfile(extension_script.path):
+ continue
+
+ script_canonical_name = ("builtin/" if ext.is_builtin else "") + ext.canonical_name + "/" + extension_script.filename
+ relative_path = scriptdirname + "/" + extension_script.filename
+
+ script = ScriptWithDependencies(
+ script_canonical_name=script_canonical_name,
+ file=extension_script,
+ requires=ext.metadata.get_script_requirements("Requires", relative_path, scriptdirname),
+ load_before=ext.metadata.get_script_requirements("Before", relative_path, scriptdirname),
+ load_after=ext.metadata.get_script_requirements("After", relative_path, scriptdirname),
+ )
+
+ scripts[script_canonical_name] = script
+ loaded_extensions_scripts[ext.canonical_name].append(script)
+
+ for script_canonical_name, script in scripts.items():
+ # load before requires inverse dependency
+ # in this case, append the script name into the load_after list of the specified script
+ for load_before in script.load_before:
+ # if this requires an individual script to be loaded before
+ other_script = scripts.get(load_before)
+ if other_script:
+ other_script.load_after.append(script_canonical_name)
+
+ # if this requires an extension
+ other_extension_scripts = loaded_extensions_scripts.get(load_before)
+ if other_extension_scripts:
+ for other_script in other_extension_scripts:
+ other_script.load_after.append(script_canonical_name)
+
+ # if After mentions an extension, remove it and instead add all of its scripts
+ for load_after in list(script.load_after):
+ if load_after not in scripts and load_after in loaded_extensions_scripts:
+ script.load_after.remove(load_after)
+
+ for other_script in loaded_extensions_scripts.get(load_after, []):
+ script.load_after.append(other_script.script_canonical_name)
+
+ dependencies = {}
+
+ for script_canonical_name, script in scripts.items():
+ for required_script in script.requires:
+ if required_script not in scripts and required_script not in loaded_extensions:
+ errors.report(f'Script "{script_canonical_name}" requires "{required_script}" to be loaded, but it is not.', exc_info=False)
+
+ dependencies[script_canonical_name] = script.load_after
+
+ ordered_scripts = topological_sort(dependencies)
+ scripts_list = [scripts[script_canonical_name].file for script_canonical_name in ordered_scripts]
return scripts_list
@@ -365,15 +458,9 @@ def load_scripts():
elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing):
postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))
- def orderby(basedir):
- # 1st webui, 2nd extensions-builtin, 3rd extensions
- priority = {os.path.join(paths.script_path, "extensions-builtin"):1, paths.script_path:0}
- for key in priority:
- if basedir.startswith(key):
- return priority[key]
- return 9999
-
- for scriptfile in sorted(scripts_list, key=lambda x: [orderby(x.basedir), x]):
+ # here the scripts_list is already ordered
+ # processing_script is not considered though
+ for scriptfile in scripts_list:
try:
if scriptfile.basedir != paths.script_path:
sys.path = [scriptfile.basedir] + sys.path
@@ -473,17 +560,25 @@ class ScriptRunner:
on_after.clear()
def create_script_ui(self, script):
- import modules.api.models as api_models
script.args_from = len(self.inputs)
script.args_to = len(self.inputs)
+ try:
+ self.create_script_ui_inner(script)
+ except Exception:
+ errors.report(f"Error creating UI for {script.name}: ", exc_info=True)
+
+ def create_script_ui_inner(self, script):
+ import modules.api.models as api_models
+
controls = wrap_call(script.ui, script.filename, "ui", script.is_img2img)
if controls is None:
return
script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower()
+
api_args = []
for control in controls:
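topological_sort above orders script names so that everything in a script's load_after list comes first; names that are not in the mapping are simply skipped. A toy run with the function as defined above:

    # maps a script to the scripts it must load after
    dependencies = {
        "a.py": ["b.py"],          # a loads after b
        "b.py": [],
        "c.py": ["a.py", "b.py"],  # c loads after both
    }
    print(topological_sort(dependencies))  # ['b.py', 'a.py', 'c.py']
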
diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py
index bac1335d..901cad08 100644
--- a/modules/scripts_postprocessing.py
+++ b/modules/scripts_postprocessing.py
@@ -1,13 +1,56 @@
+import dataclasses
import os
import gradio as gr
from modules import errors, shared
+@dataclasses.dataclass
+class PostprocessedImageSharedInfo:
+ target_width: int = None
+ target_height: int = None
+
+
class PostprocessedImage:
def __init__(self, image):
self.image = image
self.info = {}
+ self.shared = PostprocessedImageSharedInfo()
+ self.extra_images = []
+ self.nametags = []
+ self.disable_processing = False
+ self.caption = None
+
+ def get_suffix(self, used_suffixes=None):
+ used_suffixes = {} if used_suffixes is None else used_suffixes
+ suffix = "-".join(self.nametags)
+ if suffix:
+ suffix = "-" + suffix
+
+ if suffix not in used_suffixes:
+ used_suffixes[suffix] = 1
+ return suffix
+
+ for i in range(1, 100):
+ proposed_suffix = suffix + "-" + str(i)
+
+ if proposed_suffix not in used_suffixes:
+ used_suffixes[proposed_suffix] = 1
+ return proposed_suffix
+
+ return suffix
+
+ def create_copy(self, new_image, *, nametags=None, disable_processing=False):
+ pp = PostprocessedImage(new_image)
+ pp.shared = self.shared
+ pp.nametags = self.nametags.copy()
+ pp.info = self.info.copy()
+ pp.disable_processing = disable_processing
+
+ if nametags is not None:
+ pp.nametags += nametags
+
+ return pp
class ScriptPostprocessing:
@@ -42,10 +85,17 @@ class ScriptPostprocessing:
pass
- def image_changed(self):
- pass
+ def process_firstpass(self, pp: PostprocessedImage, **args):
+ """
+ Called for all scripts before calling process(). Scripts can examine the image here and set fields
+ of the pp object to communicate things to other scripts.
+ args contains a dictionary with all values returned by components from ui()
+ """
+ pass
+ def image_changed(self):
+ pass
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
@@ -118,16 +168,42 @@ class ScriptPostprocessingRunner:
return inputs
def run(self, pp: PostprocessedImage, args):
- for script in self.scripts_in_preferred_order():
- shared.state.job = script.name
+ scripts = []
+ for script in self.scripts_in_preferred_order():
script_args = args[script.args_from:script.args_to]
process_args = {}
for (name, _component), value in zip(script.controls.items(), script_args):
process_args[name] = value
- script.process(pp, **process_args)
+ scripts.append((script, process_args))
+
+ for script, process_args in scripts:
+ script.process_firstpass(pp, **process_args)
+
+ all_images = [pp]
+
+ for script, process_args in scripts:
+ if shared.state.skipped:
+ break
+
+ shared.state.job = script.name
+
+ for single_image in all_images.copy():
+
+ if not single_image.disable_processing:
+ script.process(single_image, **process_args)
+
+ for extra_image in single_image.extra_images:
+ if not isinstance(extra_image, PostprocessedImage):
+ extra_image = single_image.create_copy(extra_image)
+
+ all_images.append(extra_image)
+
+ single_image.extra_images.clear()
+
+ pp.extra_images = all_images[1:]
def create_args_for_run(self, scripts_args):
if not self.ui_created:
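A hypothetical script showing the new hooks together: process_firstpass runs for all scripts before any process() call and can publish data via pp.shared, while create_copy emits a tagged extra image that the runner saves with a name suffix (the class name and tag here are illustrative, not from the codebase):

    from PIL import ImageOps
    from modules import scripts_postprocessing

    class ScriptPostprocessingMirror(scripts_postprocessing.ScriptPostprocessing):
        name = "Mirror"

        def process_firstpass(self, pp, **args):
            # visible to every other script before processing starts
            pp.shared.target_width = pp.image.width
            pp.shared.target_height = pp.image.height

        def process(self, pp, **args):
            mirrored = pp.create_copy(ImageOps.mirror(pp.image),
                                      nametags=["mirrored"], disable_processing=True)
            # picked up by run() and saved with a "-mirrored" suffix
            pp.extra_images.append(mirrored)
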
diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index 8863107a..273a7edd 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -215,7 +215,7 @@ class LoadStateDictOnMeta(ReplaceHelper):
would be on the meta device.
"""
- if state_dict == sd:
+ if state_dict is sd:
state_dict = {k: v.to(device="meta", dtype=v.dtype) for k, v in state_dict.items()}
original(module, state_dict, strict=strict)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 0157e19f..e139d996 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -38,8 +38,12 @@ ldm.models.diffusion.ddpm.print = shared.ldm_print
optimizers = []
current_optimizer: sd_hijack_optimizations.SdOptimization = None
-ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward)
-sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sd_unet.UNetModel_forward)
+ldm_patched_forward = sd_unet.create_unet_forward(ldm.modules.diffusionmodules.openaimodel.UNetModel.forward)
+ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", ldm_patched_forward)
+
+sgm_patched_forward = sd_unet.create_unet_forward(sgm.modules.diffusionmodules.openaimodel.UNetModel.forward)
+sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sgm_patched_forward)
+
def list_optimizers():
new_optimizers = script_callbacks.list_optimizers_callback()
@@ -303,8 +307,6 @@ class StableDiffusionModelHijack:
self.layers = None
self.clip = None
- sd_unet.original_forward = None
-
def apply_circular(self, enable):
if self.circular_enabled == enable:
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 841402e8..9355f1e1 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -230,15 +230,19 @@ def select_checkpoint():
return checkpoint_info
-checkpoint_dict_replacements = {
+checkpoint_dict_replacements_sd1 = {
'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}
+checkpoint_dict_replacements_sd2_turbo = { # Converts SD 2.1 Turbo from SGM to LDM format.
+ 'conditioner.embedders.0.': 'cond_stage_model.',
+}
+
-def transform_checkpoint_dict_key(k):
- for text, replacement in checkpoint_dict_replacements.items():
+def transform_checkpoint_dict_key(k, replacements):
+ for text, replacement in replacements.items():
if k.startswith(text):
k = replacement + k[len(text):]
@@ -249,9 +253,14 @@ def get_state_dict_from_checkpoint(pl_sd):
pl_sd = pl_sd.pop("state_dict", pl_sd)
pl_sd.pop("state_dict", None)
+ is_sd2_turbo = 'conditioner.embedders.0.model.ln_final.weight' in pl_sd and pl_sd['conditioner.embedders.0.model.ln_final.weight'].size()[0] == 1024
+
sd = {}
for k, v in pl_sd.items():
- new_key = transform_checkpoint_dict_key(k)
+ if is_sd2_turbo:
+ new_key = transform_checkpoint_dict_key(k, checkpoint_dict_replacements_sd2_turbo)
+ else:
+ new_key = transform_checkpoint_dict_key(k, checkpoint_dict_replacements_sd1)
if new_key is not None:
sd[new_key] = v
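The key transform is a plain prefix rewrite; for an SD 2.1 Turbo checkpoint the SGM-style text-encoder keys are mapped back to LDM naming. A standalone sketch of the rewrite:

    def transform_checkpoint_dict_key(k, replacements):
        for text, replacement in replacements.items():
            if k.startswith(text):
                k = replacement + k[len(text):]
        return k

    replacements = {'conditioner.embedders.0.': 'cond_stage_model.'}
    print(transform_checkpoint_dict_key('conditioner.embedders.0.model.ln_final.weight', replacements))
    # cond_stage_model.model.ln_final.weight
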
diff --git a/modules/sd_samplers_extra.py b/modules/sd_samplers_extra.py
index 1b981ca8..72fd0aa5 100644
--- a/modules/sd_samplers_extra.py
+++ b/modules/sd_samplers_extra.py
@@ -60,7 +60,7 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1]
while restart_times > 0:
restart_times -= 1
- step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])])
+ step_list.extend(zip(sigma_restart[:-1], sigma_restart[1:]))
last_sigma = None
for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable):
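
The comprehension removed above only repackaged what zip already yields; pairing consecutive sigmas needs nothing more than:

sigmas = [10.0, 5.0, 2.0, 1.0]
# zip over the list and its one-step shift yields (old, new) pairs directly.
pairs = list(zip(sigmas[:-1], sigmas[1:]))
assert pairs == [(10.0, 5.0), (5.0, 2.0), (2.0, 1.0)]
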
diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py
index a72daafd..930a64af 100644
--- a/modules/sd_samplers_timesteps_impl.py
+++ b/modules/sd_samplers_timesteps_impl.py
@@ -11,7 +11,7 @@ from modules.models.diffusion.uni_pc import uni_pc
def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0):
alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
alphas = alphas_cumprod[timesteps]
- alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' else torch.float32)
+ alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy()))
@@ -43,7 +43,7 @@ def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=
def plms(model, x, timesteps, extra_args=None, callback=None, disable=None):
alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
alphas = alphas_cumprod[timesteps]
- alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' else torch.float32)
+ alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
extra_args = {} if extra_args is None else extra_args
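
Both hunks make the same change: alphas_prev stays float64 except on devices where fp64 is unsupported or severely limited, which now includes xpu (Intel GPU) alongside mps. The selection reduces to a small helper; a sketch (alphas_dtype is an illustrative name, not one from the codebase):

import torch

def alphas_dtype(device: torch.device) -> torch.dtype:
    # mps has no float64 support, and fp64 is unavailable or slow on
    # many xpu parts, so fall back to float32 on those devices.
    return torch.float32 if device.type in ('mps', 'xpu') else torch.float64

assert alphas_dtype(torch.device('cpu')) == torch.float64
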
diff --git a/modules/sd_unet.py b/modules/sd_unet.py
index 6a7bc9e2..a771849c 100644
--- a/modules/sd_unet.py
+++ b/modules/sd_unet.py
@@ -5,8 +5,7 @@ from modules import script_callbacks, shared, devices
unet_options = []
current_unet_option = None
current_unet = None
-original_forward = None
-
+original_forward = None # not used, only left temporarily for compatibility
def list_unets():
new_unets = script_callbacks.list_unets_callback()
@@ -84,9 +83,12 @@ class SdUnet(torch.nn.Module):
pass
-def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs):
- if current_unet is not None:
- return current_unet.forward(x, timesteps, context, *args, **kwargs)
+def create_unet_forward(original_forward):
+ def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs):
+ if current_unet is not None:
+ return current_unet.forward(x, timesteps, context, *args, **kwargs)
+
+ return original_forward(self, x, timesteps, context, *args, **kwargs)
- return original_forward(self, x, timesteps, context, *args, **kwargs)
+ return UNetModel_forward
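
The factory above exists so each wrapper can fall back to the specific forward it was built from, instead of a single shared global; dispatch to a loaded custom unet is unchanged. A reduced model of the control flow (current_unet stands in for the module-level state):

current_unet = None  # module-level state, as in sd_unet

def create_forward(original_forward):
    def forward(self, x):
        if current_unet is not None:
            return current_unet.forward(x)   # custom unet takes over
        return original_forward(self, x)     # otherwise, captured original
    return forward

class UNet:
    def forward(self, x):
        return f"builtin({x})"

UNet.forward = create_forward(UNet.forward)
assert UNet().forward("x") == "builtin(x)"  # no custom unet loaded
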
diff --git a/modules/shared_items.py b/modules/shared_items.py
index 5024b426..991971ad 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -66,6 +66,22 @@ def reload_hypernetworks():
shared.hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
+def get_infotext_names():
+ from modules import generation_parameters_copypaste, shared
+ res = {}
+
+ for info in shared.opts.data_labels.values():
+ if info.infotext:
+ res[info.infotext] = 1
+
+ for tab_data in generation_parameters_copypaste.paste_fields.values():
+ for _, name in tab_data.get("fields") or []:
+ if isinstance(name, str):
+ res[name] = 1
+
+ return list(res)
+
+
ui_reorder_categories_builtin_items = [
"prompt",
"image",
diff --git a/modules/shared_options.py b/modules/shared_options.py
index d40db530..d2e86ff1 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -3,7 +3,7 @@ import gradio as gr
from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes
from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
from modules.shared_cmd_options import cmd_opts
-from modules.options import options_section, OptionInfo, OptionHTML
+from modules.options import options_section, OptionInfo, OptionHTML, categories
options_templates = {}
hide_dirs = shared.hide_dirs
@@ -21,7 +21,14 @@ restricted_opts = {
"outdir_init_images"
}
-options_templates.update(options_section(('saving-images', "Saving images/grids"), {
+categories.register_category("saving", "Saving images")
+categories.register_category("sd", "Stable Diffusion")
+categories.register_category("ui", "User Interface")
+categories.register_category("system", "System")
+categories.register_category("postprocessing", "Postprocessing")
+categories.register_category("training", "Training")
+
+options_templates.update(options_section(('saving-images', "Saving images/grids", "saving"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
@@ -39,8 +46,6 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}),
"grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}),
- "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
- "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
@@ -67,7 +72,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"notification_volume": OptionInfo(100, "Notification sound volume", gr.Slider, {"minimum": 0, "maximum": 100, "step": 1}).info("in %"),
}))
-options_templates.update(options_section(('saving-paths', "Paths for saving"), {
+options_templates.update(options_section(('saving-paths', "Paths for saving", "saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
@@ -79,7 +84,7 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), {
"outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs),
}))
-options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
+options_templates.update(options_section(('saving-to-dirs', "Saving to a directory", "saving"), {
"save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
@@ -87,21 +92,21 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))
-options_templates.update(options_section(('upscaling', "Upscaling"), {
+options_templates.update(options_section(('upscaling', "Upscaling", "postprocessing"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}),
}))
-options_templates.update(options_section(('face-restoration', "Face restoration"), {
+options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), {
"face_restoration": OptionInfo(False, "Restore faces", infotext='Face restoration').info("will use a third-party model on generation result to reconstruct faces"),
"face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
-options_templates.update(options_section(('system', "System"), {
+options_templates.update(options_section(('system', "System", "system"), {
"auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}),
"enable_console_prompts": OptionInfo(shared.cmd_opts.enable_console_prompts, "Print prompts to console when generating with txt2img and img2img."),
"show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(),
@@ -116,13 +121,13 @@ options_templates.update(options_section(('system', "System"), {
"dump_stacks_on_signal": OptionInfo(False, "Print stack traces before exiting the program with ctrl+c."),
}))
-options_templates.update(options_section(('API', "API"), {
+options_templates.update(options_section(('API', "API", "system"), {
"api_enable_requests": OptionInfo(True, "Allow http:// and https:// URLs for input images in API", restrict_api=True),
"api_forbid_local_requests": OptionInfo(True, "Forbid URLs to local resources", restrict_api=True),
"api_useragent": OptionInfo("", "User agent for requests", restrict_api=True),
}))
-options_templates.update(options_section(('training', "Training"), {
+options_templates.update(options_section(('training', "Training", "training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
@@ -137,7 +142,7 @@ options_templates.update(options_section(('training', "Training"), {
"training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
}))
-options_templates.update(options_section(('sd', "Stable Diffusion"), {
+options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles(shared.opts.sd_checkpoint_dropdown_use_short)}, refresh=shared_items.refresh_checkpoints, infotext='Model hash'),
"sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
"sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
@@ -154,14 +159,14 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"hires_fix_refiner_pass": OptionInfo("second pass", "Hires fix: which pass to enable refiner for", gr.Radio, {"choices": ["first pass", "second pass", "both passes"]}, infotext="Hires refiner"),
}))
-options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
+options_templates.update(options_section(('sdxl', "Stable Diffusion XL", "sd"), {
"sdxl_crop_top": OptionInfo(0, "crop top coordinate"),
"sdxl_crop_left": OptionInfo(0, "crop left coordinate"),
"sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"),
"sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"),
}))
-options_templates.update(options_section(('vae', "VAE"), {
+options_templates.update(options_section(('vae', "VAE", "sd"), {
"sd_vae_explanation": OptionHTML("""
<abbr title='Variational autoencoder'>VAE</abbr> is a neural network that transforms a standard <abbr title='red/green/blue'>RGB</abbr>
image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling
@@ -176,7 +181,7 @@ For img2img, VAE is used to process user's input image before the sampling, and
"sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Decoder').info("method to decode latent to image"),
}))
-options_templates.update(options_section(('img2img', "img2img"), {
+options_templates.update(options_section(('img2img', "img2img", "sd"), {
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Conditional mask weight'),
"initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.0, "maximum": 1.5, "step": 0.001}, infotext='Noise multiplier'),
"img2img_extra_noise": OptionInfo(0.0, "Extra noise multiplier for img2img and hires fix", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Extra noise').info("0 = disabled (default); should be lower than denoising strength"),
@@ -189,9 +194,10 @@ options_templates.update(options_section(('img2img', "img2img"), {
"img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(),
"return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
"return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
+ "img2img_batch_show_results_limit": OptionInfo(32, "Show the first N batch img2img results in UI", gr.Slider, {"minimum": -1, "maximum": 1000, "step": 1}).info('0: disable, -1: show all images. Too many images can cause lag'),
}))
-options_templates.update(options_section(('optimizations', "Optimizations"), {
+options_templates.update(options_section(('optimizations', "Optimizations", "sd"), {
"cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
"s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
@@ -202,7 +208,7 @@ options_templates.update(options_section(('optimizations', "Optimizations"), {
"batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
}))
-options_templates.update(options_section(('compatibility', "Compatibility"), {
+options_templates.update(options_section(('compatibility', "Compatibility", "sd"), {
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
"no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
@@ -227,15 +233,16 @@ options_templates.update(options_section(('interrogate', "Interrogate"), {
"deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"),
}))
-options_templates.update(options_section(('extra_networks', "Extra Networks"), {
+options_templates.update(options_section(('extra_networks', "Extra Networks", "sd"), {
"extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
+ "extra_networks_dir_button_function": OptionInfo(False, "Add a '/' to the beginning of directory buttons").info("Buttons will display the contents of the selected directory without acting as a search filter."),
"extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
"extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
"extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"),
"extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
"extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
"extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
- "extra_networks_card_order_field": OptionInfo("Name", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Name', 'Date Created', 'Date Modified']}).needs_reload_ui(),
+ "extra_networks_card_order_field": OptionInfo("Path", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(),
"extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
"ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(),
@@ -244,45 +251,66 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks),
}))
-options_templates.update(options_section(('ui', "User interface"), {
- "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
- "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the <a href='https://huggingface.co/spaces/gradio/theme-gallery'>gallery</a>.").needs_reload_ui(),
- "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"),
- "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("an be any valid CSS value").needs_reload_ui(),
- "return_grid": OptionInfo(True, "Show grid in results for web"),
- "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
- "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
- "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
- "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
- "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
- "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"),
- "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"),
- "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
- "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(),
- "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(),
- "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
- "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
- "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Ctrl+up/down word delimiters"),
+options_templates.update(options_section(('ui_prompt_editing', "Prompt editing", "ui"), {
+ "keyedit_precision_attention": OptionInfo(0.1, "Precision for (attention:1.1) when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "keyedit_precision_extra": OptionInfo(0.05, "Precision for <extra networks:0.9> when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Word delimiters when editing the prompt with Ctrl+up/down"),
"keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}),
"keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"),
- "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(),
- "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
- "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
- "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(),
+ "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
+}))
+
+options_templates.update(options_section(('ui_gallery', "Gallery", "ui"), {
+ "return_grid": OptionInfo(True, "Show grid in gallery"),
+ "do_not_show_images": OptionInfo(False, "Do not show any images in gallery"),
+ "js_modal_lightbox": OptionInfo(True, "Full page image viewer: enable"),
+ "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Full page image viewer: show images zoomed in by default"),
+ "js_modal_lightbox_gamepad": OptionInfo(False, "Full page image viewer: navigate with gamepad"),
+ "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Full page image viewer: gamepad repeat period").info("in milliseconds"),
+ "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("can be any valid CSS value, for example 768px or 20em").needs_reload_ui(),
+}))
+
+options_templates.update(options_section(('ui_alternatives', "UI alternatives", "ui"), {
+ "compact_prompt_box": OptionInfo(False, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(),
+ "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(),
+ "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(),
"sd_checkpoint_dropdown_use_short": OptionInfo(False, "Checkpoint dropdown: use filenames without paths").info("models in subdirectories like photo/sd15.ckpt will be listed as just sd15.ckpt"),
"hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(),
"hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(),
- "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
- "compact_prompt_box": OptionInfo(False, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(),
+ "txt2img_settings_accordion": OptionInfo(False, "Settings in txt2img hidden under Accordion").needs_reload_ui(),
+ "img2img_settings_accordion": OptionInfo(False, "Settings in img2img hidden under Accordion").needs_reload_ui(),
}))
+options_templates.update(options_section(('ui', "User interface", "ui"), {
+ "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
+ "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(),
+ "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
+ "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
+ "ui_reorder_list": OptionInfo([], "UI item order for txt2img/img2img tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(),
+ "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the <a href='https://huggingface.co/spaces/gradio/theme-gallery'>gallery</a>.").needs_reload_ui(),
+ "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"),
+ "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
+ "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
+ "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
+}))
-options_templates.update(options_section(('infotext', "Infotext"), {
- "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
- "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
- "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"),
- "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"),
+
+options_templates.update(options_section(('infotext', "Infotext", "ui"), {
+ "infotext_explanation": OptionHTML("""
+Infotext is what this software calls the text that contains generation parameters and can be used to generate the same picture again.
+It is displayed in the UI below the image. To use infotext, paste it into the prompt and click the ↙️ paste button.
+"""),
+ "enable_pnginfo": OptionInfo(True, "Write infotext to metadata of the generated image"),
+ "save_txt": OptionInfo(False, "Create a text file with infotext next to every generated image"),
+
+ "add_model_name_to_info": OptionInfo(True, "Add model name to infotext"),
+ "add_model_hash_to_info": OptionInfo(True, "Add model hash to infotext"),
+ "add_vae_name_to_info": OptionInfo(True, "Add VAE name to infotext"),
+ "add_vae_hash_to_info": OptionInfo(True, "Add VAE hash to infotext"),
+ "add_user_name_to_info": OptionInfo(False, "Add user name to infotext when authenticated"),
+ "add_version_to_infotext": OptionInfo(True, "Add program version to infotext"),
"disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
+ "infotext_skip_pasting": OptionInfo([], "Disregard fields from pasted infotext", ui_components.DropdownMulti, lambda: {"choices": shared_items.get_infotext_names()}),
"infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""<ul style='margin-left: 1.5em'>
<li>Ignore: keep prompt and styles dropdown as it is.</li>
<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
@@ -292,7 +320,7 @@ options_templates.update(options_section(('infotext', "Infotext"), {
}))
-options_templates.update(options_section(('ui', "Live previews"), {
+options_templates.update(options_section(('ui', "Live previews", "ui"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
"live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
@@ -303,9 +331,10 @@ options_templates.update(options_section(('ui', "Live previews"), {
"live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
"live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
"live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"),
+ "js_live_preview_in_modal_lightbox": OptionInfo(False, "Show Live preview in full page image viewer"),
}))
-options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
+options_templates.update(options_section(('sampler-params', "Sampler parameters", "sd"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(),
"eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unpredictable results"),
"eta_ancestral": OptionInfo(1.0, "Eta for k-diffusion samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; currently only applies to ancestral samplers (i.e. Euler a) and SDE samplers"),
@@ -327,10 +356,11 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'),
}))
-options_templates.update(options_section(('postprocessing', "Postprocessing"), {
+options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), {
'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+ 'postprocessing_existing_caption_action': OptionInfo("Ignore", "Action for existing captions", gr.Radio, {"choices": ["Ignore", "Keep", "Prepend", "Append"]}).info("when generating captions using postprocessing; Ignore = use generated; Keep = use original; Prepend/Append = combine both"),
}))
options_templates.update(options_section((None, "Hidden options"), {
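
Throughout this file, options_section now takes a (section id, label, category id) triple instead of a pair, with category ids registered up front via categories.register_category. The real plumbing lives in modules/options.py; a toy version under that reading of the diff (all names below are illustrative, not the actual implementation):

registered_categories = {}

def register_category(category_id, label):
    registered_categories[category_id] = label

def options_section(section_identifier, options_dict):
    # section_identifier: (section_id, label) or (section_id, label, category_id)
    for option in options_dict.values():
        option["section"] = section_identifier
    return options_dict

register_category("saving", "Saving images")
opts = options_section(("saving-images", "Saving images/grids", "saving"),
                       {"samples_save": {"default": True}})
assert opts["samples_save"]["section"][2] == "saving"
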
diff --git a/modules/styles.py b/modules/styles.py
index 0740fe1b..81d9800d 100644
--- a/modules/styles.py
+++ b/modules/styles.py
@@ -1,7 +1,7 @@
import csv
+import fnmatch
import os
import os.path
-import re
import typing
import shutil
@@ -10,6 +10,7 @@ class PromptStyle(typing.NamedTuple):
name: str
prompt: str
negative_prompt: str
+ path: str = None
def merge_prompts(style_prompt: str, prompt: str) -> str:
@@ -29,38 +30,61 @@ def apply_styles_to_prompt(prompt, styles):
return prompt
-re_spaces = re.compile(" +")
+def unwrap_style_text_from_prompt(style_text, prompt):
+ """
+ Checks the prompt to see if the style text is wrapped around it. If so,
+ returns True plus the prompt text without the style text. Otherwise, returns
+ False with the original prompt.
-
-def extract_style_text_from_prompt(style_text, prompt):
- stripped_prompt = re.sub(re_spaces, " ", prompt.strip())
- stripped_style_text = re.sub(re_spaces, " ", style_text.strip())
+ Note that the "cleaned" version of the style text is only used for matching
+ purposes here. It isn't returned; the original style text is not modified.
+ """
+ stripped_prompt = prompt
+ stripped_style_text = style_text
if "{prompt}" in stripped_style_text:
- left, right = stripped_style_text.split("{prompt}", 2)
+ # Work out whether the prompt is wrapped in the style text. If so, we
+ # return True and the "inner" prompt text that isn't part of the style.
+ try:
+ left, right = stripped_style_text.split("{prompt}", 2)
+ except ValueError as e:
+ # If the style text has multple "{prompt}"s, we can't split it into
+ # two parts. This is an error, but we can't do anything about it.
+ print(f"Unable to compare style text to prompt:\n{style_text}")
+ print(f"Error: {e}")
+ return False, prompt
if stripped_prompt.startswith(left) and stripped_prompt.endswith(right):
- prompt = stripped_prompt[len(left):len(stripped_prompt)-len(right)]
+ prompt = stripped_prompt[len(left) : len(stripped_prompt) - len(right)]
return True, prompt
else:
+ # Work out whether the given prompt ends with the style text. If so, we
+ # return True and the prompt text up to where the style text starts.
if stripped_prompt.endswith(stripped_style_text):
- prompt = stripped_prompt[:len(stripped_prompt)-len(stripped_style_text)]
-
- if prompt.endswith(', '):
+ prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)]
+ if prompt.endswith(", "):
prompt = prompt[:-2]
-
return True, prompt
return False, prompt
-def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt):
+def extract_original_prompts(style: PromptStyle, prompt, negative_prompt):
+ """
+ Takes a style and compares it to the prompt and negative prompt. If the style
+ matches, returns True plus the prompt and negative prompt with the style text
+ removed. Otherwise, returns False with the original prompt and negative prompt.
+ """
if not style.prompt and not style.negative_prompt:
return False, prompt, negative_prompt
- match_positive, extracted_positive = extract_style_text_from_prompt(style.prompt, prompt)
+ match_positive, extracted_positive = unwrap_style_text_from_prompt(
+ style.prompt, prompt
+ )
if not match_positive:
return False, prompt, negative_prompt
- match_negative, extracted_negative = extract_style_text_from_prompt(style.negative_prompt, negative_prompt)
+ match_negative, extracted_negative = unwrap_style_text_from_prompt(
+ style.negative_prompt, negative_prompt
+ )
if not match_negative:
return False, prompt, negative_prompt
@@ -69,25 +93,84 @@ def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt):
class StyleDatabase:
def __init__(self, path: str):
- self.no_style = PromptStyle("None", "", "")
+ self.no_style = PromptStyle("None", "", "", None)
self.styles = {}
self.path = path
+ folder, file = os.path.split(self.path)
+ filename, _, ext = file.partition('*')
+ self.default_path = os.path.join(folder, filename + ext)
+
+ self.prompt_fields = [field for field in PromptStyle._fields if field != "path"]
+
self.reload()
def reload(self):
+ """
+ Clears the style database and reloads the styles from the CSV file(s)
+ matching the path used to initialize the database.
+ """
self.styles.clear()
- if not os.path.exists(self.path):
+ path, filename = os.path.split(self.path)
+
+ if "*" in filename:
+ fileglob = filename.split("*")[0] + "*.csv"
+ filelist = []
+ for file in os.listdir(path):
+ if fnmatch.fnmatch(file, fileglob):
+ filelist.append(file)
+ # Add a visible divider to the style list
+ half_len = round(len(file) / 2)
+ divider = f"{'-' * (20 - half_len)} {file.upper()}"
+ divider = f"{divider} {'-' * (40 - len(divider))}"
+ self.styles[divider] = PromptStyle(
+ f"{divider}", None, None, "do_not_save"
+ )
+ # Add styles from this CSV file
+ self.load_from_csv(os.path.join(path, file))
+ if len(filelist) == 0:
+ print(f"No styles found in {path} matching {fileglob}")
+ return
+ elif not os.path.exists(self.path):
+ print(f"Style database not found: {self.path}")
return
+ else:
+ self.load_from_csv(self.path)
- with open(self.path, "r", encoding="utf-8-sig", newline='') as file:
+ def load_from_csv(self, path: str):
+ with open(path, "r", encoding="utf-8-sig", newline="") as file:
reader = csv.DictReader(file, skipinitialspace=True)
for row in reader:
+ # Ignore empty rows or rows starting with a comment
+ if not row or row["name"].startswith("#"):
+ continue
# Support loading old CSV format with "name, text"-columns
prompt = row["prompt"] if "prompt" in row else row["text"]
negative_prompt = row.get("negative_prompt", "")
- self.styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt)
+ # Add style to database
+ self.styles[row["name"]] = PromptStyle(
+ row["name"], prompt, negative_prompt, path
+ )
+
+ def get_style_paths(self) -> set:
+ """Returns a set of all distinct paths of files that styles are loaded from."""
+ # Update any styles without a path to the default path
+ for style in list(self.styles.values()):
+ if not style.path:
+ self.styles[style.name] = style._replace(path=self.default_path)
+
+ # Collect the set of all distinct paths, including the default path
+ style_paths = set()
+ style_paths.add(self.default_path)
+ for _, style in self.styles.items():
+ if style.path:
+ style_paths.add(style.path)
+
+ # Remove any paths for styles that are just list dividers
+ style_paths.discard("do_not_save")
+
+ return style_paths
def get_style_prompts(self, styles):
return [self.styles.get(x, self.no_style).prompt for x in styles]
@@ -96,20 +179,40 @@ class StyleDatabase:
return [self.styles.get(x, self.no_style).negative_prompt for x in styles]
def apply_styles_to_prompt(self, prompt, styles):
- return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).prompt for x in styles])
+ return apply_styles_to_prompt(
+ prompt, [self.styles.get(x, self.no_style).prompt for x in styles]
+ )
def apply_negative_styles_to_prompt(self, prompt, styles):
- return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles])
-
- def save_styles(self, path: str) -> None:
- # Always keep a backup file around
- if os.path.exists(path):
- shutil.copy(path, f"{path}.bak")
-
- with open(path, "w", encoding="utf-8-sig", newline='') as file:
- writer = csv.DictWriter(file, fieldnames=PromptStyle._fields)
- writer.writeheader()
- writer.writerows(style._asdict() for k, style in self.styles.items())
+ return apply_styles_to_prompt(
+ prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]
+ )
+
+ def save_styles(self, path: str = None) -> None:
+ # The path argument is deprecated, but kept for backwards compatibility
+ _ = path
+
+ style_paths = self.get_style_paths()
+
+ csv_names = [os.path.split(path)[1].lower() for path in style_paths]
+
+ for style_path in style_paths:
+ # Always keep a backup file around
+ if os.path.exists(style_path):
+ shutil.copy(style_path, f"{style_path}.bak")
+
+ # Write the styles to the CSV file
+ with open(style_path, "w", encoding="utf-8-sig", newline="") as file:
+ writer = csv.DictWriter(file, fieldnames=self.prompt_fields)
+ writer.writeheader()
+ for style in (s for s in self.styles.values() if s.path == style_path):
+ # Skip style list dividers, e.g. "STYLES.CSV"
+ if style.name.lower().strip("# ") in csv_names:
+ continue
+ # Write style fields, ignoring the path field
+ writer.writerow(
+ {k: v for k, v in style._asdict().items() if k != "path"}
+ )
def extract_styles_from_prompt(self, prompt, negative_prompt):
extracted = []
@@ -120,7 +223,9 @@ class StyleDatabase:
found_style = None
for style in applicable_styles:
- is_match, new_prompt, new_neg_prompt = extract_style_from_prompts(style, prompt, negative_prompt)
+ is_match, new_prompt, new_neg_prompt = extract_original_prompts(
+ style, prompt, negative_prompt
+ )
if is_match:
found_style = style
prompt = new_prompt
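
The renamed unwrap_style_text_from_prompt treats a style containing "{prompt}" as a wrapper around the user's text: a match means the prompt starts with the left half and ends with the right half. A simplified sketch of that core case (the real function also handles the suffix-only form and malformed style texts):

def unwrap(style_text, prompt):
    left, right = style_text.split("{prompt}", 1)
    if prompt.startswith(left) and prompt.endswith(right):
        # Peel off both halves to recover the original inner prompt.
        return True, prompt[len(left):len(prompt) - len(right)]
    return False, prompt

ok, inner = unwrap("masterpiece, {prompt}, 8k", "masterpiece, a red fox, 8k")
assert ok and inner == "a red fox"
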
diff --git a/modules/sysinfo.py b/modules/sysinfo.py
index 2db7551d..b669edd0 100644
--- a/modules/sysinfo.py
+++ b/modules/sysinfo.py
@@ -1,7 +1,6 @@
import json
import os
import sys
-import traceback
import platform
import hashlib
@@ -84,7 +83,7 @@ def get_dict():
"Checksum": checksum_token,
"Commandline": get_argv(),
"Torch env info": get_torch_sysinfo(),
- "Exceptions": get_exceptions(),
+ "Exceptions": errors.get_exceptions(),
"CPU": {
"model": platform.processor(),
"count logical": psutil.cpu_count(logical=True),
@@ -104,21 +103,6 @@ def get_dict():
return res
-def format_traceback(tb):
- return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]
-
-
-def format_exception(e, tb):
- return {"exception": str(e), "traceback": format_traceback(tb)}
-
-
-def get_exceptions():
- try:
- return list(reversed(errors.exception_records))
- except Exception as e:
- return str(e)
-
-
def get_environment():
return {k: os.environ[k] for k in sorted(os.environ) if k in environment_whitelist}
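
The three helpers deleted here moved into modules/errors.py, per the call site change to errors.get_exceptions above. Reconstructed from the removed lines (the actual contents of modules/errors.py may differ slightly), the relocated logic amounts to:

import traceback

exception_records = []  # appended to elsewhere when errors are reported

def format_traceback(tb):
    return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]

def format_exception(e, tb):
    return {"exception": str(e), "traceback": format_traceback(tb)}

def get_exceptions():
    try:
        return list(reversed(exception_records))  # newest first
    except Exception as e:
        return str(e)
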
diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py
index 1675e39a..e223a2e0 100644
--- a/modules/textual_inversion/autocrop.py
+++ b/modules/textual_inversion/autocrop.py
@@ -3,6 +3,8 @@ import requests
import os
import numpy as np
from PIL import ImageDraw
+from modules import paths_internal
+from pkg_resources import parse_version
GREEN = "#0F0"
BLUE = "#00F"
@@ -25,7 +27,6 @@ def crop_image(im, settings):
elif is_portrait(settings.crop_width, settings.crop_height):
scale_by = settings.crop_height / im.height
-
im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
im_debug = im.copy()
@@ -69,6 +70,7 @@ def crop_image(im, settings):
return results
+
def focal_point(im, settings):
corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
@@ -78,118 +80,120 @@ def focal_point(im, settings):
weight_pref_total = 0
if corner_points:
- weight_pref_total += settings.corner_points_weight
+ weight_pref_total += settings.corner_points_weight
if entropy_points:
- weight_pref_total += settings.entropy_points_weight
+ weight_pref_total += settings.entropy_points_weight
if face_points:
- weight_pref_total += settings.face_points_weight
+ weight_pref_total += settings.face_points_weight
corner_centroid = None
if corner_points:
- corner_centroid = centroid(corner_points)
- corner_centroid.weight = settings.corner_points_weight / weight_pref_total
- pois.append(corner_centroid)
+ corner_centroid = centroid(corner_points)
+ corner_centroid.weight = settings.corner_points_weight / weight_pref_total
+ pois.append(corner_centroid)
entropy_centroid = None
if entropy_points:
- entropy_centroid = centroid(entropy_points)
- entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
- pois.append(entropy_centroid)
+ entropy_centroid = centroid(entropy_points)
+ entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
+ pois.append(entropy_centroid)
face_centroid = None
if face_points:
- face_centroid = centroid(face_points)
- face_centroid.weight = settings.face_points_weight / weight_pref_total
- pois.append(face_centroid)
+ face_centroid = centroid(face_points)
+ face_centroid.weight = settings.face_points_weight / weight_pref_total
+ pois.append(face_centroid)
average_point = poi_average(pois, settings)
if settings.annotate_image:
- d = ImageDraw.Draw(im)
- max_size = min(im.width, im.height) * 0.07
- if corner_centroid is not None:
- color = BLUE
- box = corner_centroid.bounding(max_size * corner_centroid.weight)
- d.text((box[0], box[1]-15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
- d.ellipse(box, outline=color)
- if len(corner_points) > 1:
- for f in corner_points:
- d.rectangle(f.bounding(4), outline=color)
- if entropy_centroid is not None:
- color = "#ff0"
- box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
- d.text((box[0], box[1]-15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
- d.ellipse(box, outline=color)
- if len(entropy_points) > 1:
- for f in entropy_points:
- d.rectangle(f.bounding(4), outline=color)
- if face_centroid is not None:
- color = RED
- box = face_centroid.bounding(max_size * face_centroid.weight)
- d.text((box[0], box[1]-15), f"Face: {face_centroid.weight:.02f}", fill=color)
- d.ellipse(box, outline=color)
- if len(face_points) > 1:
- for f in face_points:
- d.rectangle(f.bounding(4), outline=color)
-
- d.ellipse(average_point.bounding(max_size), outline=GREEN)
+ d = ImageDraw.Draw(im)
+ max_size = min(im.width, im.height) * 0.07
+ if corner_centroid is not None:
+ color = BLUE
+ box = corner_centroid.bounding(max_size * corner_centroid.weight)
+ d.text((box[0], box[1] - 15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
+ d.ellipse(box, outline=color)
+ if len(corner_points) > 1:
+ for f in corner_points:
+ d.rectangle(f.bounding(4), outline=color)
+ if entropy_centroid is not None:
+ color = "#ff0"
+ box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
+ d.text((box[0], box[1] - 15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
+ d.ellipse(box, outline=color)
+ if len(entropy_points) > 1:
+ for f in entropy_points:
+ d.rectangle(f.bounding(4), outline=color)
+ if face_centroid is not None:
+ color = RED
+ box = face_centroid.bounding(max_size * face_centroid.weight)
+ d.text((box[0], box[1] - 15), f"Face: {face_centroid.weight:.02f}", fill=color)
+ d.ellipse(box, outline=color)
+ if len(face_points) > 1:
+ for f in face_points:
+ d.rectangle(f.bounding(4), outline=color)
+
+ d.ellipse(average_point.bounding(max_size), outline=GREEN)
return average_point
def image_face_points(im, settings):
if settings.dnn_model_path is not None:
- detector = cv2.FaceDetectorYN.create(
- settings.dnn_model_path,
- "",
- (im.width, im.height),
- 0.9, # score threshold
- 0.3, # nms threshold
- 5000 # keep top k before nms
- )
- faces = detector.detect(np.array(im))
- results = []
- if faces[1] is not None:
- for face in faces[1]:
- x = face[0]
- y = face[1]
- w = face[2]
- h = face[3]
- results.append(
- PointOfInterest(
- int(x + (w * 0.5)), # face focus left/right is center
- int(y + (h * 0.33)), # face focus up/down is close to the top of the head
- size = w,
- weight = 1/len(faces[1])
- )
- )
- return results
+ detector = cv2.FaceDetectorYN.create(
+ settings.dnn_model_path,
+ "",
+ (im.width, im.height),
+ 0.9, # score threshold
+ 0.3, # nms threshold
+ 5000 # keep top k before nms
+ )
+ faces = detector.detect(np.array(im))
+ results = []
+ if faces[1] is not None:
+ for face in faces[1]:
+ x = face[0]
+ y = face[1]
+ w = face[2]
+ h = face[3]
+ results.append(
+ PointOfInterest(
+ int(x + (w * 0.5)), # face focus left/right is center
+ int(y + (h * 0.33)), # face focus up/down is close to the top of the head
+ size=w,
+ weight=1 / len(faces[1])
+ )
+ )
+ return results
else:
- np_im = np.array(im)
- gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
-
- tries = [
- [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
- ]
- for t in tries:
- classifier = cv2.CascadeClassifier(t[0])
- minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
- try:
- faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
- minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
- except Exception:
- continue
-
- if faces:
- rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
- return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
+ np_im = np.array(im)
+ gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
+
+ tries = [
+ [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
+ ]
+ for t in tries:
+ classifier = cv2.CascadeClassifier(t[0])
+ minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
+ try:
+ faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
+ minNeighbors=7, minSize=(minsize, minsize),
+ flags=cv2.CASCADE_SCALE_IMAGE)
+ except Exception:
+ continue
+
+ if faces:
+ rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
+ return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]),
+ weight=1 / len(rects)) for r in rects]
return []
@@ -198,7 +202,7 @@ def image_corner_points(im, settings):
# naive attempt at preventing focal points from collecting at watermarks near the bottom
gd = ImageDraw.Draw(grayscale)
- gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
+ gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999")
np_im = np.array(grayscale)
@@ -206,7 +210,7 @@ def image_corner_points(im, settings):
np_im,
maxCorners=100,
qualityLevel=0.04,
- minDistance=min(grayscale.width, grayscale.height)*0.06,
+ minDistance=min(grayscale.width, grayscale.height) * 0.06,
useHarrisDetector=False,
)
@@ -215,8 +219,8 @@ def image_corner_points(im, settings):
focal_points = []
for point in points:
- x, y = point.ravel()
- focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
+ x, y = point.ravel()
+ focal_points.append(PointOfInterest(x, y, size=4, weight=1 / len(points)))
return focal_points
@@ -225,13 +229,13 @@ def image_entropy_points(im, settings):
landscape = im.height < im.width
portrait = im.height > im.width
if landscape:
- move_idx = [0, 2]
- move_max = im.size[0]
+ move_idx = [0, 2]
+ move_max = im.size[0]
elif portrait:
- move_idx = [1, 3]
- move_max = im.size[1]
+ move_idx = [1, 3]
+ move_max = im.size[1]
else:
- return []
+ return []
e_max = 0
crop_current = [0, 0, settings.crop_width, settings.crop_height]
@@ -241,14 +245,14 @@ def image_entropy_points(im, settings):
e = image_entropy(crop)
if (e > e_max):
- e_max = e
- crop_best = list(crop_current)
+ e_max = e
+ crop_best = list(crop_current)
crop_current[move_idx[0]] += 4
crop_current[move_idx[1]] += 4
- x_mid = int(crop_best[0] + settings.crop_width/2)
- y_mid = int(crop_best[1] + settings.crop_height/2)
+ x_mid = int(crop_best[0] + settings.crop_width / 2)
+ y_mid = int(crop_best[1] + settings.crop_height / 2)
return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
@@ -294,22 +298,23 @@ def is_square(w, h):
return w == h
-def download_and_cache_models(dirname):
- download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
- model_file_name = 'face_detection_yunet.onnx'
+model_dir_opencv = os.path.join(paths_internal.models_path, 'opencv')
+if parse_version(cv2.__version__) >= parse_version('4.8'):
+ model_file_path = os.path.join(model_dir_opencv, 'face_detection_yunet_2023mar.onnx')
+ model_url = 'https://github.com/opencv/opencv_zoo/blob/b6e370b10f641879a87890d44e42173077154a05/models/face_detection_yunet/face_detection_yunet_2023mar.onnx?raw=true'
+else:
+ model_file_path = os.path.join(model_dir_opencv, 'face_detection_yunet.onnx')
+ model_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
- os.makedirs(dirname, exist_ok=True)
- cache_file = os.path.join(dirname, model_file_name)
- if not os.path.exists(cache_file):
- print(f"downloading face detection model from '{download_url}' to '{cache_file}'")
- response = requests.get(download_url)
- with open(cache_file, "wb") as f:
+def download_and_cache_models():
+ if not os.path.exists(model_file_path):
+ os.makedirs(model_dir_opencv, exist_ok=True)
+ print(f"downloading face detection model from '{model_url}' to '{model_file_path}'")
+ response = requests.get(model_url)
+ with open(model_file_path, "wb") as f:
f.write(response.content)
-
- if os.path.exists(cache_file):
- return cache_file
- return None
+ return model_file_path
class PointOfInterest:
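
download_and_cache_models now resolves the model path at import time, choosing the 2023 YuNet weights on OpenCV >= 4.8 and the 2022 weights otherwise. The gate itself is a one-liner; isolated here for clarity (pick_model_file is an illustrative name):

from pkg_resources import parse_version

def pick_model_file(cv2_version: str) -> str:
    # OpenCV 4.8 changed the YuNet model format, so older builds
    # must keep the 2022 weights.
    if parse_version(cv2_version) >= parse_version('4.8'):
        return 'face_detection_yunet_2023mar.onnx'
    return 'face_detection_yunet.onnx'

assert pick_model_file('4.8.0') == 'face_detection_yunet_2023mar.onnx'
assert pick_model_file('4.7.2') == 'face_detection_yunet.onnx'
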
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
deleted file mode 100644
index dbd856bd..00000000
--- a/modules/textual_inversion/preprocess.py
+++ /dev/null
@@ -1,232 +0,0 @@
-import os
-from PIL import Image, ImageOps
-import math
-import tqdm
-
-from modules import paths, shared, images, deepbooru
-from modules.textual_inversion import autocrop
-
-
-def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.15, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
- try:
- if process_caption:
- shared.interrogator.load()
-
- if process_caption_deepbooru:
- deepbooru.model.start()
-
- preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
-
- finally:
-
- if process_caption:
- shared.interrogator.send_blip_to_ram()
-
- if process_caption_deepbooru:
- deepbooru.model.stop()
-
-
-def listfiles(dirname):
- return os.listdir(dirname)
-
-
-class PreprocessParams:
- src = None
- dstdir = None
- subindex = 0
- flip = False
- process_caption = False
- process_caption_deepbooru = False
- preprocess_txt_action = None
-
-
-def save_pic_with_caption(image, index, params: PreprocessParams, existing_caption=None):
- caption = ""
-
- if params.process_caption:
- caption += shared.interrogator.generate_caption(image)
-
- if params.process_caption_deepbooru:
- if caption:
- caption += ", "
- caption += deepbooru.model.tag_multi(image)
-
- filename_part = params.src
- filename_part = os.path.splitext(filename_part)[0]
- filename_part = os.path.basename(filename_part)
-
- basename = f"{index:05}-{params.subindex}-{filename_part}"
- image.save(os.path.join(params.dstdir, f"{basename}.png"))
-
- if params.preprocess_txt_action == 'prepend' and existing_caption:
- caption = f"{existing_caption} {caption}"
- elif params.preprocess_txt_action == 'append' and existing_caption:
- caption = f"{caption} {existing_caption}"
- elif params.preprocess_txt_action == 'copy' and existing_caption:
- caption = existing_caption
-
- caption = caption.strip()
-
- if caption:
- with open(os.path.join(params.dstdir, f"{basename}.txt"), "w", encoding="utf8") as file:
- file.write(caption)
-
- params.subindex += 1
-
-
-def save_pic(image, index, params, existing_caption=None):
- save_pic_with_caption(image, index, params, existing_caption=existing_caption)
-
- if params.flip:
- save_pic_with_caption(ImageOps.mirror(image), index, params, existing_caption=existing_caption)
-
-
-def split_pic(image, inverse_xy, width, height, overlap_ratio):
- if inverse_xy:
- from_w, from_h = image.height, image.width
- to_w, to_h = height, width
- else:
- from_w, from_h = image.width, image.height
- to_w, to_h = width, height
- h = from_h * to_w // from_w
- if inverse_xy:
- image = image.resize((h, to_w))
- else:
- image = image.resize((to_w, h))
-
- split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
- y_step = (h - to_h) / (split_count - 1)
- for i in range(split_count):
- y = int(y_step * i)
- if inverse_xy:
- splitted = image.crop((y, 0, y + to_h, to_w))
- else:
- splitted = image.crop((0, y, to_w, y + to_h))
- yield splitted
-
-# not using torchvision.transforms.CenterCrop because it doesn't allow float regions
-def center_crop(image: Image, w: int, h: int):
- iw, ih = image.size
- if ih / h < iw / w:
- sw = w * ih / h
- box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
- else:
- sh = h * iw / w
- box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
- return image.resize((w, h), Image.Resampling.LANCZOS, box)
-
-
-def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
- iw, ih = image.size
- err = lambda w, h: 1-(lambda x: x if x < 1 else 1/x)(iw/ih/(w/h))
- wh = max(((w, h) for w in range(mindim, maxdim+1, 64) for h in range(mindim, maxdim+1, 64)
- if minarea <= w * h <= maxarea and err(w, h) <= threshold),
- key= lambda wh: (wh[0]*wh[1], -err(*wh))[::1 if objective=='Maximize area' else -1],
- default=None
- )
- return wh and center_crop(image, *wh)
-
-
-def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
- width = process_width
- height = process_height
- src = os.path.abspath(process_src)
- dst = os.path.abspath(process_dst)
- split_threshold = max(0.0, min(1.0, split_threshold))
- overlap_ratio = max(0.0, min(0.9, overlap_ratio))
-
- assert src != dst, 'same directory specified as source and destination'
-
- os.makedirs(dst, exist_ok=True)
-
- files = listfiles(src)
-
- shared.state.job = "preprocess"
- shared.state.textinfo = "Preprocessing..."
- shared.state.job_count = len(files)
-
- params = PreprocessParams()
- params.dstdir = dst
- params.flip = process_flip
- params.process_caption = process_caption
- params.process_caption_deepbooru = process_caption_deepbooru
- params.preprocess_txt_action = preprocess_txt_action
-
- pbar = tqdm.tqdm(files)
- for index, imagefile in enumerate(pbar):
- params.subindex = 0
- filename = os.path.join(src, imagefile)
- try:
- img = Image.open(filename)
- img = ImageOps.exif_transpose(img)
- img = img.convert("RGB")
- except Exception:
- continue
-
- description = f"Preprocessing [Image {index}/{len(files)}]"
- pbar.set_description(description)
- shared.state.textinfo = description
-
- params.src = filename
-
- existing_caption = None
- existing_caption_filename = f"{os.path.splitext(filename)[0]}.txt"
- if os.path.exists(existing_caption_filename):
- with open(existing_caption_filename, 'r', encoding="utf8") as file:
- existing_caption = file.read()
-
- if shared.state.interrupted:
- break
-
- if img.height > img.width:
- ratio = (img.width * height) / (img.height * width)
- inverse_xy = False
- else:
- ratio = (img.height * width) / (img.width * height)
- inverse_xy = True
-
- process_default_resize = True
-
- if process_split and ratio < 1.0 and ratio <= split_threshold:
- for splitted in split_pic(img, inverse_xy, width, height, overlap_ratio):
- save_pic(splitted, index, params, existing_caption=existing_caption)
- process_default_resize = False
-
- if process_focal_crop and img.height != img.width:
-
- dnn_model_path = None
- try:
- dnn_model_path = autocrop.download_and_cache_models(os.path.join(paths.models_path, "opencv"))
- except Exception as e:
- print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)
-
- autocrop_settings = autocrop.Settings(
- crop_width = width,
- crop_height = height,
- face_points_weight = process_focal_crop_face_weight,
- entropy_points_weight = process_focal_crop_entropy_weight,
- corner_points_weight = process_focal_crop_edges_weight,
- annotate_image = process_focal_crop_debug,
- dnn_model_path = dnn_model_path,
- )
- for focal in autocrop.crop_image(img, autocrop_settings):
- save_pic(focal, index, params, existing_caption=existing_caption)
- process_default_resize = False
-
- if process_multicrop:
- cropped = multicrop_pic(img, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
- if cropped is not None:
- save_pic(cropped, index, params, existing_caption=existing_caption)
- else:
- print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)")
- process_default_resize = False
-
- if process_keep_original_size:
- save_pic(img, index, params, existing_caption=existing_caption)
- process_default_resize = False
-
- if process_default_resize:
- img = images.resize_image(1, img, width, height)
- save_pic(img, index, params, existing_caption=existing_caption)
-
- shared.state.nextjob()
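The deleted multicrop_pic above packs its aspect-ratio scoring into nested lambdas; for reference, the same error metric written out, where the error is 0 for a perfect aspect match and grows toward 1 as the candidate crop diverges from the image's ratio (a sketch, not part of the patch):

def aspect_error(iw: int, ih: int, w: int, h: int) -> float:
    # ratio of image aspect to candidate aspect, folded into (0, 1]
    ratio = (iw / ih) / (w / h)
    if ratio > 1:
        ratio = 1 / ratio
    return 1 - ratio

assert aspect_error(1024, 768, 512, 384) == 0.0  # both are 4:3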
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
index 35c4feef..f149ad1f 100644
--- a/modules/textual_inversion/ui.py
+++ b/modules/textual_inversion/ui.py
@@ -3,7 +3,6 @@ import html
import gradio as gr
import modules.textual_inversion.textual_inversion
-import modules.textual_inversion.preprocess
from modules import sd_hijack, shared
@@ -15,12 +14,6 @@ def create_embedding(name, initialization_text, nvpt, overwrite_old):
return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", ""
-def preprocess(*args):
- modules.textual_inversion.preprocess.preprocess(*args)
-
- return f"Preprocessing {'interrupted' if shared.state.interrupted else 'finished'}.", ""
-
-
def train_embedding(*args):
assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible'
diff --git a/modules/ui.py b/modules/ui.py
index accdb457..d80486dd 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -4,6 +4,7 @@ import os
import sys
from functools import reduce
import warnings
+from contextlib import ExitStack
import gradio as gr
import gradio.utils
@@ -270,7 +271,11 @@ def create_ui():
extra_tabs.__enter__()
with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False):
- with gr.Column(variant='compact', elem_id="txt2img_settings"):
+ with ExitStack() as stack:
+ if shared.opts.txt2img_settings_accordion:
+ stack.enter_context(gr.Accordion("Open for Settings", open=False))
+ stack.enter_context(gr.Column(variant='compact', elem_id="txt2img_settings"))
+
scripts.scripts_txt2img.prepare_ui()
for category in ordered_ui_categories():
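The ExitStack change above lets the settings column be optionally wrapped in an accordion: contexts are entered conditionally at runtime instead of being fixed by with-statement nesting. A self-contained sketch of the pattern:

from contextlib import ExitStack, contextmanager

@contextmanager
def ctx(name):
    print(f"enter {name}")
    yield
    print(f"exit {name}")

def build(wrap_in_accordion: bool):
    with ExitStack() as stack:
        if wrap_in_accordion:
            stack.enter_context(ctx("accordion"))
        stack.enter_context(ctx("column"))  # always entered
        print("body")
    # ExitStack unwinds in LIFO order: column closes before accordion

build(True)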
@@ -489,7 +494,11 @@ def create_ui():
extra_tabs.__enter__()
with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False):
- with gr.Column(variant='compact', elem_id="img2img_settings"):
+ with ExitStack() as stack:
+ if shared.opts.img2img_settings_accordion:
+ stack.enter_context(gr.Accordion("Open for Settings", open=False))
+ stack.enter_context(gr.Column(variant='compact', elem_id="img2img_settings"))
+
copy_image_buttons = []
copy_image_destinations = {}
@@ -626,12 +635,6 @@ def create_ui():
scale_by.release(**on_change_args)
button_update_resize_to.click(**on_change_args)
- # the code below is meant to update the resolution label after the image in the image selection UI has changed.
- # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests.
- # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs.
- for component in [init_img, sketch]:
- component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False)
-
tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab])
tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab])
@@ -692,6 +695,12 @@ def create_ui():
if category not in {"accordions"}:
scripts.scripts_img2img.setup_ui_for_section(category)
+ # the code below is meant to update the resolution label after the image in the image selection UI has changed.
+ # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests.
+ # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs.
+ for component in [init_img, sketch]:
+ component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False)
+
def select_img2img_tab(tab):
return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3),
@@ -903,71 +912,6 @@ def create_ui():
with gr.Column():
create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
- with gr.Tab(label="Preprocess images", id="preprocess_images"):
- process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
- process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
- process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
- process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
- preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")
-
- with gr.Row():
- process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size")
- process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
- process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
- process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
- process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
- process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
- process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
-
- with gr.Row(visible=False) as process_split_extra_row:
- process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
- process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")
-
- with gr.Row(visible=False) as process_focal_crop_row:
- process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
- process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
- process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
- process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
-
- with gr.Column(visible=False) as process_multicrop_col:
- gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
- with gr.Row():
- process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
- process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
- with gr.Row():
- process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
- process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
- with gr.Row():
- process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
- process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
-
- with gr.Row():
- with gr.Column(scale=3):
- gr.HTML(value="")
-
- with gr.Column():
- with gr.Row():
- interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
- run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
-
- process_split.change(
- fn=lambda show: gr_show(show),
- inputs=[process_split],
- outputs=[process_split_extra_row],
- )
-
- process_focal_crop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_focal_crop],
- outputs=[process_focal_crop_row],
- )
-
- process_multicrop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_multicrop],
- outputs=[process_multicrop_col],
- )
-
def get_textual_inversion_template_names():
return sorted(textual_inversion.textual_inversion_templates)
@@ -1068,42 +1012,6 @@ def create_ui():
]
)
- run_preprocess.click(
- fn=wrap_gradio_gpu_call(textual_inversion_ui.preprocess, extra_outputs=[gr.update()]),
- _js="start_training_textual_inversion",
- inputs=[
- dummy_component,
- process_src,
- process_dst,
- process_width,
- process_height,
- preprocess_txt_action,
- process_keep_original_size,
- process_flip,
- process_split,
- process_caption,
- process_caption_deepbooru,
- process_split_threshold,
- process_overlap_ratio,
- process_focal_crop,
- process_focal_crop_face_weight,
- process_focal_crop_entropy_weight,
- process_focal_crop_edges_weight,
- process_focal_crop_debug,
- process_multicrop,
- process_multicrop_mindim,
- process_multicrop_maxdim,
- process_multicrop_minarea,
- process_multicrop_maxarea,
- process_multicrop_objective,
- process_multicrop_threshold,
- ],
- outputs=[
- ti_output,
- ti_outcome,
- ],
- )
-
train_embedding.click(
fn=wrap_gradio_gpu_call(textual_inversion_ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
@@ -1177,12 +1085,6 @@ def create_ui():
outputs=[],
)
- interrupt_preprocessing.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
-
loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file)
settings = ui_settings.UiSettings()
@@ -1299,7 +1201,7 @@ def setup_ui_api(app):
from fastapi.responses import PlainTextResponse
text = sysinfo.get()
- filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt"
+ filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json"
return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'})
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index c0a73b57..dc1e34c8 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -65,7 +65,7 @@ def save_config_state(name):
filename = os.path.join(config_states_dir, f"{timestamp}_{name}.json")
print(f"Saving backup of webui/extension state to {filename}.")
with open(filename, "w", encoding="utf-8") as f:
- json.dump(current_config_state, f, indent=4)
+ json.dump(current_config_state, f, indent=4, ensure_ascii=False)
config_states.list_config_states()
new_value = next(iter(config_states.all_config_states.keys()), "Current")
new_choices = ["Current"] + list(config_states.all_config_states.keys())
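This hunk (and the matching ones in ui_extra_networks_user_metadata.py and ui_loadsave.py below) adds ensure_ascii=False so non-ASCII text is written verbatim rather than as \uXXXX escapes. For illustration:

import json
print(json.dumps({"name": "café"}))                      # {"name": "caf\u00e9"}
print(json.dumps({"name": "café"}, ensure_ascii=False))  # {"name": "café"}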
@@ -335,6 +335,11 @@ def normalize_git_url(url):
return url
+def get_extension_dirname_from_url(url):
+ *parts, last_part = url.split('/')
+ return normalize_git_url(last_part)
+
+
def install_extension_from_url(dirname, url, branch_name=None):
check_access()
@@ -346,10 +351,7 @@ def install_extension_from_url(dirname, url, branch_name=None):
assert url, 'No URL specified'
if dirname is None or dirname == "":
- *parts, last_part = url.split('/')
- last_part = normalize_git_url(last_part)
-
- dirname = last_part
+ dirname = get_extension_dirname_from_url(url)
target_dir = os.path.join(extensions.extensions_dir, dirname)
assert not os.path.exists(target_dir), f'Extension directory already exists: {target_dir}'
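Extracting get_extension_dirname_from_url makes the name derivation reusable by the availability check below. Assuming normalize_git_url strips a trailing .git suffix (its behavior isn't shown in this diff), the helper reduces to roughly:

def get_dirname(url: str) -> str:
    # last path component of the repo URL, without a ".git" suffix
    last = url.split('/')[-1]
    return last[:-4] if last.endswith('.git') else last

assert get_dirname("https://example.com/user/my-ext.git") == "my-ext"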
@@ -449,7 +451,8 @@ def get_date(info: dict, key):
def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=""):
extlist = available_extensions["extensions"]
- installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions}
+ installed_extensions = {extension.name for extension in extensions.extensions}
+ installed_extension_urls = {normalize_git_url(extension.remote) for extension in extensions.extensions if extension.remote is not None}
tags = available_extensions.get("tags", {})
tags_to_hide = set(hide_tags)
@@ -482,7 +485,7 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text="
if url is None:
continue
- existing = installed_extension_urls.get(normalize_git_url(url), None)
+ existing = get_extension_dirname_from_url(url) in installed_extensions or normalize_git_url(url) in installed_extension_urls
extension_tags = extension_tags + ["installed"] if existing else extension_tags
if any(x for x in extension_tags if x in tags_to_hide):
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 7907cd63..fe5d3ba3 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -151,8 +151,13 @@ class ExtraNetworksPage:
continue
subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/")
- while subdir.startswith("/"):
- subdir = subdir[1:]
+
+ if shared.opts.extra_networks_dir_button_function:
+ if not subdir.startswith("/"):
+ subdir = "/" + subdir
+ else:
+ while subdir.startswith("/"):
+ subdir = subdir[1:]
is_empty = len(os.listdir(x)) == 0
if not is_empty and not subdir.endswith("/"):
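The new branch keeps exactly one leading slash when the extra_networks_dir_button_function option is on, and strips all leading slashes otherwise. Condensed, the two behaviors are roughly:

def normalize_subdir(subdir: str, as_button_path: bool) -> str:
    if as_button_path:
        return subdir if subdir.startswith("/") else "/" + subdir
    return subdir.lstrip("/")

assert normalize_subdir("models/lora", True) == "/models/lora"
assert normalize_subdir("//models/lora", False) == "models/lora"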
@@ -279,6 +284,7 @@ class ExtraNetworksPage:
"date_created": int(stat.st_ctime or 0),
"date_modified": int(stat.st_mtime or 0),
"name": pth.name.lower(),
+ "path": str(pth.parent).lower(),
}
def find_preview(self, path):
@@ -369,6 +375,9 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
for page in ui.stored_extra_pages:
with gr.Tab(page.title, elem_id=f"{tabname}_{page.id_page}", elem_classes=["extra-page"]) as tab:
+ with gr.Column(elem_id=f"{tabname}_{page.id_page}_prompts", elem_classes=["extra-page-prompts"]):
+ pass
+
elem_id = f"{tabname}_{page.id_page}_cards_html"
page_elem = gr.HTML('Loading...', elem_id=elem_id)
ui.pages.append(page_elem)
@@ -382,7 +391,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
related_tabs.append(tab)
edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True)
- dropdown_sort = gr.Dropdown(choices=['Name', 'Date Created', 'Date Modified', ], value=shared.opts.extra_networks_card_order_field, elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order")
+ dropdown_sort = gr.Dropdown(choices=['Path', 'Name', 'Date Created', 'Date Modified', ], value=shared.opts.extra_networks_card_order_field, elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order")
button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes=["sortorder"] + ([] if shared.opts.extra_networks_card_order == "Ascending" else ["sortReverse"]), visible=False, tooltip="Invert sort order")
button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False)
checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False)
@@ -399,7 +408,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
allow_prompt = "true" if page.allow_prompt else "false"
allow_negative_prompt = "true" if page.allow_negative_prompt else "false"
- jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');'
+ jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}_prompts" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');'
tab.select(fn=lambda: [gr.update(visible=True) for _ in tab_controls], _js='function(){ ' + jscode + ' }', inputs=[], outputs=tab_controls, show_progress=False)
diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py
index 2fc0ed43..1693e71f 100644
--- a/modules/ui_extra_networks_checkpoints.py
+++ b/modules/ui_extra_networks_checkpoints.py
@@ -17,6 +17,9 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
def create_item(self, name, index=None, enable_filter=True):
checkpoint: sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name)
+ if checkpoint is None:
+ return
+
path, ext = os.path.splitext(checkpoint.filename)
return {
"name": checkpoint.name_for_extra,
@@ -32,9 +35,12 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
}
def list_items(self):
+ # instantiate a list to protect against concurrent modification
names = list(sd_models.checkpoints_list)
for index, name in enumerate(names):
- yield self.create_item(name, index)
+ item = self.create_item(name, index)
+ if item is not None:
+ yield item
def allowed_directories_for_previews(self):
return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
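The same guard appears here and in the hypernetwork and textual-inversion pages below: snapshot the registry keys before iterating, and skip names whose entry vanished in the meantime (e.g. during a concurrent refresh). A generic sketch of the pattern:

def list_items(registry: dict, create_item):
    names = list(registry)  # snapshot against concurrent modification
    for index, name in enumerate(names):
        item = create_item(name, index)  # may return None for stale names
        if item is not None:
            yield item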
diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py
index 4cedf085..c96c4fa3 100644
--- a/modules/ui_extra_networks_hypernets.py
+++ b/modules/ui_extra_networks_hypernets.py
@@ -13,7 +13,10 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
shared.reload_hypernetworks()
def create_item(self, name, index=None, enable_filter=True):
- full_path = shared.hypernetworks[name]
+ full_path = shared.hypernetworks.get(name)
+ if full_path is None:
+ return
+
path, ext = os.path.splitext(full_path)
sha256 = sha256_from_cache(full_path, f'hypernet/{name}')
shorthash = sha256[0:10] if sha256 else None
@@ -31,8 +34,12 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
}
def list_items(self):
- for index, name in enumerate(shared.hypernetworks):
- yield self.create_item(name, index)
+ # instantiate a list to protect against concurrent modification
+ names = list(shared.hypernetworks)
+ for index, name in enumerate(names):
+ item = self.create_item(name, index)
+ if item is not None:
+ yield item
def allowed_directories_for_previews(self):
return [shared.cmd_opts.hypernetwork_dir]
diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py
index 55ef0ea7..1b334fda 100644
--- a/modules/ui_extra_networks_textual_inversion.py
+++ b/modules/ui_extra_networks_textual_inversion.py
@@ -14,6 +14,8 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
def create_item(self, name, index=None, enable_filter=True):
embedding = sd_hijack.model_hijack.embedding_db.word_embeddings.get(name)
+ if embedding is None:
+ return
path, ext = os.path.splitext(embedding.filename)
return {
@@ -29,8 +31,12 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
}
def list_items(self):
- for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings):
- yield self.create_item(name, index)
+ # instantiate a list to protect against concurrent modification
+ names = list(sd_hijack.model_hijack.embedding_db.word_embeddings)
+ for index, name in enumerate(names):
+ item = self.create_item(name, index)
+ if item is not None:
+ yield item
def allowed_directories_for_previews(self):
return list(sd_hijack.model_hijack.embedding_db.embedding_dirs)
diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py
index bfec140c..36a807fc 100644
--- a/modules/ui_extra_networks_user_metadata.py
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -134,7 +134,7 @@ class UserMetadataEditor:
basename, ext = os.path.splitext(filename)
with open(basename + '.json', "w", encoding="utf8") as file:
- json.dump(metadata, file, indent=4)
+ json.dump(metadata, file, indent=4, ensure_ascii=False)
def save_user_metadata(self, name, desc, notes):
user_metadata = self.get_user_metadata(name)
diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py
index eb20ff25..7826786c 100644
--- a/modules/ui_loadsave.py
+++ b/modules/ui_loadsave.py
@@ -141,7 +141,7 @@ class UiLoadsave:
def write_to_file(self, current_ui_settings):
with open(self.filename, "w", encoding="utf8") as file:
- json.dump(current_ui_settings, file, indent=4)
+ json.dump(current_ui_settings, file, indent=4, ensure_ascii=False)
def dump_defaults(self):
"""saves default values to a file unless tjhe file is present and there was an error loading default values at start"""
diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py
index 802e1ce7..13d888e4 100644
--- a/modules/ui_postprocessing.py
+++ b/modules/ui_postprocessing.py
@@ -1,9 +1,10 @@
import gradio as gr
-from modules import scripts, shared, ui_common, postprocessing, call_queue
+from modules import scripts, shared, ui_common, postprocessing, call_queue, ui_toprow
import modules.generation_parameters_copypaste as parameters_copypaste
def create_ui():
+ dummy_component = gr.Label(visible=False)
tab_index = gr.State(value=0)
with gr.Row(equal_height=False, variant='compact'):
@@ -20,11 +21,13 @@ def create_ui():
extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir")
show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results")
- submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
-
script_inputs = scripts.scripts_postproc.setup_ui()
with gr.Column():
+ toprow = ui_toprow.Toprow(is_compact=True, is_img2img=False, id_part="extras")
+ toprow.create_inline_toprow_image()
+ submit = toprow.submit
+
result_images, html_info_x, html_info, html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples)
tab_single.select(fn=lambda: 0, inputs=[], outputs=[tab_index])
@@ -32,8 +35,10 @@ def create_ui():
tab_batch_dir.select(fn=lambda: 2, inputs=[], outputs=[tab_index])
submit.click(
- fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing, extra_outputs=[None, '']),
+ fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing_webui, extra_outputs=[None, '']),
+ _js="submit_extras",
inputs=[
+ dummy_component,
tab_index,
extras_image,
image_batch,
@@ -45,8 +50,9 @@ def create_ui():
outputs=[
result_images,
html_info_x,
- html_info,
- ]
+ html_log,
+ ],
+ show_progress=False,
)
parameters_copypaste.add_paste_fields("extras", extras_image, None)
diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py
index 3bcf092f..0d74c23f 100644
--- a/modules/ui_prompt_styles.py
+++ b/modules/ui_prompt_styles.py
@@ -68,10 +68,10 @@ class UiPromptStyles:
self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f"{tabname}_style_copy", tooltip="Copy main UI prompt to style.")
with gr.Row():
- self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3)
+ self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3, elem_classes=["prompt"])
with gr.Row():
- self.neg_prompt = gr.Textbox(label="Negative prompt", show_label=True, elem_id=f"{tabname}_edit_style_neg_prompt", lines=3)
+ self.neg_prompt = gr.Textbox(label="Negative prompt", show_label=True, elem_id=f"{tabname}_edit_style_neg_prompt", lines=3, elem_classes=["prompt"])
with gr.Row():
self.save = gr.Button('Save', variant='primary', elem_id=f'{tabname}_edit_style_save', visible=False)
diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py
index 985b5a2d..88838f97 100644
--- a/modules/ui_toprow.py
+++ b/modules/ui_toprow.py
@@ -34,8 +34,10 @@ class Toprow:
submit_box = None
- def __init__(self, is_img2img, is_compact=False):
- id_part = "img2img" if is_img2img else "txt2img"
+ def __init__(self, is_img2img, is_compact=False, id_part=None):
+ if id_part is None:
+ id_part = "img2img" if is_img2img else "txt2img"
+
self.id_part = id_part
self.is_img2img = is_img2img
self.is_compact = is_compact
diff --git a/modules/upscaler.py b/modules/upscaler.py
index e682bbaa..b256e085 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -57,6 +57,9 @@ class Upscaler:
dest_h = int((img.height * scale) // 8 * 8)
for _ in range(3):
+ if img.width >= dest_w and img.height >= dest_h:
+ break
+
shape = (img.width, img.height)
img = self.do_upscale(img, selected_model)
@@ -64,9 +67,6 @@ class Upscaler:
if shape == (img.width, img.height):
break
- if img.width >= dest_w and img.height >= dest_h:
- break
-
if img.width != dest_w or img.height != dest_h:
img = img.resize((int(dest_w), int(dest_h)), resample=LANCZOS)
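Moving the size check to the top of the loop means no upscale pass runs once the target dimensions are already met; the no-progress check still breaks out if a pass changes nothing. The resulting control flow, with do_upscale as a stand-in for the backend:

def upscale_until(img, dest_w, dest_h, do_upscale, max_passes=3):
    for _ in range(max_passes):
        if img.width >= dest_w and img.height >= dest_h:
            break  # target reached; skip a wasted pass
        shape = (img.width, img.height)
        img = do_upscale(img)
        if shape == (img.width, img.height):
            break  # backend made no progress
    return img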
diff --git a/modules/xpu_specific.py b/modules/xpu_specific.py
new file mode 100644
index 00000000..d8da94a0
--- /dev/null
+++ b/modules/xpu_specific.py
@@ -0,0 +1,59 @@
+from modules import shared
+from modules.sd_hijack_utils import CondFunc
+
+has_ipex = False
+try:
+ import torch
+ import intel_extension_for_pytorch as ipex # noqa: F401
+ has_ipex = True
+except Exception:
+ pass
+
+
+def check_for_xpu():
+ return has_ipex and hasattr(torch, 'xpu') and torch.xpu.is_available()
+
+
+def get_xpu_device_string():
+ if shared.cmd_opts.device_id is not None:
+ return f"xpu:{shared.cmd_opts.device_id}"
+ return "xpu"
+
+
+def torch_xpu_gc():
+ with torch.xpu.device(get_xpu_device_string()):
+ torch.xpu.empty_cache()
+
+
+has_xpu = check_for_xpu()
+
+if has_xpu:
+ # W/A for https://github.com/intel/intel-extension-for-pytorch/issues/452: torch.Generator API doesn't support XPU device
+ CondFunc('torch.Generator',
+ lambda orig_func, device=None: torch.xpu.Generator(device),
+ lambda orig_func, device=None: device is not None and device.type == "xpu")
+
+ # W/A for some OPs that could not handle different input dtypes
+ CondFunc('torch.nn.functional.layer_norm',
+ lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
+ orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs),
+ lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
+ weight is not None and input.dtype != weight.data.dtype)
+ CondFunc('torch.nn.modules.GroupNorm.forward',
+ lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+ lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
+ CondFunc('torch.nn.modules.linear.Linear.forward',
+ lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+ lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
+ CondFunc('torch.nn.modules.conv.Conv2d.forward',
+ lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+ lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
+ CondFunc('torch.bmm',
+ lambda orig_func, input, mat2, out=None: orig_func(input.to(mat2.dtype), mat2, out=out),
+ lambda orig_func, input, mat2, out=None: input.dtype != mat2.dtype)
+ CondFunc('torch.cat',
+ lambda orig_func, tensors, dim=0, out=None: orig_func([t.to(tensors[0].dtype) for t in tensors], dim=dim, out=out),
+ lambda orig_func, tensors, dim=0, out=None: not all(t.dtype == tensors[0].dtype for t in tensors))
+ CondFunc('torch.nn.functional.scaled_dot_product_attention',
+ lambda orig_func, query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False: orig_func(query, key.to(query.dtype), value.to(query.dtype), attn_mask, dropout_p, is_causal),
+ lambda orig_func, query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False: query.dtype != key.dtype or query.dtype != value.dtype)
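All of the patches in this new file go through CondFunc, which swaps in a substitute only when a predicate on the call's arguments holds. A generic illustration of that idea (this mirrors, but does not reproduce, modules.sd_hijack_utils.CondFunc):

def cond_patch(orig, sub, cond):
    def wrapper(*args, **kwargs):
        if cond(orig, *args, **kwargs):
            return sub(orig, *args, **kwargs)
        return orig(*args, **kwargs)
    return wrapper

# toy usage: coerce mixed-type operands, leave matching types untouched
add = cond_patch(
    lambda a, b: a + b,
    lambda orig, a, b: orig(float(a), float(b)),
    lambda orig, a, b: type(a) is not type(b),
)
assert add(1, 2) == 3 and add(1, 2.5) == 3.5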