-rw-r--r--  README.md                                      |   3
-rw-r--r--  launch.py                                      |   8
-rw-r--r--  modules/api/api.py                             |   9
-rw-r--r--  modules/cmd_args.py                            |   2
-rw-r--r--  modules/img2img.py                             |  32
-rw-r--r--  modules/launch_utils.py                        |  31
-rw-r--r--  modules/sd_disable_initialization.py           | 106
-rw-r--r--  modules/sd_hijack.py                           |   8
-rw-r--r--  modules/sd_hijack_clip.py                      |   2
-rw-r--r--  modules/sd_models.py                           |  16
-rw-r--r--  modules/sd_models_xl.py                        |   9
-rw-r--r--  modules/sd_samplers_extra.py                   |  74
-rw-r--r--  modules/sd_samplers_kdiffusion.py              |   8
-rw-r--r--  modules/textual_inversion/textual_inversion.py |  19
-rw-r--r--  modules/timer.py                               |  23
-rw-r--r--  webui.py                                       |   4
16 files changed, 293 insertions(+), 61 deletions(-)
diff --git a/README.md b/README.md
index b796d150..2fd6e425 100644
--- a/README.md
+++ b/README.md
@@ -88,7 +88,7 @@ A browser interface based on Gradio library for Stable Diffusion.
- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
- Now without any bad letters!
- Load checkpoints in safetensors format
-- Eased resolution restriction: generated image's domension must be a multiple of 8 rather than 64
+- Eased resolution restriction: generated image's dimension must be a multiple of 8 rather than 64
- Now with a license!
- Reorder elements in the UI from settings screen
@@ -169,5 +169,6 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al
- UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
- TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd
- LyCORIS - KohakuBlueleaf
+- Restart sampling - lambertae - https://github.com/Newbeeer/diffusion_restart_sampling
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
- (You)
diff --git a/launch.py b/launch.py
index 1dbc4c6e..e4c2ce99 100644
--- a/launch.py
+++ b/launch.py
@@ -1,6 +1,5 @@
from modules import launch_utils
-
args = launch_utils.args
python = launch_utils.python
git = launch_utils.git
@@ -26,8 +25,11 @@ start = launch_utils.start
def main():
- if not args.skip_prepare_environment:
- prepare_environment()
+ launch_utils.startup_timer.record("initial startup")
+
+ with launch_utils.startup_timer.subcategory("prepare environment"):
+ if not args.skip_prepare_environment:
+ prepare_environment()
if args.test_server:
configure_for_tests()
diff --git a/modules/api/api.py b/modules/api/api.py
index 606db179..908c4514 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -15,7 +15,7 @@ from fastapi.encoders import jsonable_encoder
from secrets import compare_digest
import modules.shared as shared
-from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart
+from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items
from modules.api import models
from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
@@ -197,6 +197,7 @@ class Api:
self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem])
self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
+ self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"])
self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
@@ -343,6 +344,7 @@ class Api:
processed = process_images(p)
finally:
shared.state.end()
+ shared.total_tqdm.clear()
b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
@@ -402,6 +404,7 @@ class Api:
processed = process_images(p)
finally:
shared.state.end()
+ shared.total_tqdm.clear()
b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
@@ -608,6 +611,10 @@ class Api:
with self.queue_lock:
shared.refresh_checkpoints()
+ def refresh_vae(self):
+ with self.queue_lock:
+ shared_items.refresh_vae_list()
+
def create_embedding(self, args: dict):
try:
shared.state.begin(job="create_embedding")
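The new /sdapi/v1/refresh-vae route takes no parameters and returns no body. A minimal sketch of calling it, assuming a default local server at 127.0.0.1:7860 and the requests library:

```python
import requests

# Assumes the webui API is enabled and listening on the default address.
resp = requests.post("http://127.0.0.1:7860/sdapi/v1/refresh-vae")
resp.raise_for_status()  # a 200 response means the VAE list was re-scanned
```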
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index e401f641..cb4ec5f7 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -13,6 +13,7 @@ parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py
parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
parser.add_argument("--update-check", action='store_true', help="launch.py argument: check for updates at startup")
parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
+parser.add_argument("--log-startup", action='store_true', help="launch.py argument: print a detailed log of what's happening at startup")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
@@ -66,6 +67,7 @@ parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="pre
parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
+parser.add_argument("--disable-model-loading-ram-optimization", action='store_true', help="disable an optimization that reduces RAM use when loading a model")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
diff --git a/modules/img2img.py b/modules/img2img.py
index a811e7a4..68e415ef 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -10,7 +10,6 @@ from modules import sd_samplers, images as imgutil
from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
-from modules.images import save_image
import modules.shared as shared
import modules.processing as processing
from modules.ui import plaintext_to_html
@@ -18,9 +17,10 @@ import modules.scripts
def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
+ output_dir = output_dir.strip()
processing.fix_seed(p)
- images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp")))
+ images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
is_inpaint_batch = False
if inpaint_mask_dir:
@@ -32,11 +32,6 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
- save_normally = output_dir == ''
-
- p.do_not_save_grid = True
- p.do_not_save_samples = not save_normally
-
state.job_count = len(images) * p.n_iter
# extract "default" params to use in case getting png info fails
@@ -111,21 +106,14 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
proc = modules.scripts.scripts_img2img.run(p, *args)
if proc is None:
- proc = process_images(p)
-
- for n, processed_image in enumerate(proc.images):
- filename = image_path.stem
- infotext = proc.infotext(p, n)
- relpath = os.path.dirname(os.path.relpath(image, input_dir))
-
- if n > 0:
- filename += f"-{n}"
-
- if not save_normally:
- os.makedirs(os.path.join(output_dir, relpath), exist_ok=True)
- if processed_image.mode == 'RGBA':
- processed_image = processed_image.convert("RGB")
- save_image(processed_image, os.path.join(output_dir, relpath), None, extension=opts.samples_format, info=infotext, forced_filename=filename, save_to_dirs=False)
+ if output_dir:
+ p.outpath_samples = output_dir
+ p.override_settings['save_to_dirs'] = False
+ if p.n_iter > 1 or p.batch_size > 1:
+ p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
+ else:
+ p.override_settings['samples_filename_pattern'] = f'{image_path.stem}'
+ process_images(p)
def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
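With the rewrite above, batch img2img no longer saves images itself; it points p.outpath_samples at the output directory and lets the regular save path handle naming via override_settings. A sketch of how the filename pattern is chosen, with illustrative values that are not part of the commit:

```python
from pathlib import Path

image_path = Path("inputs/photo.png")  # hypothetical source image
n_iter, batch_size = 2, 1

# Mirrors the logic above: multi-image batches get a generation counter suffix.
if n_iter > 1 or batch_size > 1:
    pattern = f"{image_path.stem}-[generation_number]"  # photo-1.png, photo-2.png, ...
else:
    pattern = f"{image_path.stem}"                      # photo.png
```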
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index e1c9cfbe..f77b577a 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -10,9 +10,7 @@ from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
-from modules import timer
-
-timer.startup_timer.record("start")
+from modules.timer import startup_timer
args, _ = cmd_args.parser.parse_known_args()
@@ -226,8 +224,13 @@ def run_extensions_installers(settings_file):
if not os.path.isdir(extensions_dir):
return
- for dirname_extension in list_extensions(settings_file):
- run_extension_installer(os.path.join(extensions_dir, dirname_extension))
+ with startup_timer.subcategory("run extensions installers"):
+ for dirname_extension in list_extensions(settings_file):
+ path = os.path.join(extensions_dir, dirname_extension)
+
+ if os.path.isdir(path):
+ run_extension_installer(path)
+ startup_timer.record(dirname_extension)
re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
@@ -300,8 +303,11 @@ def prepare_environment():
if not args.skip_python_version_check:
check_python_version()
+ startup_timer.record("checks")
+
commit = commit_hash()
tag = git_tag()
+ startup_timer.record("git version info")
print(f"Python {sys.version}")
print(f"Version: {tag}")
@@ -309,21 +315,27 @@ def prepare_environment():
if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
+ startup_timer.record("install torch")
if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"):
raise RuntimeError(
'Torch is not able to use GPU; '
'add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'
)
+ startup_timer.record("torch GPU test")
+
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
+ startup_timer.record("install gfpgan")
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
+ startup_timer.record("install clip")
if not is_installed("open_clip"):
run_pip(f"install {openclip_package}", "open_clip")
+ startup_timer.record("install open_clip")
if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers:
if platform.system() == "Windows":
@@ -337,8 +349,11 @@ def prepare_environment():
elif platform.system() == "Linux":
run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
+ startup_timer.record("install xformers")
+
if not is_installed("ngrok") and args.ngrok:
run_pip("install ngrok", "ngrok")
+ startup_timer.record("install ngrok")
os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)
@@ -348,22 +363,28 @@ def prepare_environment():
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
+ startup_timer.record("clone repositores")
+
if not is_installed("lpips"):
run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer")
+ startup_timer.record("install CodeFormer requirements")
if not os.path.isfile(requirements_file):
requirements_file = os.path.join(script_path, requirements_file)
if not requirements_met(requirements_file):
run_pip(f"install -r \"{requirements_file}\"", "requirements")
+ startup_timer.record("install requirements")
run_extensions_installers(settings_file=args.ui_settings_file)
if args.update_check:
version_check(commit)
+ startup_timer.record("check version")
if args.update_all_extensions:
git_pull_recursive(extensions_dir)
+ startup_timer.record("update extensions")
if "--exit" in sys.argv:
print("Exiting because of --exit argument")
diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index 9fc89dc6..695c5736 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -3,8 +3,31 @@ import open_clip
import torch
import transformers.utils.hub
+from modules import shared
-class DisableInitialization:
+
+class ReplaceHelper:
+ def __init__(self):
+ self.replaced = []
+
+ def replace(self, obj, field, func):
+ original = getattr(obj, field, None)
+ if original is None:
+ return None
+
+ self.replaced.append((obj, field, original))
+ setattr(obj, field, func)
+
+ return original
+
+ def restore(self):
+ for obj, field, original in self.replaced:
+ setattr(obj, field, original)
+
+ self.replaced.clear()
+
+
+class DisableInitialization(ReplaceHelper):
"""
When an object of this class enters a `with` block, it starts:
- preventing torch's layer initialization functions from working
@@ -21,7 +44,7 @@ class DisableInitialization:
"""
def __init__(self, disable_clip=True):
- self.replaced = []
+ super().__init__()
self.disable_clip = disable_clip
def replace(self, obj, field, func):
@@ -86,8 +109,81 @@ class DisableInitialization:
self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache)
def __exit__(self, exc_type, exc_val, exc_tb):
- for obj, field, original in self.replaced:
- setattr(obj, field, original)
+ self.restore()
- self.replaced.clear()
+class InitializeOnMeta(ReplaceHelper):
+ """
+ Context manager that causes all parameters for linear/conv2d/mha layers to be allocated on meta device,
+ which results in those parameters having no values and taking no memory. model.to() will be broken and
+ will need to be repaired by using LoadStateDictOnMeta below when loading params from state dict.
+
+ Usage:
+ ```
+ with sd_disable_initialization.InitializeOnMeta():
+ sd_model = instantiate_from_config(sd_config.model)
+ ```
+ """
+
+ def __enter__(self):
+ if shared.cmd_opts.disable_model_loading_ram_optimization:
+ return
+
+ def set_device(x):
+ x["device"] = "meta"
+ return x
+
+ linear_init = self.replace(torch.nn.Linear, '__init__', lambda *args, **kwargs: linear_init(*args, **set_device(kwargs)))
+ conv2d_init = self.replace(torch.nn.Conv2d, '__init__', lambda *args, **kwargs: conv2d_init(*args, **set_device(kwargs)))
+ mha_init = self.replace(torch.nn.MultiheadAttention, '__init__', lambda *args, **kwargs: mha_init(*args, **set_device(kwargs)))
+ self.replace(torch.nn.Module, 'to', lambda *args, **kwargs: None)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.restore()
+
+
+class LoadStateDictOnMeta(ReplaceHelper):
+ """
+ Context manager that allows reading parameters from a state_dict into a model that has some of its parameters on the meta device.
+ As parameters are read from the state_dict they are deleted from it, so by the end the state_dict is mostly empty, saving memory.
+ Meant to be used together with InitializeOnMeta above.
+
+ Usage:
+ ```
+ with sd_disable_initialization.LoadStateDictOnMeta(state_dict):
+ model.load_state_dict(state_dict, strict=False)
+ ```
+ """
+
+ def __init__(self, state_dict, device):
+ super().__init__()
+ self.state_dict = state_dict
+ self.device = device
+
+ def __enter__(self):
+ if shared.cmd_opts.disable_model_loading_ram_optimization:
+ return
+
+ sd = self.state_dict
+ device = self.device
+
+ def load_from_state_dict(original, self, state_dict, prefix, *args, **kwargs):
+ params = [(name, param) for name, param in self._parameters.items() if param is not None and param.is_meta]
+
+ for name, param in params:
+ if param.is_meta:
+ self._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device), requires_grad=param.requires_grad)
+
+ original(self, state_dict, prefix, *args, **kwargs)
+
+ for name, _ in params:
+ key = prefix + name
+ if key in sd:
+ del sd[key]
+
+ linear_load_from_state_dict = self.replace(torch.nn.Linear, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(linear_load_from_state_dict, *args, **kwargs))
+ conv2d_load_from_state_dict = self.replace(torch.nn.Conv2d, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(conv2d_load_from_state_dict, *args, **kwargs))
+ mha_load_from_state_dict = self.replace(torch.nn.MultiheadAttention, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(mha_load_from_state_dict, *args, **kwargs))
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.restore()
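The two context managers above build on PyTorch meta tensors. A minimal standalone sketch of the same idea, assuming a PyTorch version that accepts the device="meta" factory argument:

```python
import torch

# Parameters created on "meta" have shapes and dtypes but no storage,
# so instantiating the model costs almost no RAM.
layer = torch.nn.Linear(4, 4, device="meta")
assert layer.weight.is_meta

# Before loading real values, swap each meta parameter for an allocated one,
# mirroring what LoadStateDictOnMeta does inside _load_from_state_dict.
for name, param in list(layer.named_parameters()):
    layer._parameters[name] = torch.nn.parameter.Parameter(
        torch.zeros_like(param, device="cpu"), requires_grad=param.requires_grad)

layer.load_state_dict({"weight": torch.randn(4, 4), "bias": torch.randn(4)})
```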
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index c8fdd4f1..cfa5f0eb 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -197,7 +197,7 @@ class StableDiffusionModelHijack:
conditioner.embedders[i] = sd_hijack_clip.FrozenCLIPEmbedderForSDXLWithCustomWords(embedder, self)
text_cond_models.append(conditioner.embedders[i])
if typename == 'FrozenOpenCLIPEmbedder2':
- embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self)
+ embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self, textual_inversion_key='clip_g')
conditioner.embedders[i] = sd_hijack_open_clip.FrozenOpenCLIPEmbedder2WithCustomWords(embedder, self)
text_cond_models.append(conditioner.embedders[i])
@@ -292,10 +292,11 @@ class StableDiffusionModelHijack:
class EmbeddingsWithFixes(torch.nn.Module):
- def __init__(self, wrapped, embeddings):
+ def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
+ self.textual_inversion_key = textual_inversion_key
def forward(self, input_ids):
batch_fixes = self.embeddings.fixes
@@ -309,7 +310,8 @@ class EmbeddingsWithFixes(torch.nn.Module):
vecs = []
for fixes, tensor in zip(batch_fixes, inputs_embeds):
for offset, embedding in fixes:
- emb = devices.cond_cast_unet(embedding.vec)
+ vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec
+ emb = devices.cond_cast_unet(vec)
emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 16a5500e..2f9d569b 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -161,7 +161,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
position += 1
continue
- emb_len = int(embedding.vec.shape[0])
+ emb_len = int(embedding.vectors)
if len(chunk.tokens) + emb_len > self.chunk_length:
next_chunk()
diff --git a/modules/sd_models.py b/modules/sd_models.py
index fb31a793..acb1e817 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -460,7 +460,6 @@ def get_empty_cond(sd_model):
return sd_model.cond_stage_model([""])
-
def load_model(checkpoint_info=None, already_loaded_state_dict=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
@@ -495,19 +494,24 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
sd_model = None
try:
with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip):
- sd_model = instantiate_from_config(sd_config.model)
- except Exception:
- pass
+ with sd_disable_initialization.InitializeOnMeta():
+ sd_model = instantiate_from_config(sd_config.model)
+
+ except Exception as e:
+ errors.display(e, "creating model quickly", full_traceback=True)
if sd_model is None:
print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
- sd_model = instantiate_from_config(sd_config.model)
+
+ with sd_disable_initialization.InitializeOnMeta():
+ sd_model = instantiate_from_config(sd_config.model)
sd_model.used_config = checkpoint_config
timer.record("create model")
- load_model_weights(sd_model, checkpoint_info, state_dict, timer)
+ with sd_disable_initialization.LoadStateDictOnMeta(state_dict, devices.cpu):
+ load_model_weights(sd_model, checkpoint_info, state_dict, timer)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
diff --git a/modules/sd_models_xl.py b/modules/sd_models_xl.py
index 40559208..bc219508 100644
--- a/modules/sd_models_xl.py
+++ b/modules/sd_models_xl.py
@@ -56,6 +56,14 @@ def encode_embedding_init_text(self: sgm.modules.GeneralConditioner, init_text,
return torch.cat(res, dim=1)
+def tokenize(self: sgm.modules.GeneralConditioner, texts):
+ for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'tokenize')]:
+ return embedder.tokenize(texts)
+
+ raise AssertionError('no tokenizer available')
+
+
def process_texts(self, texts):
for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'process_texts')]:
return embedder.process_texts(texts)
@@ -68,6 +76,7 @@ def get_target_prompt_token_count(self, token_count):
# those additions to GeneralConditioner make it possible to use it as model.cond_stage_model from SD1.5 in existing code
sgm.modules.GeneralConditioner.encode_embedding_init_text = encode_embedding_init_text
+sgm.modules.GeneralConditioner.tokenize = tokenize
sgm.modules.GeneralConditioner.process_texts = process_texts
sgm.modules.GeneralConditioner.get_target_prompt_token_count = get_target_prompt_token_count
diff --git a/modules/sd_samplers_extra.py b/modules/sd_samplers_extra.py
new file mode 100644
index 00000000..1b981ca8
--- /dev/null
+++ b/modules/sd_samplers_extra.py
@@ -0,0 +1,74 @@
+import torch
+import tqdm
+import k_diffusion.sampling
+
+
+@torch.no_grad()
+def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None):
+ """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)
+ Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}
+ If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list
+ """
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ step_id = 0
+ from k_diffusion.sampling import to_d, get_sigmas_karras
+
+ def heun_step(x, old_sigma, new_sigma, second_order=True):
+ nonlocal step_id
+ denoised = model(x, old_sigma * s_in, **extra_args)
+ d = to_d(x, old_sigma, denoised)
+ if callback is not None:
+ callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised})
+ dt = new_sigma - old_sigma
+ if new_sigma == 0 or not second_order:
+ # Euler method
+ x = x + d * dt
+ else:
+ # Heun's method
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, new_sigma * s_in, **extra_args)
+ d_2 = to_d(x_2, new_sigma, denoised_2)
+ d_prime = (d + d_2) / 2
+ x = x + d_prime * dt
+ step_id += 1
+ return x
+
+ steps = sigmas.shape[0] - 1
+ if restart_list is None:
+ if steps >= 20:
+ restart_steps = 9
+ restart_times = 1
+ if steps >= 36:
+ restart_steps = steps // 4
+ restart_times = 2
+ sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device)
+ restart_list = {0.1: [restart_steps + 1, restart_times, 2]}
+ else:
+ restart_list = {}
+
+ restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in restart_list.items()}
+
+ step_list = []
+ for i in range(len(sigmas) - 1):
+ step_list.append((sigmas[i], sigmas[i + 1]))
+ if i + 1 in restart_list:
+ restart_steps, restart_times, restart_max = restart_list[i + 1]
+ min_idx = i + 1
+ max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
+ if max_idx < min_idx:
+ sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1]
+ while restart_times > 0:
+ restart_times -= 1
+ step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])])
+
+ last_sigma = None
+ for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable):
+ if last_sigma is None:
+ last_sigma = old_sigma
+ elif last_sigma < old_sigma:
+ x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5
+ x = heun_step(x, old_sigma, new_sigma)
+ last_sigma = new_sigma
+
+ return x
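A hedged usage sketch for the new sampler, driving it directly with k-diffusion sigmas; denoiser stands in for any k-diffusion-wrapped model, and all numbers are illustrative:

```python
import torch
from k_diffusion.sampling import get_sigmas_karras

from modules import sd_samplers_extra

sigmas = get_sigmas_karras(n=30, sigma_min=0.03, sigma_max=14.6)
x = torch.randn(1, 4, 64, 64) * sigmas[0]

# {min_sigma: [restart_steps, restart_times, max_sigma]}: when sampling reaches
# sigma ~0.1, jump back up to sigma 2 and re-denoise over 10 steps, twice.
restart_list = {0.1: [10, 2, 2]}

# denoiser is assumed to be a k-diffusion denoiser wrapping the model:
# sample = sd_samplers_extra.restart_sampler(denoiser, x, sigmas, restart_list=restart_list)
```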
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 5552a8dc..e0da3425 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -2,7 +2,7 @@ from collections import deque
import torch
import inspect
import k_diffusion.sampling
-from modules import prompt_parser, devices, sd_samplers_common
+from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra
from modules.shared import opts, state
import modules.shared as shared
@@ -30,12 +30,14 @@ samplers_k_diffusion = [
('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
+ ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}),
]
+
samplers_data_k_diffusion = [
sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
for label, funcname, aliases, options in samplers_k_diffusion
- if hasattr(k_diffusion.sampling, funcname)
+ if callable(funcname) or hasattr(k_diffusion.sampling, funcname)
]
sampler_extra_params = {
@@ -270,7 +272,7 @@ class KDiffusionSampler:
self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
- self.func = getattr(k_diffusion.sampling, self.funcname)
+ self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
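With the change above, an entry's funcname may be either the name of a k-diffusion sampler or a callable such as restart_sampler. A minimal sketch of the dispatch:

```python
import k_diffusion.sampling

from modules import sd_samplers_extra

def resolve_sampler(funcname):
    # Mirrors KDiffusionSampler.__init__: callables are used directly,
    # strings are looked up on k_diffusion.sampling.
    return funcname if callable(funcname) else getattr(k_diffusion.sampling, funcname)

assert resolve_sampler('sample_euler') is k_diffusion.sampling.sample_euler
assert resolve_sampler(sd_samplers_extra.restart_sampler) is sd_samplers_extra.restart_sampler
```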
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 6166c76f..4713bc2d 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -181,29 +181,38 @@ class EmbeddingDatabase:
else:
return
+
# textual inversion embeddings
if 'string_to_param' in data:
param_dict = data['string_to_param']
param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1]
- # diffuser concepts
- elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
+ shape = vec.shape[-1]
+ vectors = vec.shape[0]
+ elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding
+ vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()}
+ shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1]
+ vectors = data['clip_g'].shape[0]
+ elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts
assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
emb = next(iter(data.values()))
if len(emb.shape) == 1:
emb = emb.unsqueeze(0)
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
+ shape = vec.shape[-1]
+ vectors = vec.shape[0]
else:
raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
- vec = emb.detach().to(devices.device, dtype=torch.float32)
embedding = Embedding(vec, name)
embedding.step = data.get('step', None)
embedding.sd_checkpoint = data.get('sd_checkpoint', None)
embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
- embedding.vectors = vec.shape[0]
- embedding.shape = vec.shape[-1]
+ embedding.vectors = vectors
+ embedding.shape = shape
embedding.filename = path
embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '')
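A sketch of the SDXL embedding layout the loader now accepts; the per-token widths (768 for clip_l, 1280 for clip_g) match the two SDXL text encoders, and the vector count is illustrative:

```python
import torch

data = {
    "clip_l": torch.zeros(2, 768),   # 2 vectors for the CLIP-L text encoder
    "clip_g": torch.zeros(2, 1280),  # matching 2 vectors for OpenCLIP-G
}

# Mirrors the new branch above: combined width, and vector count per encoder.
shape = data["clip_g"].shape[-1] + data["clip_l"].shape[-1]  # 2048
vectors = data["clip_g"].shape[0]                            # 2
```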
diff --git a/modules/timer.py b/modules/timer.py
index da99e49f..1d38595c 100644
--- a/modules/timer.py
+++ b/modules/timer.py
@@ -1,4 +1,5 @@
import time
+import argparse
class TimerSubcategory:
@@ -11,20 +12,27 @@ class TimerSubcategory:
def __enter__(self):
self.start = time.time()
self.timer.base_category = self.original_base_category + self.category + "/"
+ self.timer.subcategory_level += 1
+
+ if self.timer.print_log:
+ print(f"{' ' * self.timer.subcategory_level}{self.category}:")
def __exit__(self, exc_type, exc_val, exc_tb):
elapsed_for_subcategory = time.time() - self.start
self.timer.base_category = self.original_base_category
self.timer.add_time_to_record(self.original_base_category + self.category, elapsed_for_subcategory)
- self.timer.record(self.category)
+ self.timer.subcategory_level -= 1
+ self.timer.record(self.category, disable_log=True)
class Timer:
- def __init__(self):
+ def __init__(self, print_log=False):
self.start = time.time()
self.records = {}
self.total = 0
self.base_category = ''
+ self.print_log = print_log
+ self.subcategory_level = 0
def elapsed(self):
end = time.time()
@@ -38,13 +46,16 @@ class Timer:
self.records[category] += amount
- def record(self, category, extra_time=0):
+ def record(self, category, extra_time=0, disable_log=False):
e = self.elapsed()
self.add_time_to_record(self.base_category + category, e + extra_time)
self.total += e + extra_time
+ if self.print_log and not disable_log:
+ print(f"{' ' * self.subcategory_level}{category}: done in {e + extra_time:.3f}s")
+
def subcategory(self, name):
self.elapsed()
@@ -71,6 +82,10 @@ class Timer:
self.__init__()
-startup_timer = Timer()
+parser = argparse.ArgumentParser(add_help=False)
+parser.add_argument("--log-startup", action='store_true', help="print a detailed log of what's happening at startup")
+args = parser.parse_known_args()[0]
+
+startup_timer = Timer(print_log=args.log_startup)
startup_record = None
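A hedged usage sketch for the timer API above; the per-record lines are printed only when print_log is enabled, which the new --log-startup flag turns on:

```python
from modules.timer import Timer

t = Timer(print_log=True)
t.record("load config")                     # prints "load config: done in 0.001s"

with t.subcategory("prepare environment"):  # prints the subcategory header, indents nested records
    t.record("install torch")
```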
diff --git a/webui.py b/webui.py
index 6bf06854..2dc4f1aa 100644
--- a/webui.py
+++ b/webui.py
@@ -320,9 +320,9 @@ def initialize_rest(*, reload_script_modules=False):
if modules.sd_hijack.current_optimizer is None:
modules.sd_hijack.apply_optimizations()
- Thread(target=load_model).start()
+ devices.first_time_calculation()
- Thread(target=devices.first_time_calculation).start()
+ Thread(target=load_model).start()
shared.reload_hypernetworks()
startup_timer.record("reload hypernetworks")