author    AUTOMATIC1111 <16777216c@gmail.com>  2023-05-31 19:15:21 +0300
committer GitHub <noreply@github.com>          2023-05-31 19:15:21 +0300
commit    52b8752e6201e24c783f674f8dc0681027e10ea9 (patch)
tree      30470f64d84cadc4140023c0bf34c35bb68f79b0 /modules
parent    00dfe27f59727407c5b408a80ff2a262934df495 (diff)
parent    78a602ae8c006077ca93913576335a3a33dba7cb (diff)

Merge branch 'dev' into report-error
Diffstat (limited to 'modules')
 modules/api/api.py        |  5
 modules/api/models.py     |  4
 modules/cmd_args.py       |  2
 modules/extensions.py     |  9
 modules/gitpython_hack.py | 42
 modules/launch_utils.py   |  3
 modules/paths.py          |  1
 modules/processing.py     |  5
 modules/shared.py         |  4
 modules/ui.py             |  4
 modules/ui_extensions.py  |  6
 modules/upscaler.py       |  4

 12 files changed, 70 insertions(+), 19 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 79ce9228..fbd616a3 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -24,6 +24,7 @@ from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image
from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights
+from modules.sd_vae import vae_dict
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
@@ -190,6 +191,7 @@ class Api:
self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem])
self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem])
self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem])
+ self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=List[models.SDVaeItem])
self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem])
self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem])
self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem])
@@ -542,6 +544,9 @@ class Api:
def get_sd_models(self):
return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
+ def get_sd_vaes(self):
+ return [{"model_name": x, "filename": vae_dict[x]} for x in vae_dict.keys()]
+
def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
diff --git a/modules/api/models.py b/modules/api/models.py
index 1ff2fb33..47fdede2 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -249,6 +249,10 @@ class SDModelItem(BaseModel):
filename: str = Field(title="Filename")
config: Optional[str] = Field(title="Config file")
+class SDVaeItem(BaseModel):
+ model_name: str = Field(title="Model Name")
+ filename: str = Field(title="Filename")
+
class HypernetworkItem(BaseModel):
name: str = Field(title="Name")
path: Optional[str] = Field(title="Path")
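For reference, a standalone sketch of how the new response model serializes, assuming pydantic v1 as the webui used at the time (field values are hypothetical):

    from pydantic import BaseModel, Field

    class SDVaeItem(BaseModel):
        model_name: str = Field(title="Model Name")
        filename: str = Field(title="Filename")

    # Hypothetical entry, shaped like one element of the /sdapi/v1/sd-vae response.
    item = SDVaeItem(model_name="vae-ft-mse-840000-ema",
                     filename="models/VAE/vae-ft-mse-840000-ema.safetensors")
    print(item.json())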
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index 3eeb84d5..0974056d 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -11,7 +11,7 @@ parser.add_argument("--skip-python-version-check", action='store_true', help="la
parser.add_argument("--skip-torch-cuda-test", action='store_true', help="launch.py argument: do not check if CUDA is able to work properly")
parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py argument: install the appropriate version of xformers even if you have some version already installed")
parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
-parser.add_argument("--update-check", action='store_true', help="launch.py argument: chck for updates at startup")
+parser.add_argument("--update-check", action='store_true', help="launch.py argument: check for updates at startup")
parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
diff --git a/modules/extensions.py b/modules/extensions.py
index 369d2584..92f93ad9 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -1,10 +1,9 @@
import os
import threading
-import git
-
from modules import shared
from modules.errors import print_error
+from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
extensions = []
@@ -53,7 +52,7 @@ class Extension:
repo = None
try:
if os.path.exists(os.path.join(self.path, ".git")):
- repo = git.Repo(self.path)
+ repo = Repo(self.path)
except Exception:
print_error(f"Error reading github repository info from {self.path}", exc_info=True)
@@ -92,7 +91,7 @@ class Extension:
return res
def check_updates(self):
- repo = git.Repo(self.path)
+ repo = Repo(self.path)
for fetch in repo.remote().fetch(dry_run=True):
if fetch.flags != fetch.HEAD_UPTODATE:
self.can_update = True
@@ -114,7 +113,7 @@ class Extension:
self.status = "latest"
def fetch_and_reset_hard(self, commit='origin'):
- repo = git.Repo(self.path)
+ repo = Repo(self.path)
# Fix: `error: Your local changes to the following files would be overwritten by merge`,
# because WSL2 Docker set 755 file permissions instead of 644, this results to the error.
repo.git.fetch(all=True)
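The hacked Repo is a drop-in for git.Repo, so the call sites above only swap the import; the GitPython surface used here (remote().fetch, repo.git.fetch, ...) is unchanged. A hedged usage sketch (the path is hypothetical):

    from modules.gitpython_hack import Repo

    repo = Repo("/path/to/extensions/some-extension")  # hypothetical checkout
    for fetch_info in repo.remote().fetch(dry_run=True):
        # Same flag test as Extension.check_updates above.
        if fetch_info.flags != fetch_info.HEAD_UPTODATE:
            print("update available")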
diff --git a/modules/gitpython_hack.py b/modules/gitpython_hack.py
new file mode 100644
index 00000000..e537c1df
--- /dev/null
+++ b/modules/gitpython_hack.py
@@ -0,0 +1,42 @@
+from __future__ import annotations
+
+import io
+import subprocess
+
+import git
+
+
+class Git(git.Git):
+ """
+ Git subclassed to never use persistent processes.
+ """
+
+ def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs):
+ raise NotImplementedError(f"Refusing to use persistent process: {attr_name} ({cmd_name} {args} {kwargs})")
+
+ def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]:
+ ret = subprocess.check_output(
+ [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch-check"],
+ input=self._prepare_ref(ref),
+ cwd=self._working_dir,
+ timeout=2,
+ )
+ return self._parse_object_header(ret)
+
+ def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]:
+ # Not really streaming, per se; this buffers the entire object in memory.
+ # Shouldn't be a problem for our use case, since we're only using this for
+ # object headers (commit objects).
+ ret = subprocess.check_output(
+ [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch"],
+ input=self._prepare_ref(ref),
+ cwd=self._working_dir,
+ timeout=30,
+ )
+ bio = io.BytesIO(ret)
+ hexsha, typename, size = self._parse_object_header(bio.readline())
+ return (hexsha, typename, size, self.CatFileContentStream(size, bio))
+
+
+class Repo(git.Repo):
+ GitCommandWrapperType = Git
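The motivation for the subclass: stock GitPython serves object reads through long-lived `git cat-file --batch` child processes, which can pile up when many extension repos are inspected. Overriding get_object_header and stream_object_data with one-shot subprocess calls avoids that. A minimal sketch of the effect (the repo path is an assumption):

    from modules.gitpython_hack import Repo

    repo = Repo(".")  # any git checkout
    head = repo.head.commit  # object lookups go through the one-shot overrides
    print(head.hexsha, head.message.splitlines()[0])
    # No persistent `git cat-file` child survives: each lookup shells out,
    # reads the output, and lets the subprocess exit.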
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 22edc106..bcbf2e2c 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -230,13 +230,11 @@ def prepare_environment():
openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
- taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
- taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "c9fe758757e022f05ca5a53fa8fac28889e4f1cf")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
@@ -287,7 +285,6 @@ def prepare_environment():
os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)
git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
- git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
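taming-transformers is no longer cloned at startup. The remaining repos stay overridable through environment variables, as before; for example (values are hypothetical):

    import os

    # Pin k-diffusion to a fork/commit before prepare_environment() runs.
    os.environ["K_DIFFUSION_REPO"] = "https://github.com/example/k-diffusion.git"
    os.environ["K_DIFFUSION_COMMIT_HASH"] = "0123456789abcdef"  # hypothetical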
diff --git a/modules/paths.py b/modules/paths.py
index 5f6474c0..5171df4f 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -20,7 +20,6 @@ assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possibl
path_dirs = [
(sd_path, 'ldm', 'Stable Diffusion', []),
- (os.path.join(sd_path, '../taming-transformers'), 'taming', 'Taming Transformers', []),
(os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer', []),
(os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []),
(os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]),
diff --git a/modules/processing.py b/modules/processing.py
index 5c9bcce8..f628d88b 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -321,14 +321,13 @@ class StableDiffusionProcessing:
have been used before. The second element is where the previously
computed result is stored.
"""
-
- if cache[0] is not None and (required_prompts, steps) == cache[0]:
+ if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) == cache[0]:
return cache[1]
with devices.autocast():
cache[1] = function(shared.sd_model, required_prompts, steps)
- cache[0] = (required_prompts, steps)
+ cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info)
return cache[1]
def setup_conds(self):
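The cache key now covers everything that changes the computed conditioning: Clip-skip and the loaded checkpoint, not just prompts and steps, so switching either no longer returns a stale result. The same idea as a standalone sketch:

    # Minimal memo-cache sketch: the key must include every input that
    # affects the result, or stale values get reused after a change.
    def cached(cache, key, compute):
        if cache[0] is not None and key == cache[0]:
            return cache[1]
        cache[1] = compute()
        cache[0] = key
        return cache[1]

    cache = [None, None]
    cached(cache, ("prompt", 20, 1, "ckpt-A"), lambda: "conds-A")
    # Changing Clip skip (1 -> 2) misses the cache instead of reusing conds-A:
    cached(cache, ("prompt", 20, 2, "ckpt-A"), lambda: "conds-B")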
diff --git a/modules/shared.py b/modules/shared.py
index 4d59fbf1..acec7f18 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -416,12 +416,12 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
"CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP nrtwork; 1 ignores none, 2 ignores one layer"),
"upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
- "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different vidocard vendors"),
+ "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"),
}))
options_templates.update(options_section(('optimizations', "Optimizations"), {
"cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
- "s_min_uncond": OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
+ "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
"token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
"token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
diff --git a/modules/ui.py b/modules/ui.py
index 1ad94f02..fb6b2498 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -504,10 +504,10 @@ def create_ui():
with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
with gr.Column(scale=80):
with gr.Row():
- hr_prompt = gr.Textbox(label="Prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
+ hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
with gr.Column(scale=80):
with gr.Row():
- hr_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
+ hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
elif category == "batch":
if not opts.dimensions_and_batch_together:
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index cadf56be..e2ee9d72 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -487,8 +487,14 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text="
def preload_extensions_git_metadata():
+ t0 = time.time()
for extension in extensions.extensions:
extension.read_info_from_repo()
+ print(
+ f"preload_extensions_git_metadata for "
+ f"{len(extensions.extensions)} extensions took "
+ f"{time.time() - t0:.2f}s"
+ )
def create_ui():
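The added print is plain wall-clock instrumentation around the preload loop. The same pattern as a reusable sketch:

    import time

    def timed(label, fn, *args, **kwargs):
        # Report how long fn takes, in the same style as the print above.
        t0 = time.time()
        result = fn(*args, **kwargs)
        print(f"{label} took {time.time() - t0:.2f}s")
        return result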
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 7b1046d6..3c82861d 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -53,8 +53,8 @@ class Upscaler:
def upscale(self, img: PIL.Image, scale, selected_model: str = None):
self.scale = scale
- dest_w = int(img.width * scale)
- dest_h = int(img.height * scale)
+ dest_w = round((img.width * scale - 4) / 8) * 8
+ dest_h = round((img.height * scale - 4) / 8) * 8
for _ in range(3):
shape = (img.width, img.height)
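Instead of truncating width*scale to an arbitrary integer, the round((x - 4) / 8) * 8 arithmetic snaps the target size to a nearby multiple of 8, presumably to keep upscale targets aligned with the 8-pixel granularity the rest of the pipeline expects. Checking the arithmetic standalone:

    # Standalone check of the rounding above.
    def snap(dim, scale):
        return round((dim * scale - 4) / 8) * 8

    print(snap(512, 1.5))  # 768: (768 - 4) / 8 = 95.5 rounds to 96 -> 768
    print(snap(500, 1.1))  # 544: (550 - 4) / 8 = 68.25 rounds to 68 -> 544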