author    AUTOMATIC <16777216c@gmail.com>    2023-06-01 21:35:14 +0300
committer AUTOMATIC <16777216c@gmail.com>    2023-06-01 21:35:14 +0300
commit    b6af0a3809ea869fb180633f9affcae4b199ffcf (patch)
tree      ac47f34ac97c08e8f83fb2c90f6977459121e6b5 /modules
parent    20ae71faa8ef035c31aa3a410b707d792c8203a3 (diff)
parent    8c3e64f4f6d67076031132b1628daba66dfa1121 (diff)
Merge branch 'release_candidate'
Diffstat (limited to 'modules')
 modules/cmd_args.py                        | 2 +-
 modules/extra_networks.py                  | 2 +-
 modules/generation_parameters_copypaste.py | 2 +-
 modules/images.py                          | 9 ++++++---
 modules/processing.py                      | 5 ++---
 modules/sd_hijack.py                       | 2 ++
 modules/sd_hijack_optimizations.py         | 6 +++---
 modules/sd_models.py                       | 2 --
 modules/shared.py                          | 4 ++++
 modules/ui.py                              | 4 ++--
 10 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index 3eeb84d5..71a21b1a 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -62,7 +62,7 @@ parser.add_argument("--opt-split-attention-invokeai", action='store_true', help=
parser.add_argument("--opt-split-attention-v1", action='store_true', help="prefer older version of split attention optimization for automatic choice of optimization")
parser.add_argument("--opt-sdp-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*")
parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*")
-parser.add_argument("--disable-opt-split-attention", action='store_true', help="does not do anything")
+parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
index 34a3ba63..f4743928 100644
--- a/modules/extra_networks.py
+++ b/modules/extra_networks.py
@@ -26,7 +26,7 @@ class ExtraNetworkParams:
self.named = {}
for item in self.items:
- parts = item.split('=', 2)
+ parts = item.split('=', 2) if isinstance(item, str) else [item]
if len(parts) == 2:
self.named[parts[0]] = parts[1]
else:
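
The guard means a non-string item (for example a number passed programmatically by an extension) no longer crashes on .split() and is treated as a single positional value. A hedged sketch of the parsing; the else branch is an assumption, since the hunk's context cuts off right after `else:`:

    # Sketch of ExtraNetworkParams-style parsing; parse_items is illustrative,
    # not webui's actual API.
    def parse_items(items):
        positional, named = [], {}
        for item in items:
            # Non-strings have no .split(); wrap them as a one-element list.
            parts = item.split('=', 2) if isinstance(item, str) else [item]
            if len(parts) == 2:
                named[parts[0]] = parts[1]
            else:
                positional.append(item)
        return positional, named

    print(parse_items(["0.8", "te=1.0", 0.5]))  # (['0.8', 0.5], {'te': '1.0'})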
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index d5f0a49b..cfc04114 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -35,7 +35,7 @@ def reset():
def quote(text):
- if ',' not in str(text) and '\n' not in str(text):
+ if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
return text
return json.dumps(text, ensure_ascii=False)
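
Infotext is serialized as comma-separated `key: value` pairs, so a raw ':' inside an unquoted value could be misread as a new key boundary; adding ':' to the condition forces such values through json.dumps. A runnable sketch of the updated function, assuming only what the hunk shows:

    import json

    def quote(text):
        if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
            return text
        return json.dumps(text, ensure_ascii=False)

    print(quote("simple"))      # simple (returned unquoted)
    print(quote("ratio 16:9"))  # "ratio 16:9" (JSON-quoted so a parser can round-trip it)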
diff --git a/modules/images.py b/modules/images.py
index 4e8cd993..3f60e944 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -493,9 +493,12 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p
existing_pnginfo['parameters'] = geninfo
if extension.lower() == '.png':
- pnginfo_data = PngImagePlugin.PngInfo()
- for k, v in (existing_pnginfo or {}).items():
- pnginfo_data.add_text(k, str(v))
+ if opts.enable_pnginfo:
+ pnginfo_data = PngImagePlugin.PngInfo()
+ for k, v in (existing_pnginfo or {}).items():
+ pnginfo_data.add_text(k, str(v))
+ else:
+ pnginfo_data = None
image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
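
Previously the PNG branch always attached text chunks regardless of the metadata setting; now pnginfo_data is None when opts.enable_pnginfo is off, and Pillow simply writes no tEXt chunks. A minimal Pillow sketch (the file name and the enable_pnginfo variable are illustrative stand-ins for webui's opts):

    from PIL import Image, PngImagePlugin

    image = Image.new("RGB", (64, 64))
    enable_pnginfo = True  # stand-in for opts.enable_pnginfo

    if enable_pnginfo:
        pnginfo_data = PngImagePlugin.PngInfo()
        pnginfo_data.add_text("parameters", "prompt: example")
    else:
        pnginfo_data = None  # Pillow saves the PNG without text chunks

    image.save("out.png", format="PNG", pnginfo=pnginfo_data)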
diff --git a/modules/processing.py b/modules/processing.py
index 29a3743f..d22b353f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -321,14 +321,13 @@ class StableDiffusionProcessing:
have been used before. The second element is where the previously
computed result is stored.
"""
-
- if cache[0] is not None and (required_prompts, steps) == cache[0]:
+ if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) == cache[0]:
return cache[1]
with devices.autocast():
cache[1] = function(shared.sd_model, required_prompts, steps)
- cache[0] = (required_prompts, steps)
+ cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info)
return cache[1]
def setup_conds(self):
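
The cache key previously ignored two inputs that change the computed conditioning, so changing CLIP skip or swapping checkpoints between otherwise identical generations could return stale conds; both now invalidate the cache. A self-contained sketch of the pattern, with the heavy computation replaced by a lambda:

    # Hedged sketch; get_conds_with_caching mirrors the shape of webui's method
    # but is not its actual signature.
    def get_conds_with_caching(function, required_prompts, steps, clip_skip, checkpoint, cache):
        key = (required_prompts, steps, clip_skip, checkpoint)
        if cache[0] == key:
            return cache[1]  # every key component matched, reuse the result
        cache[1] = function(required_prompts, steps)
        cache[0] = key
        return cache[1]

    cache = [None, None]
    compute = lambda prompts, steps: f"conds{(prompts, steps)}"
    print(get_conds_with_caching(compute, ("cat",), 20, 2, "v1-5.ckpt", cache))
    print(get_conds_with_caching(compute, ("cat",), 20, 1, "v1-5.ckpt", cache))  # recomputed: CLIP skip changed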
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f93df0a6..221771da 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -68,6 +68,8 @@ def apply_optimizations():
if selection == "None":
matching_optimizer = None
+ elif selection == "Automatic" and shared.cmd_opts.disable_opt_split_attention:
+ matching_optimizer = None
elif matching_optimizer is None:
matching_optimizer = optimizers[0]
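
This is the other half of the cmd_args.py change: with the selection left at "Automatic", the flag now vetoes the default pick instead of doing nothing. A condensed sketch of the branch order (the function and its arguments are illustrative, not webui's apply_optimizations signature):

    def choose(selection, optimizers, matching_optimizer, disable_opt_split_attention):
        if selection == "None":
            return None
        if selection == "Automatic" and disable_opt_split_attention:
            return None  # new branch: the flag suppresses the automatic choice
        if matching_optimizer is None:
            return optimizers[0]  # fall back to the highest-priority optimizer
        return matching_optimizer

    print(choose("Automatic", ["Doggettx"], None, True))   # None
    print(choose("Automatic", ["Doggettx"], None, False))  # Doggettx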
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 2ec0b049..80e48a42 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -59,7 +59,7 @@ class SdOptimizationSdpNoMem(SdOptimization):
name = "sdp-no-mem"
label = "scaled dot product without memory efficient attention"
cmd_opt = "opt_sdp_no_mem_attention"
- priority = 90
+ priority = 80
def is_available(self):
return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)
@@ -73,7 +73,7 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
name = "sdp"
label = "scaled dot product"
cmd_opt = "opt_sdp_attention"
- priority = 80
+ priority = 70
def apply(self):
ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
@@ -116,7 +116,7 @@ class SdOptimizationInvokeAI(SdOptimization):
class SdOptimizationDoggettx(SdOptimization):
name = "Doggettx"
cmd_opt = "opt_split_attention"
- priority = 20
+ priority = 90
def apply(self):
ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
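
Taken together, the three priority edits invert the automatic ranking: Doggettx jumps from 20 to 90 while the two scaled-dot-product variants drop to 80 and 70, so Doggettx becomes the default pick when no optimizer is requested explicitly. A sketch of that ordering, assuming higher priority wins and using only the three values visible in this diff:

    priorities = {"Doggettx": 90, "sdp-no-mem": 80, "sdp": 70}
    ordered = sorted(priorities, key=priorities.get, reverse=True)
    print(ordered)  # ['Doggettx', 'sdp-no-mem', 'sdp']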
diff --git a/modules/sd_models.py b/modules/sd_models.py
index b1afbaa7..7742998a 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -313,8 +313,6 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
timer.record("apply half()")
- devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
- devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
devices.dtype_unet = model.model.diffusion_model.dtype
devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
diff --git a/modules/shared.py b/modules/shared.py
index 3099d1d2..271a062d 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -6,6 +6,7 @@ import threading
import time
import gradio as gr
+import torch
import tqdm
import modules.interrogate
@@ -76,6 +77,9 @@ cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_op
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
+devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
+devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
+
device = devices.device
weight_load_location = None if cmd_opts.lowram else "cpu"
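
Together with the sd_models.py hunk above, this moves dtype selection from load_model_weights to startup, so devices.dtype and devices.dtype_vae are defined before the first checkpoint loads (and are no longer recomputed on every load). A sketch of the selection logic with the command-line flags mocked as plain attributes:

    import torch

    class CmdOpts:  # stand-in for webui's parsed cmd_opts
        no_half = False
        no_half_vae = True

    cmd_opts = CmdOpts()
    dtype = torch.float32 if cmd_opts.no_half else torch.float16
    dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
    print(dtype, dtype_vae)  # torch.float16 torch.float32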
diff --git a/modules/ui.py b/modules/ui.py
index e62182da..361f596e 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -505,10 +505,10 @@ def create_ui():
with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
with gr.Column(scale=80):
with gr.Row():
- hr_prompt = gr.Textbox(label="Prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
+ hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
with gr.Column(scale=80):
with gr.Row():
- hr_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
+ hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
elif category == "batch":
if not opts.dimensions_and_batch_together: