aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAUTOMATIC1111 <16777216c@gmail.com>2024-02-11 08:34:40 +0300
committerGitHub <noreply@github.com>2024-02-11 08:34:40 +0300
commit3732cf2f97be873f17b735221cca177f056bd478 (patch)
treeaa51ad124586b108c419fd2c292a12fd251d9f9b
parent2f1e2c492f4d8268deec341ceb858fc4e6efd039 (diff)
parentc3c88ca8b46a19f48104d0421e14be28853b2a92 (diff)
Merge pull request #14874 from hako-mikan/master
Add option to disable normalize embeddings after calculating emphasis.
-rw-r--r--modules/sd_hijack_clip.py4
-rw-r--r--modules/shared_options.py1
2 files changed, 4 insertions, 1 deletions
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 8f29057a..89634fbf 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -279,7 +279,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
original_mean = z.mean()
z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
new_mean = z.mean()
- z = z * (original_mean / new_mean)
+
+ if not getattr(opts, "disable_normalize_embeddings", False):
+ z = z * (original_mean / new_mean)
if pooled is not None:
z.pooled = pooled
diff --git a/modules/shared_options.py b/modules/shared_options.py
index bdd066c4..417a42b2 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -155,6 +155,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), {
"sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(),
"enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
+ "disable_normalize_embeddings": OptionInfo(False, "Disable normalize embeddings").info("Do not normalize embeddings after calculating emphasis. It can be expected to be effective in preventing artifacts in SDXL."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
"CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),