From 3ee12386307bbedb51265028e2e9af246094a12c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 1 Jun 2023 08:12:06 +0300
Subject: revert default cross attention optimization to Doggettx

make --disable-opt-split-attention command line option work again
---
 modules/cmd_args.py                | 2 +-
 modules/sd_hijack.py               | 2 ++
 modules/sd_hijack_optimizations.py | 6 +++---
 3 files changed, 6 insertions(+), 4 deletions(-)

(limited to 'modules')

diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index 3eeb84d5..71a21b1a 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -62,7 +62,7 @@ parser.add_argument("--opt-split-attention-invokeai", action='store_true', help=
 parser.add_argument("--opt-split-attention-v1", action='store_true', help="prefer older version of split attention optimization for automatic choice of optimization")
 parser.add_argument("--opt-sdp-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*")
 parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*")
-parser.add_argument("--disable-opt-split-attention", action='store_true', help="does not do anything")
+parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
 parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
 parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f93df0a6..221771da 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -68,6 +68,8 @@ def apply_optimizations():
 
     if selection == "None":
         matching_optimizer = None
+    elif selection == "Automatic" and shared.cmd_opts.disable_opt_split_attention:
+        matching_optimizer = None
     elif matching_optimizer is None:
         matching_optimizer = optimizers[0]
 
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 2ec0b049..80e48a42 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -59,7 +59,7 @@ class SdOptimizationSdpNoMem(SdOptimization):
     name = "sdp-no-mem"
     label = "scaled dot product without memory efficient attention"
     cmd_opt = "opt_sdp_no_mem_attention"
-    priority = 90
+    priority = 80
 
     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)
@@ -73,7 +73,7 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
     name = "sdp"
     label = "scaled dot product"
     cmd_opt = "opt_sdp_attention"
-    priority = 80
+    priority = 70
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
@@ -116,7 +116,7 @@ class SdOptimizationInvokeAI(SdOptimization):
 class SdOptimizationDoggettx(SdOptimization):
     name = "Doggettx"
     cmd_opt = "opt_split_attention"
-    priority = 20
+    priority = 90
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
-- 
cgit v1.2.1
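
For context, a minimal sketch of the selection behaviour this patch restores. It is not the webui's actual code: the Optimization dataclass and the choose() helper below are illustrative stand-ins, and only the three priorities touched by the patch are listed. Optimizations are ranked by priority, so Doggettx (90) now outranks sdp-no-mem (80) and sdp (70) under "Automatic", and passing --disable-opt-split-attention makes "Automatic" pick no optimization at all.

# Sketch only: mirrors the priorities and the disable-flag behaviour from the
# patch above; names other than the optimization names are assumptions.
from dataclasses import dataclass

@dataclass
class Optimization:
    name: str
    priority: int
    available: bool = True

# Priorities as set by this commit.
OPTIMIZERS = [
    Optimization("Doggettx", 90),
    Optimization("sdp-no-mem", 80),
    Optimization("sdp", 70),
]

def choose(selection: str, disable_opt_split_attention: bool):
    """Pick which cross-attention optimization to apply."""
    # Highest priority first, considering only available optimizations.
    candidates = sorted(
        (o for o in OPTIMIZERS if o.available),
        key=lambda o: o.priority,
        reverse=True,
    )
    if selection == "None":
        return None
    # The restored meaning of --disable-opt-split-attention:
    # under "Automatic", prefer no cross-attention optimization at all.
    if selection == "Automatic" and disable_opt_split_attention:
        return None
    if selection == "Automatic":
        return candidates[0] if candidates else None
    # Explicit selection by name.
    return next((o for o in candidates if o.name == selection), None)

if __name__ == "__main__":
    print(choose("Automatic", False))  # Doggettx wins with priority 90
    print(choose("Automatic", True))   # None: optimization disabled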