path: root/modules/sd_hijack_optimizations.py
author	AUTOMATIC <16777216c@gmail.com>	2023-06-01 08:12:06 +0300
committer	AUTOMATIC <16777216c@gmail.com>	2023-06-01 08:12:21 +0300
commit	3ee12386307bbedb51265028e2e9af246094a12c (patch)
tree	ce0cfcb19c6e94187d1d6f9114e7f6d1283bc867 /modules/sd_hijack_optimizations.py
parent	17a66931da70d066691ec700c7b642ee3a6cab25 (diff)
revert default cross attention optimization to Doggettx
make --disable-opt-split-attention command line option work again
Diffstat (limited to 'modules/sd_hijack_optimizations.py')
-rw-r--r--	modules/sd_hijack_optimizations.py	| 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 2ec0b049..80e48a42 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -59,7 +59,7 @@ class SdOptimizationSdpNoMem(SdOptimization):
     name = "sdp-no-mem"
     label = "scaled dot product without memory efficient attention"
     cmd_opt = "opt_sdp_no_mem_attention"
-    priority = 90
+    priority = 80
 
     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)
@@ -73,7 +73,7 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
     name = "sdp"
     label = "scaled dot product"
     cmd_opt = "opt_sdp_attention"
-    priority = 80
+    priority = 70
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
@@ -116,7 +116,7 @@ class SdOptimizationInvokeAI(SdOptimization):
 class SdOptimizationDoggettx(SdOptimization):
     name = "Doggettx"
     cmd_opt = "opt_split_attention"
-    priority = 20
+    priority = 90
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
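
For context on why these three numbers change together: the priority field is what decides which cross attention optimization the webui applies by default, with the highest-priority available option winning, so raising Doggettx to 90 while lowering the two sdp variants to 80 and 70 makes Doggettx the default again. Below is a minimal sketch of that kind of priority-based selection; the class and function names are hypothetical stand-ins, not the actual selection code in modules/sd_hijack.py.

from dataclasses import dataclass

@dataclass
class Optimization:
    """Stand-in for SdOptimization: only the fields the selection needs."""
    name: str
    priority: int
    available: bool = True

def pick_default(optimizations):
    """Return the highest-priority optimization that reports itself available."""
    candidates = [o for o in optimizations if o.available]
    return max(candidates, key=lambda o: o.priority, default=None)

# With the priorities introduced by this commit, Doggettx wins:
chosen = pick_default([
    Optimization("Doggettx", 90),
    Optimization("sdp-no-mem", 80),
    Optimization("sdp", 70),
])
print(chosen.name)  # Doggettx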