author    AUTOMATIC <16777216c@gmail.com>  2023-06-01 08:12:06 +0300
committer AUTOMATIC <16777216c@gmail.com>  2023-06-01 08:12:06 +0300
commit    36888092afa82ee248bc947229f813b453629317 (patch)
tree      1225366202c135e1e8da2e831bd932a0b2a39e02 /modules/sd_hijack_optimizations.py
parent    f1533de982350af06bc9cbaa436b3e4dfdef4eb8 (diff)
revert default cross attention optimization to Doggettx
make --disable-opt-split-attention command line option work again
Diffstat (limited to 'modules/sd_hijack_optimizations.py')
-rw-r--r--  modules/sd_hijack_optimizations.py  |  6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 5f0ff513..b41aa419 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -57,7 +57,7 @@ class SdOptimizationSdpNoMem(SdOptimization):
     name = "sdp-no-mem"
     label = "scaled dot product without memory efficient attention"
     cmd_opt = "opt_sdp_no_mem_attention"
-    priority = 90
+    priority = 80
 
     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)
@@ -71,7 +71,7 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
     name = "sdp"
     label = "scaled dot product"
     cmd_opt = "opt_sdp_attention"
-    priority = 80
+    priority = 70
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
@@ -114,7 +114,7 @@ class SdOptimizationInvokeAI(SdOptimization):
 class SdOptimizationDoggettx(SdOptimization):
     name = "Doggettx"
     cmd_opt = "opt_split_attention"
-    priority = 20
+    priority = 90
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward