author    space-nuko <24979496+space-nuko@users.noreply.github.com>  2023-03-27 16:26:23 -0500
committer GitHub <noreply@github.com>  2023-03-27 16:26:23 -0500
commit    082613036aa7b9a8a008384b1770046d6714bc28 (patch)
tree      a14ceb8cc54e7359058376ff6eb05bc3471f5e67 /modules/sd_hijack_optimizations.py
parent    d86beb822832c9162714cf0a3567ad087839a2ac (diff)
parent    955df7751eef11bb7697e2d77f6b8a6226b21e13 (diff)
Merge branch 'master' into remove-watermark-option
Diffstat (limited to 'modules/sd_hijack_optimizations.py')
-rw-r--r--  modules/sd_hijack_optimizations.py  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 2e307b5d..372555ff 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -337,7 +337,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
     dtype = q.dtype
     if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()
 
     out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v))
@@ -372,7 +372,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None):
     dtype = q.dtype
     if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()
 
     # the output of sdp = (batch, num_heads, seq_len, head_dim)
     hidden_states = torch.nn.functional.scaled_dot_product_attention(
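
Both hunks make the same change: when the `upcast_attn` option is enabled, the value tensor is now upcast to float32 along with the query and key before the attention call. The likely motivation is that both xformers' `memory_efficient_attention` and PyTorch's scaled dot-product attention expect q, k, and v to share a dtype, so upcasting only q and k would leave a half-precision v mismatched with the promoted q and k. Below is a minimal, self-contained sketch of the pattern, not the repository's code: `upcast_attn` is passed as a plain argument here rather than read from `shared.opts`, the tensors are assumed to already be shaped `(batch, heads, seq_len, head_dim)`, and torch's built-in SDP is used in place of the xformers op.

```python
import torch

def attention_with_optional_upcast(q, k, v, upcast_attn=True):
    # Remember the incoming dtype (e.g. float16) so the result can be cast back.
    dtype = q.dtype
    if upcast_attn:
        # Upcast query, key, AND value to float32, mirroring the patched lines above.
        q, k, v = q.float(), k.float(), v.float()

    # Attention call; the hijacked forwards use xformers.ops.memory_efficient_attention
    # or torch.nn.functional.scaled_dot_product_attention at this point.
    out = torch.nn.functional.scaled_dot_product_attention(q, k, v)

    # Cast back to the original dtype before returning.
    return out.to(dtype)

# Example usage with half-precision inputs of shape (batch, heads, seq_len, head_dim).
q = torch.randn(1, 8, 77, 64, dtype=torch.float16)
k = torch.randn(1, 8, 77, 64, dtype=torch.float16)
v = torch.randn(1, 8, 77, 64, dtype=torch.float16)
out = attention_with_optional_upcast(q, k, v, upcast_attn=True)
print(out.shape, out.dtype)  # torch.Size([1, 8, 77, 64]) torch.float16
```

The attention math itself runs in float32 when the flag is set, while callers still receive a tensor in the original dtype, which is the same cast-back step the hijacked forward functions perform after their attention call.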