From 00dfe27f59727407c5b408a80ff2a262934df495 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Mon, 29 May 2023 08:54:13 +0300
Subject: Add & use modules.errors.print_error where currently printing exception info by hand

---
 modules/sd_hijack_optimizations.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'modules/sd_hijack_optimizations.py')

diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 2ec0b049..fd186fa2 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -1,7 +1,5 @@
 from __future__ import annotations
 import math
-import sys
-import traceback
 import psutil
 
 import torch
@@ -11,6 +9,7 @@ from ldm.util import default
 from einops import rearrange
 
 from modules import shared, errors, devices, sub_quadratic_attention
+from modules.errors import print_error
 from modules.hypernetworks import hypernetwork
 
 import ldm.modules.attention
@@ -140,8 +139,7 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
         import xformers.ops
         shared.xformers_available = True
     except Exception:
-        print("Cannot import xformers", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
+        print_error("Cannot import xformers", exc_info=True)
 
 
 def get_available_vram():
--
cgit v1.2.1

From 05933840f0676dd1a90a7e2ad3f2a0672624b2cd Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 31 May 2023 19:56:37 +0300
Subject: rename print_error to report, use it with together with package name

---
 modules/sd_hijack_optimizations.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'modules/sd_hijack_optimizations.py')

diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index fd186fa2..5f0ff513 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -9,7 +9,6 @@ from ldm.util import default
 from einops import rearrange
 
 from modules import shared, errors, devices, sub_quadratic_attention
-from modules.errors import print_error
 from modules.hypernetworks import hypernetwork
 
 import ldm.modules.attention
@@ -139,7 +138,7 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
         import xformers.ops
         shared.xformers_available = True
     except Exception:
-        print_error("Cannot import xformers", exc_info=True)
+        errors.report("Cannot import xformers", exc_info=True)
 
 
 def get_available_vram():
--
cgit v1.2.1

From 36888092afa82ee248bc947229f813b453629317 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 1 Jun 2023 08:12:06 +0300
Subject: revert default cross attention optimization to Doggettx

make --disable-opt-split-attention command line option work again
---
 modules/sd_hijack_optimizations.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'modules/sd_hijack_optimizations.py')

diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 5f0ff513..b41aa419 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -57,7 +57,7 @@ class SdOptimizationSdpNoMem(SdOptimization):
     name = "sdp-no-mem"
     label = "scaled dot product without memory efficient attention"
     cmd_opt = "opt_sdp_no_mem_attention"
-    priority = 90
+    priority = 80
 
     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)
@@ -71,7 +71,7 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
     name = "sdp"
    label = "scaled dot product"
     cmd_opt = "opt_sdp_attention"
-    priority = 80
+    priority = 70
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
@@ -114,7 +114,7 @@ class SdOptimizationInvokeAI(SdOptimization):
 class SdOptimizationDoggettx(SdOptimization):
     name = "Doggettx"
     cmd_opt = "opt_split_attention"
-    priority = 20
+    priority = 90
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
--
cgit v1.2.1

From b1a72bc7e292246e70ec8ebebd3a9ca42dffff03 Mon Sep 17 00:00:00 2001
From: "Vivek K. Vasishtha"
Date: Sat, 3 Jun 2023 21:54:27 +0530
Subject: torch.cuda.is_available() check for SdOptimizationXformers

---
 modules/sd_hijack_optimizations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_hijack_optimizations.py')

diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 80e48a42..c2660177 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -48,7 +48,7 @@ class SdOptimizationXformers(SdOptimization):
     priority = 100
 
     def is_available(self):
-        return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0))
+        return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.cuda.is_available() and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = xformers_attention_forward
--
cgit v1.2.1

From 2e23c9c568617b4da16ca67d5bab0368ef14f68c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 4 Jun 2023 11:33:51 +0300
Subject: fix the broken line for #10990

---
 modules/sd_hijack_optimizations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_hijack_optimizations.py')

diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index c2660177..49f4bd16 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -48,7 +48,7 @@ class SdOptimizationXformers(SdOptimization):
     priority = 100
 
     def is_available(self):
-        return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.cuda.is_available() and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)
+        return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.cuda.is_available() and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0))
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = xformers_attention_forward
--
cgit v1.2.1

From d9cc0910c8aca481f294009526897152901c32b9 Mon Sep 17 00:00:00 2001
From: Alexander Ljungberg
Date: Tue, 6 Jun 2023 21:45:30 +0100
Subject: Fix upcast attention dtype error.

Without this fix, enabling the "Upcast cross attention layer to float32" option while also using `--opt-sdp-attention` breaks generation with an error:

```
  File "/ext3/automatic1111/stable-diffusion-webui/modules/sd_hijack_optimizations.py", line 612, in sdp_attnblock_forward
    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)
RuntimeError: Expected query, key, and value to have the same dtype, but got query.dtype: float key.dtype: float and value.dtype: c10::Half instead.
```

The fix is to make sure to upcast the value tensor too.
---
 modules/sd_hijack_optimizations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_hijack_optimizations.py')

diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 80e48a42..6464ca8e 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -605,7 +605,7 @@ def sdp_attnblock_forward(self, x):
     q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
     dtype = q.dtype
     if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()
     q = q.contiguous()
     k = k.contiguous()
     v = v.contiguous()
--
cgit v1.2.1
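
As a standalone illustration (not part of the patches above) of the constraint behind the last fix: `torch.nn.functional.scaled_dot_product_attention` requires the query, key, and value tensors to share a single dtype, so upcasting `q` and `k` without `v` fails exactly as in the traceback quoted in the commit message, while upcasting all three works. This is a minimal sketch; the tensor shapes are arbitrary illustration values, not the ones used by the webui.

```
import torch

# Half-precision attention inputs, as you would get from fp16 model weights.
# The (batch, heads, tokens, head_dim) shape here is an arbitrary example.
q = torch.randn(1, 8, 64, 40, dtype=torch.float16)
k = torch.randn(1, 8, 64, 40, dtype=torch.float16)
v = torch.randn(1, 8, 64, 40, dtype=torch.float16)
dtype = q.dtype

# Pre-fix behaviour: only q and k are upcast, v stays fp16, and the call is rejected.
try:
    torch.nn.functional.scaled_dot_product_attention(q.float(), k.float(), v)
except RuntimeError as e:
    print("mismatched dtypes:", e)

# Post-fix behaviour: upcast all three tensors together, then cast the result back.
out = torch.nn.functional.scaled_dot_product_attention(q.float(), k.float(), v.float())
out = out.to(dtype)
print(out.dtype)  # torch.float16
```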