about summary refs log tree commit diff
path: root/modules
diff options
context:
space:
mode:
authorAUTOMATIC1111 <16777216c@gmail.com>2023-06-04 11:34:32 +0300
committerGitHub <noreply@github.com>2023-06-04 11:34:32 +0300
commit56bf522913a5230e9fa0167639ea0205f0c498b1 (patch)
treece39741baa531262150d52d3127caa3ffab94e02 /modules
parent0819383de05e57ec5da638bd4d5d180b5bac981a (diff)
parent2e23c9c568617b4da16ca67d5bab0368ef14f68c (diff)
Merge pull request #10990 from vkage/sd_hijack_optimizations_bugfix
torch.cuda.is_available() check for SdOptimizationXformers
Diffstat (limited to 'modules')
-rw-r--r-- modules/sd_hijack_optimizations.py | 2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index b41aa419..3c71e6b5 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -46,7 +46,7 @@ class SdOptimizationXformers(SdOptimization):
priority = 100
def is_available(self):
- return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0))
+ return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.cuda.is_available() and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0))
def apply(self):
ldm.modules.attention.CrossAttention.forward = xformers_attention_forward