author      AUTOMATIC1111 <16777216c@gmail.com>      2023-09-09 09:33:37 +0300
committer   GitHub <noreply@github.com>              2023-09-09 09:33:37 +0300
commit      558baffa2c934e84eff1a6648b2b4f4c4eb25e2b (patch)
tree        1715892a36451c948b87fcf3a07205aed04447b4 /modules/devices.py
parent      4ebed495ed692fbd88b0b6f13670eab69faeca8c (diff)
parent      5681bf801664aa09fa02ab8b4e73f780d9563440 (diff)
Merge pull request #12924 from catboxanon/fix/cudnn
More accurate check for enabling cuDNN benchmark on 16XX cards
Diffstat (limited to 'modules/devices.py')
-rw-r--r--   modules/devices.py   3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/devices.py b/modules/devices.py
index c01f0602..63c38eff 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -60,7 +60,8 @@ def enable_tf32():
 
         # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
         # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
-        if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
+        device_id = (int(shared.cmd_opts.device_id) if shared.cmd_opts.device_id.isdigit() else 0) or torch.cuda.current_device()
+        if torch.cuda.get_device_capability(device_id) == (7, 5) and torch.cuda.get_device_name(device_id).startswith("NVIDIA GeForce GTX 16"):
             torch.backends.cudnn.benchmark = True
 
         torch.backends.cuda.matmul.allow_tf32 = True
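
For context, a minimal sketch of how enable_tf32() might read with this check applied. The surrounding function body, the "from modules import shared" import, and the extra None guard on cmd_opts.device_id are assumptions reconstructed around the hunk above, not a verbatim copy of modules/devices.py:

import torch

from modules import shared  # assumed import: the webui module exposing parsed command-line options


def enable_tf32():
    # Sketch of the post-merge logic; surrounding body is reconstructed, not verbatim.
    if torch.cuda.is_available():

        # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
        # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407

        # Pick the device to inspect: an explicit numeric --device-id wins,
        # otherwise fall back to the current CUDA device.
        # (The None guard is an added safety assumption; the hunk above treats device_id as a string.)
        device_id_opt = getattr(shared.cmd_opts, "device_id", None)
        device_id = (int(device_id_opt) if device_id_opt is not None and device_id_opt.isdigit() else 0) or torch.cuda.current_device()

        # cudnn.benchmark is only wanted on GTX 16xx cards; compute capability (7, 5)
        # alone also matches RTX 20xx, so the device name narrows the check.
        if torch.cuda.get_device_capability(device_id) == (7, 5) and torch.cuda.get_device_name(device_id).startswith("NVIDIA GeForce GTX 16"):
            torch.backends.cudnn.benchmark = True

        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True

The device-name test narrows the old compute-capability check: capability (7, 5) covers both GTX 16xx and RTX 20xx cards, so matching the "NVIDIA GeForce GTX 16" prefix keeps cudnn.benchmark limited to the 16xx series that the fp16 workaround targets.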