path: root/modules/mac_specific.py
author     AUTOMATIC1111 <16777216c@gmail.com>  2023-08-31 07:38:34 +0300
committer  AUTOMATIC1111 <16777216c@gmail.com>  2023-08-31 07:38:34 +0300
commit     5ef669de080814067961f28357256e8fe27544f4 (patch)
tree       655f4582e692f0fc3667b3b668ad365ac3ab92ae /modules/mac_specific.py
parent     c9c8485bc1e8720aba70f029d25cba1c4abf2b5c (diff)
parent     e7965a5eb804a51e949df07c66c0b7c61ab7fa7b (diff)
Merge branch 'release_candidate'
Diffstat (limited to 'modules/mac_specific.py')
-rw-r--r--  modules/mac_specific.py  |  7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 9ceb43ba..89256c5b 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -4,6 +4,7 @@ import torch
 import platform
 from modules.sd_hijack_utils import CondFunc
 from packaging import version
+from modules import shared
 
 
 log = logging.getLogger(__name__)
@@ -30,8 +31,7 @@ has_mps = check_for_mps()
 
 def torch_mps_gc() -> None:
     try:
-        from modules.shared import state
-        if state.current_latent is not None:
+        if shared.state.current_latent is not None:
             log.debug("`current_latent` is set, skipping MPS garbage collection")
             return
         from torch.mps import empty_cache
@@ -52,9 +52,6 @@ def cumsum_fix(input, cumsum_func, *args, **kwargs):
 
 
 if has_mps:
-    # MPS fix for randn in torchsde
-    CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps')
-
     if platform.mac_ver()[0].startswith("13.2."):
         # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
         CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)
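
The first two hunks replace a function-local "from modules.shared import state" with a module-level "from modules import shared" plus attribute access. One plausible reading is that attribute access is the safer spelling whenever a module may rebind its state object; a minimal standalone sketch of that Python binding behaviour, using types.SimpleNamespace stand-ins rather than the real webui modules:

# Sketch (assumption-level, standalone): why shared.state stays current
# while a from-import snapshot can go stale. SimpleNamespace stands in
# for the real modules.shared module.
import types

shared = types.SimpleNamespace(state=types.SimpleNamespace(current_latent=None))

state = shared.state                                    # snapshot of the binding
shared.state = types.SimpleNamespace(current_latent=1)  # module rebinds state

print(state.current_latent)         # None -- the snapshot is stale
print(shared.state.current_latent)  # 1 -- attribute access sees the rebind

In the original code the import ran on every call and so was never actually stale; hoisting it to module level mainly trims work from the garbage-collection hot path.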
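Both CondFunc calls on this page follow the same conditional monkey-patch pattern: replace a function with a wrapper that defers to a substitute only when a predicate on the arguments holds. A simplified sketch of that pattern (the helper name cond_patch and the attribute-based lookup are mine; the real CondFunc in modules/sd_hijack_utils resolves dotted import paths):

def cond_patch(owner, name, sub_func, cond_func):
    # Replace owner.name with a wrapper that calls sub_func(orig, ...) when
    # cond_func(orig, ...) is true, and the original function otherwise.
    orig = getattr(owner, name)
    def wrapper(*args, **kwargs):
        if cond_func(orig, *args, **kwargs):
            return sub_func(orig, *args, **kwargs)
        return orig(*args, **kwargs)
    setattr(owner, name, wrapper)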
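The removed torchsde patch applied that pattern to brownian_interval._randn on MPS devices: sample seeded noise on the CPU, then move it to the target device. Unrolled from the deleted lambda into a named function (randn_cpu_then_move is my label, not the repo's):

import torch

def randn_cpu_then_move(size, dtype, device, seed):
    # Seed a CPU generator explicitly, sample on CPU, then copy to the target
    # device; this sidesteps MPS seeding quirks at the cost of a transfer.
    gen = torch.Generator(torch.device("cpu")).manual_seed(int(seed))
    return torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=gen).to(device)

noise = randn_cpu_then_move((4, 4), torch.float32, torch.device("cpu"), seed=1234)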
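The surviving macOS 13.2 hunk reroutes torch.nn.functional.linear through an explicit matmul plus bias for inputs above 10485760 elements, dodging the kernel bug tracked in pytorch issue 95188. The two spellings agree numerically, which a small self-check illustrates (shapes chosen arbitrarily):

import torch
import torch.nn.functional as F

x = torch.randn(4, 8)    # (batch, in_features)
w = torch.randn(16, 8)   # (out_features, in_features)
b = torch.randn(16)

# F.linear computes x @ w.T + b; the CondFunc substitute writes that out
# with torch.matmul, bypassing the fused path that misbehaved on MPS.
assert torch.allclose(F.linear(x, w, b), torch.matmul(x, w.t()) + b, atol=1e-6)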