author    xeonvs <xeonvs@gmail.com>    2022-09-07 15:58:25 +0200
committer xeonvs <xeonvs@gmail.com>    2022-09-07 15:58:25 +0200
commit    65fbefd0337f9deb913c6956a9cfe2155c9c2f5b (patch)
tree      832839cc42c2f698c5cd080e718aa3a6cd4fef97
parent    15bb8e8057f8e19af4e5377437687aac7e33eeb4 (diff)
Added support for launching on Apple Silicon
-rw-r--r--  modules/esrgan_model.py  |  7
-rw-r--r--  modules/sd_hijack.py     |  5
-rw-r--r--  modules/shared.py        |  9
-rw-r--r--  requirements.txt         |  2
4 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 3dcef5a6..2ed1d273 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -14,8 +14,11 @@ import modules.images
 def load_model(filename):
     # this code is adapted from https://github.com/xinntao/ESRGAN
-
-    pretrained_net = torch.load(filename)
+    if torch.has_mps:
+        map_l = 'cpu'
+    else:
+        map_l = None
+    pretrained_net = torch.load(filename, map_location=map_l)
     crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32)
     if 'conv_first.weight' in pretrained_net:
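
Note on the hunk above: ESRGAN checkpoints are typically saved from CUDA tensors, so torch.load() without a map_location fails on machines that have no CUDA device; mapping to 'cpu' first works everywhere, and the model is moved to the target device later. A minimal standalone sketch of the same idea, assuming PyTorch 1.12+ (where torch.has_mps exists; newer releases prefer torch.backends.mps.is_available()); the function name and filename are illustrative, not part of the repo:

import torch

def load_state_dict_portably(path):
    # Checkpoints saved on a CUDA machine deserialize onto CUDA by default;
    # forcing map_location='cpu' avoids "CUDA not available" errors on
    # Apple Silicon (or any CPU-only) machine.
    map_location = None if torch.cuda.is_available() else 'cpu'
    return torch.load(path, map_location=map_location)

# state = load_state_dict_portably('ESRGAN_x4.pth')  # illustrative filename
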
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 2d26b5f7..9d0637bf 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -232,7 +232,10 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
         z = outputs.last_hidden_state
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
-        batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
+        if torch.has_mps:
+            batch_multipliers = torch.asarray(np.array(batch_multipliers).astype('float32')).to(device)
+        else:
+            batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
         original_mean = z.mean()
         z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
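
The float32 cast above matters because the MPS backend does not support float64 tensors, and a NumPy array built from Python floats defaults to float64. A hedged sketch of that behaviour in isolation (the values are illustrative):

import numpy as np
import torch

multipliers = np.array([[1.0, 1.1, 0.9]])          # dtype is float64 by default
t = torch.asarray(multipliers.astype('float32'))   # narrow before moving to mps
# if torch.backends.mps.is_available():
#     t = t.to('mps')                              # a float64 tensor would fail here
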
diff --git a/modules/shared.py b/modules/shared.py
index beb6f9bb..e529ec27 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -36,9 +36,12 @@ parser.add_argument("--opt-split-attention", action='store_true', help="enable o
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
cmd_opts = parser.parse_args()
-cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
+if torch.has_cuda:
+ device = torch.device("cuda")
+elif torch.has_mps:
+ device = torch.device("mps")
+else:
+ device = torch.device("cpu")
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
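
The shared.py change replaces the fixed cpu/gpu pair with a cuda -> mps -> cpu fallback. torch.has_cuda and torch.has_mps report whether the installed PyTorch build includes those backends; a runtime availability check is the other common option. A small sketch of that runtime-checked variant, assuming PyTorch 1.12+ (pick_device is an illustrative name, not part of the repo):

import torch

def pick_device() -> torch.device:
    # Prefer CUDA, then Apple's Metal Performance Shaders backend, then plain CPU.
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")

device = pick_device()
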
diff --git a/requirements.txt b/requirements.txt
index c9e3f2fc..2eebb029 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,5 +10,7 @@ omegaconf
 pytorch_lightning
 diffusers
 invisible-watermark
+einops
+taming-transformers-rom1504
 git+https://github.com/crowsonkb/k-diffusion.git
 git+https://github.com/TencentARC/GFPGAN.git