 .DS_Store                                | Bin 0 -> 6148 bytes
 launch.py                                |  10 +++++-----
 modules/sd_hijack.py                     |   6 +++++-
 v2-inference.yaml => v2-inference-v.yaml |   1 +
 4 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 00000000..5008ddfc
--- /dev/null
+++ b/.DS_Store
Binary files differ
diff --git a/launch.py b/launch.py
index 0d8f2776..0e1bbaf2 100644
--- a/launch.py
+++ b/launch.py
@@ -234,11 +234,11 @@ def prepare_enviroment():
     os.makedirs(dir_repos, exist_ok=True)
 
-    git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", )
-    git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", )
-    git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", )
-    git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", )
-    git_clone(blip_repo, repo_dir('BLIP'), "BLIP", )
+    git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
+    git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
+    git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
+    git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
+    git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
 
     if not is_installed("lpips"):
         run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 9b5890e7..9fed1b6f 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -112,7 +112,11 @@ class StableDiffusionModelHijack:
         self.layers = flatten(m)
 
     def undo_hijack(self, m):
-        if type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
+
+        if shared.text_model_name == "XLMR-Large":
+            m.cond_stage_model = m.cond_stage_model.wrapped
+
+        elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
             m.cond_stage_model = m.cond_stage_model.wrapped
 
         model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
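
Note: the new branch handles the XLMR-Large text encoder (used for AltDiffusion). Undoing its hijack only unwraps m.cond_stage_model, whereas the CLIP path falls through to restoring the original token embedding as well. As a hedged sketch of that follow-up step, assuming the EmbeddingsWithFixes wrapper defined elsewhere in this module:

    # Sketch: after unwrapping a CLIP cond_stage_model, put back the original
    # token embedding if it was replaced with the hijack wrapper.
    model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
    if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
        model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped

The XLMR model is skipped here presumably because its wrapper does not expose the same transformer.text_model layout, so only the unwrap applies.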
diff --git a/v2-inference.yaml b/v2-inference-v.yaml
index 0eb25395..513cd635 100644
--- a/v2-inference.yaml
+++ b/v2-inference-v.yaml
@@ -2,6 +2,7 @@ model:
   base_learning_rate: 1.0e-4
   target: ldm.models.diffusion.ddpm.LatentDiffusion
   params:
+    parameterization: "v"
     linear_start: 0.00085
     linear_end: 0.0120
     num_timesteps_cond: 1
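
Note: renaming the config to v2-inference-v.yaml and adding parameterization: "v" marks it as the config for v-prediction checkpoints (such as the Stable Diffusion 2.0 768-v model), where the UNet is trained to predict the velocity target v instead of the noise; LatentDiffusion otherwise defaults to the eps objective. For reference, the standard v-parameterization from the progressive-distillation literature (not something defined in this diff), written in LaTeX:

    % With x_t = \alpha_t x_0 + \sigma_t \epsilon and \alpha_t^2 + \sigma_t^2 = 1:
    v_t = \alpha_t \epsilon - \sigma_t x_0, \qquad
    \hat{x}_0 = \alpha_t x_t - \sigma_t v_t, \qquad
    \hat{\epsilon} = \sigma_t x_t + \alpha_t v_t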