about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--modules/api/models.py2
-rw-r--r--modules/sd_models.py10
-rw-r--r--modules/sd_models_config.py7
3 files changed, 11 insertions, 8 deletions
diff --git a/modules/api/models.py b/modules/api/models.py
index 805bd8f7..cba43d3b 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -228,7 +228,7 @@ class SDModelItem(BaseModel):
hash: Optional[str] = Field(title="Short hash")
sha256: Optional[str] = Field(title="sha256 hash")
filename: str = Field(title="Filename")
- config: str = Field(title="Config file")
+ config: Optional[str] = Field(title="Config file")
class HypernetworkItem(BaseModel):
name: str = Field(title="Name")
diff --git a/modules/sd_models.py b/modules/sd_models.py
index fa208728..37dad18d 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -439,12 +439,12 @@ def reload_model_weights(sd_model=None, info=None):
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
- if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
- lowvram.send_everything_to_cpu()
- else:
- sd_model.to(devices.cpu)
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.send_everything_to_cpu()
+ else:
+ sd_model.to(devices.cpu)
- sd_hijack.model_hijack.undo_hijack(sd_model)
+ sd_hijack.model_hijack.undo_hijack(sd_model)
timer = Timer()
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
index ea773a10..4d1e92e1 100644
--- a/modules/sd_models_config.py
+++ b/modules/sd_models_config.py
@@ -10,6 +10,7 @@ sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs",
config_default = shared.sd_default_config
config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
+config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
@@ -22,7 +23,9 @@ def guess_model_config_from_state_dict(sd, filename):
sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
- roberta_weight = sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None)
+
+ if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
+ return config_depth_model
if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
if re.search(re_parametrization_v, fn) or "v2-1_768" in fn:
@@ -36,7 +39,7 @@ def guess_model_config_from_state_dict(sd, filename):
if diffusion_model_input.shape[1] == 8:
return config_instruct_pix2pix
- if roberta_weight is not None:
+ if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
return config_alt_diffusion
return config_default