path: root/modules/sd_models.py
author     不会画画的中医不是好程序员 <yfszzx@gmail.com>    2022-10-16 10:04:05 +0800
committer  GitHub <noreply@github.com>                      2022-10-16 10:04:05 +0800
commit     d41ac174e24e1e7cdcf7b42f2a03cbc6394eb5e5 (patch)
tree       da39175c109598f17e89fafe57aac9b9597ff616 /modules/sd_models.py
parent     6e4f5566b58e36aede83427df6c69eba8517af28 (diff)
parent     be1596ce30b1ead6998da0c62003003dcce5eb2c (diff)
Merge branch 'AUTOMATIC1111:master' into master
Diffstat (limited to 'modules/sd_models.py')
-rw-r--r--  modules/sd_models.py  58
1 file changed, 32 insertions, 26 deletions
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 3a01c93d..3aa21ec1 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -1,4 +1,4 @@
-import glob
+import collections
 import os.path
 import sys
 from collections import namedtuple
@@ -15,6 +15,7 @@ model_path = os.path.abspath(os.path.join(models_path, model_dir))
 
 CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
 checkpoints_list = {}
+checkpoints_loaded = collections.OrderedDict()
 
 try:
     # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
@@ -132,41 +133,45 @@ def load_model_weights(model, checkpoint_info):
     checkpoint_file = checkpoint_info.filename
     sd_model_hash = checkpoint_info.hash
 
-    print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
+    if checkpoint_info not in checkpoints_loaded:
+        print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
 
-    pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
+        pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
+        if "global_step" in pl_sd:
+            print(f"Global Step: {pl_sd['global_step']}")
 
-    if "global_step" in pl_sd:
-        print(f"Global Step: {pl_sd['global_step']}")
+        sd = get_state_dict_from_checkpoint(pl_sd)
+        model.load_state_dict(sd, strict=False)
 
-    sd = get_state_dict_from_checkpoint(pl_sd)
+        if shared.cmd_opts.opt_channelslast:
+            model.to(memory_format=torch.channels_last)
 
-    model.load_state_dict(sd, strict=False)
+        if not shared.cmd_opts.no_half:
+            model.half()
 
-    if shared.cmd_opts.opt_channelslast:
-        model.to(memory_format=torch.channels_last)
+        devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
+        devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
 
-    if not shared.cmd_opts.no_half:
-        model.half()
+        vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
 
-    devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
-    devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
+        if not os.path.exists(vae_file) and shared.cmd_opts.vae_path is not None:
+            vae_file = shared.cmd_opts.vae_path
 
-    vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
+        if os.path.exists(vae_file):
+            print(f"Loading VAE weights from: {vae_file}")
+            vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
+            vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
+            model.first_stage_model.load_state_dict(vae_dict)
 
-    if not os.path.exists(vae_file) and shared.cmd_opts.vae_path is not None:
-        vae_file = shared.cmd_opts.vae_path
+        model.first_stage_model.to(devices.dtype_vae)
 
-    if os.path.exists(vae_file):
-        print(f"Loading VAE weights from: {vae_file}")
-
-        vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
-
-        vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
-
-        model.first_stage_model.load_state_dict(vae_dict)
-
-        model.first_stage_model.to(devices.dtype_vae)
+        checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
+        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+            checkpoints_loaded.popitem(last=False) # LRU
+    else:
+        print(f"Loading weights [{sd_model_hash}] from cache")
+        checkpoints_loaded.move_to_end(checkpoint_info)
+        model.load_state_dict(checkpoints_loaded[checkpoint_info])
 
     model.sd_model_hash = sd_model_hash
     model.sd_model_checkpoint = checkpoint_file
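
A note on the caching change in the hunk above: collections.OrderedDict is used as a small least-recently-used (LRU) cache keyed by CheckpointInfo, with popitem(last=False) evicting the oldest entry and move_to_end() refreshing an entry on a cache hit. The sketch below is a minimal, self-contained illustration of that pattern, not code from this repository; cache_size stands in for shared.opts.sd_checkpoint_cache, and the cached values are plain strings instead of model state dicts.

    import collections

    cache = collections.OrderedDict()
    cache_size = 2  # stand-in for shared.opts.sd_checkpoint_cache

    def load(key):
        if key not in cache:
            print(f"loading {key} from disk")
            cache[key] = f"state dict for {key}"  # the real code stores model.state_dict().copy()
            while len(cache) > cache_size:
                cache.popitem(last=False)  # evict the least recently used entry
        else:
            print(f"loading {key} from cache")
            cache.move_to_end(key)  # mark this entry as most recently used
        return cache[key]

    load("a.ckpt"); load("b.ckpt"); load("a.ckpt")  # "a.ckpt" becomes most recent
    load("c.ckpt")                                  # evicts "b.ckpt", the oldest entry
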
@@ -205,6 +210,7 @@ def reload_model_weights(sd_model, info=None):
         return
 
     if sd_model.sd_checkpoint_info.config != checkpoint_info.config:
+        checkpoints_loaded.clear()
         shared.sd_model = load_model()
         return shared.sd_model
 
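
One more detail from the load_model_weights hunk: a checkpoint-specific VAE is looked up as a "<checkpoint>.vae.pt" file next to the checkpoint, with shared.cmd_opts.vae_path acting as a fallback. The helper below is a hypothetical standalone sketch of that lookup order (resolve_vae_file is not a function in this repository), assuming cli_vae_path plays the role of shared.cmd_opts.vae_path.

    import os

    def resolve_vae_file(checkpoint_file, cli_vae_path=None):
        # Prefer a sidecar "<checkpoint>.vae.pt" next to the checkpoint file.
        vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
        # Fall back to an explicitly supplied VAE path if the sidecar is missing.
        if not os.path.exists(vae_file) and cli_vae_path is not None:
            vae_file = cli_vae_path
        return vae_file if os.path.exists(vae_file) else None

    # Example: for "models/Stable-diffusion/model.ckpt" this checks
    # "models/Stable-diffusion/model.vae.pt" first.
    print(resolve_vae_file("models/Stable-diffusion/model.ckpt"))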