author    AUTOMATIC1111 <16777216c@gmail.com>    2022-10-29 09:16:00 +0300
committer GitHub <noreply@github.com>            2022-10-29 09:16:00 +0300
commit    9553a7e071351455074bac255c529e1184c5ee24 (patch)
tree      feee5700dae71c81837cf73ee3d3769a427cf57d /modules/sd_models.py
parent    28e6d4a54ea1fa1e34ad1ea0742ab2003ed7fa7f (diff)
parent    b50ff4f4e4d4d6bf31e222832d3fe4cfde4703c9 (diff)
Merge pull request #3818 from jwatzman/master
Reduce peak memory usage when changing models
Diffstat (limited to 'modules/sd_models.py')
-rw-r--r--    modules/sd_models.py    11
1 file changed, 7 insertions, 4 deletions
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 64d5ee0d..f86dc3ed 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -173,7 +173,9 @@ def load_model_weights(model, checkpoint_info):
             print(f"Global Step: {pl_sd['global_step']}")
 
         sd = get_state_dict_from_checkpoint(pl_sd)
-        missing, extra = model.load_state_dict(sd, strict=False)
+        del pl_sd
+        model.load_state_dict(sd, strict=False)
+        del sd
 
         if shared.cmd_opts.opt_channelslast:
             model.to(memory_format=torch.channels_last)
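
The hunk above drops the reference to the full checkpoint dict (pl_sd) before load_state_dict runs, and drops the extracted state dict (sd) right after, so at most one spare copy of the weights stays alive at a time. A minimal self-contained sketch of the same idea; load_weights_low_peak and the checkpoint path argument are hypothetical stand-ins for illustration, not the webui's actual loader:

import torch

def load_weights_low_peak(model, checkpoint_path):
    # torch.load materializes every tensor in the checkpoint, so this
    # dict alone can hold several GB for a Stable Diffusion model.
    pl_sd = torch.load(checkpoint_path, map_location="cpu")
    sd = pl_sd.get("state_dict", pl_sd)

    # Drop the wrapper dict now; when the weights live under "state_dict",
    # this frees everything except the tensors `sd` still references.
    del pl_sd

    # load_state_dict copies tensor data into the model's own parameters,
    # so the checkpoint copy in `sd` is dead weight afterwards; deleting it
    # here releases that memory instead of holding it until the caller returns.
    model.load_state_dict(sd, strict=False)
    del sd
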
@@ -197,9 +199,10 @@ def load_model_weights(model, checkpoint_info):
         model.first_stage_model.to(devices.dtype_vae)
 
-        checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
-        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
-            checkpoints_loaded.popitem(last=False)  # LRU
+        if shared.opts.sd_checkpoint_cache > 0:
+            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
+            while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+                checkpoints_loaded.popitem(last=False)  # LRU
 
     else:
         print(f"Loading weights [{sd_model_hash}] from cache")
         checkpoints_loaded.move_to_end(checkpoint_info)
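
The second hunk skips both the model.state_dict().copy() and the cache insertion entirely when sd_checkpoint_cache is 0, instead of copying first and immediately evicting again. checkpoints_loaded is a collections.OrderedDict used as an LRU cache: popitem(last=False) evicts the oldest entry, and move_to_end marks a cache hit as most recently used. A self-contained sketch of that pattern, with an illustrative cap of 2 and string keys rather than the webui's configured value and checkpoint_info objects:

from collections import OrderedDict

checkpoints_loaded = OrderedDict()
sd_checkpoint_cache = 2  # illustrative cap; the webui reads opts.sd_checkpoint_cache

def cache_checkpoint(checkpoint_info, state_dict):
    # Only insert when caching is enabled, then evict the
    # least-recently-used entries while over the cap.
    if sd_checkpoint_cache > 0:
        checkpoints_loaded[checkpoint_info] = state_dict
        while len(checkpoints_loaded) > sd_checkpoint_cache:
            checkpoints_loaded.popitem(last=False)  # oldest entry first

def get_cached(checkpoint_info):
    # On a hit, move the entry to the "recent" end so it is evicted last.
    sd = checkpoints_loaded.get(checkpoint_info)
    if sd is not None:
        checkpoints_loaded.move_to_end(checkpoint_info)
    return sd

# With a cap of 2, inserting a third checkpoint evicts the oldest one.
cache_checkpoint("model-a", {"w": 1})
cache_checkpoint("model-b", {"w": 2})
cache_checkpoint("model-c", {"w": 3})
assert list(checkpoints_loaded) == ["model-b", "model-c"]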