author    Φφ <42910943+Brawlence@users.noreply.github.com>  2023-03-09 07:56:19 +0300
committer Φφ <42910943+Brawlence@users.noreply.github.com>  2023-03-21 09:28:50 +0300
commit    4cbbb881ee530d9b9ba18027e2b0057e6a2c4ee1 (patch)
tree      943c119f18a0aa7a748f95c989f73e3a9a69a1c3 /modules/sd_models.py
parent    a9fed7c364061ae6efb37f797b6b522cb3cf7aa2 (diff)
Unload checkpoints on request
…to free VRAM. New action buttons in the settings allow manually freeing and reloading checkpoints, essentially juggling the model between RAM and VRAM.
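
For context, a minimal sketch of how such settings action buttons could be wired to these helpers with gradio; the button names, labels, and the Blocks container below are assumptions for illustration, not code from this commit:

# Hypothetical wiring sketch (not the UI code from this commit): two
# settings-page buttons that call the helpers in modules/sd_models.py.
import gradio as gr

from modules import sd_models

with gr.Blocks() as settings_actions:
    unload_sd_model = gr.Button(value="Unload SD checkpoint to free VRAM")
    reload_sd_model = gr.Button(value="Reload the last SD checkpoint (VRAM intensive)")

    # Clicking moves the model off the GPU / loads it back; no UI outputs are needed.
    unload_sd_model.click(fn=sd_models.unload_model_weights, inputs=[], outputs=[])
    reload_sd_model.click(fn=sd_models.reload_model_weights, inputs=[], outputs=[])
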
Diffstat (limited to 'modules/sd_models.py')
-rw-r--r--  modules/sd_models.py  22
1 file changed, 21 insertions, 1 deletion
diff --git a/modules/sd_models.py b/modules/sd_models.py
index f0cb1240..f9dd0521 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -494,7 +494,7 @@ def reload_model_weights(sd_model=None, info=None):
     if sd_model is None or checkpoint_config != sd_model.used_config:
         del sd_model
         checkpoints_loaded.clear()
-        load_model(checkpoint_info, already_loaded_state_dict=state_dict, time_taken_to_load_state_dict=timer.records["load weights from disk"])
+        load_model(checkpoint_info, already_loaded_state_dict=state_dict)
         return shared.sd_model

     try:
@@ -517,3 +517,23 @@ def reload_model_weights(sd_model=None, info=None):
     print(f"Weights loaded in {timer.summary()}.")

     return sd_model
+
+def unload_model_weights(sd_model=None, info=None):
+    from modules import lowvram, devices, sd_hijack
+    timer = Timer()
+
+    if shared.sd_model:
+
+        # shared.sd_model.cond_stage_model.to(devices.cpu)
+        # shared.sd_model.first_stage_model.to(devices.cpu)
+        shared.sd_model.to(devices.cpu)
+        sd_hijack.model_hijack.undo_hijack(shared.sd_model)
+        shared.sd_model = None
+        sd_model = None
+        gc.collect()
+        devices.torch_gc()
+        torch.cuda.empty_cache()
+
+    print(f"Unloaded weights {timer.summary()}.")
+
+    return sd_model
\ No newline at end of file
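
A minimal usage sketch of the resulting pair of helpers, assuming the webui's modules package is importable (e.g. from an extension or custom script); the intermediate task is a placeholder:

# Hypothetical round trip between VRAM and RAM using the two helpers.
from modules import sd_models

sd_models.unload_model_weights()   # move the checkpoint to CPU and clear the CUDA cache
# ... run some other VRAM-hungry task here ...
sd_models.reload_model_weights()   # load the checkpoint back onto the GPU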