author     AUTOMATIC <16777216c@gmail.com>    2022-09-12 11:55:27 +0300
committer  AUTOMATIC <16777216c@gmail.com>    2022-09-12 11:55:27 +0300
commit     9bb20be09092bb6c568f676b63105fb85b0c05cf (patch)
tree       41fd3d246dd3f3f64f8dd17a178146b0896c3cf1 /modules/interrogate.py
parent     ab0a79cdf40a149442a759226cf990b7f3033b01 (diff)
memory optimization for CLIP interrogator
changed default cfg_scale to a higher value
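In short, the diff below splits unload() into send_clip_to_ram() and send_blip_to_ram(), so the BLIP captioning model can be moved out of VRAM as soon as the caption is generated and before CLIP encodes the image. The following is a minimal standalone sketch of that offload-then-collect pattern, assuming a CUDA device and using plain torch calls in place of the module's devices.torch_gc() helper:

import torch

def send_to_ram(model):
    # Move a finished model's weights to CPU RAM so the GPU is free for the next model.
    if model is not None:
        model = model.to("cpu")
    # Release cached VRAM back to the driver; roughly what devices.torch_gc() does.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return model

# Typical order in the interrogator after this change (illustrative, not the project's API):
#   caption = blip_caption(image)         # hypothetical captioning call
#   blip_model = send_to_ram(blip_model)  # free VRAM before CLIP runs
#   features = clip_encode(image)         # hypothetical CLIP call

The same idea drives the new lowvram/medvram branch at the top of interrogate(): everything else is pushed to the CPU first so the interrogation models have the GPU to themselves.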
Diffstat (limited to 'modules/interrogate.py')
-rw-r--r--  modules/interrogate.py  28
1 file changed, 23 insertions, 5 deletions
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 7ebb79fc..1579c042 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -11,7 +11,7 @@ from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import modules.shared as shared
-from modules import devices, paths
+from modules import devices, paths, lowvram
blip_image_eval_size = 384
blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
@@ -75,19 +75,28 @@ class InterrogateModels:
        self.dtype = next(self.clip_model.parameters()).dtype
-    def unload(self):
+    def send_clip_to_ram(self):
        if not shared.opts.interrogate_keep_models_in_memory:
            if self.clip_model is not None:
                self.clip_model = self.clip_model.to(devices.cpu)
+    def send_blip_to_ram(self):
+        if not shared.opts.interrogate_keep_models_in_memory:
            if self.blip_model is not None:
                self.blip_model = self.blip_model.to(devices.cpu)
-            devices.torch_gc()
+    def unload(self):
+        self.send_clip_to_ram()
+        self.send_blip_to_ram()
+
+        devices.torch_gc()
    def rank(self, image_features, text_array, top_count=1):
        import clip
+        if shared.opts.interrogate_clip_dict_limit != 0:
+            text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
+
        top_count = min(top_count, len(text_array))
        text_tokens = clip.tokenize([text for text in text_array]).to(shared.device)
        text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
@@ -117,16 +126,24 @@ class InterrogateModels:
        res = None
        try:
+
+            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+                lowvram.send_everything_to_cpu()
+                devices.torch_gc()
+
            self.load()
            caption = self.generate_caption(pil_image)
+            self.send_blip_to_ram()
+            devices.torch_gc()
+
            res = caption
-            images = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
+            cilp_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
            precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext
            with torch.no_grad(), precision_scope("cuda"):
-                image_features = self.clip_model.encode_image(images).type(self.dtype)
+                image_features = self.clip_model.encode_image(cilp_image).type(self.dtype)
                image_features /= image_features.norm(dim=-1, keepdim=True)
@@ -146,4 +163,5 @@ class InterrogateModels:
        self.unload()
+        res += "<error>"
        return res
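The second memory lever added here is shared.opts.interrogate_clip_dict_limit, which caps how many candidate phrases rank() tokenizes and encodes per call (0 means no limit). A small illustrative sketch of that truncation, with a made-up helper name and sample phrases:

def apply_dict_limit(text_array, limit):
    # Mirror of the new check in rank(): a non-zero limit keeps only the first N entries.
    if limit != 0:
        return text_array[0:int(limit)]
    return text_array

phrases = ["oil painting", "watercolor", "photograph", "3d render"]
print(apply_dict_limit(phrases, 2))  # ['oil painting', 'watercolor']
print(apply_dict_limit(phrases, 0))  # all four phrases kept

Fewer candidate phrases means fewer text tokens to encode per rank() call, trading ranking breadth for lower memory use.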