author     AUTOMATIC <16777216c@gmail.com>    2022-10-02 22:59:01 +0300
committer  AUTOMATIC <16777216c@gmail.com>    2022-10-02 22:59:01 +0300
commit     6785331e22d6a488fbf5905fab56d7fec867e038 (patch)
tree       111f08d07e3dfc82a02857155cccdeefe7afdb70 /modules/textual_inversion
parent     c7543d4940da672d970124ae8f2fec9de7bdc1da (diff)
keep textual inversion dataset latents in CPU memory to save a bit of VRAM
Diffstat (limited to 'modules/textual_inversion')
-rw-r--r--  modules/textual_inversion/dataset.py            2
-rw-r--r--  modules/textual_inversion/textual_inversion.py  3
2 files changed, 5 insertions, 0 deletions
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
index 7e134a08..e8394ff6 100644
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -8,6 +8,7 @@ from torchvision import transforms
 
 import random
 import tqdm
+from modules import devices
 
 
 class PersonalizedBase(Dataset):
@@ -47,6 +48,7 @@ class PersonalizedBase(Dataset):
             torchdata = torch.moveaxis(torchdata, 2, 0)
 
             init_latent = model.get_first_stage_encoding(model.encode_first_stage(torchdata.unsqueeze(dim=0))).squeeze()
+            init_latent = init_latent.to(devices.cpu)
 
             self.dataset.append((init_latent, filename_tokens))
 
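The dataset change above follows a simple pattern: encode each training image to its latent on the GPU, then immediately move the resulting tensor to CPU memory, so only the latent currently being encoded occupies VRAM while the full set of latents sits in system RAM. A minimal sketch of that pattern follows; encode_to_latent is a hypothetical stand-in for the repository's model.encode_first_stage / get_first_stage_encoding pair, and the random tensors are placeholders for real training images.

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def encode_to_latent(image: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in for the VAE first-stage encoder; any module that
    # maps an image tensor to a smaller latent tensor would do here.
    return torch.nn.functional.avg_pool2d(image, 8)

latents = []
for image in (torch.rand(1, 3, 512, 512) for _ in range(4)):  # placeholder images
    with torch.no_grad():
        latent = encode_to_latent(image.to(device))
    # Move the latent off the GPU right away, mirroring
    # "init_latent = init_latent.to(devices.cpu)" in the hunk above.
    latents.append(latent.cpu())
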
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index d4e250d8..8686f534 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -212,7 +212,10 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
 
         with torch.autocast("cuda"):
             c = cond_model([text])
+
+            x = x.to(devices.device)
             loss = shared.sd_model(x.unsqueeze(0), c)[0]
+            del x
 
         losses[embedding.step % losses.shape[0]] = loss.item()
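
The training-loop change is the complementary half of the same pattern: each step copies one CPU-resident latent back to the GPU just before the forward pass and drops the GPU copy afterwards, so VRAM only ever holds the latent for the current step. A rough sketch under those assumptions, with compute_loss as a hypothetical stand-in for shared.sd_model(x.unsqueeze(0), c)[0]:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def compute_loss(x: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in for the diffusion model's forward pass.
    return x.mean()

cpu_latents = [torch.rand(4, 64, 64) for _ in range(3)]  # dataset kept in CPU RAM

for latent in cpu_latents:
    x = latent.to(device)               # bring only this step's latent into VRAM
    loss = compute_loss(x.unsqueeze(0))
    del x                               # release the GPU copy before the next step
    print(loss.item())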