author	flamelaw <flamelaw.com3d2@gmail.com>	2022-11-27 00:35:44 +0900
committer	flamelaw <flamelaw.com3d2@gmail.com>	2022-11-27 00:35:44 +0900
commit	755df94b2aa62eabd96f900e0dd7ddc83c2f692c (patch)
tree	ee02b0a05e868e6e1234f469d1d503bf96d3ccb8 /modules/textual_inversion/textual_inversion.py
parent	1bd57cc9791e2e742f72a3d74d589f2c289e8e92 (diff)
set TI AdamW default weight decay to 0
Diffstat (limited to 'modules/textual_inversion/textual_inversion.py')
-rw-r--r--	modules/textual_inversion/textual_inversion.py	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index fee08e33..b9b1394f 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -283,7 +283,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
shared.sd_model.first_stage_model.to(devices.cpu)
embedding.vec.requires_grad = True
- optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
+ optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
scaler = torch.cuda.amp.GradScaler()
batch_size = ds.batch_size
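
Note on the change (illustrative, not part of the patch): torch.optim.AdamW defaults to weight_decay=1e-2, and its decoupled decay shrinks each parameter toward zero on every optimizer step, independently of the gradient. Since embedding.vec is the only trainable tensor in textual inversion training, a nonzero default decay would steadily erode the learned embedding, which is presumably why the patch passes weight_decay=0.0 explicitly. A minimal sketch of the effect follows; the tensor shape and learning rate are placeholders, not values taken from this repository.

```python
import torch

vec = torch.ones(8, 768)  # stand-in for an embedding vector (shape is a placeholder)

for wd in (1e-2, 0.0):    # 1e-2 is torch.optim.AdamW's default weight_decay
    p = vec.clone().requires_grad_(True)
    opt = torch.optim.AdamW([p], lr=5e-3, weight_decay=wd)
    for _ in range(100):
        p.grad = torch.zeros_like(p)   # even with a zero gradient...
        opt.step()                     # ...decoupled weight decay still shrinks p
    print(f"weight_decay={wd}: norm {p.norm().item():.4f}")

# With weight_decay=1e-2 the norm of p decreases every step;
# with weight_decay=0.0 it stays unchanged.
```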