author    AUTOMATIC1111 <16777216c@gmail.com>  2023-01-11 19:04:54 +0300
committer GitHub <noreply@github.com>          2023-01-11 19:04:54 +0300
commit    6d7f3d107263ed4e767bd2a50cce82f125d4ddbf (patch)
tree      5a511db80b026a6399486c8e338ef797232929bc /modules
parent    97ff69eff338c6641f4abf430bf5ac112c1775e0 (diff)
parent    3f43d8a966ba8462ba019a5ad573f94508cd45f8 (diff)
Merge pull request #6648 from vladmandic/progress-description
Set TQDM progress bar and state textinfo description
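All three hunks below apply the same pattern: bind the tqdm bar to a name, build the status string once, then push it both to the bar via pbar.set_description and to shared.state.textinfo, so the console bar and the web UI report the same text. A minimal sketch of that pattern, with a hypothetical State stand-in since modules.shared is not part of this diff:

import time

import tqdm


class State:
    # Stand-in for modules.shared.state; only the textinfo field matters here.
    textinfo = ""


state = State()

files = [f"img_{i}.png" for i in range(5)]  # hypothetical work items
pbar = tqdm.tqdm(files)                     # keep a reference so it can be updated
for index, imagefile in enumerate(pbar):
    description = f"Preprocessing [Image {index}/{len(files)}]"
    pbar.set_description(description)       # prefix shown on the console bar
    state.textinfo = description            # the same text surfaced to the UI
    time.sleep(0.1)                         # stand-in for the real work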
Diffstat (limited to 'modules')
-rw-r--r--  modules/hypernetworks/hypernetwork.py          | 4 +++-
-rw-r--r--  modules/textual_inversion/preprocess.py        | 7 ++++++-
-rw-r--r--  modules/textual_inversion/textual_inversion.py | 4 +++-
3 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 300d3975..194679e8 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -619,7 +619,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
                 epoch_num = hypernetwork.step // steps_per_epoch
                 epoch_step = hypernetwork.step % steps_per_epoch
 
-                pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
+                description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}"
+                pbar.set_description(description)
+                shared.state.textinfo = description
                 if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
                     # Before saving, change name to match current checkpoint.
                     hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
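The new description string reuses the epoch bookkeeping from the context lines above: integer division gives the number of completed epochs, the remainder gives the 0-based position inside the current epoch, and the +1 in the f-string makes the displayed counter 1-based. A quick worked check using the names from this hunk:

steps_per_epoch = 100

for step in (0, 99, 100, 250):
    epoch_num = step // steps_per_epoch   # epochs fully completed
    epoch_step = step % steps_per_epoch   # 0-based step within the current epoch
    print(f"step {step}: [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]")
# step 0: [Epoch 0: 1/100]
# step 99: [Epoch 0: 100/100]
# step 100: [Epoch 1: 1/100]
# step 250: [Epoch 2: 51/100]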
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index feb876c6..3c1042ad 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -135,7 +135,8 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
     params.process_caption_deepbooru = process_caption_deepbooru
     params.preprocess_txt_action = preprocess_txt_action
 
-    for index, imagefile in enumerate(tqdm.tqdm(files)):
+    pbar = tqdm.tqdm(files)
+    for index, imagefile in enumerate(pbar):
         params.subindex = 0
         filename = os.path.join(src, imagefile)
         try:
@@ -143,6 +144,10 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
         except Exception:
             continue
 
+        description = f"Preprocessing [Image {index}/{len(files)}]"
+        pbar.set_description(description)
+        shared.state.textinfo = description
+
         params.src = filename
 
         existing_caption = None
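The loop rewrite in this file exists only to get a handle on the progress bar: enumerate(tqdm.tqdm(files)) consumes the bar anonymously, leaving nothing to call set_description on later. Binding it to pbar first leaves iteration unchanged. A self-contained sketch with hypothetical file names:

import tqdm

files = ["a.png", "b.png", "c.png"]  # hypothetical inputs

# Before: the bar object is anonymous, so its description cannot be updated.
for index, imagefile in enumerate(tqdm.tqdm(files)):
    pass

# After: bind the bar first; iteration is identical, but updates are possible.
pbar = tqdm.tqdm(files)
for index, imagefile in enumerate(pbar):
    pbar.set_description(f"Preprocessing [Image {index}/{len(files)}]")

Note that index starts at 0, so the counter in the description runs from 0/3 to 2/3.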
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 3866c154..b915b091 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -476,7 +476,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
                 epoch_num = embedding.step // steps_per_epoch
                 epoch_step = embedding.step % steps_per_epoch
 
-                pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
+                description = f"Training textual inversion [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}"
+                pbar.set_description(description)
+                shared.state.textinfo = description
                 if embedding_dir is not None and steps_done % save_embedding_every == 0:
                     # Before saving, change name to match current checkpoint.
                     embedding_name_every = f'{embedding_name}-{steps_done}'