-rw-r--r--  README.md              26
-rw-r--r--  modules/processing.py   3
-rw-r--r--  modules/shared.py       1
-rw-r--r--  modules/ui.py           5
-rw-r--r--  requirements.txt        4
-rw-r--r--  scripts/xy_grid.py     15
-rw-r--r--  webui.py                2
7 files changed, 45 insertions(+), 11 deletions(-)
diff --git a/README.md b/README.md
index a9b26dcc..4214834e 100644
--- a/README.md
+++ b/README.md
@@ -149,9 +149,9 @@ Open the URL in browser, and you are good to go.
### What options to use for low VRAM videocards?
- If you have 4GB VRAM and want to make 512x512 (or maybe up to 640x640) images, use `--medvram`.
- If you have 4GB VRAM and want to make 512x512 images, but you get an out of memory error with `--medvram`, use `--medvram --opt-split-attention` instead.
-- If you have 4GB VRAM and want to make 512x512 images, and you still get an out of memory error, use `--lowvram --always-batch-cond-uncond` instead.
-- If you have 4GB VRAM and want to make images larger than you can with `--medvram`, use `--lowvram`.
-- If you have more VRAM and want to make larger images than you can usually make, use `--medvram`. You can use `--lowvram`
+- If you have 4GB VRAM and want to make 512x512 images, and you still get an out of memory error, use `--lowvram --always-batch-cond-uncond --opt-split-attention` instead.
+- If you have 4GB VRAM and want to make images larger than you can with `--medvram`, use `--lowvram --opt-split-attention`.
+- If you have more VRAM and want to make larger images than you can usually make, use `--medvram --opt-split-attention`. You can use `--lowvram`
also but the effect will likely be barely noticeable.
- Otherwise, do not use any of those.
@@ -159,6 +159,26 @@ Extra: if you get a green screen instead of generated pictures, you have a card
precision floating point numbers. You must use `--precision full --no-half` in addition to other flags,
and the model will take much more space in VRAM.
+### How to change UI defaults?
+
+After running once, a `ui-config.json` file appears in the webui directory:
+
+```json
+{
+ "txt2img/Sampling Steps/value": 20,
+ "txt2img/Sampling Steps/minimum": 1,
+ "txt2img/Sampling Steps/maximum": 150,
+ "txt2img/Sampling Steps/step": 1,
+ "txt2img/Batch count/value": 1,
+ "txt2img/Batch count/minimum": 1,
+ "txt2img/Batch count/maximum": 32,
+ "txt2img/Batch count/step": 1,
+ "txt2img/Batch size/value": 1,
+ "txt2img/Batch size/minimum": 1,
+```
+
+Edit the values to your liking; the next time you launch the program, they will be applied.
+
## Credits
- Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers
- k-diffusion - https://github.com/crowsonkb/k-diffusion.git
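The keys in `ui-config.json` follow a `tab/Component label/property` pattern. A minimal sketch of reading the defaults back, purely illustrative rather than the webui's actual loader, and assuming the complete file on disk (the excerpt above is truncated):

```python
# Illustrative only: reading the ui-config.json defaults shown in the README excerpt.
import json

with open("ui-config.json") as f:
    ui_defaults = json.load(f)

# Default value and allowed range of the txt2img "Sampling Steps" slider.
print(ui_defaults["txt2img/Sampling Steps/value"])    # 20
print(ui_defaults["txt2img/Sampling Steps/minimum"])  # 1
print(ui_defaults["txt2img/Sampling Steps/maximum"])  # 150
```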
diff --git a/modules/processing.py b/modules/processing.py
index 1351579b..e8923a7a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -153,7 +153,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
with torch.no_grad(), precision_scope("cuda"), ema_scope():
p.init()
- state.job_count = p.n_iter
+ if state.job_count == -1:
+ state.job_count = p.n_iter
for n in range(p.n_iter):
if state.interrupted:
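The `-1` check above means `process_images()` only derives the job count from `p.n_iter` when no caller has announced a total yet. A minimal sketch of how a caller could take advantage of that; this is not code from the commit, and `apply_cell_settings` is a hypothetical helper:

```python
# Hypothetical caller: announce the total number of jobs up front so the
# progress bar spans every call to process_images(), which now leaves a
# pre-set job_count alone (it only overwrites the -1 sentinel).
from modules.processing import process_images
from modules.shared import state

def run_cells(p, cells):
    state.job_count = len(cells) * p.n_iter  # total work for the whole run
    for cell in cells:
        apply_cell_settings(p, cell)         # hypothetical per-cell setup
        process_images(p)                    # no longer resets job_count
```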
diff --git a/modules/shared.py b/modules/shared.py
index 53861daf..d57aba37 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -54,6 +54,7 @@ class State:
self.job_no += 1
self.sampling_step = 0
+
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
diff --git a/modules/ui.py b/modules/ui.py
index a9e4fd00..1df74070 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -140,7 +140,10 @@ def check_progress_call():
if shared.state.job_count == 0:
return ""
- progress = shared.state.job_no / shared.state.job_count
+ progress = 0
+
+ if shared.state.job_count > 0:
+ progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
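As a worked example of the fraction `check_progress_call()` now builds, assuming `job_count` has already been set to a positive total:

```python
# Numeric example only: 4 jobs total, 2 finished, current job 10 of 20 steps in.
job_no, job_count = 2, 4
sampling_step, sampling_steps = 10, 20

progress = job_no / job_count                                   # finished jobs
progress += (1 / job_count) * (sampling_step / sampling_steps)  # current job
print(progress)  # 0.625 -> the UI reports 62.5%
```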
diff --git a/requirements.txt b/requirements.txt
index 91b21222..c9e3f2fc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,3 +8,7 @@ torch
transformers
omegaconf
pytorch_lightning
+diffusers
+invisible-watermark
+git+https://github.com/crowsonkb/k-diffusion.git
+git+https://github.com/TencentARC/GFPGAN.git
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 87692983..7f6842b0 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -9,6 +9,7 @@ from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.sd_samplers
+import re
def apply_field(field):
@@ -89,6 +90,8 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell):
return first_pocessed
+re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
+
class Script(scripts.Script):
def title(self):
return "X/Y plot"
@@ -118,11 +121,13 @@ class Script(scripts.Script):
valslist_ext = []
for val in valslist:
- if "-" in val:
- s = val.split("-")
- start = int(s[0])
- end = int(s[1])+1
- step = 1 if len(s) < 3 else int(s[2])
+ m = re_range.fullmatch(val)
+ if m is not None:
+
+ start = int(m.group(1))
+ end = int(m.group(2))+1
+ step = int(m.group(3)) if m.group(3) is not None else 1
+
valslist_ext += list(range(start, end, step))
else:
valslist_ext.append(val)
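A quick demonstration of the range syntax the new `re_range` pattern accepts, using the same expansion logic as the loop above (illustrative standalone script, not part of the commit):

```python
# "start-end" expands to every value, "start-end (+step)" uses the given step,
# and anything that doesn't match the pattern is kept verbatim.
import re

re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")

for val in ["1-5", "10-20 (+5)", "768"]:
    m = re_range.fullmatch(val)
    if m is not None:
        start = int(m.group(1))
        end = int(m.group(2)) + 1
        step = int(m.group(3)) if m.group(3) is not None else 1
        print(val, "->", list(range(start, end, step)))
    else:
        print(val, "-> kept as-is")
```

So `1-5` yields `[1, 2, 3, 4, 5]`, `10-20 (+5)` yields `[10, 15, 20]`, and plain values such as `768` pass through unchanged.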
diff --git a/webui.py b/webui.py
index f1fb506f..67e9e0a8 100644
--- a/webui.py
+++ b/webui.py
@@ -123,7 +123,7 @@ queue_lock = threading.Lock()
def wrap_gradio_gpu_call(func):
def f(*args, **kwargs):
shared.state.sampling_step = 0
- shared.state.job_count = 1
+ shared.state.job_count = -1
shared.state.job_no = 0
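Taken together, `-1` now acts as a "total not announced yet" sentinel for `state.job_count`. A comment-level sketch of the lifecycle after this commit (summary only, not repository code):

```python
# job_count lifecycle after this commit:
job_count = -1        # wrap_gradio_gpu_call(): total unknown at this point
# ... a script may set job_count to the real total before generation starts ...
if job_count == -1:   # process_images(): fill in the sentinel only
    job_count = 1     # stands in for p.n_iter in the real code
# check_progress_call() only divides by job_count once it is greater than 0.
```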