author     AUTOMATIC <16777216c@gmail.com>  2022-09-09 23:16:02 +0300
committer  AUTOMATIC <16777216c@gmail.com>  2022-09-09 23:16:02 +0300
commit     86867e153f4449167e3489323df35cf04f1fffa0 (patch)
tree       735b7311b84136c87ac17472ed464d1d7fb6759f /modules
parent     d714ea4c41e4e7c0dc4730d9ea137d134691c0a2 (diff)
support for prompt styles
fix broken prompt matrix
Diffstat (limited to 'modules')
-rw-r--r--  modules/img2img.py      3
-rw-r--r--  modules/processing.py  16
-rw-r--r--  modules/shared.py       6
-rw-r--r--  modules/styles.py      41
-rw-r--r--  modules/txt2img.py      3
-rw-r--r--  modules/ui.py          46
6 files changed, 96 insertions, 19 deletions
diff --git a/modules/img2img.py b/modules/img2img.py
index 008e8688..8da2d80e 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -11,7 +11,7 @@ from modules.ui import plaintext_to_html
import modules.images as images
import modules.scripts
-def img2img(prompt: str, negative_prompt: str, init_img, init_img_with_mask, init_mask, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, mode: int, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, denoising_strength_change_factor: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, height: int, width: int, resize_mode: int, upscaler_index: str, upscale_overlap: int, inpaint_full_res: bool, inpainting_mask_invert: int, *args):
+def img2img(prompt: str, negative_prompt: str, prompt_style: int, init_img, init_img_with_mask, init_mask, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, mode: int, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, denoising_strength_change_factor: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, height: int, width: int, resize_mode: int, upscaler_index: str, upscale_overlap: int, inpaint_full_res: bool, inpainting_mask_invert: int, *args):
is_inpaint = mode == 1
is_loopback = mode == 2
is_upscale = mode == 3
@@ -38,6 +38,7 @@ def img2img(prompt: str, negative_prompt: str, init_img, init_img_with_mask, ini
outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
prompt=prompt,
negative_prompt=negative_prompt,
+ prompt_style=prompt_style,
seed=seed,
subseed=subseed,
subseed_strength=subseed_strength,
diff --git a/modules/processing.py b/modules/processing.py
index 09680fbf..7e6cd8ee 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -16,6 +16,7 @@ from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.face_restoration
import modules.images as images
+import modules.styles
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
@@ -29,13 +30,14 @@ def torch_gc():
class StableDiffusionProcessing:
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", prompt_style=0, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None):
self.sd_model = sd_model
self.outpath_samples: str = outpath_samples
self.outpath_grids: str = outpath_grids
self.prompt: str = prompt
self.prompt_for_display: str = None
self.negative_prompt: str = (negative_prompt or "")
+ self.prompt_style: int = prompt_style
self.seed: int = seed
self.subseed: int = subseed
self.subseed_strength: float = subseed_strength
@@ -154,8 +156,6 @@ def fix_seed(p):
def process_images(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
- prompt = p.prompt
-
assert p.prompt is not None
torch_gc()
@@ -168,10 +168,12 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
comments = []
- if type(prompt) == list:
- all_prompts = prompt
+ modules.styles.apply_style(p, shared.prompt_styles[p.prompt_style])
+
+ if type(p.prompt) == list:
+ all_prompts = p.prompt
else:
- all_prompts = p.batch_size * p.n_iter * [prompt]
+ all_prompts = p.batch_size * p.n_iter * [p.prompt]
if type(p.seed) == list:
all_seeds = int(p.seed)
@@ -207,7 +209,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
negative_prompt_text = "\nNegative prompt: " + p.negative_prompt if p.negative_prompt else ""
- return f"{p.prompt_for_display or prompt}{negative_prompt_text}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])
+ return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])
if os.path.exists(cmd_opts.embeddings_dir):
model_hijack.load_textual_inversion_embeddings(cmd_opts.embeddings_dir, p.sd_model)
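
An aside on the processing.py hunks above: the selected style is merged into p.prompt before the prompt is expanded into all_prompts, and the per-image infotext now reads all_prompts[index] instead of the single pre-expansion prompt, which is what fixes the broken prompt matrix (where p.prompt arrives as a list). A minimal standalone sketch of that expansion; expand_prompts is an illustrative name, not webui code:

def expand_prompts(prompt, batch_size, n_iter):
    # Sketch of the logic inside process_images: a list (e.g. from the
    # prompt-matrix script) is used as-is, a plain string is repeated once
    # per image in the whole job.
    if type(prompt) == list:
        return prompt
    return batch_size * n_iter * [prompt]

all_prompts = expand_prompts(["a cat", "a cat, oil painting"], batch_size=1, n_iter=2)
print(all_prompts[1])  # each image's infotext now reports its own entry
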
diff --git a/modules/shared.py b/modules/shared.py
index e577332d..5985d09e 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -9,7 +9,7 @@ import tqdm
import modules.artists
from modules.paths import script_path, sd_path
-import modules.codeformer_model
+import modules.styles
config_filename = "config.json"
@@ -75,8 +75,10 @@ state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
-face_restorers = []
+styles_filename = os.path.join(script_path, 'styles.csv')
+prompt_styles = modules.styles.load_styles(styles_filename)
+face_restorers = []
def find_any_font():
fonts = ['/usr/share/fonts/truetype/liberation/LiberationSans-Regular.ttf']
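
For illustration only (nothing below is part of the commit): assuming a styles.csv next to the script containing a single row previously written by save_style, the shared.prompt_styles dict that shared.py now builds at import time looks roughly like this; the style name and text are invented:

from modules.styles import PromptStyle

# "None" (an empty style) is always present, so the UI dropdowns have a
# harmless default even when styles.csv is missing.
prompt_styles = {
    "None": PromptStyle(name="None", text=""),
    "cinematic": PromptStyle(name="cinematic", text="cinematic lighting, film grain"),
}
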
diff --git a/modules/styles.py b/modules/styles.py
new file mode 100644
index 00000000..58fb7d75
--- /dev/null
+++ b/modules/styles.py
@@ -0,0 +1,41 @@
+import csv
+import os.path
+from collections import namedtuple
+
+PromptStyle = namedtuple("PromptStyle", ["name", "text"])
+
+
+def load_styles(filename):
+    res = {"None": PromptStyle("None", "")}
+
+    if os.path.exists(filename):
+        with open(filename, "r", encoding="utf8", newline='') as file:
+            reader = csv.DictReader(file)
+
+            for row in reader:
+                res[row["name"]] = PromptStyle(row["name"], row["text"])
+
+    return res
+
+
+def apply_style_text(style_text, prompt):
+    return prompt + ", " + style_text if prompt else style_text
+
+
+def apply_style(p, style):
+    if type(p.prompt) == list:
+        p.prompt = [apply_style_text(style.text, x) for x in p.prompt]
+    else:
+        p.prompt = apply_style_text(style.text, p.prompt)
+
+
+def save_style(filename, style):
+    with open(filename, "a", encoding="utf8", newline='') as file:
+        atstart = file.tell() == 0
+
+        writer = csv.DictWriter(file, fieldnames=["name", "text"])
+
+        if atstart:
+            writer.writeheader()
+
+        writer.writerow({"name": style.name, "text": style.text})
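
A hedged usage sketch of the new module, tying the four functions together; the file name example_styles.csv and the style contents are invented, and the webui itself goes through shared.styles_filename and the UI handlers rather than calling these directly:

import modules.styles as styles

# save_style appends a name/text row, emitting the CSV header only when the
# file is still empty.
styles.save_style("example_styles.csv",
                  styles.PromptStyle("cinematic", "cinematic lighting, film grain"))

# load_styles returns a dict keyed by style name and always includes the empty
# "None" style, even when the file does not exist.
loaded = styles.load_styles("example_styles.csv")

# apply_style_text appends the style text to a non-empty prompt with ", ",
# or returns the style text alone for an empty prompt; apply_style does the
# same for every prompt on a StableDiffusionProcessing object.
print(styles.apply_style_text(loaded["cinematic"].text, "portrait of an astronaut"))
# -> portrait of an astronaut, cinematic lighting, film grain
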
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 606421ea..070bd094 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -6,12 +6,13 @@ import modules.processing as processing
from modules.ui import plaintext_to_html
-def txt2img(prompt: str, negative_prompt: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, height: int, width: int, *args):
+def txt2img(prompt: str, negative_prompt: str, prompt_style: int, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, height: int, width: int, *args):
p = StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids,
prompt=prompt,
+ prompt_style=prompt_style,
negative_prompt=negative_prompt,
seed=seed,
subseed=subseed,
diff --git a/modules/ui.py b/modules/ui.py
index 65076edb..63ae62ab 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -25,6 +25,7 @@ import modules.realesrgan_model as realesrgan
import modules.scripts
import modules.gfpgan_model
import modules.codeformer_model
+import modules.styles
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
@@ -226,11 +227,26 @@ def create_seed_inputs():
return seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w
+def add_style(style_name, text):
+ if style_name is None:
+ return [gr_show(), gr_show()]
+
+ style = modules.styles.PromptStyle(style_name, text)
+
+ modules.styles.save_style(shared.styles_filename, style)
+
+ shared.prompt_styles[style.name] = style
+
+ update = {"visible": True, "choices": [k for k, v in shared.prompt_styles.items()], "__type__": "update"}
+ return [update, update]
+
+
def create_ui(txt2img, img2img, run_extras, run_pnginfo):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
with gr.Row():
- prompt = gr.Textbox(label="Prompt", elem_id="txt2img_prompt", show_label=False, placeholder="Prompt", lines=1)
+ txt2img_prompt = gr.Textbox(label="Prompt", elem_id="txt2img_prompt", show_label=False, placeholder="Prompt", lines=1)
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="txt2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1, visible=cmd_opts.show_negative_prompt)
+ txt2img_prompt_style = gr.Dropdown(label="Style", show_label=False, elem_id="style_index", choices=[k for k, v in shared.prompt_styles.items()], value=next(iter(shared.prompt_styles.keys())), visible=len(shared.prompt_styles) > 1)
roll = gr.Button('Roll', elem_id="txt2img_roll", visible=len(shared.artist_db.artists) > 0)
submit = gr.Button('Generate', elem_id="txt2img_generate", variant='primary')
check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
@@ -272,6 +288,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
+ txt2img_save_style = gr.Button('Save prompt as style')
progressbar = gr.HTML(elem_id="progressbar")
@@ -284,8 +301,9 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
fn=txt2img,
_js="submit",
inputs=[
- prompt,
+ txt2img_prompt,
negative_prompt,
+ txt2img_prompt_style,
steps,
sampler_index,
restore_faces,
@@ -305,7 +323,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
]
)
- prompt.submit(**txt2img_args)
+ txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
check_progress.click(
@@ -338,18 +356,19 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
roll.click(
fn=roll_artist,
inputs=[
- prompt,
+ txt2img_prompt,
],
outputs=[
- prompt
+ txt2img_prompt,
]
)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
with gr.Row():
- prompt = gr.Textbox(label="Prompt", elem_id="img2img_prompt", show_label=False, placeholder="Prompt", lines=1)
+ img2img_prompt = gr.Textbox(label="Prompt", elem_id="img2img_prompt", show_label=False, placeholder="Prompt", lines=1)
negative_prompt = gr.Textbox(label="Negative prompt", elem_id="img2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1, visible=cmd_opts.show_negative_prompt)
+ img2img_prompt_style = gr.Dropdown(label="Style", show_label=False, elem_id="style_index", choices=[k for k, v in shared.prompt_styles.items()], value=next(iter(shared.prompt_styles.keys())), visible=len(shared.prompt_styles) > 1)
submit = gr.Button('Generate', elem_id="img2img_generate", variant='primary')
check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
@@ -413,8 +432,10 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
interrupt = gr.Button('Interrupt')
+ img2img_save_style = gr.Button('Save prompt as style')
progressbar = gr.HTML(elem_id="progressbar")
+ style_dummpy = gr.Textbox(visible=False)
with gr.Group():
html_info = gr.HTML()
@@ -480,8 +501,9 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
fn=img2img,
_js="submit",
inputs=[
- prompt,
+ img2img_prompt,
negative_prompt,
+ img2img_prompt_style,
init_img,
init_img_with_mask,
init_mask,
@@ -515,7 +537,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
]
)
- prompt.submit(**img2img_args)
+ img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
check_progress.click(
@@ -572,6 +594,14 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
outputs=[init_img_with_mask],
)
+ for button, propmt in zip([txt2img_save_style, img2img_save_style], [txt2img_prompt, img2img_prompt]):
+ button.click(
+ fn=add_style,
+ _js="ask_for_style_name",
+ inputs=[style_dummpy, propmt],
+ outputs=[txt2img_prompt_style, img2img_prompt_style],
+ )
+
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
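
A closing note on the ui.py wiring: add_style never rebuilds the Style dropdowns, it returns the same Gradio "update" dict for both of them, and the hidden textbox presumably receives the style name from the ask_for_style_name JavaScript hook before the click handler runs. A minimal sketch of that update-dict pattern, assuming the Gradio version in use accepts raw update dicts (the equivalent of gr.Dropdown.update):

def refresh_style_choices(prompt_styles):
    # Returning this dict from an event handler updates an existing
    # gr.Dropdown in place: show it and replace its choices with the
    # current style names, including one saved a moment ago.
    return {
        "visible": True,
        "choices": list(prompt_styles.keys()),
        "__type__": "update",
    }

add_style returns this update twice, once per output, so saving a prompt as a style from either tab repopulates both the txt2img and img2img dropdowns.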