From 5ef7590324891ec7263c767d178a51827a6f9b33 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 16 Jul 2023 11:38:59 +0300 Subject: always show extra networks tabs in the UI --- javascript/extraNetworks.js | 8 +++--- modules/ui.py | 35 ++++++++++++++------------ modules/ui_extra_networks.py | 58 ++++++++++++++++++-------------------------- style.css | 8 +++--- 4 files changed, 50 insertions(+), 59 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index e453094a..1835717b 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -2,16 +2,14 @@ function setupExtraNetworksForTab(tabname) { gradioApp().querySelector('#' + tabname + '_extra_tabs').classList.add('extra-networks'); var tabs = gradioApp().querySelector('#' + tabname + '_extra_tabs > div'); - var search = gradioApp().querySelector('#' + tabname + '_extra_search textarea'); + var searchDiv = gradioApp().getElementById(tabname + '_extra_search'); + var search = searchDiv.querySelector('textarea'); var sort = gradioApp().getElementById(tabname + '_extra_sort'); var sortOrder = gradioApp().getElementById(tabname + '_extra_sortorder'); var refresh = gradioApp().getElementById(tabname + '_extra_refresh'); - search.classList.add('search'); - sort.classList.add('sort'); - sortOrder.classList.add('sortorder'); sort.dataset.sortkey = 'sortDefault'; - tabs.appendChild(search); + tabs.appendChild(searchDiv); tabs.appendChild(sort); tabs.appendChild(sortOrder); tabs.appendChild(refresh); diff --git a/modules/ui.py b/modules/ui.py index 07ecee7b..085561c1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -310,7 +310,6 @@ def create_toprow(is_img2img): with gr.Row(elem_id=f"{id_part}_tools"): paste = ToolButton(value=paste_symbol, elem_id="paste") clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) @@ -331,7 +330,7 @@ def create_toprow(is_img2img): prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, None, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button def setup_progressbar(*args, **kwargs): @@ -419,16 +418,15 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, 
token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, _, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) - with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') + extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs") + extra_tabs.__enter__() - with gr.Row().style(equal_height=False): + with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row().style(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): modules.scripts.scripts_txt2img.prepare_ui() @@ -656,21 +654,24 @@ def create_ui(): token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) - ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) + from modules import ui_extra_networks + extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img') + ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) + + extra_tabs.__exit__() modules.scripts.scripts_current = modules.scripts.scripts_img2img modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, _, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') + extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs") + extra_tabs.__enter__() - with FormRow().style(equal_height=False): + with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow().style(equal_height=False): with gr.Column(variant='compact', elem_id="img2img_settings"): copy_image_buttons = [] copy_image_destinations = {} @@ -1026,8 +1027,6 @@ def create_ui(): token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) negative_token_button.click(fn=wrap_queued_call(update_token_counter), 
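# note: wrap_queued_call runs the counter update behind the shared queue lock, so it cannot overlap a generation already in progress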
inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) - ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) - img2img_paste_fields = [ (img2img_prompt, "Prompt"), (img2img_negative_prompt, "Negative prompt"), @@ -1055,6 +1054,12 @@ def create_ui(): paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, )) + from modules import ui_extra_networks + extra_networks_ui_img2img = ui_extra_networks.create_ui(img2img_interface, [img2img_generation_tab], 'img2img') + ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) + + extra_tabs.__exit__() + modules.scripts.scripts_current = None with gr.Blocks(analytics_enabled=False) as extras_interface: diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 6c73998f..0eb02873 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -355,7 +355,7 @@ def pages_in_preferred_order(pages): return sorted(pages, key=lambda x: tab_scores[x.name]) -def create_ui(container, button, tabname): +def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): ui = ExtraNetworksUi() ui.pages = [] ui.pages_contents = [] @@ -363,48 +363,35 @@ def create_ui(container, button, tabname): ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy()) ui.tabname = tabname - with gr.Tabs(elem_id=tabname+"_extra_tabs"): - for page in ui.stored_extra_pages: - with gr.Tab(page.title, id=page.id_page): - elem_id = f"{tabname}_{page.id_page}_cards_html" - page_elem = gr.HTML('Loading...', elem_id=elem_id) - ui.pages.append(page_elem) + related_tabs = [] - page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + quote_js(tabname) + '); return []}', inputs=[], outputs=[]) + for page in ui.stored_extra_pages: + with gr.Tab(page.title, id=page.id_page) as tab: + elem_id = f"{tabname}_{page.id_page}_cards_html" + page_elem = gr.HTML('Loading...', elem_id=elem_id) + ui.pages.append(page_elem) - editor = page.create_user_metadata_editor(ui, tabname) - editor.create_ui() - ui.user_metadata_editors.append(editor) + page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + quote_js(tabname) + '); return []}', inputs=[], outputs=[]) - gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False) - gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", multiselect=False, visible=False, show_label=False, interactive=True) - ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder") - button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh") + editor = page.create_user_metadata_editor(ui, tabname) + editor.create_ui() + ui.user_metadata_editors.append(editor) - ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False) - ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False) - - def toggle_visibility(is_visible): - is_visible = not is_visible - - return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary")) + related_tabs.append(tab) - def fill_tabs(is_empty): - """Creates HTML for extra networks' tabs when the extra networks button is clicked for the first time.""" + edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", 
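# created hidden: the tab.select handlers registered below show these controls only while an extra-networks tab is active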
visible=False, interactive=True) + dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True) + button_sortorder = ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False) + button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False) - if not ui.pages_contents: - refresh() - - if is_empty: - return True, *ui.pages_contents - - return True, *[gr.update() for _ in ui.pages_contents] + ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False) + ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False) - state_visible = gr.State(value=False) - button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button], show_progress=False) + for tab in unrelated_tabs: + tab.select(fn=lambda: [gr.update(visible=False) for _ in range(5)], inputs=[], outputs=[edit_search, edit_search, dropdown_sort, button_sortorder, button_refresh], show_progress=False) - state_empty = gr.State(value=True) - button.click(fn=fill_tabs, inputs=[state_empty], outputs=[state_empty, *ui.pages], show_progress=False) + for tab in related_tabs: + tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, edit_search, dropdown_sort, button_sortorder, button_refresh], show_progress=False) def refresh(): for pg in ui.stored_extra_pages: @@ -414,6 +401,7 @@ def create_ui(container, button, tabname): return ui.pages_contents + interface.load(fn=refresh, inputs=[], outputs=[*ui.pages]) button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages) return ui diff --git a/style.css b/style.css index 8a66c3d2..a7d79955 100644 --- a/style.css +++ b/style.css @@ -766,9 +766,10 @@ footer { /* extra networks UI */ .extra-network-cards{ - height: 725px; - overflow: scroll; - resize: vertical; +} + +.extra-networks > div.tab-nav{ + height: 3.4rem; } .extra-networks > div > [id *= '_extra_']{ @@ -784,7 +785,6 @@ footer { } .extra-networks .tab-nav .search, .extra-networks .tab-nav .sort{ - display: inline-block; margin: 0.3em; align-self: center; } -- cgit v1.2.1 From 57d61de25cb6de2e317ae23580971e98c70f542e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 16 Jul 2023 11:52:29 +0300 Subject: fix unneeded reload from disk --- modules/ui_extra_networks.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 0eb02873..c11f1d5b 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -393,6 +393,12 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): for tab in related_tabs: tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, edit_search, dropdown_sort, button_sortorder, button_refresh], show_progress=False) + def pages_html(): + if not ui.pages_contents: + return refresh() + + return ui.pages_contents + def refresh(): for pg in ui.stored_extra_pages: pg.refresh() @@ -401,7 +407,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): return ui.pages_contents - interface.load(fn=refresh, inputs=[], outputs=[*ui.pages]) + interface.load(fn=pages_html, inputs=[], outputs=[*ui.pages]) button_refresh.click(fn=refresh, inputs=[], 
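# refresh() re-reads every page and pushes the rebuilt HTML into the gr.HTML components collected in ui.pages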
outputs=ui.pages) return ui -- cgit v1.2.1 From 24bad5dc7b4dd4dfdb47bc1202e4fe438b860e2e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 16 Jul 2023 13:59:15 +0300 Subject: change extra networks list to have constant height and scrolling --- style.css | 3 +++ 1 file changed, 3 insertions(+) diff --git a/style.css b/style.css index a7d79955..c605cce2 100644 --- a/style.css +++ b/style.css @@ -766,6 +766,9 @@ footer { /* extra networks UI */ .extra-network-cards{ + height: 100vh; + overflow: scroll; + resize: vertical; } .extra-networks > div.tab-nav{ -- cgit v1.2.1 From 643836007f6f47344767322223f96723511f58e0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 16 Jul 2023 14:46:05 +0300 Subject: more tweaking for cards section height --- style.css | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/style.css b/style.css index c605cce2..88d7135c 100644 --- a/style.css +++ b/style.css @@ -766,9 +766,10 @@ footer { /* extra networks UI */ .extra-network-cards{ - height: 100vh; - overflow: scroll; + height: calc(100vh - 24rem); + overflow: clip scroll; resize: vertical; + min-height: 52rem; } .extra-networks > div.tab-nav{ -- cgit v1.2.1 From 543ea5730b8c2eea271739cab74bd962b45a4fea Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 17 Jul 2023 16:15:52 +0900 Subject: fix extra search button --- javascript/extraNetworks.js | 2 +- modules/ui_extra_networks.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 1835717b..2361144a 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -177,7 +177,7 @@ function saveCardPreview(event, tabname, filename) { } function extraNetworksSearchButton(tabs_id, event) { - var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea'); + var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > label > textarea'); var button = event.target; var text = button.classList.contains("search-all") ? 
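/* the dedicated search-all button clears the filter; any other subdirectory button filters by its own text */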
"" : button.textContent.trim(); diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index c11f1d5b..b913cb3e 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -163,7 +163,7 @@ class ExtraNetworksPage: subdirs = {"": 1, **subdirs} subdirs_html = "".join([f""" - """ for subdir in subdirs]) -- cgit v1.2.1 From 95c5c4d64ef7fb67725df7ce048f633a9dd4a9b6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 17 Jul 2023 11:18:08 +0300 Subject: fix tabs height on small screens --- style.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/style.css b/style.css index 88d7135c..7157ac0b 100644 --- a/style.css +++ b/style.css @@ -773,7 +773,7 @@ footer { } .extra-networks > div.tab-nav{ - height: 3.4rem; + min-height: 3.4rem; } .extra-networks > div > [id *= '_extra_']{ -- cgit v1.2.1 From 0dcf6436a868066ada3aaf30f68a3405a24a842c Mon Sep 17 00:00:00 2001 From: wzgrx <39661556+wzgrx@users.noreply.github.com> Date: Mon, 17 Jul 2023 18:49:53 +0800 Subject: Update requirements.txt --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index b3f8a7f4..0a2fd236 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ blendmodes clean-fid einops gfpgan -gradio==3.32.0 +gradio==3.36.1 inflection jsonmerge kornia @@ -30,4 +30,4 @@ tomesd torch torchdiffeq torchsde -transformers==4.25.1 +transformers==4.30.2 -- cgit v1.2.1 From 952effa8b10dba2f2f7f2cf4191f987e3666e9f0 Mon Sep 17 00:00:00 2001 From: wzgrx <39661556+wzgrx@users.noreply.github.com> Date: Mon, 17 Jul 2023 18:50:29 +0800 Subject: Update requirements_versions.txt --- requirements_versions.txt | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/requirements_versions.txt b/requirements_versions.txt index b826bf43..09c2d292 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -1,13 +1,13 @@ -GitPython==3.1.30 +GitPython==3.1.32 Pillow==9.5.0 -accelerate==0.18.0 +accelerate==0.21.0 basicsr==1.4.2 blendmodes==2022 clean-fid==0.1.35 einops==0.4.1 fastapi==0.94.0 gfpgan==1.3.8 -gradio==3.32.0 +gradio==3.36.1 httpcore<=0.15 inflection==0.5.1 jsonmerge==1.8.0 @@ -22,10 +22,11 @@ pytorch_lightning==1.9.4 realesrgan==0.3.0 resize-right==0.0.2 safetensors==0.3.1 -scikit-image==0.20.0 -timm==0.6.7 -tomesd==0.1.2 +scikit-image==0.21.0 +timm==0.9.2 +tomesd==0.1.3 torch torchdiffeq==0.2.3 torchsde==0.2.5 -transformers==4.25.1 +transformers==4.30.2 +diffusers==0.18.2 -- cgit v1.2.1 From 40a18d38a8fcb88d1c2947a2653b52cd2085536f Mon Sep 17 00:00:00 2001 From: lambertae Date: Tue, 18 Jul 2023 00:32:01 -0400 Subject: add restart sampler --- modules/sd_samplers_kdiffusion.py | 70 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 68 insertions(+), 2 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 71581b76..c63b677c 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,3 +1,5 @@ +# export PIP_CACHE_DIR=/scratch/dengm/cache +# export XDG_CACHE_HOME=/scratch/dengm/cache from collections import deque import torch import inspect @@ -30,12 +32,76 @@ samplers_k_diffusion = [ ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], 
{'scheduler': 'karras', "brownian_noise": True}), + ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}), ] + +@torch.no_grad() +def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = {0.1: [10, 2, 2]}): + """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" + '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' + + from tqdm.auto import trange, tqdm + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + step_id = 0 + + from k_diffusion.sampling import to_d, append_zero + + def heun_step(x, old_sigma, new_sigma): + nonlocal step_id + denoised = model(x, old_sigma * s_in, **extra_args) + d = to_d(x, old_sigma, denoised) + if callback is not None: + callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) + dt = new_sigma - old_sigma + if new_sigma == 0: + # Euler method + x = x + d * dt + else: + # Heun's method + x_2 = x + d * dt + denoised_2 = model(x_2, new_sigma * s_in, **extra_args) + d_2 = to_d(x_2, new_sigma, denoised_2) + d_prime = (d + d_2) / 2 + x = x + d_prime * dt + step_id += 1 + return x + # print(sigmas) + temp_list = dict() + for key, value in restart_list.items(): + temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value + restart_list = temp_list + + + def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'): + ramp = torch.linspace(0, 1, n).to(device) + min_inv_rho = (sigma_min ** (1 / rho)) + max_inv_rho = (sigma_max ** (1 / rho)) + if isinstance(min_inv_rho, torch.Tensor): + min_inv_rho = min_inv_rho.to(device) + if isinstance(max_inv_rho, torch.Tensor): + max_inv_rho = max_inv_rho.to(device) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return append_zero(sigmas).to(device) + + for i in trange(len(sigmas) - 1, disable=disable): + x = heun_step(x, sigmas[i], sigmas[i+1]) + if i + 1 in restart_list: + restart_steps, restart_times, restart_max = restart_list[i + 1] + min_idx = i + 1 + max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) + sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end + for times in range(restart_times): + x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5 + for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]): + x = heun_step(x, old_sigma, new_sigma) + return x + samplers_data_k_diffusion = [ sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) for label, funcname, aliases, options in samplers_k_diffusion - if hasattr(k_diffusion.sampling, funcname) + if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler') ] sampler_extra_params = { @@ -245,7 +311,7 @@ class KDiffusionSampler: self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) self.funcname = funcname - self.func = getattr(k_diffusion.sampling, self.funcname) + self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler self.extra_params = sampler_extra_params.get(funcname, []) self.model_wrap_cfg = CFGDenoiser(self.model_wrap) self.sampler_noises = None -- cgit v1.2.1 From 15a94d6cf7fa075c09362e73c1239692d021c559 Mon Sep 17 00:00:00 2001 From: lambertae Date: Tue, 18 Jul 2023 
00:39:26 -0400 Subject: remove useless header --- modules/sd_samplers_kdiffusion.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index c63b677c..7888d864 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,5 +1,3 @@ -# export PIP_CACHE_DIR=/scratch/dengm/cache -# export XDG_CACHE_HOME=/scratch/dengm/cache from collections import deque import torch import inspect -- cgit v1.2.1 From 37e048a7e2356f4caebfd976351112f03856f082 Mon Sep 17 00:00:00 2001 From: lambertae Date: Tue, 18 Jul 2023 00:55:02 -0400 Subject: fix floating error --- modules/sd_samplers_kdiffusion.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 7888d864..1bb25adf 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -89,11 +89,12 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No restart_steps, restart_times, restart_max = restart_list[i + 1] min_idx = i + 1 max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) - sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end - for times in range(restart_times): - x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5 - for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]): - x = heun_step(x, old_sigma, new_sigma) + if max_idx < min_idx: + sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end + for times in range(restart_times): + x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5 + for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]): + x = heun_step(x, old_sigma, new_sigma) return x samplers_data_k_diffusion = [ -- cgit v1.2.1 From 7bb0fbed136c6a345b211e09102659fd89362576 Mon Sep 17 00:00:00 2001 From: lambertae Date: Tue, 18 Jul 2023 01:02:04 -0400 Subject: code styling --- modules/sd_samplers_kdiffusion.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 1bb25adf..db7013f2 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -35,17 +35,15 @@ samplers_k_diffusion = [ @torch.no_grad() -def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = {0.1: [10, 2, 2]}): +def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.): """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' - - from tqdm.auto import trange, tqdm + restart_list = {0.1: [10, 2, 2]} + from tqdm.auto import trange extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) step_id = 0 - from k_diffusion.sampling import to_d, append_zero - def heun_step(x, old_sigma, new_sigma): nonlocal step_id denoised = model(x, old_sigma * s_in, **extra_args) @@ -70,8 +68,6 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No for key, value in restart_list.items(): temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value restart_list = 
temp_list - - def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'): ramp = torch.linspace(0, 1, n).to(device) min_inv_rho = (sigma_min ** (1 / rho)) max_inv_rho = (sigma_max ** (1 / rho)) @@ -82,7 +78,6 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No max_inv_rho = max_inv_rho.to(device) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return append_zero(sigmas).to(device) - for i in trange(len(sigmas) - 1, disable=disable): x = heun_step(x, sigmas[i], sigmas[i+1]) if i + 1 in restart_list: @@ -91,7 +86,8 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) if max_idx < min_idx: sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end - for times in range(restart_times): + while restart_times > 0: + restart_times -= 1 x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5 for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]): x = heun_step(x, old_sigma, new_sigma) -- cgit v1.2.1 From 3c570421d3a2eb24528b5f5bb615dcb0c7717e4a Mon Sep 17 00:00:00 2001 From: wfjsw Date: Tue, 18 Jul 2023 19:00:16 +0800 Subject: move start timer --- launch.py | 4 +++- modules/api/models.py | 2 +- modules/launch_utils.py | 3 --- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/launch.py b/launch.py index b103c8f3..e9667c88 100644 --- a/launch.py +++ b/launch.py @@ -1,4 +1,4 @@ -from modules import launch_utils +from modules import launch_utils, timer args = launch_utils.args @@ -25,6 +25,8 @@ start = launch_utils.start def main(): + timer.startup_timer.record("start") + if not args.skip_prepare_environment: prepare_environment() diff --git a/modules/api/models.py b/modules/api/models.py index b55fa728..96cfe920 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -213,7 +213,7 @@ for key, metadata in opts.data_labels.items(): value = None optType = opts.typemap.get(type(metadata.default), type(value)) - if optType == types.NoneType: + if isinstance(optType, types.NoneType): pass elif metadata is not None: fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))}) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 03552bc2..ea995eda 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -10,9 +10,6 @@ from functools import lru_cache from modules import cmd_args, errors from modules.paths_internal import script_path, extensions_dir -from modules import timer - -timer.startup_timer.record("start") args, _ = cmd_args.parser.parse_known_args() -- cgit v1.2.1 From c278e60131d34b58069c91d441e60a5d87f14a22 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 19 Jul 2023 04:58:30 +0900 Subject: add dropdown extra_sort_order label --- modules/ui_extra_networks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index b913cb3e..7387d01e 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -380,7 +380,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): related_tabs.append(tab) edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True) - dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date 
Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True) + dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") button_sortorder = ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False) button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False) -- cgit v1.2.1 From cb7573489670cc7a042d24285e158b797c9558b2 Mon Sep 17 00:00:00 2001 From: yfzhou Date: Wed, 19 Jul 2023 17:53:28 +0800 Subject: 【bug】reload altclip model error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When using BertSeriesModelWithTransformation as the cond_stage_model, the undo_hijack should be performed using the FrozenXLMREmbedderWithCustomWords type; otherwise, it will result in a failed model reload. --- modules/sd_hijack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 3b6f95ce..928233ab 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -203,7 +203,7 @@ class StableDiffusionModelHijack: ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward def undo_hijack(self, m): - if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation: + if type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords: m.cond_stage_model = m.cond_stage_model.wrapped elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords: -- cgit v1.2.1 From ddbf4a73f5c0cfe63ca0988b8e642d3b977a3fa9 Mon Sep 17 00:00:00 2001 From: lambertae Date: Thu, 20 Jul 2023 02:24:18 -0400 Subject: restart-sampler with correct steps --- modules/sd_samplers_kdiffusion.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index db7013f2..ed5e6c79 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -38,20 +38,19 @@ samplers_k_diffusion = [ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.): """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' - restart_list = {0.1: [10, 2, 2]} from tqdm.auto import trange extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) step_id = 0 from k_diffusion.sampling import to_d, append_zero - def heun_step(x, old_sigma, new_sigma): + def heun_step(x, old_sigma, new_sigma, second_order = True): nonlocal step_id denoised = model(x, old_sigma * s_in, **extra_args) d = to_d(x, old_sigma, denoised) if callback is not None: callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) dt = new_sigma - old_sigma - if new_sigma == 0: + if new_sigma == 0 or not second_order: # Euler method x = x + d * dt else: @@ -63,11 +62,6 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No x = x + d_prime * dt step_id += 1 return x - # print(sigmas) - temp_list = dict() - for key, value in restart_list.items(): - 
temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value - restart_list = temp_list + steps = sigmas.shape[0] - 1 + if steps >= 20: + restart_steps = 9 + restart_times = 2 if steps >= 36 else 1 + sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2], sigmas[0], device=sigmas.device) + restart_list = {0.1: [restart_steps + 1, restart_times, 2]} + else: + restart_list = dict() + temp_list = dict() + for key, value in restart_list.items(): + temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value + restart_list = temp_list for i in trange(len(sigmas) - 1, disable=disable): x = heun_step(x, sigmas[i], sigmas[i+1]) if i + 1 in restart_list: -- cgit v1.2.1 From 6233268964fe050f1d25e8148ccf12a8884623a9 Mon Sep 17 00:00:00 2001 From: lambertae Date: Thu, 20 Jul 2023 02:27:28 -0400 Subject: add credit --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 73d94960..32ffd4c9 100644 --- a/README.md +++ b/README.md @@ -142,6 +142,7 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al - Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers - k-diffusion - https://github.com/crowsonkb/k-diffusion.git +- Restart sampling - https://github.com/Newbeeer/diffusion_restart_sampling - GFPGAN - https://github.com/TencentARC/GFPGAN.git - CodeFormer - https://github.com/sczhou/CodeFormer - ESRGAN - https://github.com/xinntao/ESRGAN -- cgit v1.2.1 From 2f57a559ac3381c1ef2516655c3a3d1088191c54 Mon Sep 17 00:00:00 2001 From: lambertae Date: Thu, 20 Jul 2023 20:34:41 -0400 Subject: allow choice of restart_list & use karras from kdiffusion --- modules/sd_samplers_kdiffusion.py | 32 ++++++++++++-------------------- 1 file changed, 12 insertions(+), 20 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index ed5e6c79..c72d01c8 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -35,14 +35,15 @@ samplers_k_diffusion = [ @torch.no_grad() -def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.): +def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' + '''If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list''' from tqdm.auto import trange extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) step_id = 0 - from k_diffusion.sampling import to_d, append_zero + from k_diffusion.sampling import to_d, append_zero, get_sigmas_karras def heun_step(x, old_sigma, new_sigma, second_order = True): nonlocal step_id denoised = model(x, old_sigma * s_in, **extra_args) @@ -62,24 +63,15 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No x = x + d_prime * dt step_id += 1 return x - def get_sigmas_karras(n, sigma_min, 
sigma_max, rho=7., device='cpu'): - ramp = torch.linspace(0, 1, n).to(device) - min_inv_rho = (sigma_min ** (1 / rho)) - max_inv_rho = (sigma_max ** (1 / rho)) - if isinstance(min_inv_rho, torch.Tensor): - min_inv_rho = min_inv_rho.to(device) - if isinstance(max_inv_rho, torch.Tensor): - max_inv_rho = max_inv_rho.to(device) - sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho - return append_zero(sigmas).to(device) steps = sigmas.shape[0] - 1 - if steps >= 20: - restart_steps = 9 - restart_times = 2 if steps >= 36 else 1 - sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2], sigmas[0], device=sigmas.device) - restart_list = {0.1: [restart_steps + 1, restart_times, 2]} - else: - restart_list = dict() + if restart_list is None: + if steps >= 20: + restart_steps = 9 + restart_times = 2 if steps >= 36 else 1 + sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) + restart_list = {0.1: [restart_steps + 1, restart_times, 2]} + else: + restart_list = dict() temp_list = dict() for key, value in restart_list.items(): temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value @@ -91,7 +83,7 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No min_idx = i + 1 max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) if max_idx < min_idx: - sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx], sigmas[max_idx], device=sigmas.device)[:-1] # remove the zero at the end + sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] # remove the zero at the end while restart_times > 0: restart_times -= 1 x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5 -- cgit v1.2.1 From 128d59c9ccfbc9c7fccd6f1b2fe58bbbb18459f9 Mon Sep 17 00:00:00 2001 From: lambertae Date: Thu, 20 Jul 2023 20:36:40 -0400 Subject: fix ruff --- modules/sd_samplers_kdiffusion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index c72d01c8..21b347ed 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -43,7 +43,7 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) step_id = 0 - from k_diffusion.sampling import to_d, append_zero, get_sigmas_karras + from k_diffusion.sampling import to_d, get_sigmas_karras def heun_step(x, old_sigma, new_sigma, second_order = True): nonlocal step_id denoised = model(x, old_sigma * s_in, **extra_args) -- cgit v1.2.1 From f87389029839a27464a18846815339e81787b882 Mon Sep 17 00:00:00 2001 From: lambertae Date: Thu, 20 Jul 2023 21:27:43 -0400 Subject: new restart scheme --- modules/sd_samplers_kdiffusion.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 21b347ed..ed60670c 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -67,7 +67,10 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No if restart_list is None: if steps >= 20: restart_steps = 9 - restart_times = 2 if steps >= 36 else 1 + restart_times = 1 + if steps >= 36: + restart_steps = steps // 4 + restart_times = 2 sigmas = get_sigmas_karras(steps - restart_steps * restart_times, 
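# reserve restart_steps * restart_times steps for the restart passes and re-space the base schedule over what remains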
sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) restart_list = {0.1: [restart_steps + 1, restart_times, 2]} else: -- cgit v1.2.1 From 33694baea1a886021e19da27b4ff41898428cbbd Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Fri, 21 Jul 2023 17:15:44 +0800 Subject: avoid importing timer when it is not strictly needed --- launch.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 370f2bf3..114466cf 100644 --- a/launch.py +++ b/launch.py @@ -1,4 +1,4 @@ -from modules import launch_utils, timer +from modules import launch_utils args = launch_utils.args @@ -26,6 +26,7 @@ start = launch_utils.start def main(): + from modules import timer timer.startup_timer.record("start") if not args.skip_prepare_environment: -- cgit v1.2.1 From 118529a6dc9481499ebcd589a9b42f98b58bca8a Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Fri, 21 Jul 2023 21:49:33 +0800 Subject: typo fix --- modules/launch_utils.py | 6 +++--- webui.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 03552bc2..8e11bf42 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -233,7 +233,7 @@ def run_extensions_installers(settings_file): re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*") -def requrements_met(requirements_file): +def requirements_met(requirements_file): """ Does a simple parse of a requirements.txt file to determine if all rerqirements in it are already installed. Returns True if so, False if not installed or parsing fails. @@ -293,7 +293,7 @@ def prepare_environment(): try: # the existance of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution os.remove(os.path.join(script_path, "tmp", "restart")) - os.environ.setdefault('SD_WEBUI_RESTARTING ', '1') + os.environ.setdefault('SD_WEBUI_RESTARTING', '1') except OSError: pass @@ -354,7 +354,7 @@ def prepare_environment(): if not os.path.isfile(requirements_file): requirements_file = os.path.join(script_path, requirements_file) - if not requrements_met(requirements_file): + if not requirements_met(requirements_file): run_pip(f"install -r \"{requirements_file}\"", "requirements") run_extensions_installers(settings_file=args.ui_settings_file) diff --git a/webui.py b/webui.py index 2aafc09f..2314735f 100644 --- a/webui.py +++ b/webui.py @@ -407,7 +407,7 @@ def webui(): ssl_verify=cmd_opts.disable_tls_verify, debug=cmd_opts.gradio_debug, auth=gradio_auth_creds, - inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING ') != '1', + inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING') != '1', prevent_thread_lock=True, allowed_paths=cmd_opts.gradio_allowed_path, app_kwargs={ -- cgit v1.2.1 From bc91f15ed32ac29c7bdf9e5abbbf02f18d79a1ab Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 21 Jul 2023 22:56:41 +0900 Subject: typo fix --- modules/scripts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/scripts.py b/modules/scripts.py index 7d9dd59f..f34240a0 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -119,7 +119,7 @@ class Script: def after_extra_networks_activate(self, p, *args, **kwargs): """ - Calledafter extra networks activation, before conds calculation + Called after extra networks activation, before conds calculation allow modification of the network after extra networks activation been applied won't be call if p.disable_extra_networks 
-- cgit v1.2.1 From 16eddc622e6d091f51d22269742afddc9b6d0f4b Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Fri, 21 Jul 2023 22:00:03 +0800 Subject: prepend the pythonpath instead of overriding it --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 609a181e..231ebc5f 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -190,7 +190,7 @@ def run_extension_installer(extension_dir): try: env = os.environ.copy() - env['PYTHONPATH'] = os.path.abspath(".") + env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env['PYTHONPATH']}" print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) except Exception as e: -- cgit v1.2.1 From 1fe2dcaa2a5c7f6f4864a94ffb65f8eedd8a0129 Mon Sep 17 00:00:00 2001 From: AnyISalIn Date: Sat, 22 Jul 2023 10:00:27 +0800 Subject: [bug] If txt2img/img2img raises an exception, finally call state.end() Signed-off-by: AnyISalIn --- modules/api/api.py | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 2a4cd8a2..9d73083f 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -333,14 +333,16 @@ class Api: p.outpath_grids = opts.outdir_txt2img_grids p.outpath_samples = opts.outdir_txt2img_samples - shared.state.begin(job="scripts_txt2img") - if selectable_scripts is not None: - p.script_args = script_args - processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here - else: - p.script_args = tuple(script_args) # Need to pass args as tuple here - processed = process_images(p) - shared.state.end() + try: + shared.state.begin(job="scripts_txt2img") + if selectable_scripts is not None: + p.script_args = script_args + processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here + else: + p.script_args = tuple(script_args) # Need to pass args as tuple here + processed = process_images(p) + finally: + shared.state.end() b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else [] @@ -390,14 +392,16 @@ class Api: p.outpath_grids = opts.outdir_img2img_grids p.outpath_samples = opts.outdir_img2img_samples - shared.state.begin(job="scripts_img2img") - if selectable_scripts is not None: - p.script_args = script_args - processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here - else: - p.script_args = tuple(script_args) # Need to pass args as tuple here - processed = process_images(p) - shared.state.end() + try: + shared.state.begin(job="scripts_img2img") + if selectable_scripts is not None: + p.script_args = script_args + processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here + else: + p.script_args = tuple(script_args) # Need to pass args as tuple here + processed = process_images(p) + finally: + shared.state.end() b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else [] -- cgit v1.2.1 From 90eb731ff1d73fdc5872ff9682d5c88c9737ba38 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 22 Jul 2023 12:21:05 +0300 Subject: start timer early anyway --- launch.py | 3 --- modules/launch_utils.py | 1 + 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/launch.py b/launch.py index 114466cf..d46e9e27 100644 --- a/launch.py +++ b/launch.py @@ -1,6 +1,5 @@ from modules import 
launch_utils - args = launch_utils.args python = launch_utils.python git = launch_utils.git @@ -26,8 +25,6 @@ start = launch_utils.start def main(): - from modules import timer - timer.startup_timer.record("start") if not args.skip_prepare_environment: prepare_environment() diff --git a/modules/launch_utils.py b/modules/launch_utils.py index aaa671ab..18b444d4 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -10,6 +10,7 @@ from functools import lru_cache from modules import cmd_args, errors from modules.paths_internal import script_path, extensions_dir +from modules import timer # noqa:F401 args, _ = cmd_args.parser.parse_known_args() -- cgit v1.2.1 From 7afe7375e104d85542f7572ca0f8559bb4e3a7fe Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Sat, 22 Jul 2023 17:46:50 +0800 Subject: display a progressbar for extension installer --- modules/launch_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 18b444d4..b827debe 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -7,6 +7,7 @@ import importlib.util import platform import json from functools import lru_cache +from tqdm.auto import tqdm from modules import cmd_args, errors from modules.paths_internal import script_path, extensions_dir @@ -224,7 +225,10 @@ def run_extensions_installers(settings_file): if not os.path.isdir(extensions_dir): return - for dirname_extension in list_extensions(settings_file): + pbar_extensions = tqdm(list_extensions(settings_file), + bar_format="{desc}: |{bar}|{percentage:3.0f}% [{n_fmt}/{total_fmt} {elapsed}<{remaining}]") + for dirname_extension in pbar_extensions: + pbar_extensions.set_description("Installing %s" % dirname_extension) run_extension_installer(os.path.join(extensions_dir, dirname_extension)) -- cgit v1.2.1 From b2f0040da7df65a5deac34f59263cf518d970dcd Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Sat, 22 Jul 2023 17:51:15 +0800 Subject: fix tqdm not found on new instance --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index b827debe..43256072 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -7,7 +7,6 @@ import importlib.util import platform import json from functools import lru_cache -from tqdm.auto import tqdm from modules import cmd_args, errors from modules.paths_internal import script_path, extensions_dir @@ -225,6 +224,7 @@ def run_extensions_installers(settings_file): if not os.path.isdir(extensions_dir): return + from tqdm.auto import tqdm pbar_extensions = tqdm(list_extensions(settings_file), bar_format="{desc}: |{bar}|{percentage:3.0f}% [{n_fmt}/{total_fmt} {elapsed}<{remaining}]") for dirname_extension in pbar_extensions: -- cgit v1.2.1 From 2a7e34fe79c82ac264919be8965b8d51afa32ac1 Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Sat, 22 Jul 2023 18:09:00 +0800 Subject: fix https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11921#issuecomment-1646547908 --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 18b444d4..0c863e4a 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -194,7 +194,7 @@ def run_extension_installer(extension_dir): try: env = os.environ.copy() - env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env['PYTHONPATH']}" + env['PYTHONPATH'] = 
f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH')}" print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) except Exception as e: -- cgit v1.2.1 From 3c26734d603560f57f7a145eed6cbdd50d20379c Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Sat, 22 Jul 2023 18:33:59 +0800 Subject: nop --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 0c863e4a..c61edb36 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -194,7 +194,7 @@ def run_extension_installer(extension_dir): try: env = os.environ.copy() - env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH')}" + env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}" print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) except Exception as e: -- cgit v1.2.1 From c76a30af41e50932847230631d26bfa9635ebd62 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 22 Jul 2023 13:49:29 +0300 Subject: more info for startup timings --- launch.py | 6 ++++-- modules/launch_utils.py | 26 +++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/launch.py b/launch.py index d46e9e27..e4c2ce99 100644 --- a/launch.py +++ b/launch.py @@ -25,9 +25,11 @@ start = launch_utils.start def main(): + launch_utils.startup_timer.record("initial startup") - if not args.skip_prepare_environment: - prepare_environment() + with launch_utils.startup_timer.subcategory("prepare environment"): + if not args.skip_prepare_environment: + prepare_environment() if args.test_server: configure_for_tests() diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 18b444d4..0178e1b0 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -10,7 +10,7 @@ from functools import lru_cache from modules import cmd_args, errors from modules.paths_internal import script_path, extensions_dir -from modules import timer # noqa:F401 +from modules.timer import startup_timer args, _ = cmd_args.parser.parse_known_args() @@ -224,8 +224,10 @@ def run_extensions_installers(settings_file): if not os.path.isdir(extensions_dir): return - for dirname_extension in list_extensions(settings_file): - run_extension_installer(os.path.join(extensions_dir, dirname_extension)) + with startup_timer.subcategory("run extensions installers"): + for dirname_extension in list_extensions(settings_file): + run_extension_installer(os.path.join(extensions_dir, dirname_extension)) + startup_timer.record(dirname_extension) re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*") @@ -298,8 +300,11 @@ def prepare_environment(): if not args.skip_python_version_check: check_python_version() + startup_timer.record("checks") + commit = commit_hash() tag = git_tag() + startup_timer.record("git version info") print(f"Python {sys.version}") print(f"Version: {tag}") @@ -307,21 +312,27 @@ def prepare_environment(): if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"): run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True) + startup_timer.record("install torch") if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"): raise RuntimeError( 'Torch is not able to use GPU; ' 'add 
--skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check' ) + startup_timer.record("torch GPU test") + if not is_installed("gfpgan"): run_pip(f"install {gfpgan_package}", "gfpgan") + startup_timer.record("install gfpgan") if not is_installed("clip"): run_pip(f"install {clip_package}", "clip") + startup_timer.record("install clip") if not is_installed("open_clip"): run_pip(f"install {openclip_package}", "open_clip") + startup_timer.record("install open_clip") if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers: if platform.system() == "Windows": @@ -335,8 +346,11 @@ def prepare_environment(): elif platform.system() == "Linux": run_pip(f"install -U -I --no-deps {xformers_package}", "xformers") + startup_timer.record("install xformers") + if not is_installed("ngrok") and args.ngrok: run_pip("install ngrok", "ngrok") + startup_timer.record("install ngrok") os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True) @@ -346,22 +360,28 @@ def prepare_environment(): git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash) + startup_timer.record("clone repositores") + if not is_installed("lpips"): run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer") + startup_timer.record("install CodeFormer requirements") if not os.path.isfile(requirements_file): requirements_file = os.path.join(script_path, requirements_file) if not requirements_met(requirements_file): run_pip(f"install -r \"{requirements_file}\"", "requirements") + startup_timer.record("install requirements") run_extensions_installers(settings_file=args.ui_settings_file) if args.update_check: version_check(commit) + startup_timer.record("check version") if args.update_all_extensions: git_pull_recursive(extensions_dir) + startup_timer.record("update extensions") if "--exit" in sys.argv: print("Exiting because of --exit argument") -- cgit v1.2.1 From 0674fabd0d82c69c02ff362762815c4536687b95 Mon Sep 17 00:00:00 2001 From: ljleb Date: Sat, 22 Jul 2023 07:10:20 -0400 Subject: fix AND linebreaks --- modules/prompt_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 0069d8b0..c514e63b 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -153,7 +153,7 @@ def get_learned_conditioning(model, prompts, steps): re_AND = re.compile(r"\bAND\b") -re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$") +re_weight = re.compile(r"^(\s*.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$") def get_multicond_prompt_list(prompts): res_indexes = [] -- cgit v1.2.1 From 88a3e1d3062401d7f5775cf362975de6327fded2 Mon Sep 17 00:00:00 2001 From: ljleb Date: Sat, 22 Jul 2023 07:40:30 -0400 Subject: fix AND linebreaks --- modules/prompt_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index c514e63b..607b1ad3 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -153,7 +153,7 @@ def get_learned_conditioning(model, prompts, steps): re_AND = re.compile(r"\bAND\b") -re_weight = re.compile(r"^(\s*.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$") +re_weight = re.compile(r"^((?:\s|.)*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$") def get_multicond_prompt_list(prompts): res_indexes = [] -- cgit v1.2.1 From a8d4213317c6970aa3ca8cbeeaacb07b936b591c Mon Sep 17 00:00:00 
From 0674fabd0d82c69c02ff362762815c4536687b95 Mon Sep 17 00:00:00 2001
From: ljleb
Date: Sat, 22 Jul 2023 07:10:20 -0400
Subject: fix AND linebreaks

---
 modules/prompt_parser.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 0069d8b0..c514e63b 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -153,7 +153,7 @@ def get_learned_conditioning(model, prompts, steps):


 re_AND = re.compile(r"\bAND\b")
-re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")
+re_weight = re.compile(r"^(\s*.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")

 def get_multicond_prompt_list(prompts):
     res_indexes = []

--
cgit v1.2.1

From 88a3e1d3062401d7f5775cf362975de6327fded2 Mon Sep 17 00:00:00 2001
From: ljleb
Date: Sat, 22 Jul 2023 07:40:30 -0400
Subject: fix AND linebreaks

---
 modules/prompt_parser.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index c514e63b..607b1ad3 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -153,7 +153,7 @@ def get_learned_conditioning(model, prompts, steps):


 re_AND = re.compile(r"\bAND\b")
-re_weight = re.compile(r"^(\s*.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")
+re_weight = re.compile(r"^((?:\s|.)*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")

 def get_multicond_prompt_list(prompts):
     res_indexes = []

--
cgit v1.2.1
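These two patches matter because '.' does not match newlines in Python regular expressions, so re_weight failed on multi-line sub-prompts joined by AND; the first attempt only tolerated leading whitespace, while (?:\s|.) matches any character including newlines. An illustrative comparison (the sample prompt is made up):

import re

old = re.compile(r"^(.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")
new = re.compile(r"^((?:\s|.)*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")

prompt = "a forest\nwith fog :1.5"
print(old.match(prompt))           # None - the newline stops the match
print(new.match(prompt).groups())  # ('a forest\nwith fog', '1.5')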
From a8d4213317c6970aa3ca8cbeeaacb07b936b591c Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 22 Jul 2023 17:08:45 +0300
Subject: add --log-startup option to print detailed startup progress

---
 modules/cmd_args.py     |  1 +
 modules/launch_utils.py |  7 +++++--
 modules/timer.py        | 23 +++++++++++++++++++----
 3 files changed, 25 insertions(+), 6 deletions(-)

diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index e401f641..dd5fadc4 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -13,6 +13,7 @@ parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py
 parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
 parser.add_argument("--update-check", action='store_true', help="launch.py argument: check for updates at startup")
 parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
+parser.add_argument("--log-startup", action='store_true', help="launch.py argument: print a detailed log of what's happening at startup")
 parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
 parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
 parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")

diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index c9e4344b..f77b577a 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -226,8 +226,11 @@ def run_extensions_installers(settings_file):

     with startup_timer.subcategory("run extensions installers"):
         for dirname_extension in list_extensions(settings_file):
-            run_extension_installer(os.path.join(extensions_dir, dirname_extension))
-            startup_timer.record(dirname_extension)
+            path = os.path.join(extensions_dir, dirname_extension)
+
+            if os.path.isdir(path):
+                run_extension_installer(path)
+
+                startup_timer.record(dirname_extension)


 re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")

diff --git a/modules/timer.py b/modules/timer.py
index da99e49f..1d38595c 100644
--- a/modules/timer.py
+++ b/modules/timer.py
@@ -1,4 +1,5 @@
 import time
+import argparse


 class TimerSubcategory:
@@ -11,20 +12,27 @@ class TimerSubcategory:
     def __enter__(self):
         self.start = time.time()
         self.timer.base_category = self.original_base_category + self.category + "/"
+        self.timer.subcategory_level += 1
+
+        if self.timer.print_log:
+            print(f"{' ' * self.timer.subcategory_level}{self.category}:")

     def __exit__(self, exc_type, exc_val, exc_tb):
         elapsed_for_subcategroy = time.time() - self.start
         self.timer.base_category = self.original_base_category
         self.timer.add_time_to_record(self.original_base_category + self.category, elapsed_for_subcategroy)
-        self.timer.record(self.category)
+
+        self.timer.subcategory_level -= 1
+        self.timer.record(self.category, disable_log=True)


 class Timer:
-    def __init__(self):
+    def __init__(self, print_log=False):
         self.start = time.time()
         self.records = {}
         self.total = 0
         self.base_category = ''
+        self.print_log = print_log
+        self.subcategory_level = 0

     def elapsed(self):
         end = time.time()
@@ -38,13 +46,16 @@ class Timer:

         self.records[category] += amount

-    def record(self, category, extra_time=0):
+    def record(self, category, extra_time=0, disable_log=False):
         e = self.elapsed()

         self.add_time_to_record(self.base_category + category, e + extra_time)

         self.total += e + extra_time

+        if self.print_log and not disable_log:
+            print(f"{' ' * self.subcategory_level}{category}: done in {e + extra_time:.3f}s")
+
     def subcategory(self, name):
         self.elapsed()

@@ -71,6 +82,10 @@ class Timer:
         self.__init__()


-startup_timer = Timer()
+parser = argparse.ArgumentParser(add_help=False)
+parser.add_argument("--log-startup", action='store_true', help="print a detailed log of what's happening at startup")
+args = parser.parse_known_args()[0]
+
+startup_timer = Timer(print_log=args.log_startup)

 startup_record = None

--
cgit v1.2.1
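Note how timer.py parses the flag with its own throwaway parser instead of importing the main cmd_args module, presumably so the module stays importable very early in startup. The general parse_known_args pattern looks like this (the argument values passed in are hypothetical):

import argparse

# a module that needs one flag very early can peek at the command line
# without owning the full CLI: unknown arguments are ignored, not errors
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--log-startup", action="store_true")
args, unknown = parser.parse_known_args(["--log-startup", "--port", "7860"])
print(args.log_startup)  # True; "--port 7860" is left for the real parser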
From ec83db897887b52fbf31b430cfc4386e3ad02424 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 22 Jul 2023 17:14:33 +0300
Subject: restyle Startup profile for dark theme users

---
 style.css | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/style.css b/style.css
index e249cfd3..6c92d6e7 100644
--- a/style.css
+++ b/style.css
@@ -423,15 +423,16 @@ div#extras_scale_to_tab div.form{
 }

 table.popup-table{
-    background: white;
+    background: var(--body-background-fill);
+    color: var(--body-text-color);
     border-collapse: collapse;
     margin: 1em;
-    border: 4px solid white;
+    border: 4px solid var(--body-background-fill);
 }

 table.popup-table td{
     padding: 0.4em;
-    border: 1px solid #ccc;
+    border: 1px solid rgba(128, 128, 128, 0.5);
     max-width: 36em;
 }

--
cgit v1.2.1
From 1cbfafafd2dc68c4985da2ae88d7e30f7074fed9 Mon Sep 17 00:00:00 2001
From: AnyISalIn
Date: Mon, 24 Jul 2023 19:45:08 +0800
Subject: feat: add refresh vae api

Signed-off-by: AnyISalIn
---
 modules/api/api.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/modules/api/api.py b/modules/api/api.py
index 9d73083f..09166df2 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -15,7 +15,7 @@ from fastapi.encoders import jsonable_encoder
 from secrets import compare_digest

 import modules.shared as shared
-from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart
+from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items
 from modules.api import models
 from modules.shared import opts
 from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
@@ -197,6 +197,7 @@ class Api:
         self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem])
         self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
         self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
+        self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"])
         self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
         self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
         self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
@@ -608,6 +609,10 @@ class Api:
         with self.queue_lock:
             shared.refresh_checkpoints()

+    def refresh_vae(self):
+        with self.queue_lock:
+            shared_items.refresh_vae_list()
+
     def create_embedding(self, args: dict):
         try:
             shared.state.begin(job="create_embedding")

--
cgit v1.2.1
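A client can trigger the new endpoint with a plain POST and no request body; this sketch assumes a local server started with the API enabled on the default port:

import requests

resp = requests.post("http://127.0.0.1:7860/sdapi/v1/refresh-vae")
resp.raise_for_status()  # returns an empty 200 once the VAE list is rescanned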
From ca45ff1ae6fdd5c2dcd754fde95dd29f49bd414b Mon Sep 17 00:00:00 2001
From: ljleb
Date: Mon, 24 Jul 2023 13:52:24 -0400
Subject: add postprocess_batch_list callback

---
 modules/processing.py | 24 +++++++++++++++++++++++-
 modules/scripts.py    | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/modules/processing.py b/modules/processing.py
index a74a5302..c16404f4 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -717,7 +717,25 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]

     def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
-        return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch, use_main_prompt)
+        all_prompts = p.all_prompts[:]
+        all_seeds = p.all_seeds[:]
+        all_subseeds = p.all_subseeds[:]
+
+        # apply changes to generation data
+        all_prompts[n * p.batch_size:(n + 1) * p.batch_size] = p.prompts
+        all_seeds[n * p.batch_size:(n + 1) * p.batch_size] = p.seeds
+        all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] = p.subseeds
+
+        # update p.all_negative_prompts in case extensions changed the size of the batch
+        # create_infotext below uses it
+        old_negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+        p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] = p.negative_prompts
+
+        try:
+            return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch, use_main_prompt)
+        finally:
+            # restore p.all_negative_prompts in case extensions changed the size of the batch
+            p.all_negative_prompts[n * p.batch_size:n * p.batch_size + len(p.negative_prompts)] = old_negative_prompts

     if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
         model_hijack.embedding_db.load_textual_inversion_embeddings()
@@ -806,6 +824,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             if p.scripts is not None:
                 p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

+                postprocess_batch_list_args = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
+                p.scripts.postprocess_batch_list(p, postprocess_batch_list_args, batch_number=n)
+                x_samples_ddim = postprocess_batch_list_args.images
+
             for i, x_sample in enumerate(x_samples_ddim):
                 p.batch_index = i

diff --git a/modules/scripts.py b/modules/scripts.py
index f34240a0..5b4edcac 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -16,6 +16,11 @@ class PostprocessImageArgs:
         self.image = image


+class PostprocessBatchListArgs:
+    def __init__(self, images):
+        self.images = images
+
+
 class Script:
     name = None
     """script's internal name derived from title"""
@@ -156,6 +161,25 @@ class Script:

         pass

+    def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, *args, **kwargs):
+        """
+        Same as postprocess_batch(), but receives batch images as a list of 3D tensors instead of a 4D tensor.
+        This is useful when you want to update the entire batch instead of individual images.
+
+        You can modify the postprocessing object (pp) to update the images in the batch, remove images, add images, etc.
+        If the number of images is different from the batch size when returning,
+        then the script has the responsibility to also update the following attributes in the processing object (p):
+          - p.prompts
+          - p.negative_prompts
+          - p.seeds
+          - p.subseeds
+
+        **kwargs will have same items as process_batch, and also:
+          - batch_number - index of current batch, from 0 to number of batches-1
+        """
+
+        pass
+
     def postprocess_image(self, p, pp: PostprocessImageArgs, *args):
         """
         Called for every image after it has been generated.
@@ -536,6 +560,14 @@ class ScriptRunner:
             except Exception:
                 errors.report(f"Error running postprocess_batch: {script.filename}", exc_info=True)

+    def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, **kwargs):
+        for script in self.alwayson_scripts:
+            try:
+                script_args = p.script_args[script.args_from:script.args_to]
+                script.postprocess_batch_list(p, pp, *script_args, **kwargs)
+            except Exception:
+                errors.report(f"Error running postprocess_batch_list: {script.filename}", exc_info=True)
+
     def postprocess_image(self, p, pp: PostprocessImageArgs):
         for script in self.alwayson_scripts:
             try:

--
cgit v1.2.1
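A consumer of the new hook might look like the following hypothetical always-on script, which doubles the batch and widens the generation data as the docstring requires (the class name and title are invented for illustration):

import modules.scripts as scripts

class DuplicateBatchScript(scripts.Script):
    """Hypothetical script doubling every batch via the new callback."""

    def title(self):
        return "Duplicate batch"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def postprocess_batch_list(self, p, pp, *args, **kwargs):
        pp.images.extend(list(pp.images))  # duplicate every image in the batch

        # the batch size changed, so generation data must be widened to match
        p.prompts *= 2
        p.negative_prompts *= 2
        p.seeds *= 2
        p.subseeds *= 2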
From 0a89cd1a584b1584a0609c0ba27fb35c434b0b68 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 24 Jul 2023 22:08:08 +0300
Subject: Use less RAM when creating models

---
 modules/cmd_args.py                  |   1 +
 modules/sd_disable_initialization.py | 106 +++++++++++++++++++++++++++++++++--
 modules/sd_models.py                 |  16 ++++--
 webui.py                             |   4 +-
 4 files changed, 114 insertions(+), 13 deletions(-)

diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index dd5fadc4..cb4ec5f7 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -67,6 +67,7 @@ parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="pre
 parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
 parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
 parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
+parser.add_argument("--disable-model-loading-ram-optimization", action='store_true', help="disable an optimization that reduces RAM use when loading a model")
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
 parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
 parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)

diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index 9fc89dc6..695c5736 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -3,8 +3,31 @@ import open_clip
 import torch
 import transformers.utils.hub

+from modules import shared

-class DisableInitialization:
+
+class ReplaceHelper:
+    def __init__(self):
+        self.replaced = []
+
+    def replace(self, obj, field, func):
+        original = getattr(obj, field, None)
+        if original is None:
+            return None
+
+        self.replaced.append((obj, field, original))
+        setattr(obj, field, func)
+
+        return original
+
+    def restore(self):
+        for obj, field, original in self.replaced:
+            setattr(obj, field, original)
+
+        self.replaced.clear()
+
+
+class DisableInitialization(ReplaceHelper):
     """
     When an object of this class enters a `with` block, it starts:
     - preventing torch's layer initialization functions from working
@@ -21,7 +44,7 @@ class DisableInitialization:
     """

     def __init__(self, disable_clip=True):
-        self.replaced = []
+        super().__init__()
         self.disable_clip = disable_clip

     def replace(self, obj, field, func):
@@ -86,8 +109,81 @@ class DisableInitialization:
         self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache)

     def __exit__(self, exc_type, exc_val, exc_tb):
-        for obj, field, original in self.replaced:
-            setattr(obj, field, original)
+        self.restore()

-        self.replaced.clear()

+class InitializeOnMeta(ReplaceHelper):
+    """
+    Context manager that causes all parameters for linear/conv2d/mha layers to be allocated on meta device,
+    which results in those parameters having no values and taking no memory. model.to() will be broken and
+    will need to be repaired by using LoadStateDictOnMeta below when loading params from state dict.
+
+    Usage:
+    ```
+    with sd_disable_initialization.InitializeOnMeta():
+        sd_model = instantiate_from_config(sd_config.model)
+    ```
+    """
+
+    def __enter__(self):
+        if shared.cmd_opts.disable_model_loading_ram_optimization:
+            return
+
+        def set_device(x):
+            x["device"] = "meta"
+            return x
+
+        linear_init = self.replace(torch.nn.Linear, '__init__', lambda *args, **kwargs: linear_init(*args, **set_device(kwargs)))
+        conv2d_init = self.replace(torch.nn.Conv2d, '__init__', lambda *args, **kwargs: conv2d_init(*args, **set_device(kwargs)))
+        mha_init = self.replace(torch.nn.MultiheadAttention, '__init__', lambda *args, **kwargs: mha_init(*args, **set_device(kwargs)))
+        self.replace(torch.nn.Module, 'to', lambda *args, **kwargs: None)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.restore()
+
+
+class LoadStateDictOnMeta(ReplaceHelper):
+    """
+    Context manager that allows to read parameters from state_dict into a model that has some of its parameters in the meta device.
+    As those parameters are read from state_dict, they will be deleted from it, so by the end state_dict will be mostly empty, to save memory.
+    Meant to be used together with InitializeOnMeta above.
+
+    Usage:
+    ```
+    with sd_disable_initialization.LoadStateDictOnMeta(state_dict):
+        model.load_state_dict(state_dict, strict=False)
+    ```
+    """
+
+    def __init__(self, state_dict, device):
+        super().__init__()
+        self.state_dict = state_dict
+        self.device = device
+
+    def __enter__(self):
+        if shared.cmd_opts.disable_model_loading_ram_optimization:
+            return
+
+        sd = self.state_dict
+        device = self.device
+
+        def load_from_state_dict(original, self, state_dict, prefix, *args, **kwargs):
+            params = [(name, param) for name, param in self._parameters.items() if param is not None and param.is_meta]
+
+            for name, param in params:
+                if param.is_meta:
+                    self._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device), requires_grad=param.requires_grad)
+
+            original(self, state_dict, prefix, *args, **kwargs)
+
+            for name, _ in params:
+                key = prefix + name
+                if key in sd:
+                    del sd[key]
+
+        linear_load_from_state_dict = self.replace(torch.nn.Linear, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(linear_load_from_state_dict, *args, **kwargs))
+        conv2d_load_from_state_dict = self.replace(torch.nn.Conv2d, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(conv2d_load_from_state_dict, *args, **kwargs))
+        mha_load_from_state_dict = self.replace(torch.nn.MultiheadAttention, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(mha_load_from_state_dict, *args, **kwargs))
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.restore()

diff --git a/modules/sd_models.py b/modules/sd_models.py
index fb31a793..acb1e817 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -460,7 +460,6 @@ def get_empty_cond(sd_model):
         return sd_model.cond_stage_model([""])

-
 def load_model(checkpoint_info=None, already_loaded_state_dict=None):
     from modules import lowvram, sd_hijack
     checkpoint_info = checkpoint_info or select_checkpoint()
@@ -495,19 +494,24 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
     sd_model = None
     try:
         with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip):
-            sd_model = instantiate_from_config(sd_config.model)
-    except Exception:
-        pass
+            with sd_disable_initialization.InitializeOnMeta():
+                sd_model = instantiate_from_config(sd_config.model)
+
+    except Exception as e:
+        errors.display(e, "creating model quickly", full_traceback=True)

     if sd_model is None:
         print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
-        sd_model = instantiate_from_config(sd_config.model)
+
+        with sd_disable_initialization.InitializeOnMeta():
+            sd_model = instantiate_from_config(sd_config.model)

     sd_model.used_config = checkpoint_config

     timer.record("create model")

-    load_model_weights(sd_model, checkpoint_info, state_dict, timer)
+    with sd_disable_initialization.LoadStateDictOnMeta(state_dict, devices.cpu):
+        load_model_weights(sd_model, checkpoint_info, state_dict, timer)

     if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
         lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)

diff --git a/webui.py b/webui.py
index 2314735f..51248c39 100644
--- a/webui.py
+++ b/webui.py
@@ -320,9 +320,9 @@ def initialize_rest(*, reload_script_modules=False):
         if modules.sd_hijack.current_optimizer is None:
             modules.sd_hijack.apply_optimizations()

-    Thread(target=load_model).start()
+    devices.first_time_calculation()

-    Thread(target=devices.first_time_calculation).start()
+    Thread(target=load_model).start()

     shared.reload_hypernetworks()
     startup_timer.record("reload hypernetworks")

--
cgit v1.2.1
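The core of the optimization is that meta-device parameters carry shape and dtype but occupy no storage; real tensors are only allocated once checkpoint values arrive. A simplified standalone sketch of that idea (the layer size and the fake state dict are arbitrary):

import torch

# parameters created on the meta device take almost no RAM
layer = torch.nn.Linear(4096, 4096, device="meta")
assert layer.weight.is_meta

# when real weights arrive (e.g. from a checkpoint's state_dict), allocate
# real tensors and assign them instead of calling .to() on meta tensors
state = {"weight": torch.randn(4096, 4096), "bias": torch.randn(4096)}
layer.weight = torch.nn.Parameter(state["weight"])
layer.bias = torch.nn.Parameter(state["bias"])
print(layer(torch.randn(2, 4096)).shape)  # torch.Size([2, 4096])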
From 6b68b590321fcac2ad6d71c5aee1ac02687328d7 Mon Sep 17 00:00:00 2001
From: ljleb
Date: Mon, 24 Jul 2023 15:38:52 -0400
Subject: use local vars

---
 modules/processing.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index c16404f4..7043477f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -722,20 +722,20 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         all_subseeds = p.all_subseeds[:]

         # apply changes to generation data
-        all_prompts[n * p.batch_size:(n + 1) * p.batch_size] = p.prompts
-        all_seeds[n * p.batch_size:(n + 1) * p.batch_size] = p.seeds
-        all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] = p.subseeds
+        all_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.prompts
+        all_seeds[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.seeds
+        all_subseeds[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.subseeds

         # update p.all_negative_prompts in case extensions changed the size of the batch
         # create_infotext below uses it
-        old_negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
-        p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] = p.negative_prompts
+        old_negative_prompts = p.all_negative_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size]
+        p.all_negative_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.negative_prompts

         try:
             return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch, use_main_prompt)
         finally:
             # restore p.all_negative_prompts in case extensions changed the size of the batch
-            p.all_negative_prompts[n * p.batch_size:n * p.batch_size + len(p.negative_prompts)] = old_negative_prompts
+            p.all_negative_prompts[iteration * p.batch_size:iteration * p.batch_size + len(p.negative_prompts)] = old_negative_prompts

     if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
         model_hijack.embedding_db.load_textual_inversion_embeddings()

--
cgit v1.2.1

From 5b06607476d1ef2c9d16fe8b21c786b2ca13b95c Mon Sep 17 00:00:00 2001
From: ljleb
Date: Mon, 24 Jul 2023 15:43:06 -0400
Subject: simplify

---
 modules/processing.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 7043477f..6dc178e1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -718,24 +718,26 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:

     def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
         all_prompts = p.all_prompts[:]
+        all_negative_prompts = p.all_negative_prompts[:]
         all_seeds = p.all_seeds[:]
         all_subseeds = p.all_subseeds[:]

         # apply changes to generation data
         all_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.prompts
+        all_negative_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.negative_prompts
         all_seeds[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.seeds
         all_subseeds[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.subseeds

         # update p.all_negative_prompts in case extensions changed the size of the batch
         # create_infotext below uses it
-        old_negative_prompts = p.all_negative_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size]
-        p.all_negative_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.negative_prompts
+        old_negative_prompts = p.all_negative_prompts
+        p.all_negative_prompts = all_negative_prompts

         try:
             return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch, use_main_prompt)
         finally:
             # restore p.all_negative_prompts in case extensions changed the size of the batch
-            p.all_negative_prompts[iteration * p.batch_size:iteration * p.batch_size + len(p.negative_prompts)] = old_negative_prompts
+            p.all_negative_prompts = old_negative_prompts

     if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
         model_hijack.embedding_db.load_textual_inversion_embeddings()

--
cgit v1.2.1
From a68f4690307b0f94404b85513429e18ee3936589 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Mon, 24 Jul 2023 17:54:59 -0400
Subject: Fix to parse TE in some LoRAs

---
 extensions-builtin/Lora/networks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index af8188e3..3a8cfa3b 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -161,7 +161,7 @@ def load_network(name, network_on_disk):
             sd_module = shared.sd_model.network_layer_mapping.get(key, None)
         elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
             key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
-            sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+            sd_module = shared.sd_model.network_layer_mapping.get(key, None) or shared.sd_model.network_layer_mapping.get(key[2:], None)

         if sd_module is None:
             keys_failed_to_match[key_network] = key

--
cgit v1.2.1

From fee593a07f5784bd0bc169535e7017edf22448b4 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Tue, 25 Jul 2023 20:01:10 +0900
Subject: catch exception for non git extensions

---
 modules/extensions.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/modules/extensions.py b/modules/extensions.py
index c561159a..09d1e550 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -56,9 +56,11 @@ class Extension:
                 self.do_read_info_from_repo()

                 return self.to_dict()
-
-        d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
-        self.from_dict(d)
+        try:
+            d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
+            self.from_dict(d)
+        except FileNotFoundError:
+            pass
         self.status = 'unknown'

     def do_read_info_from_repo(self):

--
cgit v1.2.1
From b1a16a298c7f71e8e7d6254f16d4143f77b5c9a5 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Tue, 25 Jul 2023 20:36:39 +0900
Subject: api only subpath (rootpath)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-Authored-By: 陈杰
---
 modules/api/api.py | 4 ++--
 webui.py           | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/modules/api/api.py b/modules/api/api.py
index 9d73083f..606db179 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -724,9 +724,9 @@ class Api:
             cuda = {'error': f'{err}'}
         return models.MemoryResponse(ram=ram, cuda=cuda)

-    def launch(self, server_name, port):
+    def launch(self, server_name, port, root_path):
         self.app.include_router(self.router)
-        uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive)
+        uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive, root_path=root_path)

     def kill_webui(self):
         restart.stop_program()

diff --git a/webui.py b/webui.py
index 2314735f..6bf06854 100644
--- a/webui.py
+++ b/webui.py
@@ -374,7 +374,7 @@ def api_only():
     api.launch(
         server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1",
         port=cmd_opts.port if cmd_opts.port else 7861,
-        root_path = f"/{cmd_opts.subpath}"
+        root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else ""
     )

--
cgit v1.2.1
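uvicorn's root_path tells the ASGI application that it is mounted under a path prefix behind a reverse proxy, so generated URLs and docs account for the prefix. A minimal sketch of the mechanism (the /sdapi prefix here is just an example value):

import uvicorn
from fastapi import FastAPI

app = FastAPI()

@app.get("/ping")
def ping():
    return {"ok": True}

# behind a proxy that forwards /sdapi/* to this server, root_path makes
# FastAPI generate links such as /sdapi/docs instead of broken absolute URLs
uvicorn.run(app, host="127.0.0.1", port=7861, root_path="/sdapi")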
From d0bf509fa14babebedbaef121ef54599003aa457 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 25 Jul 2023 16:18:10 +0300
Subject: fix for #11963

---
 extensions-builtin/Lora/networks.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 3a8cfa3b..17cbe1bb 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -161,7 +161,12 @@ def load_network(name, network_on_disk):
             sd_module = shared.sd_model.network_layer_mapping.get(key, None)
         elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
             key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
-            sd_module = shared.sd_model.network_layer_mapping.get(key, None) or shared.sd_model.network_layer_mapping.get(key[2:], None)
+            sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+
+            # some SD1 Loras also have correct compvis keys
+            if sd_module is None:
+                key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
+                sd_module = shared.sd_model.network_layer_mapping.get(key, None)

         if sd_module is None:
             keys_failed_to_match[key_network] = key

--
cgit v1.2.1

From fd435585864dec036d08beb5fda5f155ee0b312a Mon Sep 17 00:00:00 2001
From: JetVarimax <140423365+JetVarimax@users.noreply.github.com>
Date: Tue, 25 Jul 2023 20:31:15 +0100
Subject: Fix typo

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index b796d150..2fbcc962 100644
--- a/README.md
+++ b/README.md
@@ -88,7 +88,7 @@ A browser interface based on Gradio library for Stable Diffusion.
 - [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
 - Now without any bad letters!
 - Load checkpoints in safetensors format
-- Eased resolution restriction: generated image's domension must be a multiple of 8 rather than 64
+- Eased resolution restriction: generated image's dimension must be a multiple of 8 rather than 64
 - Now with a license!
 - Reorder elements in the UI from settings screen

--
cgit v1.2.1
From 8de6d3ff77e841a5fd9d5f1b16bdd22737c8d657 Mon Sep 17 00:00:00 2001
From: lambertae
Date: Tue, 25 Jul 2023 22:35:43 -0400
Subject: fix progress bar & torchHijack

---
 modules/sd_samplers_kdiffusion.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index ed60670c..7a2427b5 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -79,19 +79,26 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
     for key, value in restart_list.items():
         temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value
     restart_list = temp_list
-    for i in trange(len(sigmas) - 1, disable=disable):
-        x = heun_step(x, sigmas[i], sigmas[i+1])
+    step_list = []
+    for i in range(len(sigmas) - 1):
+        step_list.append((sigmas[i], sigmas[i + 1]))
         if i + 1 in restart_list:
             restart_steps, restart_times, restart_max = restart_list[i + 1]
             min_idx = i + 1
             max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
             if max_idx < min_idx:
-                sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] # remove the zero at the end
+                sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1]
                 while restart_times > 0:
                     restart_times -= 1
-                    x = x + torch.randn_like(x) * s_noise * (sigmas[max_idx] ** 2 - sigmas[min_idx] ** 2) ** 0.5
-                    for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:]):
-                        x = heun_step(x, old_sigma, new_sigma)
+                    step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])])
+    last_sigma = None
+    for i in trange(len(step_list), disable=disable):
+        if last_sigma is None:
+            last_sigma = step_list[i][0]
+        elif last_sigma < step_list[i][0]:
+            x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (step_list[i][0] ** 2 - last_sigma ** 2) ** 0.5
+        x = heun_step(x, step_list[i][0], step_list[i][1])
+        last_sigma = step_list[i][1]
     return x

 samplers_data_k_diffusion = [

--
cgit v1.2.1
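The fix works by precomputing the full (old_sigma, new_sigma) schedule, restart segments included, before iterating, so trange knows the true step count up front and noise is injected exactly where sigma rises between consecutive steps. The same pattern in isolation (step_fn and on_sigma_rise are hypothetical callables standing in for the sampler internals):

from tqdm.auto import trange

def run_steps(step_list, step_fn, on_sigma_rise):
    """Iterate a precomputed schedule so the progress bar total is exact."""
    last_sigma = None
    for i in trange(len(step_list)):
        old_sigma, new_sigma = step_list[i]
        if last_sigma is not None and last_sigma < old_sigma:
            on_sigma_rise(last_sigma, old_sigma)  # restart segment: re-add noise
        step_fn(old_sigma, new_sigma)
        last_sigma = new_sigma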
From b73c405013f63afa82d6358f9ce544931e5e9bc6 Mon Sep 17 00:00:00 2001
From: Littleor
Date: Wed, 26 Jul 2023 11:02:34 +0800
Subject: fix: error rendering name and description in extra network ui

---
 modules/ui_extra_networks.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 49612298..48537bc1 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -253,8 +253,8 @@ class ExtraNetworksPage:
             "prompt": item.get("prompt", None),
             "tabname": quote_js(tabname),
             "local_preview": quote_js(item["local_preview"]),
-            "name": item["name"],
-            "description": (item.get("description") or "" if shared.opts.extra_networks_card_show_desc else ""),
+            "name": html.escape(item["name"]),
+            "description": html.escape(item.get("description") or "" if shared.opts.extra_networks_card_show_desc else ""),
             "card_clicked": onclick,
             "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {quote_js(tabname)}, {quote_js(item["local_preview"])})""") + '"',
             "search_term": item.get("search_term", ""),

--
cgit v1.2.1

From ae36e0899fe912cd701fc4bae5c9d0ce9a5b3e41 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 26 Jul 2023 06:36:06 +0300
Subject: alternative solution for infotext issue

---
 modules/processing.py | 62 ++++++++++++++++++++++-----------------------------
 modules/scripts.py    |  6 ++++-
 2 files changed, 32 insertions(+), 36 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 6dc178e1..146e409a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -600,8 +600,12 @@ def program_version():
     return res


-def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False):
-    index = position_in_batch + iteration * p.batch_size
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
+    if index is None:
+        index = position_in_batch + iteration * p.batch_size
+
+    if all_negative_prompts is None:
+        all_negative_prompts = p.all_negative_prompts

     clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
     enable_hr = getattr(p, 'enable_hr', False)
@@ -642,7 +646,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
     generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])

     prompt_text = p.prompt if use_main_prompt else all_prompts[index]
-    negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else ""
+    negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""

     return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
@@ -716,29 +720,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     else:
         p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]

-    def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
-        all_prompts = p.all_prompts[:]
-        all_negative_prompts = p.all_negative_prompts[:]
-        all_seeds = p.all_seeds[:]
-        all_subseeds = p.all_subseeds[:]
-
-        # apply changes to generation data
-        all_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.prompts
-        all_negative_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.negative_prompts
-        all_seeds[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.seeds
-        all_subseeds[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.subseeds
-
-        # update p.all_negative_prompts in case extensions changed the size of the batch
-        # create_infotext below uses it
-        old_negative_prompts = p.all_negative_prompts
-        p.all_negative_prompts = all_negative_prompts
-
-        try:
-            return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch, use_main_prompt)
-        finally:
-            # restore p.all_negative_prompts in case extensions changed the size of the batch
-            p.all_negative_prompts = old_negative_prompts

     if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
         model_hijack.embedding_db.load_textual_inversion_embeddings()
@@ -826,9 +807,20 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             if p.scripts is not None:
                 p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

-                postprocess_batch_list_args = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
-                p.scripts.postprocess_batch_list(p, postprocess_batch_list_args, batch_number=n)
-                x_samples_ddim = postprocess_batch_list_args.images
+                batch_params = scripts.PostprocessBatchListArgs(
+                    list(x_samples_ddim),
+                    p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size],
+                    p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size],
+                    p.seeds,
+                    p.subseeds,
+                )
+
+                if p.scripts is not None:
+                    p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
+                    x_samples_ddim = batch_params.images
+
+            def infotext(index=0, use_main_prompt=False):
+                return create_infotext(p, batch_params.prompts, batch_params.seeds, batch_params.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=batch_params.negative_prompts)

             for i, x_sample in enumerate(x_samples_ddim):
                 p.batch_index = i
@@ -838,7 +830,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:

                 if p.restore_faces:
                     if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
-                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
+                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")

                     devices.torch_gc()
@@ -855,15 +847,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                 if p.color_corrections is not None and i < len(p.color_corrections):
                     if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
                         image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
-                        images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
+                        images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
                     image = apply_color_correction(p.color_corrections[i], image)

                 image = apply_overlay(image, p.paste_to, i, p.overlay_images)

                 if opts.samples_save and not p.do_not_save_samples:
-                    images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p)
+                    images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)

-                text = infotext(n, i)
+                text = infotext(i)
                 infotexts.append(text)
                 if opts.enable_pnginfo:
                     image.info["parameters"] = text
@@ -874,10 +866,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                     image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')

                     if opts.save_mask:
-                        images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")
+                        images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")

                     if opts.save_mask_composite:
-                        images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")
+                        images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")

                     if opts.return_mask:
                         output_images.append(image_mask)

diff --git a/modules/scripts.py b/modules/scripts.py
index 5b4edcac..1049740d 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -17,8 +17,12 @@ class PostprocessImageArgs:


 class PostprocessBatchListArgs:
-    def __init__(self, images):
+    def __init__(self, images, prompts, negative_prompts, seeds, subseeds):
         self.images = images
+        self.prompts = prompts
+        self.negative_prompts = negative_prompts
+        self.seeds = seeds
+        self.subseeds = subseeds


 class Script:

--
cgit v1.2.1
From 13e371af73645934bd57178a6ced1e9480cc4300 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 26 Jul 2023 06:37:13 +0300
Subject: doc update

---
 modules/scripts.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/modules/scripts.py b/modules/scripts.py
index 1049740d..4317cbb6 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -172,11 +172,11 @@ class Script:

         You can modify the postprocessing object (pp) to update the images in the batch, remove images, add images, etc.
         If the number of images is different from the batch size when returning,
-        then the script has the responsibility to also update the following attributes in the processing object (p):
-          - p.prompts
-          - p.negative_prompts
-          - p.seeds
-          - p.subseeds
+        then the script has the responsibility to also update the following attributes in the processing object (pp):
+          - pp.prompts
+          - pp.negative_prompts
+          - pp.seeds
+          - pp.subseeds

         **kwargs will have same items as process_batch, and also:
           - batch_number - index of current batch, from 0 to number of batches-1

--
cgit v1.2.1

From 7c22bbd3ad5a149e0cf29df887405188fb2d0471 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 26 Jul 2023 07:04:07 +0300
Subject: attempt 2

---
 modules/processing.py | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 146e409a..e9108f11 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -621,12 +621,12 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Sampler": p.sampler_name,
         "CFG scale": p.cfg_scale,
         "Image CFG scale": getattr(p, 'image_cfg_scale', None),
-        "Seed": all_seeds[index],
+        "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
         "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
        "Size": f"{p.width}x{p.height}",
         "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
         "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
-        "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
+        "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
         "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
         "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
         "Denoising strength": getattr(p, 'denoising_strength', None),
@@ -807,20 +807,24 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             if p.scripts is not None:
                 p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

-                batch_params = scripts.PostprocessBatchListArgs(
-                    list(x_samples_ddim),
-                    p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size],
-                    p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size],
-                    p.seeds,
-                    p.subseeds,
-                )
+            batch_params = scripts.PostprocessBatchListArgs(
+                list(x_samples_ddim),
+                p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size],
+                p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size],
+                p.seeds,
+                p.subseeds,
+            )

-                if p.scripts is not None:
                 p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
+
                 x_samples_ddim = batch_params.images
+                p.prompts = batch_params.prompts
+                p.negative_prompts = batch_params.negative_prompts
+                p.seeds = batch_params.seeds
+                p.subseeds = batch_params.subseeds

             def infotext(index=0, use_main_prompt=False):
-                return create_infotext(p, batch_params.prompts, batch_params.seeds, batch_params.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=batch_params.negative_prompts)
+                return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)

             for i, x_sample in enumerate(x_samples_ddim):
                 p.batch_index = i
@@ -910,7 +914,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         p,
         images_list=output_images,
         seed=p.all_seeds[0],
-        info=infotext(),
+        info=infotexts[0],
         comments="".join(f"{comment}\n" for comment in comments),
         subseed=p.all_subseeds[0],
         index_of_first_image=index_of_first_image,

--
cgit v1.2.1
From b8a903efbe76824da48747bec637db2470d1b25d Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Wed, 26 Jul 2023 13:43:38 +0900
Subject: fix check for updates status always "unknown"

---
 modules/extensions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/extensions.py b/modules/extensions.py
index 09d1e550..3ad5ed53 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -61,7 +61,7 @@ class Extension:
             self.from_dict(d)
         except FileNotFoundError:
             pass
-        self.status = 'unknown'
+        self.status = 'unknown' if self.status == '' else self.status

     def do_read_info_from_repo(self):
         repo = None

--
cgit v1.2.1

From 835a7dbf0e73c4cdf945b588d319a6c36652cbe5 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 26 Jul 2023 07:49:57 +0300
Subject: simplify PostprocessBatchListArgs

---
 modules/processing.py | 15 +++------------
 modules/scripts.py    | 16 ++++++----------
 2 files changed, 9 insertions(+), 22 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index e9108f11..b0992ee1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -807,21 +807,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             if p.scripts is not None:
                 p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

-            batch_params = scripts.PostprocessBatchListArgs(
-                list(x_samples_ddim),
-                p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size],
-                p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size],
-                p.seeds,
-                p.subseeds,
-            )
+                p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+                p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]

-            if p.scripts is not None:
+                batch_params = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
                 p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
+                x_samples_ddim = batch_params.images

             def infotext(index=0, use_main_prompt=False):
                 return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)

diff --git a/modules/scripts.py b/modules/scripts.py
index 4317cbb6..5b4edcac 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -17,12 +17,8 @@ class PostprocessImageArgs:


 class PostprocessBatchListArgs:
-    def __init__(self, images, prompts, negative_prompts, seeds, subseeds):
+    def __init__(self, images):
         self.images = images
-        self.prompts = prompts
-        self.negative_prompts = negative_prompts
-        self.seeds = seeds
-        self.subseeds = subseeds


 class Script:
@@ -172,11 +168,11 @@ class Script:

         You can modify the postprocessing object (pp) to update the images in the batch, remove images, add images, etc.
         If the number of images is different from the batch size when returning,
-        then the script has the responsibility to also update the following attributes in the processing object (pp):
-          - pp.prompts
-          - pp.negative_prompts
-          - pp.seeds
-          - pp.subseeds
+        then the script has the responsibility to also update the following attributes in the processing object (p):
+          - p.prompts
+          - p.negative_prompts
+          - p.seeds
+          - p.subseeds

         **kwargs will have same items as process_batch, and also:
           - batch_number - index of current batch, from 0 to number of batches-1

--
cgit v1.2.1
From 246d1f1f706262b6e6f2cdc62de853f90e86a240 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 26 Jul 2023 09:19:46 +0300
Subject: delete scale checker script due to user demand

---
 javascript/badScaleChecker.js | 108 ------------------------------------------
 1 file changed, 108 deletions(-)
 delete mode 100644 javascript/badScaleChecker.js

diff --git a/javascript/badScaleChecker.js b/javascript/badScaleChecker.js
deleted file mode 100644
index 625ad309..00000000
--- a/javascript/badScaleChecker.js
+++ /dev/null
@@ -1,108 +0,0 @@
-(function() {
-    var ignore = localStorage.getItem("bad-scale-ignore-it") == "ignore-it";
-
-    function getScale() {
-        var ratio = 0,
-            screen = window.screen,
-            ua = navigator.userAgent.toLowerCase();
-
-        if (window.devicePixelRatio !== undefined) {
-            ratio = window.devicePixelRatio;
-        } else if (~ua.indexOf('msie')) {
-            if (screen.deviceXDPI && screen.logicalXDPI) {
-                ratio = screen.deviceXDPI / screen.logicalXDPI;
-            }
-        } else if (window.outerWidth !== undefined && window.innerWidth !== undefined) {
-            ratio = window.outerWidth / window.innerWidth;
-        }
-
-        return ratio == 0 ? 0 : Math.round(ratio * 100);
-    }
-
-    var showing = false;
-
-    var div = document.createElement("div");
-    div.style.position = "fixed";
-    div.style.top = "0px";
-    div.style.left = "0px";
-    div.style.width = "100vw";
-    div.style.backgroundColor = "firebrick";
-    div.style.textAlign = "center";
-    div.style.zIndex = 99;
-
-    var b = document.createElement("b");
-    b.innerHTML = 'Bad Scale: ??% ';
-
-    div.appendChild(b);
-
-    var note1 = document.createElement("p");
-    note1.innerHTML = "Change your browser or your computer settings!";
-    note1.title = 'Just make sure "computer-scale" * "browser-scale" = 100% ,\n' +
-        "you can keep your computer-scale and only change this page's scale,\n" +
-        "for example: your computer-scale is 125%, just use [\"CTRL\"+\"-\"] to make your browser-scale of this page to 80%.";
-    div.appendChild(note1);
-
-    var note2 = document.createElement("p");
-    note2.innerHTML = " Otherwise, it will cause this page to not function properly!";
-    note2.title = "When you click \"Copy image to: [inpaint sketch]\" in some img2img's tab,\n" +
-        "if scale<100% the canvas will be invisible,\n" +
-        "else if scale>100% this page will take large amount of memory and CPU performance.";
-    div.appendChild(note2);
-
-    var btn = document.createElement("button");
-    btn.innerHTML = "Click here to ignore";
-
-    div.appendChild(btn);
-
-    function tryShowTopBar(scale) {
-        if (showing) return;
-
-        b.innerHTML = 'Bad Scale: ' + scale + '% ';
-
-        var updateScaleTimer = setInterval(function() {
-            var newScale = getScale();
-            b.innerHTML = 'Bad Scale: ' + newScale + '% ';
-            if (newScale == 100) {
-                var p = div.parentNode;
-                if (p != null) p.removeChild(div);
-                showing = false;
-                clearInterval(updateScaleTimer);
-                check();
-            }
-        }, 999);
-
-        btn.onclick = function() {
-            clearInterval(updateScaleTimer);
-            var p = div.parentNode;
-            if (p != null) p.removeChild(div);
-            ignore = true;
-            showing = false;
-            localStorage.setItem("bad-scale-ignore-it", "ignore-it");
-        };
-
-        document.body.appendChild(div);
-    }
-
-    function check() {
-        if (!ignore) {
-            var timer = setInterval(function() {
-                var scale = getScale();
-                if (scale != 100 && !ignore) {
-                    tryShowTopBar(scale);
-                    clearInterval(timer);
-                }
-                if (ignore) {
-                    clearInterval(timer);
-                }
-            }, 999);
-        }
-    }
-
-    if (document.readyState != "complete") {
-        document.onreadystatechange = function() {
-            if (document.readyState != "complete") check();
-        };
-    } else {
-        check();
-    }
-})();

--
cgit v1.2.1
From 6305632493406ff078a2a3fd22c06859fee260d4 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Wed, 26 Jul 2023 16:47:12 +0900
Subject: use "Any" type when type is None

---
 modules/api/models.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/modules/api/models.py b/modules/api/models.py
index bf97b1a3..800c9b93 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -208,11 +208,9 @@ class PreprocessResponse(BaseModel):
 fields = {}
 for key, metadata in opts.data_labels.items():
     value = opts.data.get(key)
-    optType = opts.typemap.get(type(metadata.default), type(metadata.default))
+    optType = opts.typemap.get(type(metadata.default), type(metadata.default)) if metadata.default else Any

-    if metadata.default is None:
-        pass
-    elif metadata is not None:
+    if metadata is not None:
         fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
     else:
         fields.update({key: (Optional[optType], Field())})

--
cgit v1.2.1
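When an option's default is None, its concrete type cannot be inferred from the default value, so the generated API model now falls back to typing.Any for such fields instead of dropping them. A simplified sketch of the same idea using pydantic's create_model (the option names and defaults here are invented):

from typing import Any, Optional
from pydantic import create_model

defaults = {"sd_vae": "auto", "img_max_size": 2048, "sd_checkpoint": None}

fields = {}
for key, default in defaults.items():
    # a None default carries no type information, so fall back to Any
    opt_type = type(default) if default is not None else Any
    fields[key] = (Optional[opt_type], default)

OptionsModel = create_model("OptionsModel", **fields)
print(OptionsModel().dict())  # {'sd_vae': 'auto', 'img_max_size': 2048, 'sd_checkpoint': None}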
From 187323a606bc0e4913240e5f20c51c9789234654 Mon Sep 17 00:00:00 2001
From: Littleor
Date: Wed, 26 Jul 2023 17:23:57 +0800
Subject: fix: extra network ui description allow HTML tags

---
 modules/ui_extra_networks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 48537bc1..f2752f10 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -254,7 +254,7 @@ class ExtraNetworksPage:
             "tabname": quote_js(tabname),
             "local_preview": quote_js(item["local_preview"]),
             "name": html.escape(item["name"]),
-            "description": html.escape(item.get("description") or "" if shared.opts.extra_networks_card_show_desc else ""),
+            "description": (item.get("description") or "" if shared.opts.extra_networks_card_show_desc else ""),
             "card_clicked": onclick,
             "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {quote_js(tabname)}, {quote_js(item["local_preview"])})""") + '"',
             "search_term": item.get("search_term", ""),

--
cgit v1.2.1

From 8284ebd94c4d4f7e7141ebb47af206faab66ffed Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 26 Jul 2023 13:03:52 +0300
Subject: fix autograd which i broke for no good reason when implementing SDXL

---
 modules/sd_hijack_clip.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 5443e609..990533fe 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -273,9 +273,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
         batch_multipliers = torch.asarray(batch_multipliers).to(devices.device)
         original_mean = z.mean()
-        z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
+        z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
-        z *= (original_mean / new_mean)
+        z = z * (original_mean / new_mean)

         return z

--
cgit v1.2.1

From 89e6dfff717e2c2e57009fdabb0ed2b4f57b98a2 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 26 Jul 2023 15:07:56 +0300
Subject: repair SDXL

---
 modules/sd_hijack_clip.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 990533fe..16a5500e 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -270,6 +270,8 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):

         z = self.encode_with_transformers(tokens)

+        pooled = getattr(z, 'pooled', None)
+
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
         batch_multipliers = torch.asarray(batch_multipliers).to(devices.device)
         original_mean = z.mean()
@@ -277,6 +279,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
         new_mean = z.mean()
         z = z * (original_mean / new_mean)

+        if pooled is not None:
+            z.pooled = pooled
+
         return z

--
cgit v1.2.1
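The autograd fix swaps in-place multiplication for out-of-place multiplication: mutating a tensor that an earlier operation saved for its backward pass makes backward() raise. A self-contained demonstration of the failure mode:

import torch

a = torch.ones(3, requires_grad=True)
y = a.exp()   # exp() saves its output to compute the backward pass
y *= 2        # in-place multiply overwrites that saved tensor
try:
    y.sum().backward()
except RuntimeError as err:
    print("in-place op broke autograd:", err)

b = torch.ones(3, requires_grad=True)
y = b.exp()
y = y * 2     # out-of-place version keeps the graph intact
y.sum().backward()
print(b.grad)  # tensor([5.4366, 5.4366, 5.4366])

The "repair SDXL" follow-up is needed precisely because the out-of-place form returns a new tensor, dropping ad-hoc attributes such as .pooled that SDXL relies on, hence the getattr save and restore around the multiplications.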
From 91a131aa6ca26d35f7879d8240e4f6e321130160 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 27 Jul 2023 09:00:47 +0300
Subject: update lora extension to work with python 3.8

---
 extensions-builtin/Lora/network.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py
index 8ecfa29a..0a18d69e 100644
--- a/extensions-builtin/Lora/network.py
+++ b/extensions-builtin/Lora/network.py
@@ -1,3 +1,4 @@
+from __future__ import annotations
 import os
 from collections import namedtuple
 import enum

--
cgit v1.2.1
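On Python 3.8, modern annotation syntax such as PEP 604 unions raises TypeError at import time unless annotation evaluation is postponed, which is exactly what the future import provides (PEP 563). An illustrative module (the names are hypothetical):

from __future__ import annotations  # postpone evaluation of annotations

# without the future import, "Network | None" would be evaluated eagerly
# and fail on Python 3.8; with it, annotations stay as plain strings
def find_network(name: str, fallback: Network | None = None) -> Network | None:
    return fallback

class Network:  # the forward reference above is fine: it is never evaluated
    pass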
True}), + ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), + ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), + ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), + ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), + ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), + ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), + ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), + ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), + ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), + ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}), +] + + +@torch.no_grad() +def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): + """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" + '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' + '''If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list''' + from tqdm.auto import trange + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + step_id = 0 + from k_diffusion.sampling import to_d, get_sigmas_karras + def heun_step(x, old_sigma, new_sigma, second_order = True): + nonlocal step_id + denoised = model(x, old_sigma * s_in, **extra_args) + d = to_d(x, old_sigma, denoised) + if callback is not None: + callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) + dt = new_sigma - old_sigma + if new_sigma == 0 or not second_order: + # Euler method + x = x + d * dt + else: + # Heun's method + x_2 = x + d * dt + denoised_2 = model(x_2, new_sigma * s_in, **extra_args) + d_2 = to_d(x_2, new_sigma, denoised_2) + d_prime = (d + d_2) / 2 + x = x + d_prime * dt + step_id += 1 + return x + steps = sigmas.shape[0] - 1 + if restart_list is None: + if steps >= 20: + restart_steps = 9 + restart_times = 1 + if steps >= 36: + restart_steps = steps // 4 + restart_times = 2 + sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) + restart_list = {0.1: [restart_steps + 1, restart_times, 2]} + else: + restart_list = dict() + temp_list = dict() + for key, value in restart_list.items(): + temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value + restart_list = temp_list + step_list = [] + for i in range(len(sigmas) - 1): + step_list.append((sigmas[i], sigmas[i + 1])) + if i + 1 in restart_list: + restart_steps, restart_times, restart_max = restart_list[i + 1] + min_idx = i + 1 + max_idx = 
int(torch.argmin(abs(sigmas - restart_max), dim=0)) + if max_idx < min_idx: + sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] + while restart_times > 0: + restart_times -= 1 + step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) + last_sigma = None + for i in trange(len(step_list), disable=disable): + if last_sigma is None: + last_sigma = step_list[i][0] + elif last_sigma < step_list[i][0]: + x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (step_list[i][0] ** 2 - last_sigma ** 2) ** 0.5 + x = heun_step(x, step_list[i][0], step_list[i][1]) + last_sigma = step_list[i][1] + return x + +samplers_data_k_diffusion = [ + sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) + for label, funcname, aliases, options in samplers_k_diffusion + if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler') +] + +sampler_extra_params = { + 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], +} + +k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} +k_diffusion_scheduler = { + 'Automatic': None, + 'karras': k_diffusion.sampling.get_sigmas_karras, + 'exponential': k_diffusion.sampling.get_sigmas_exponential, + 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential +} + + +def catenate_conds(conds): + if not isinstance(conds[0], dict): + return torch.cat(conds) + + return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} + + +def subscript_cond(cond, a, b): + if not isinstance(cond, dict): + return cond[a:b] + + return {key: vec[a:b] for key, vec in cond.items()} + + +def pad_cond(tensor, repeats, empty): + if not isinstance(tensor, dict): + return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) + + tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) + return tensor + + +class CFGDenoiser(torch.nn.Module): + """ + Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) + that can take a noisy picture and produce a noise-free picture using two guidances (prompts) + instead of one. Originally, the second prompt is just an empty string, but we use non-empty + negative prompt. 
+ """ + + def __init__(self, model): + super().__init__() + self.inner_model = model + self.mask = None + self.nmask = None + self.init_latent = None + self.step = 0 + self.image_cfg_scale = None + self.padded_cond_uncond = False + + def combine_denoised(self, x_out, conds_list, uncond, cond_scale): + denoised_uncond = x_out[-uncond.shape[0]:] + denoised = torch.clone(denoised_uncond) + + for i, conds in enumerate(conds_list): + for cond_index, weight in conds: + denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) + + return denoised + + def combine_denoised_for_edit_model(self, x_out, cond_scale): + out_cond, out_img_cond, out_uncond = x_out.chunk(3) + denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) + + return denoised + + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): + if state.interrupted or state.skipped: + raise sd_samplers_common.InterruptedException + + # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, + # so is_edit_model is set to False to support AND composition. + is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 + + conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) + uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) + + assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + + batch_size = len(conds_list) + repeats = [len(conds_list[i]) for i in range(batch_size)] + + if shared.sd_model.model.conditioning_key == "crossattn-adm": + image_uncond = torch.zeros_like(image_cond) + make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} + else: + image_uncond = image_cond + if isinstance(uncond, dict): + make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} + else: + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} + + if not is_edit_model: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) + else: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) + + denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) + cfg_denoiser_callback(denoiser_params) + x_in = denoiser_params.x + image_cond_in = denoiser_params.image_cond + sigma_in = denoiser_params.sigma + tensor = denoiser_params.text_cond + uncond = denoiser_params.text_uncond + skip_uncond = False + + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: + skip_uncond = True 
+ x_in = x_in[:-batch_size] + sigma_in = sigma_in[:-batch_size] + + self.padded_cond_uncond = False + if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: + empty = shared.sd_model.cond_stage_model_empty_prompt + num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] + + if num_repeats < 0: + tensor = pad_cond(tensor, -num_repeats, empty) + self.padded_cond_uncond = True + elif num_repeats > 0: + uncond = pad_cond(uncond, num_repeats, empty) + self.padded_cond_uncond = True + + if tensor.shape[1] == uncond.shape[1] or skip_uncond: + if is_edit_model: + cond_in = catenate_conds([tensor, uncond, uncond]) + elif skip_uncond: + cond_in = tensor + else: + cond_in = catenate_conds([tensor, uncond]) + + if shared.batch_cond_uncond: + x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) + else: + x_out = torch.zeros_like(x_in) + for batch_offset in range(0, x_out.shape[0], batch_size): + a = batch_offset + b = a + batch_size + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) + else: + x_out = torch.zeros_like(x_in) + batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size + for batch_offset in range(0, tensor.shape[0], batch_size): + a = batch_offset + b = min(a + batch_size, tensor.shape[0]) + + if not is_edit_model: + c_crossattn = subscript_cond(tensor, a, b) + else: + c_crossattn = torch.cat([tensor[a:b]], uncond) + + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) + + if not skip_uncond: + x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) + + denoised_image_indexes = [x[0][0] for x in conds_list] + if skip_uncond: + fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) + x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be + + denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) + cfg_denoised_callback(denoised_params) + + devices.test_for_nans(x_out, "unet") + + if opts.live_preview_content == "Prompt": + sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) + elif opts.live_preview_content == "Negative prompt": + sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) + + if is_edit_model: + denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) + elif skip_uncond: + denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) + else: + denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + + if self.mask is not None: + denoised = self.init_latent * self.mask + self.nmask * denoised + + after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) + cfg_after_cfg_callback(after_cfg_callback_params) + denoised = after_cfg_callback_params.x + + self.step += 1 + return denoised + + +class TorchHijack: + def __init__(self, sampler_noises): + # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based + # implementation. 
+ self.sampler_noises = deque(sampler_noises) + + def __getattr__(self, item): + if item == 'randn_like': + return self.randn_like + + if hasattr(torch, item): + return getattr(torch, item) + + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") + + def randn_like(self, x): + if self.sampler_noises: + noise = self.sampler_noises.popleft() + if noise.shape == x.shape: + return noise + + if opts.randn_source == "CPU" or x.device.type == 'mps': + return torch.randn_like(x, device=devices.cpu).to(x.device) + else: + return torch.randn_like(x) + + +class KDiffusionSampler: + def __init__(self, funcname, sd_model): + denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser + + self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) + self.funcname = funcname + self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler + self.extra_params = sampler_extra_params.get(funcname, []) + self.model_wrap_cfg = CFGDenoiser(self.model_wrap) + self.sampler_noises = None + self.stop_at = None + self.eta = None + self.config = None # set by the function calling the constructor + self.last_latent = None + self.s_min_uncond = None + + self.conditioning_key = sd_model.model.conditioning_key + + def callback_state(self, d): + step = d['i'] + latent = d["denoised"] + if opts.live_preview_content == "Combined": + sd_samplers_common.store_latent(latent) + self.last_latent = latent + + if self.stop_at is not None and step > self.stop_at: + raise sd_samplers_common.InterruptedException + + state.sampling_step = step + shared.total_tqdm.update() + + def launch_sampling(self, steps, func): + state.sampling_steps = steps + state.sampling_step = 0 + + try: + return func() + except RecursionError: + print( + 'Encountered RecursionError during sampling, returning last latent. ' + 'rho >5 with a polyexponential scheduler may cause this error. ' + 'You should try to use a smaller rho value instead.' 
+ ) + return self.last_latent + except sd_samplers_common.InterruptedException: + return self.last_latent + + def number_of_needed_noises(self, p): + return p.steps + + def initialize(self, p): + self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None + self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None + self.model_wrap_cfg.step = 0 + self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) + self.eta = p.eta if p.eta is not None else opts.eta_ancestral + self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) + + k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) + + extra_params_kwargs = {} + for param_name in self.extra_params: + if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: + extra_params_kwargs[param_name] = getattr(p, param_name) + + if 'eta' in inspect.signature(self.func).parameters: + if self.eta != 1.0: + p.extra_generation_params["Eta"] = self.eta + + extra_params_kwargs['eta'] = self.eta + + return extra_params_kwargs + + def get_sigmas(self, p, steps): + discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) + if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: + discard_next_to_last_sigma = True + p.extra_generation_params["Discard penultimate sigma"] = True + + steps += 1 if discard_next_to_last_sigma else 0 + + if p.sampler_noise_scheduler_override: + sigmas = p.sampler_noise_scheduler_override(steps) + elif opts.k_sched_type != "Automatic": + m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) + sigmas_kwargs = { + 'sigma_min': sigma_min, + 'sigma_max': sigma_max, + } + + sigmas_func = k_diffusion_scheduler[opts.k_sched_type] + p.extra_generation_params["Schedule type"] = opts.k_sched_type + + if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: + sigmas_kwargs['sigma_min'] = opts.sigma_min + p.extra_generation_params["Schedule min sigma"] = opts.sigma_min + if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: + sigmas_kwargs['sigma_max'] = opts.sigma_max + p.extra_generation_params["Schedule max sigma"] = opts.sigma_max + + default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. 
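In the `k_sched_type` override branch above, `sigmas_kwargs` is handed to the selected `k_diffusion_scheduler` entry, and `rho` is only forwarded when it differs from the scheduler's default of 7 for karras or 1 for polyexponential. As a minimal standalone sketch of the karras case, assuming the real `k_diffusion` package and illustrative SD-1.x-style sigma bounds rather than values taken from `model_wrap.sigmas`:

```python
from k_diffusion.sampling import get_sigmas_karras

# Illustrative bounds; in get_sigmas these come from model_wrap.sigmas
# or from the opts.sigma_min / opts.sigma_max overrides above.
steps = 20
sigma_min, sigma_max = 0.0292, 14.6146

sigmas = get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=7.0, device='cpu')
print(len(sigmas))            # 21: n values plus a trailing zero appended by k-diffusion
print(sigmas[0], sigmas[-1])  # starts at sigma_max, ends at 0
```

That trailing zero is also why `sample_img2img` below takes `sigma_sched[-2]` as `sigma_min` for DPM fast/adaptive, which reject a zero sigma.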
+ + if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: + sigmas_kwargs['rho'] = opts.rho + p.extra_generation_params["Schedule rho"] = opts.rho + + sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) + elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + + sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) + else: + sigmas = self.model_wrap.get_sigmas(steps) + + if discard_next_to_last_sigma: + sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) + + return sigmas + + def create_noise_sampler(self, x, sigmas, p): + """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" + if shared.opts.no_dpmpp_sde_batch_determinism: + return None + + from k_diffusion.sampling import BrownianTreeNoiseSampler + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] + return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) + + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) + + sigmas = self.get_sigmas(p, steps) + + sigma_sched = sigmas[steps - t_enc - 1:] + xi = x + noise * sigma_sched[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: + ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last + extra_params_kwargs['sigma_min'] = sigma_sched[-2] + if 'sigma_max' in parameters: + extra_params_kwargs['sigma_max'] = sigma_sched[0] + if 'n' in parameters: + extra_params_kwargs['n'] = len(sigma_sched) - 1 + if 'sigma_sched' in parameters: + extra_params_kwargs['sigma_sched'] = sigma_sched + if 'sigmas' in parameters: + extra_params_kwargs['sigmas'] = sigma_sched + + if self.config.options.get('brownian_noise', False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.model_wrap_cfg.init_latent = x + self.last_latent = x + extra_args = { + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + } + + samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + + def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps = steps or p.steps + + sigmas = self.get_sigmas(p, steps) + + x = x * sigmas[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: + extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() + extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() + if 'n' in parameters: + extra_params_kwargs['n'] = steps + else: + extra_params_kwargs['sigmas'] = sigmas + + if self.config.options.get('brownian_noise', 
False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.last_latent = x + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + }, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py deleted file mode 100644 index a54673eb..00000000 --- a/modules/sd_samplers_kdiffusion.py +++ /dev/null @@ -1,545 +0,0 @@ -from collections import deque -import torch -import inspect -import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common - -from modules.shared import opts, state -import modules.shared as shared -from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback -from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback -from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback - -samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), - ('Euler', 'sample_euler', ['k_euler'], {}), - ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}), -] - - -@torch.no_grad() -def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): - """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" - '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' - 
'''If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list''' - from tqdm.auto import trange - extra_args = {} if extra_args is None else extra_args - s_in = x.new_ones([x.shape[0]]) - step_id = 0 - from k_diffusion.sampling import to_d, get_sigmas_karras - def heun_step(x, old_sigma, new_sigma, second_order = True): - nonlocal step_id - denoised = model(x, old_sigma * s_in, **extra_args) - d = to_d(x, old_sigma, denoised) - if callback is not None: - callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) - dt = new_sigma - old_sigma - if new_sigma == 0 or not second_order: - # Euler method - x = x + d * dt - else: - # Heun's method - x_2 = x + d * dt - denoised_2 = model(x_2, new_sigma * s_in, **extra_args) - d_2 = to_d(x_2, new_sigma, denoised_2) - d_prime = (d + d_2) / 2 - x = x + d_prime * dt - step_id += 1 - return x - steps = sigmas.shape[0] - 1 - if restart_list is None: - if steps >= 20: - restart_steps = 9 - restart_times = 1 - if steps >= 36: - restart_steps = steps // 4 - restart_times = 2 - sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) - restart_list = {0.1: [restart_steps + 1, restart_times, 2]} - else: - restart_list = dict() - temp_list = dict() - for key, value in restart_list.items(): - temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value - restart_list = temp_list - step_list = [] - for i in range(len(sigmas) - 1): - step_list.append((sigmas[i], sigmas[i + 1])) - if i + 1 in restart_list: - restart_steps, restart_times, restart_max = restart_list[i + 1] - min_idx = i + 1 - max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) - if max_idx < min_idx: - sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] - while restart_times > 0: - restart_times -= 1 - step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) - last_sigma = None - for i in trange(len(step_list), disable=disable): - if last_sigma is None: - last_sigma = step_list[i][0] - elif last_sigma < step_list[i][0]: - x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (step_list[i][0] ** 2 - last_sigma ** 2) ** 0.5 - x = heun_step(x, step_list[i][0], step_list[i][1]) - last_sigma = step_list[i][1] - return x - -samplers_data_k_diffusion = [ - sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) - for label, funcname, aliases, options in samplers_k_diffusion - if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler') -] - -sampler_extra_params = { - 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], -} - -k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} -k_diffusion_scheduler = { - 'Automatic': None, - 'karras': k_diffusion.sampling.get_sigmas_karras, - 'exponential': k_diffusion.sampling.get_sigmas_exponential, - 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential -} - - -def catenate_conds(conds): - if not isinstance(conds[0], dict): - return torch.cat(conds) - - return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} - - -def subscript_cond(cond, a, b): - if not isinstance(cond, dict): - return cond[a:b] - - 
return {key: vec[a:b] for key, vec in cond.items()} - - -def pad_cond(tensor, repeats, empty): - if not isinstance(tensor, dict): - return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) - - tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) - return tensor - - -class CFGDenoiser(torch.nn.Module): - """ - Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) - that can take a noisy picture and produce a noise-free picture using two guidances (prompts) - instead of one. Originally, the second prompt is just an empty string, but we use non-empty - negative prompt. - """ - - def __init__(self, model): - super().__init__() - self.inner_model = model - self.mask = None - self.nmask = None - self.init_latent = None - self.step = 0 - self.image_cfg_scale = None - self.padded_cond_uncond = False - - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): - denoised_uncond = x_out[-uncond.shape[0]:] - denoised = torch.clone(denoised_uncond) - - for i, conds in enumerate(conds_list): - for cond_index, weight in conds: - denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) - - return denoised - - def combine_denoised_for_edit_model(self, x_out, cond_scale): - out_cond, out_img_cond, out_uncond = x_out.chunk(3) - denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) - - return denoised - - def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, - # so is_edit_model is set to False to support AND composition. 
- is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - - assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - - batch_size = len(conds_list) - repeats = [len(conds_list[i]) for i in range(batch_size)] - - if shared.sd_model.model.conditioning_key == "crossattn-adm": - image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} - else: - image_uncond = image_cond - if isinstance(uncond, dict): - make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} - else: - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} - - if not is_edit_model: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) - else: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) - - denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) - cfg_denoiser_callback(denoiser_params) - x_in = denoiser_params.x - image_cond_in = denoiser_params.image_cond - sigma_in = denoiser_params.sigma - tensor = denoiser_params.text_cond - uncond = denoiser_params.text_uncond - skip_uncond = False - - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: - skip_uncond = True - x_in = x_in[:-batch_size] - sigma_in = sigma_in[:-batch_size] - - self.padded_cond_uncond = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - empty = shared.sd_model.cond_stage_model_empty_prompt - num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] - - if num_repeats < 0: - tensor = pad_cond(tensor, -num_repeats, empty) - self.padded_cond_uncond = True - elif num_repeats > 0: - uncond = pad_cond(uncond, num_repeats, empty) - self.padded_cond_uncond = True - - if tensor.shape[1] == uncond.shape[1] or skip_uncond: - if is_edit_model: - cond_in = catenate_conds([tensor, uncond, uncond]) - elif skip_uncond: - cond_in = tensor - else: - cond_in = catenate_conds([tensor, uncond]) - - if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) - else: - x_out = torch.zeros_like(x_in) - for batch_offset in range(0, x_out.shape[0], batch_size): - a = batch_offset - b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) - else: - x_out = 
torch.zeros_like(x_in) - batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size - for batch_offset in range(0, tensor.shape[0], batch_size): - a = batch_offset - b = min(a + batch_size, tensor.shape[0]) - - if not is_edit_model: - c_crossattn = subscript_cond(tensor, a, b) - else: - c_crossattn = torch.cat([tensor[a:b]], uncond) - - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - - if not skip_uncond: - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) - - denoised_image_indexes = [x[0][0] for x in conds_list] - if skip_uncond: - fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) - x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be - - denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) - cfg_denoised_callback(denoised_params) - - devices.test_for_nans(x_out, "unet") - - if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) - elif opts.live_preview_content == "Negative prompt": - sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - - if is_edit_model: - denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) - elif skip_uncond: - denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) - else: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - - if self.mask is not None: - denoised = self.init_latent * self.mask + self.nmask * denoised - - after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) - cfg_after_cfg_callback(after_cfg_callback_params) - denoised = after_cfg_callback_params.x - - self.step += 1 - return denoised - - -class TorchHijack: - def __init__(self, sampler_noises): - # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. 
- self.sampler_noises = deque(sampler_noises) - - def __getattr__(self, item): - if item == 'randn_like': - return self.randn_like - - if hasattr(torch, item): - return getattr(torch, item) - - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") - - def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise - - if opts.randn_source == "CPU" or x.device.type == 'mps': - return torch.randn_like(x, device=devices.cpu).to(x.device) - else: - return torch.randn_like(x) - - -class KDiffusionSampler: - def __init__(self, funcname, sd_model): - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.funcname = funcname - self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler - self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser(self.model_wrap) - self.sampler_noises = None - self.stop_at = None - self.eta = None - self.config = None # set by the function calling the constructor - self.last_latent = None - self.s_min_uncond = None - - self.conditioning_key = sd_model.model.conditioning_key - - def callback_state(self, d): - step = d['i'] - latent = d["denoised"] - if opts.live_preview_content == "Combined": - sd_samplers_common.store_latent(latent) - self.last_latent = latent - - if self.stop_at is not None and step > self.stop_at: - raise sd_samplers_common.InterruptedException - - state.sampling_step = step - shared.total_tqdm.update() - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except RecursionError: - print( - 'Encountered RecursionError during sampling, returning last latent. ' - 'rho >5 with a polyexponential scheduler may cause this error. ' - 'You should try to use a smaller rho value instead.' 
- ) - return self.last_latent - except sd_samplers_common.InterruptedException: - return self.last_latent - - def number_of_needed_noises(self, p): - return p.steps - - def initialize(self, p): - self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None - self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None - self.model_wrap_cfg.step = 0 - self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.eta = p.eta if p.eta is not None else opts.eta_ancestral - self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) - - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) - - extra_params_kwargs = {} - for param_name in self.extra_params: - if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: - extra_params_kwargs[param_name] = getattr(p, param_name) - - if 'eta' in inspect.signature(self.func).parameters: - if self.eta != 1.0: - p.extra_generation_params["Eta"] = self.eta - - extra_params_kwargs['eta'] = self.eta - - return extra_params_kwargs - - def get_sigmas(self, p, steps): - discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) - if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: - discard_next_to_last_sigma = True - p.extra_generation_params["Discard penultimate sigma"] = True - - steps += 1 if discard_next_to_last_sigma else 0 - - if p.sampler_noise_scheduler_override: - sigmas = p.sampler_noise_scheduler_override(steps) - elif opts.k_sched_type != "Automatic": - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) - sigmas_kwargs = { - 'sigma_min': sigma_min, - 'sigma_max': sigma_max, - } - - sigmas_func = k_diffusion_scheduler[opts.k_sched_type] - p.extra_generation_params["Schedule type"] = opts.k_sched_type - - if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: - sigmas_kwargs['sigma_min'] = opts.sigma_min - p.extra_generation_params["Schedule min sigma"] = opts.sigma_min - if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: - sigmas_kwargs['sigma_max'] = opts.sigma_max - p.extra_generation_params["Schedule max sigma"] = opts.sigma_max - - default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. 
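When `discard_next_to_last_sigma` is set, `get_sigmas` requests one extra step and then drops the next-to-last value with the `torch.cat([sigmas[:-2], sigmas[-1:]])` seen further below, so the schedule keeps its final zero while skipping the very small penultimate sigma. A toy sketch with made-up values, just to show the tensor surgery:

```python
import torch

# Made-up schedule; a real one comes from model_wrap.get_sigmas or a karras ramp.
sigmas = torch.tensor([14.6, 8.0, 4.2, 2.0, 0.9, 0.3, 0.0])

discarded = torch.cat([sigmas[:-2], sigmas[-1:]])
print(discarded)  # tensor([14.6000, 8.0000, 4.2000, 2.0000, 0.9000, 0.0000])
```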
- - if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: - sigmas_kwargs['rho'] = opts.rho - p.extra_generation_params["Schedule rho"] = opts.rho - - sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) - else: - sigmas = self.model_wrap.get_sigmas(steps) - - if discard_next_to_last_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - - return sigmas - - def create_noise_sampler(self, x, sigmas, p): - """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" - if shared.opts.no_dpmpp_sde_batch_determinism: - return None - - from k_diffusion.sampling import BrownianTreeNoiseSampler - sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) - - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) - - sigmas = self.get_sigmas(p, steps) - - sigma_sched = sigmas[steps - t_enc - 1:] - xi = x + noise * sigma_sched[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last - extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in parameters: - extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in parameters: - extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in parameters: - extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in parameters: - extra_params_kwargs['sigmas'] = sigma_sched - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.model_wrap_cfg.init_latent = x - self.last_latent = x - extra_args = { - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - } - - samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps = steps or p.steps - - sigmas = self.get_sigmas(p, steps) - - x = x * sigmas[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() - extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in parameters: - extra_params_kwargs['n'] = steps - else: - extra_params_kwargs['sigmas'] = sigmas - - if self.config.options.get('brownian_noise', 
False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.last_latent = x - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - }, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - -- cgit v1.2.1 From e1323fc1b70438165bbfd90d80dec00b8b2ab884 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 29 Jul 2023 08:06:03 +0300 Subject: Split history: mv modules/sd_samplers_kdiffusion.py temp --- modules/sd_samplers_kdiffusion.py | 545 -------------------------------------- temp | 545 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 545 insertions(+), 545 deletions(-) delete mode 100644 modules/sd_samplers_kdiffusion.py create mode 100644 temp diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py deleted file mode 100644 index a54673eb..00000000 --- a/modules/sd_samplers_kdiffusion.py +++ /dev/null @@ -1,545 +0,0 @@ -from collections import deque -import torch -import inspect -import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common - -from modules.shared import opts, state -import modules.shared as shared -from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback -from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback -from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback - -samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), - ('Euler', 'sample_euler', ['k_euler'], {}), - ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', 
['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}), -] - - -@torch.no_grad() -def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): - """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" - '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' - '''If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list''' - from tqdm.auto import trange - extra_args = {} if extra_args is None else extra_args - s_in = x.new_ones([x.shape[0]]) - step_id = 0 - from k_diffusion.sampling import to_d, get_sigmas_karras - def heun_step(x, old_sigma, new_sigma, second_order = True): - nonlocal step_id - denoised = model(x, old_sigma * s_in, **extra_args) - d = to_d(x, old_sigma, denoised) - if callback is not None: - callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) - dt = new_sigma - old_sigma - if new_sigma == 0 or not second_order: - # Euler method - x = x + d * dt - else: - # Heun's method - x_2 = x + d * dt - denoised_2 = model(x_2, new_sigma * s_in, **extra_args) - d_2 = to_d(x_2, new_sigma, denoised_2) - d_prime = (d + d_2) / 2 - x = x + d_prime * dt - step_id += 1 - return x - steps = sigmas.shape[0] - 1 - if restart_list is None: - if steps >= 20: - restart_steps = 9 - restart_times = 1 - if steps >= 36: - restart_steps = steps // 4 - restart_times = 2 - sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) - restart_list = {0.1: [restart_steps + 1, restart_times, 2]} - else: - restart_list = dict() - temp_list = dict() - for key, value in restart_list.items(): - temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value - restart_list = temp_list - step_list = [] - for i in range(len(sigmas) - 1): - step_list.append((sigmas[i], sigmas[i + 1])) - if i + 1 in restart_list: - restart_steps, restart_times, restart_max = restart_list[i + 1] - min_idx = i + 1 - max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) - if max_idx < min_idx: - sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] - while restart_times > 0: - restart_times -= 1 - step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) - last_sigma = None - for i in trange(len(step_list), disable=disable): - if last_sigma is None: - last_sigma = step_list[i][0] - elif last_sigma < step_list[i][0]: - x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (step_list[i][0] ** 2 - last_sigma ** 2) ** 0.5 - x = heun_step(x, step_list[i][0], step_list[i][1]) - last_sigma = step_list[i][1] - return x - -samplers_data_k_diffusion = [ - sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) - for label, funcname, aliases, options in samplers_k_diffusion - if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler') -] - -sampler_extra_params = { - 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], -} - -k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} 
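The `funcname=funcname` default argument in the `samplers_data_k_diffusion` comprehension above exists because Python closures bind names late: without it, every constructed lambda would resolve `funcname` to the last sampler in the list. A self-contained sketch of the difference, using placeholder names rather than the real sampler table:

```python
names = ['euler', 'heun', 'lms']

late_bound = [lambda: name for name in names]        # all three share the final 'name'
frozen = [lambda name=name: name for name in names]  # default arg captures each value

print([f() for f in late_bound])  # ['lms', 'lms', 'lms']
print([f() for f in frozen])      # ['euler', 'heun', 'lms']
```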
-k_diffusion_scheduler = { - 'Automatic': None, - 'karras': k_diffusion.sampling.get_sigmas_karras, - 'exponential': k_diffusion.sampling.get_sigmas_exponential, - 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential -} - - -def catenate_conds(conds): - if not isinstance(conds[0], dict): - return torch.cat(conds) - - return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} - - -def subscript_cond(cond, a, b): - if not isinstance(cond, dict): - return cond[a:b] - - return {key: vec[a:b] for key, vec in cond.items()} - - -def pad_cond(tensor, repeats, empty): - if not isinstance(tensor, dict): - return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) - - tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) - return tensor - - -class CFGDenoiser(torch.nn.Module): - """ - Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) - that can take a noisy picture and produce a noise-free picture using two guidances (prompts) - instead of one. Originally, the second prompt is just an empty string, but we use non-empty - negative prompt. - """ - - def __init__(self, model): - super().__init__() - self.inner_model = model - self.mask = None - self.nmask = None - self.init_latent = None - self.step = 0 - self.image_cfg_scale = None - self.padded_cond_uncond = False - - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): - denoised_uncond = x_out[-uncond.shape[0]:] - denoised = torch.clone(denoised_uncond) - - for i, conds in enumerate(conds_list): - for cond_index, weight in conds: - denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) - - return denoised - - def combine_denoised_for_edit_model(self, x_out, cond_scale): - out_cond, out_img_cond, out_uncond = x_out.chunk(3) - denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) - - return denoised - - def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, - # so is_edit_model is set to False to support AND composition. 
- is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - - assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - - batch_size = len(conds_list) - repeats = [len(conds_list[i]) for i in range(batch_size)] - - if shared.sd_model.model.conditioning_key == "crossattn-adm": - image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} - else: - image_uncond = image_cond - if isinstance(uncond, dict): - make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} - else: - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} - - if not is_edit_model: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) - else: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) - - denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) - cfg_denoiser_callback(denoiser_params) - x_in = denoiser_params.x - image_cond_in = denoiser_params.image_cond - sigma_in = denoiser_params.sigma - tensor = denoiser_params.text_cond - uncond = denoiser_params.text_uncond - skip_uncond = False - - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: - skip_uncond = True - x_in = x_in[:-batch_size] - sigma_in = sigma_in[:-batch_size] - - self.padded_cond_uncond = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - empty = shared.sd_model.cond_stage_model_empty_prompt - num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] - - if num_repeats < 0: - tensor = pad_cond(tensor, -num_repeats, empty) - self.padded_cond_uncond = True - elif num_repeats > 0: - uncond = pad_cond(uncond, num_repeats, empty) - self.padded_cond_uncond = True - - if tensor.shape[1] == uncond.shape[1] or skip_uncond: - if is_edit_model: - cond_in = catenate_conds([tensor, uncond, uncond]) - elif skip_uncond: - cond_in = tensor - else: - cond_in = catenate_conds([tensor, uncond]) - - if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) - else: - x_out = torch.zeros_like(x_in) - for batch_offset in range(0, x_out.shape[0], batch_size): - a = batch_offset - b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) - else: - x_out = 
torch.zeros_like(x_in) - batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size - for batch_offset in range(0, tensor.shape[0], batch_size): - a = batch_offset - b = min(a + batch_size, tensor.shape[0]) - - if not is_edit_model: - c_crossattn = subscript_cond(tensor, a, b) - else: - c_crossattn = torch.cat([tensor[a:b]], uncond) - - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - - if not skip_uncond: - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) - - denoised_image_indexes = [x[0][0] for x in conds_list] - if skip_uncond: - fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) - x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be - - denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) - cfg_denoised_callback(denoised_params) - - devices.test_for_nans(x_out, "unet") - - if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) - elif opts.live_preview_content == "Negative prompt": - sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - - if is_edit_model: - denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) - elif skip_uncond: - denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) - else: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - - if self.mask is not None: - denoised = self.init_latent * self.mask + self.nmask * denoised - - after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) - cfg_after_cfg_callback(after_cfg_callback_params) - denoised = after_cfg_callback_params.x - - self.step += 1 - return denoised - - -class TorchHijack: - def __init__(self, sampler_noises): - # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. 
- self.sampler_noises = deque(sampler_noises) - - def __getattr__(self, item): - if item == 'randn_like': - return self.randn_like - - if hasattr(torch, item): - return getattr(torch, item) - - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") - - def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise - - if opts.randn_source == "CPU" or x.device.type == 'mps': - return torch.randn_like(x, device=devices.cpu).to(x.device) - else: - return torch.randn_like(x) - - -class KDiffusionSampler: - def __init__(self, funcname, sd_model): - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.funcname = funcname - self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler - self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser(self.model_wrap) - self.sampler_noises = None - self.stop_at = None - self.eta = None - self.config = None # set by the function calling the constructor - self.last_latent = None - self.s_min_uncond = None - - self.conditioning_key = sd_model.model.conditioning_key - - def callback_state(self, d): - step = d['i'] - latent = d["denoised"] - if opts.live_preview_content == "Combined": - sd_samplers_common.store_latent(latent) - self.last_latent = latent - - if self.stop_at is not None and step > self.stop_at: - raise sd_samplers_common.InterruptedException - - state.sampling_step = step - shared.total_tqdm.update() - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except RecursionError: - print( - 'Encountered RecursionError during sampling, returning last latent. ' - 'rho >5 with a polyexponential scheduler may cause this error. ' - 'You should try to use a smaller rho value instead.' 
- ) - return self.last_latent - except sd_samplers_common.InterruptedException: - return self.last_latent - - def number_of_needed_noises(self, p): - return p.steps - - def initialize(self, p): - self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None - self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None - self.model_wrap_cfg.step = 0 - self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.eta = p.eta if p.eta is not None else opts.eta_ancestral - self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) - - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) - - extra_params_kwargs = {} - for param_name in self.extra_params: - if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: - extra_params_kwargs[param_name] = getattr(p, param_name) - - if 'eta' in inspect.signature(self.func).parameters: - if self.eta != 1.0: - p.extra_generation_params["Eta"] = self.eta - - extra_params_kwargs['eta'] = self.eta - - return extra_params_kwargs - - def get_sigmas(self, p, steps): - discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) - if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: - discard_next_to_last_sigma = True - p.extra_generation_params["Discard penultimate sigma"] = True - - steps += 1 if discard_next_to_last_sigma else 0 - - if p.sampler_noise_scheduler_override: - sigmas = p.sampler_noise_scheduler_override(steps) - elif opts.k_sched_type != "Automatic": - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) - sigmas_kwargs = { - 'sigma_min': sigma_min, - 'sigma_max': sigma_max, - } - - sigmas_func = k_diffusion_scheduler[opts.k_sched_type] - p.extra_generation_params["Schedule type"] = opts.k_sched_type - - if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: - sigmas_kwargs['sigma_min'] = opts.sigma_min - p.extra_generation_params["Schedule min sigma"] = opts.sigma_min - if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: - sigmas_kwargs['sigma_max'] = opts.sigma_max - p.extra_generation_params["Schedule max sigma"] = opts.sigma_max - - default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. 
- - if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: - sigmas_kwargs['rho'] = opts.rho - p.extra_generation_params["Schedule rho"] = opts.rho - - sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) - else: - sigmas = self.model_wrap.get_sigmas(steps) - - if discard_next_to_last_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - - return sigmas - - def create_noise_sampler(self, x, sigmas, p): - """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" - if shared.opts.no_dpmpp_sde_batch_determinism: - return None - - from k_diffusion.sampling import BrownianTreeNoiseSampler - sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) - - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) - - sigmas = self.get_sigmas(p, steps) - - sigma_sched = sigmas[steps - t_enc - 1:] - xi = x + noise * sigma_sched[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last - extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in parameters: - extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in parameters: - extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in parameters: - extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in parameters: - extra_params_kwargs['sigmas'] = sigma_sched - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.model_wrap_cfg.init_latent = x - self.last_latent = x - extra_args = { - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - } - - samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps = steps or p.steps - - sigmas = self.get_sigmas(p, steps) - - x = x * sigmas[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() - extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in parameters: - extra_params_kwargs['n'] = steps - else: - extra_params_kwargs['sigmas'] = sigmas - - if self.config.options.get('brownian_noise', 
False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.last_latent = x - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - }, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - diff --git a/temp b/temp new file mode 100644 index 00000000..a54673eb --- /dev/null +++ b/temp @@ -0,0 +1,545 @@ +from collections import deque +import torch +import inspect +import k_diffusion.sampling +from modules import prompt_parser, devices, sd_samplers_common + +from modules.shared import opts, state +import modules.shared as shared +from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback +from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback +from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback + +samplers_k_diffusion = [ + ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), + ('Euler', 'sample_euler', ['k_euler'], {}), + ('LMS', 'sample_lms', ['k_lms'], {}), + ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), + ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), + ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), + ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), + ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), + ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), + ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), + ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), + ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), + ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), + ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), + ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}), +] + + +@torch.no_grad() +def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): + """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" + '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' + '''If restart_list is None: will choose restart_list automatically, otherwise will use the 
given restart_list''' + from tqdm.auto import trange + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + step_id = 0 + from k_diffusion.sampling import to_d, get_sigmas_karras + def heun_step(x, old_sigma, new_sigma, second_order = True): + nonlocal step_id + denoised = model(x, old_sigma * s_in, **extra_args) + d = to_d(x, old_sigma, denoised) + if callback is not None: + callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) + dt = new_sigma - old_sigma + if new_sigma == 0 or not second_order: + # Euler method + x = x + d * dt + else: + # Heun's method + x_2 = x + d * dt + denoised_2 = model(x_2, new_sigma * s_in, **extra_args) + d_2 = to_d(x_2, new_sigma, denoised_2) + d_prime = (d + d_2) / 2 + x = x + d_prime * dt + step_id += 1 + return x + steps = sigmas.shape[0] - 1 + if restart_list is None: + if steps >= 20: + restart_steps = 9 + restart_times = 1 + if steps >= 36: + restart_steps = steps // 4 + restart_times = 2 + sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) + restart_list = {0.1: [restart_steps + 1, restart_times, 2]} + else: + restart_list = dict() + temp_list = dict() + for key, value in restart_list.items(): + temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value + restart_list = temp_list + step_list = [] + for i in range(len(sigmas) - 1): + step_list.append((sigmas[i], sigmas[i + 1])) + if i + 1 in restart_list: + restart_steps, restart_times, restart_max = restart_list[i + 1] + min_idx = i + 1 + max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) + if max_idx < min_idx: + sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] + while restart_times > 0: + restart_times -= 1 + step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) + last_sigma = None + for i in trange(len(step_list), disable=disable): + if last_sigma is None: + last_sigma = step_list[i][0] + elif last_sigma < step_list[i][0]: + x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (step_list[i][0] ** 2 - last_sigma ** 2) ** 0.5 + x = heun_step(x, step_list[i][0], step_list[i][1]) + last_sigma = step_list[i][1] + return x + +samplers_data_k_diffusion = [ + sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) + for label, funcname, aliases, options in samplers_k_diffusion + if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler') +] + +sampler_extra_params = { + 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], +} + +k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} +k_diffusion_scheduler = { + 'Automatic': None, + 'karras': k_diffusion.sampling.get_sigmas_karras, + 'exponential': k_diffusion.sampling.get_sigmas_exponential, + 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential +} + + +def catenate_conds(conds): + if not isinstance(conds[0], dict): + return torch.cat(conds) + + return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} + + +def subscript_cond(cond, a, b): + if not isinstance(cond, dict): + return cond[a:b] + + return {key: vec[a:b] for key, vec in cond.items()} + + +def pad_cond(tensor, repeats, empty): 
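+    # Pads the token dimension of `tensor` with `repeats` copies of the empty-prompt
+    # embedding so that cond and uncond reach the same length and can be batched together;
+    # dict-valued conds are padded recursively on their 'crossattn' entry.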
+ if not isinstance(tensor, dict): + return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) + + tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) + return tensor + + +class CFGDenoiser(torch.nn.Module): + """ + Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) + that can take a noisy picture and produce a noise-free picture using two guidances (prompts) + instead of one. Originally, the second prompt is just an empty string, but we use non-empty + negative prompt. + """ + + def __init__(self, model): + super().__init__() + self.inner_model = model + self.mask = None + self.nmask = None + self.init_latent = None + self.step = 0 + self.image_cfg_scale = None + self.padded_cond_uncond = False + + def combine_denoised(self, x_out, conds_list, uncond, cond_scale): + denoised_uncond = x_out[-uncond.shape[0]:] + denoised = torch.clone(denoised_uncond) + + for i, conds in enumerate(conds_list): + for cond_index, weight in conds: + denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) + + return denoised + + def combine_denoised_for_edit_model(self, x_out, cond_scale): + out_cond, out_img_cond, out_uncond = x_out.chunk(3) + denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) + + return denoised + + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): + if state.interrupted or state.skipped: + raise sd_samplers_common.InterruptedException + + # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, + # so is_edit_model is set to False to support AND composition. + is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 + + conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) + uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) + + assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + + batch_size = len(conds_list) + repeats = [len(conds_list[i]) for i in range(batch_size)] + + if shared.sd_model.model.conditioning_key == "crossattn-adm": + image_uncond = torch.zeros_like(image_cond) + make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} + else: + image_uncond = image_cond + if isinstance(uncond, dict): + make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} + else: + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} + + if not is_edit_model: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) + else: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) + + denoiser_params = 
CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) + cfg_denoiser_callback(denoiser_params) + x_in = denoiser_params.x + image_cond_in = denoiser_params.image_cond + sigma_in = denoiser_params.sigma + tensor = denoiser_params.text_cond + uncond = denoiser_params.text_uncond + skip_uncond = False + + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: + skip_uncond = True + x_in = x_in[:-batch_size] + sigma_in = sigma_in[:-batch_size] + + self.padded_cond_uncond = False + if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: + empty = shared.sd_model.cond_stage_model_empty_prompt + num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] + + if num_repeats < 0: + tensor = pad_cond(tensor, -num_repeats, empty) + self.padded_cond_uncond = True + elif num_repeats > 0: + uncond = pad_cond(uncond, num_repeats, empty) + self.padded_cond_uncond = True + + if tensor.shape[1] == uncond.shape[1] or skip_uncond: + if is_edit_model: + cond_in = catenate_conds([tensor, uncond, uncond]) + elif skip_uncond: + cond_in = tensor + else: + cond_in = catenate_conds([tensor, uncond]) + + if shared.batch_cond_uncond: + x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) + else: + x_out = torch.zeros_like(x_in) + for batch_offset in range(0, x_out.shape[0], batch_size): + a = batch_offset + b = a + batch_size + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) + else: + x_out = torch.zeros_like(x_in) + batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size + for batch_offset in range(0, tensor.shape[0], batch_size): + a = batch_offset + b = min(a + batch_size, tensor.shape[0]) + + if not is_edit_model: + c_crossattn = subscript_cond(tensor, a, b) + else: + c_crossattn = torch.cat([tensor[a:b]], uncond) + + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) + + if not skip_uncond: + x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) + + denoised_image_indexes = [x[0][0] for x in conds_list] + if skip_uncond: + fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) + x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be + + denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) + cfg_denoised_callback(denoised_params) + + devices.test_for_nans(x_out, "unet") + + if opts.live_preview_content == "Prompt": + sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) + elif opts.live_preview_content == "Negative prompt": + sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) + + if is_edit_model: + denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) + elif skip_uncond: + denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) + else: + denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + + if self.mask is not None: + denoised = self.init_latent * self.mask + self.nmask * denoised + + after_cfg_callback_params = AfterCFGCallbackParams(denoised, 
state.sampling_step, state.sampling_steps) + cfg_after_cfg_callback(after_cfg_callback_params) + denoised = after_cfg_callback_params.x + + self.step += 1 + return denoised + + +class TorchHijack: + def __init__(self, sampler_noises): + # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based + # implementation. + self.sampler_noises = deque(sampler_noises) + + def __getattr__(self, item): + if item == 'randn_like': + return self.randn_like + + if hasattr(torch, item): + return getattr(torch, item) + + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") + + def randn_like(self, x): + if self.sampler_noises: + noise = self.sampler_noises.popleft() + if noise.shape == x.shape: + return noise + + if opts.randn_source == "CPU" or x.device.type == 'mps': + return torch.randn_like(x, device=devices.cpu).to(x.device) + else: + return torch.randn_like(x) + + +class KDiffusionSampler: + def __init__(self, funcname, sd_model): + denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser + + self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) + self.funcname = funcname + self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler + self.extra_params = sampler_extra_params.get(funcname, []) + self.model_wrap_cfg = CFGDenoiser(self.model_wrap) + self.sampler_noises = None + self.stop_at = None + self.eta = None + self.config = None # set by the function calling the constructor + self.last_latent = None + self.s_min_uncond = None + + self.conditioning_key = sd_model.model.conditioning_key + + def callback_state(self, d): + step = d['i'] + latent = d["denoised"] + if opts.live_preview_content == "Combined": + sd_samplers_common.store_latent(latent) + self.last_latent = latent + + if self.stop_at is not None and step > self.stop_at: + raise sd_samplers_common.InterruptedException + + state.sampling_step = step + shared.total_tqdm.update() + + def launch_sampling(self, steps, func): + state.sampling_steps = steps + state.sampling_step = 0 + + try: + return func() + except RecursionError: + print( + 'Encountered RecursionError during sampling, returning last latent. ' + 'rho >5 with a polyexponential scheduler may cause this error. ' + 'You should try to use a smaller rho value instead.' 
+ ) + return self.last_latent + except sd_samplers_common.InterruptedException: + return self.last_latent + + def number_of_needed_noises(self, p): + return p.steps + + def initialize(self, p): + self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None + self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None + self.model_wrap_cfg.step = 0 + self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) + self.eta = p.eta if p.eta is not None else opts.eta_ancestral + self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) + + k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) + + extra_params_kwargs = {} + for param_name in self.extra_params: + if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: + extra_params_kwargs[param_name] = getattr(p, param_name) + + if 'eta' in inspect.signature(self.func).parameters: + if self.eta != 1.0: + p.extra_generation_params["Eta"] = self.eta + + extra_params_kwargs['eta'] = self.eta + + return extra_params_kwargs + + def get_sigmas(self, p, steps): + discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) + if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: + discard_next_to_last_sigma = True + p.extra_generation_params["Discard penultimate sigma"] = True + + steps += 1 if discard_next_to_last_sigma else 0 + + if p.sampler_noise_scheduler_override: + sigmas = p.sampler_noise_scheduler_override(steps) + elif opts.k_sched_type != "Automatic": + m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) + sigmas_kwargs = { + 'sigma_min': sigma_min, + 'sigma_max': sigma_max, + } + + sigmas_func = k_diffusion_scheduler[opts.k_sched_type] + p.extra_generation_params["Schedule type"] = opts.k_sched_type + + if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: + sigmas_kwargs['sigma_min'] = opts.sigma_min + p.extra_generation_params["Schedule min sigma"] = opts.sigma_min + if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: + sigmas_kwargs['sigma_max'] = opts.sigma_max + p.extra_generation_params["Schedule max sigma"] = opts.sigma_max + + default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. 
+ + if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: + sigmas_kwargs['rho'] = opts.rho + p.extra_generation_params["Schedule rho"] = opts.rho + + sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) + elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + + sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) + else: + sigmas = self.model_wrap.get_sigmas(steps) + + if discard_next_to_last_sigma: + sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) + + return sigmas + + def create_noise_sampler(self, x, sigmas, p): + """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" + if shared.opts.no_dpmpp_sde_batch_determinism: + return None + + from k_diffusion.sampling import BrownianTreeNoiseSampler + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] + return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) + + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) + + sigmas = self.get_sigmas(p, steps) + + sigma_sched = sigmas[steps - t_enc - 1:] + xi = x + noise * sigma_sched[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: + ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last + extra_params_kwargs['sigma_min'] = sigma_sched[-2] + if 'sigma_max' in parameters: + extra_params_kwargs['sigma_max'] = sigma_sched[0] + if 'n' in parameters: + extra_params_kwargs['n'] = len(sigma_sched) - 1 + if 'sigma_sched' in parameters: + extra_params_kwargs['sigma_sched'] = sigma_sched + if 'sigmas' in parameters: + extra_params_kwargs['sigmas'] = sigma_sched + + if self.config.options.get('brownian_noise', False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.model_wrap_cfg.init_latent = x + self.last_latent = x + extra_args = { + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + } + + samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + + def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps = steps or p.steps + + sigmas = self.get_sigmas(p, steps) + + x = x * sigmas[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: + extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() + extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() + if 'n' in parameters: + extra_params_kwargs['n'] = steps + else: + extra_params_kwargs['sigmas'] = sigmas + + if self.config.options.get('brownian_noise', 
False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.last_latent = x + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + }, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + -- cgit v1.2.1 From 11dc92dc0ae8fe477539583e6a743e57a7cd69ad Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 29 Jul 2023 08:06:04 +0300 Subject: Split history: mv temp modules/sd_samplers_kdiffusion.py --- modules/sd_samplers_kdiffusion.py | 545 ++++++++++++++++++++++++++++++++++++++ temp | 545 -------------------------------------- 2 files changed, 545 insertions(+), 545 deletions(-) create mode 100644 modules/sd_samplers_kdiffusion.py delete mode 100644 temp diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py new file mode 100644 index 00000000..a54673eb --- /dev/null +++ b/modules/sd_samplers_kdiffusion.py @@ -0,0 +1,545 @@ +from collections import deque +import torch +import inspect +import k_diffusion.sampling +from modules import prompt_parser, devices, sd_samplers_common + +from modules.shared import opts, state +import modules.shared as shared +from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback +from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback +from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback + +samplers_k_diffusion = [ + ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), + ('Euler', 'sample_euler', ['k_euler'], {}), + ('LMS', 'sample_lms', ['k_lms'], {}), + ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), + ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), + ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), + ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), + ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), + ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), + ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), + ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), + ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), + ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), + ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', 
['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), + ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}), +] + + +@torch.no_grad() +def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): + """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" + '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' + '''If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list''' + from tqdm.auto import trange + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + step_id = 0 + from k_diffusion.sampling import to_d, get_sigmas_karras + def heun_step(x, old_sigma, new_sigma, second_order = True): + nonlocal step_id + denoised = model(x, old_sigma * s_in, **extra_args) + d = to_d(x, old_sigma, denoised) + if callback is not None: + callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) + dt = new_sigma - old_sigma + if new_sigma == 0 or not second_order: + # Euler method + x = x + d * dt + else: + # Heun's method + x_2 = x + d * dt + denoised_2 = model(x_2, new_sigma * s_in, **extra_args) + d_2 = to_d(x_2, new_sigma, denoised_2) + d_prime = (d + d_2) / 2 + x = x + d_prime * dt + step_id += 1 + return x + steps = sigmas.shape[0] - 1 + if restart_list is None: + if steps >= 20: + restart_steps = 9 + restart_times = 1 + if steps >= 36: + restart_steps = steps // 4 + restart_times = 2 + sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) + restart_list = {0.1: [restart_steps + 1, restart_times, 2]} + else: + restart_list = dict() + temp_list = dict() + for key, value in restart_list.items(): + temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value + restart_list = temp_list + step_list = [] + for i in range(len(sigmas) - 1): + step_list.append((sigmas[i], sigmas[i + 1])) + if i + 1 in restart_list: + restart_steps, restart_times, restart_max = restart_list[i + 1] + min_idx = i + 1 + max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) + if max_idx < min_idx: + sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] + while restart_times > 0: + restart_times -= 1 + step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) + last_sigma = None + for i in trange(len(step_list), disable=disable): + if last_sigma is None: + last_sigma = step_list[i][0] + elif last_sigma < step_list[i][0]: + x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (step_list[i][0] ** 2 - last_sigma ** 2) ** 0.5 + x = heun_step(x, step_list[i][0], step_list[i][1]) + last_sigma = step_list[i][1] + return x + +samplers_data_k_diffusion = [ + sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) + for label, funcname, aliases, options in samplers_k_diffusion + if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler') +] + +sampler_extra_params = { + 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], +} + +k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} 
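+# Maps the "Schedule type" option to a k-diffusion sigma-schedule builder. All three
+# builders share the call shape used by get_sigmas() below, e.g. (values illustrative):
+#     sigmas = k_diffusion_scheduler['karras'](n=10, sigma_min=0.1, sigma_max=10.0, device='cpu')
+# which returns a descending tensor of 11 sigmas ending in 0.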
+k_diffusion_scheduler = { + 'Automatic': None, + 'karras': k_diffusion.sampling.get_sigmas_karras, + 'exponential': k_diffusion.sampling.get_sigmas_exponential, + 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential +} + + +def catenate_conds(conds): + if not isinstance(conds[0], dict): + return torch.cat(conds) + + return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} + + +def subscript_cond(cond, a, b): + if not isinstance(cond, dict): + return cond[a:b] + + return {key: vec[a:b] for key, vec in cond.items()} + + +def pad_cond(tensor, repeats, empty): + if not isinstance(tensor, dict): + return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) + + tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) + return tensor + + +class CFGDenoiser(torch.nn.Module): + """ + Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) + that can take a noisy picture and produce a noise-free picture using two guidances (prompts) + instead of one. Originally, the second prompt is just an empty string, but we use non-empty + negative prompt. + """ + + def __init__(self, model): + super().__init__() + self.inner_model = model + self.mask = None + self.nmask = None + self.init_latent = None + self.step = 0 + self.image_cfg_scale = None + self.padded_cond_uncond = False + + def combine_denoised(self, x_out, conds_list, uncond, cond_scale): + denoised_uncond = x_out[-uncond.shape[0]:] + denoised = torch.clone(denoised_uncond) + + for i, conds in enumerate(conds_list): + for cond_index, weight in conds: + denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) + + return denoised + + def combine_denoised_for_edit_model(self, x_out, cond_scale): + out_cond, out_img_cond, out_uncond = x_out.chunk(3) + denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) + + return denoised + + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): + if state.interrupted or state.skipped: + raise sd_samplers_common.InterruptedException + + # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, + # so is_edit_model is set to False to support AND composition. 
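+        # The "edit" cond_stage_key marks InstructPix2Pix-style checkpoints, which are denoised
+        # with three guidance branches (cond, image cond, uncond) rather than the usual two;
+        # combine_denoised_for_edit_model above chunks x_out into exactly those three parts.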
+ is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 + + conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) + uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) + + assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + + batch_size = len(conds_list) + repeats = [len(conds_list[i]) for i in range(batch_size)] + + if shared.sd_model.model.conditioning_key == "crossattn-adm": + image_uncond = torch.zeros_like(image_cond) + make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} + else: + image_uncond = image_cond + if isinstance(uncond, dict): + make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} + else: + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} + + if not is_edit_model: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) + else: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) + + denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) + cfg_denoiser_callback(denoiser_params) + x_in = denoiser_params.x + image_cond_in = denoiser_params.image_cond + sigma_in = denoiser_params.sigma + tensor = denoiser_params.text_cond + uncond = denoiser_params.text_uncond + skip_uncond = False + + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: + skip_uncond = True + x_in = x_in[:-batch_size] + sigma_in = sigma_in[:-batch_size] + + self.padded_cond_uncond = False + if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: + empty = shared.sd_model.cond_stage_model_empty_prompt + num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] + + if num_repeats < 0: + tensor = pad_cond(tensor, -num_repeats, empty) + self.padded_cond_uncond = True + elif num_repeats > 0: + uncond = pad_cond(uncond, num_repeats, empty) + self.padded_cond_uncond = True + + if tensor.shape[1] == uncond.shape[1] or skip_uncond: + if is_edit_model: + cond_in = catenate_conds([tensor, uncond, uncond]) + elif skip_uncond: + cond_in = tensor + else: + cond_in = catenate_conds([tensor, uncond]) + + if shared.batch_cond_uncond: + x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) + else: + x_out = torch.zeros_like(x_in) + for batch_offset in range(0, x_out.shape[0], batch_size): + a = batch_offset + b = a + batch_size + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) + else: + x_out = 
torch.zeros_like(x_in)
+            batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
+            for batch_offset in range(0, tensor.shape[0], batch_size):
+                a = batch_offset
+                b = min(a + batch_size, tensor.shape[0])
+
+                if not is_edit_model:
+                    c_crossattn = subscript_cond(tensor, a, b)
+                else:
+                    c_crossattn = catenate_conds([tensor[a:b], uncond])
+
+                x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
+
+            if not skip_uncond:
+                x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
+
+        denoised_image_indexes = [x[0][0] for x in conds_list]
+        if skip_uncond:
+            fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
+            x_out = torch.cat([x_out, fake_uncond])  # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
+
+        denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
+        cfg_denoised_callback(denoised_params)
+
+        devices.test_for_nans(x_out, "unet")
+
+        if opts.live_preview_content == "Prompt":
+            sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes]))
+        elif opts.live_preview_content == "Negative prompt":
+            sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
+
+        if is_edit_model:
+            denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
+        elif skip_uncond:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
+        else:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+
+        if self.mask is not None:
+            denoised = self.init_latent * self.mask + self.nmask * denoised
+
+        after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
+        cfg_after_cfg_callback(after_cfg_callback_params)
+        denoised = after_cfg_callback_params.x
+
+        self.step += 1
+        return denoised
+
+
+class TorchHijack:
+    def __init__(self, sampler_noises):
+        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
+        # implementation.
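+        # The noises are pre-generated by the caller (typically one tensor per sampling step)
+        # and consumed FIFO by randn_like(); if the queue is empty or a popped tensor's shape
+        # does not match, randn_like() falls back to generating fresh noise.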
+ self.sampler_noises = deque(sampler_noises) + + def __getattr__(self, item): + if item == 'randn_like': + return self.randn_like + + if hasattr(torch, item): + return getattr(torch, item) + + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") + + def randn_like(self, x): + if self.sampler_noises: + noise = self.sampler_noises.popleft() + if noise.shape == x.shape: + return noise + + if opts.randn_source == "CPU" or x.device.type == 'mps': + return torch.randn_like(x, device=devices.cpu).to(x.device) + else: + return torch.randn_like(x) + + +class KDiffusionSampler: + def __init__(self, funcname, sd_model): + denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser + + self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) + self.funcname = funcname + self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler + self.extra_params = sampler_extra_params.get(funcname, []) + self.model_wrap_cfg = CFGDenoiser(self.model_wrap) + self.sampler_noises = None + self.stop_at = None + self.eta = None + self.config = None # set by the function calling the constructor + self.last_latent = None + self.s_min_uncond = None + + self.conditioning_key = sd_model.model.conditioning_key + + def callback_state(self, d): + step = d['i'] + latent = d["denoised"] + if opts.live_preview_content == "Combined": + sd_samplers_common.store_latent(latent) + self.last_latent = latent + + if self.stop_at is not None and step > self.stop_at: + raise sd_samplers_common.InterruptedException + + state.sampling_step = step + shared.total_tqdm.update() + + def launch_sampling(self, steps, func): + state.sampling_steps = steps + state.sampling_step = 0 + + try: + return func() + except RecursionError: + print( + 'Encountered RecursionError during sampling, returning last latent. ' + 'rho >5 with a polyexponential scheduler may cause this error. ' + 'You should try to use a smaller rho value instead.' 
+ ) + return self.last_latent + except sd_samplers_common.InterruptedException: + return self.last_latent + + def number_of_needed_noises(self, p): + return p.steps + + def initialize(self, p): + self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None + self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None + self.model_wrap_cfg.step = 0 + self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) + self.eta = p.eta if p.eta is not None else opts.eta_ancestral + self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) + + k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) + + extra_params_kwargs = {} + for param_name in self.extra_params: + if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: + extra_params_kwargs[param_name] = getattr(p, param_name) + + if 'eta' in inspect.signature(self.func).parameters: + if self.eta != 1.0: + p.extra_generation_params["Eta"] = self.eta + + extra_params_kwargs['eta'] = self.eta + + return extra_params_kwargs + + def get_sigmas(self, p, steps): + discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) + if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: + discard_next_to_last_sigma = True + p.extra_generation_params["Discard penultimate sigma"] = True + + steps += 1 if discard_next_to_last_sigma else 0 + + if p.sampler_noise_scheduler_override: + sigmas = p.sampler_noise_scheduler_override(steps) + elif opts.k_sched_type != "Automatic": + m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) + sigmas_kwargs = { + 'sigma_min': sigma_min, + 'sigma_max': sigma_max, + } + + sigmas_func = k_diffusion_scheduler[opts.k_sched_type] + p.extra_generation_params["Schedule type"] = opts.k_sched_type + + if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: + sigmas_kwargs['sigma_min'] = opts.sigma_min + p.extra_generation_params["Schedule min sigma"] = opts.sigma_min + if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: + sigmas_kwargs['sigma_max'] = opts.sigma_max + p.extra_generation_params["Schedule max sigma"] = opts.sigma_max + + default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. 
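+            # rho shapes the sigma curve: k-diffusion defaults to 7.0 for the Karras schedule
+            # and 1.0 for polyexponential; the exponential schedule takes no rho at all, so a
+            # user-set rho is only recorded and forwarded when it differs and actually applies.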
+ + if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: + sigmas_kwargs['rho'] = opts.rho + p.extra_generation_params["Schedule rho"] = opts.rho + + sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) + elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + + sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) + else: + sigmas = self.model_wrap.get_sigmas(steps) + + if discard_next_to_last_sigma: + sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) + + return sigmas + + def create_noise_sampler(self, x, sigmas, p): + """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" + if shared.opts.no_dpmpp_sde_batch_determinism: + return None + + from k_diffusion.sampling import BrownianTreeNoiseSampler + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] + return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) + + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) + + sigmas = self.get_sigmas(p, steps) + + sigma_sched = sigmas[steps - t_enc - 1:] + xi = x + noise * sigma_sched[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: + ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last + extra_params_kwargs['sigma_min'] = sigma_sched[-2] + if 'sigma_max' in parameters: + extra_params_kwargs['sigma_max'] = sigma_sched[0] + if 'n' in parameters: + extra_params_kwargs['n'] = len(sigma_sched) - 1 + if 'sigma_sched' in parameters: + extra_params_kwargs['sigma_sched'] = sigma_sched + if 'sigmas' in parameters: + extra_params_kwargs['sigmas'] = sigma_sched + + if self.config.options.get('brownian_noise', False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.model_wrap_cfg.init_latent = x + self.last_latent = x + extra_args = { + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + } + + samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + + def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps = steps or p.steps + + sigmas = self.get_sigmas(p, steps) + + x = x * sigmas[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: + extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() + extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() + if 'n' in parameters: + extra_params_kwargs['n'] = steps + else: + extra_params_kwargs['sigmas'] = sigmas + + if self.config.options.get('brownian_noise', 
False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.last_latent = x + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + }, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + diff --git a/temp b/temp deleted file mode 100644 index a54673eb..00000000 --- a/temp +++ /dev/null @@ -1,545 +0,0 @@ -from collections import deque -import torch -import inspect -import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common - -from modules.shared import opts, state -import modules.shared as shared -from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback -from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback -from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback - -samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), - ('Euler', 'sample_euler', ['k_euler'], {}), - ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}), -] - - -@torch.no_grad() -def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): - """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" - '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' - '''If restart_list is None: will choose restart_list automatically, otherwise will use the 
given restart_list''' - from tqdm.auto import trange - extra_args = {} if extra_args is None else extra_args - s_in = x.new_ones([x.shape[0]]) - step_id = 0 - from k_diffusion.sampling import to_d, get_sigmas_karras - def heun_step(x, old_sigma, new_sigma, second_order = True): - nonlocal step_id - denoised = model(x, old_sigma * s_in, **extra_args) - d = to_d(x, old_sigma, denoised) - if callback is not None: - callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) - dt = new_sigma - old_sigma - if new_sigma == 0 or not second_order: - # Euler method - x = x + d * dt - else: - # Heun's method - x_2 = x + d * dt - denoised_2 = model(x_2, new_sigma * s_in, **extra_args) - d_2 = to_d(x_2, new_sigma, denoised_2) - d_prime = (d + d_2) / 2 - x = x + d_prime * dt - step_id += 1 - return x - steps = sigmas.shape[0] - 1 - if restart_list is None: - if steps >= 20: - restart_steps = 9 - restart_times = 1 - if steps >= 36: - restart_steps = steps // 4 - restart_times = 2 - sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) - restart_list = {0.1: [restart_steps + 1, restart_times, 2]} - else: - restart_list = dict() - temp_list = dict() - for key, value in restart_list.items(): - temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value - restart_list = temp_list - step_list = [] - for i in range(len(sigmas) - 1): - step_list.append((sigmas[i], sigmas[i + 1])) - if i + 1 in restart_list: - restart_steps, restart_times, restart_max = restart_list[i + 1] - min_idx = i + 1 - max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) - if max_idx < min_idx: - sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] - while restart_times > 0: - restart_times -= 1 - step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) - last_sigma = None - for i in trange(len(step_list), disable=disable): - if last_sigma is None: - last_sigma = step_list[i][0] - elif last_sigma < step_list[i][0]: - x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (step_list[i][0] ** 2 - last_sigma ** 2) ** 0.5 - x = heun_step(x, step_list[i][0], step_list[i][1]) - last_sigma = step_list[i][1] - return x - -samplers_data_k_diffusion = [ - sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) - for label, funcname, aliases, options in samplers_k_diffusion - if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler') -] - -sampler_extra_params = { - 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], -} - -k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} -k_diffusion_scheduler = { - 'Automatic': None, - 'karras': k_diffusion.sampling.get_sigmas_karras, - 'exponential': k_diffusion.sampling.get_sigmas_exponential, - 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential -} - - -def catenate_conds(conds): - if not isinstance(conds[0], dict): - return torch.cat(conds) - - return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} - - -def subscript_cond(cond, a, b): - if not isinstance(cond, dict): - return cond[a:b] - - return {key: vec[a:b] for key, vec in cond.items()} - - -def pad_cond(tensor, repeats, empty): 
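# Note: pad_cond pads a conditioning tensor of shape (batch, tokens, dim) along the
# token axis with `repeats` copies of the empty-prompt embedding so that cond and uncond
# reach the same token count and can be evaluated in a single batch; for dict conds
# (as produced by SDXL's conditioner), only the 'crossattn' entry is padded.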
- if not isinstance(tensor, dict): - return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) - - tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) - return tensor - - -class CFGDenoiser(torch.nn.Module): - """ - Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) - that can take a noisy picture and produce a noise-free picture using two guidances (prompts) - instead of one. Originally, the second prompt is just an empty string, but we use non-empty - negative prompt. - """ - - def __init__(self, model): - super().__init__() - self.inner_model = model - self.mask = None - self.nmask = None - self.init_latent = None - self.step = 0 - self.image_cfg_scale = None - self.padded_cond_uncond = False - - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): - denoised_uncond = x_out[-uncond.shape[0]:] - denoised = torch.clone(denoised_uncond) - - for i, conds in enumerate(conds_list): - for cond_index, weight in conds: - denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) - - return denoised - - def combine_denoised_for_edit_model(self, x_out, cond_scale): - out_cond, out_img_cond, out_uncond = x_out.chunk(3) - denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) - - return denoised - - def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, - # so is_edit_model is set to False to support AND composition. - is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - - assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - - batch_size = len(conds_list) - repeats = [len(conds_list[i]) for i in range(batch_size)] - - if shared.sd_model.model.conditioning_key == "crossattn-adm": - image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} - else: - image_uncond = image_cond - if isinstance(uncond, dict): - make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} - else: - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} - - if not is_edit_model: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) - else: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) - - denoiser_params = 
CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) - cfg_denoiser_callback(denoiser_params) - x_in = denoiser_params.x - image_cond_in = denoiser_params.image_cond - sigma_in = denoiser_params.sigma - tensor = denoiser_params.text_cond - uncond = denoiser_params.text_uncond - skip_uncond = False - - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: - skip_uncond = True - x_in = x_in[:-batch_size] - sigma_in = sigma_in[:-batch_size] - - self.padded_cond_uncond = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - empty = shared.sd_model.cond_stage_model_empty_prompt - num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] - - if num_repeats < 0: - tensor = pad_cond(tensor, -num_repeats, empty) - self.padded_cond_uncond = True - elif num_repeats > 0: - uncond = pad_cond(uncond, num_repeats, empty) - self.padded_cond_uncond = True - - if tensor.shape[1] == uncond.shape[1] or skip_uncond: - if is_edit_model: - cond_in = catenate_conds([tensor, uncond, uncond]) - elif skip_uncond: - cond_in = tensor - else: - cond_in = catenate_conds([tensor, uncond]) - - if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) - else: - x_out = torch.zeros_like(x_in) - for batch_offset in range(0, x_out.shape[0], batch_size): - a = batch_offset - b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) - else: - x_out = torch.zeros_like(x_in) - batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size - for batch_offset in range(0, tensor.shape[0], batch_size): - a = batch_offset - b = min(a + batch_size, tensor.shape[0]) - - if not is_edit_model: - c_crossattn = subscript_cond(tensor, a, b) - else: - c_crossattn = torch.cat([tensor[a:b]], uncond) - - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - - if not skip_uncond: - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) - - denoised_image_indexes = [x[0][0] for x in conds_list] - if skip_uncond: - fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) - x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be - - denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) - cfg_denoised_callback(denoised_params) - - devices.test_for_nans(x_out, "unet") - - if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) - elif opts.live_preview_content == "Negative prompt": - sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - - if is_edit_model: - denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) - elif skip_uncond: - denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) - else: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - - if self.mask is not None: - denoised = self.init_latent * self.mask + self.nmask * denoised - - after_cfg_callback_params = AfterCFGCallbackParams(denoised, 
state.sampling_step, state.sampling_steps) - cfg_after_cfg_callback(after_cfg_callback_params) - denoised = after_cfg_callback_params.x - - self.step += 1 - return denoised - - -class TorchHijack: - def __init__(self, sampler_noises): - # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. - self.sampler_noises = deque(sampler_noises) - - def __getattr__(self, item): - if item == 'randn_like': - return self.randn_like - - if hasattr(torch, item): - return getattr(torch, item) - - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") - - def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise - - if opts.randn_source == "CPU" or x.device.type == 'mps': - return torch.randn_like(x, device=devices.cpu).to(x.device) - else: - return torch.randn_like(x) - - -class KDiffusionSampler: - def __init__(self, funcname, sd_model): - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.funcname = funcname - self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler - self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser(self.model_wrap) - self.sampler_noises = None - self.stop_at = None - self.eta = None - self.config = None # set by the function calling the constructor - self.last_latent = None - self.s_min_uncond = None - - self.conditioning_key = sd_model.model.conditioning_key - - def callback_state(self, d): - step = d['i'] - latent = d["denoised"] - if opts.live_preview_content == "Combined": - sd_samplers_common.store_latent(latent) - self.last_latent = latent - - if self.stop_at is not None and step > self.stop_at: - raise sd_samplers_common.InterruptedException - - state.sampling_step = step - shared.total_tqdm.update() - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except RecursionError: - print( - 'Encountered RecursionError during sampling, returning last latent. ' - 'rho >5 with a polyexponential scheduler may cause this error. ' - 'You should try to use a smaller rho value instead.' 
- ) - return self.last_latent - except sd_samplers_common.InterruptedException: - return self.last_latent - - def number_of_needed_noises(self, p): - return p.steps - - def initialize(self, p): - self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None - self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None - self.model_wrap_cfg.step = 0 - self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.eta = p.eta if p.eta is not None else opts.eta_ancestral - self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) - - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) - - extra_params_kwargs = {} - for param_name in self.extra_params: - if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: - extra_params_kwargs[param_name] = getattr(p, param_name) - - if 'eta' in inspect.signature(self.func).parameters: - if self.eta != 1.0: - p.extra_generation_params["Eta"] = self.eta - - extra_params_kwargs['eta'] = self.eta - - return extra_params_kwargs - - def get_sigmas(self, p, steps): - discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) - if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: - discard_next_to_last_sigma = True - p.extra_generation_params["Discard penultimate sigma"] = True - - steps += 1 if discard_next_to_last_sigma else 0 - - if p.sampler_noise_scheduler_override: - sigmas = p.sampler_noise_scheduler_override(steps) - elif opts.k_sched_type != "Automatic": - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) - sigmas_kwargs = { - 'sigma_min': sigma_min, - 'sigma_max': sigma_max, - } - - sigmas_func = k_diffusion_scheduler[opts.k_sched_type] - p.extra_generation_params["Schedule type"] = opts.k_sched_type - - if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: - sigmas_kwargs['sigma_min'] = opts.sigma_min - p.extra_generation_params["Schedule min sigma"] = opts.sigma_min - if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: - sigmas_kwargs['sigma_max'] = opts.sigma_max - p.extra_generation_params["Schedule max sigma"] = opts.sigma_max - - default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. 
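# Note: k-diffusion's get_sigmas_karras defaults to rho=7 and get_sigmas_polyexponential
# to rho=1, so the branch below only forwards the user's rho (and records "Schedule rho"
# in the infotext) when it is non-zero and differs from the active scheduler's default;
# the exponential schedule takes no rho parameter at all.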
- - if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: - sigmas_kwargs['rho'] = opts.rho - p.extra_generation_params["Schedule rho"] = opts.rho - - sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) - else: - sigmas = self.model_wrap.get_sigmas(steps) - - if discard_next_to_last_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - - return sigmas - - def create_noise_sampler(self, x, sigmas, p): - """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" - if shared.opts.no_dpmpp_sde_batch_determinism: - return None - - from k_diffusion.sampling import BrownianTreeNoiseSampler - sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) - - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) - - sigmas = self.get_sigmas(p, steps) - - sigma_sched = sigmas[steps - t_enc - 1:] - xi = x + noise * sigma_sched[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last - extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in parameters: - extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in parameters: - extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in parameters: - extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in parameters: - extra_params_kwargs['sigmas'] = sigma_sched - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.model_wrap_cfg.init_latent = x - self.last_latent = x - extra_args = { - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - } - - samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps = steps or p.steps - - sigmas = self.get_sigmas(p, steps) - - x = x * sigmas[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() - extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in parameters: - extra_params_kwargs['n'] = steps - else: - extra_params_kwargs['sigmas'] = sigmas - - if self.config.options.get('brownian_noise', 
False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.last_latent = x - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - }, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - -- cgit v1.2.1 From aefe1325df60a925b3a75a2cb58bf74e8ca86df4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 29 Jul 2023 08:11:59 +0300 Subject: split the new sampler into a different file --- modules/sd_samplers_extra.py | 475 -------------------------------------- modules/sd_samplers_kdiffusion.py | 75 +----- 2 files changed, 4 insertions(+), 546 deletions(-) diff --git a/modules/sd_samplers_extra.py b/modules/sd_samplers_extra.py index a54673eb..a1b5dab3 100644 --- a/modules/sd_samplers_extra.py +++ b/modules/sd_samplers_extra.py @@ -1,38 +1,5 @@ -from collections import deque import torch -import inspect import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common - -from modules.shared import opts, state -import modules.shared as shared -from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback -from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback -from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback - -samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), - ('Euler', 'sample_euler', ['k_euler'], {}), - ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('Restart (new)', 'restart_sampler', ['restart'], 
{'scheduler': 'karras', "second_order": True}), -] - @torch.no_grad() def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): @@ -101,445 +68,3 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No last_sigma = step_list[i][1] return x -samplers_data_k_diffusion = [ - sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) - for label, funcname, aliases, options in samplers_k_diffusion - if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler') -] - -sampler_extra_params = { - 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], -} - -k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} -k_diffusion_scheduler = { - 'Automatic': None, - 'karras': k_diffusion.sampling.get_sigmas_karras, - 'exponential': k_diffusion.sampling.get_sigmas_exponential, - 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential -} - - -def catenate_conds(conds): - if not isinstance(conds[0], dict): - return torch.cat(conds) - - return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} - - -def subscript_cond(cond, a, b): - if not isinstance(cond, dict): - return cond[a:b] - - return {key: vec[a:b] for key, vec in cond.items()} - - -def pad_cond(tensor, repeats, empty): - if not isinstance(tensor, dict): - return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) - - tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) - return tensor - - -class CFGDenoiser(torch.nn.Module): - """ - Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) - that can take a noisy picture and produce a noise-free picture using two guidances (prompts) - instead of one. Originally, the second prompt is just an empty string, but we use non-empty - negative prompt. - """ - - def __init__(self, model): - super().__init__() - self.inner_model = model - self.mask = None - self.nmask = None - self.init_latent = None - self.step = 0 - self.image_cfg_scale = None - self.padded_cond_uncond = False - - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): - denoised_uncond = x_out[-uncond.shape[0]:] - denoised = torch.clone(denoised_uncond) - - for i, conds in enumerate(conds_list): - for cond_index, weight in conds: - denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) - - return denoised - - def combine_denoised_for_edit_model(self, x_out, cond_scale): - out_cond, out_img_cond, out_uncond = x_out.chunk(3) - denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) - - return denoised - - def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, - # so is_edit_model is set to False to support AND composition. 
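# Note: the "edit" conditioning key identifies InstructPix2Pix-style checkpoints, which
# denoise three batches (prompt cond, image cond, uncond) and blend them as
#   denoised = uncond + cond_scale * (cond - img_cond) + image_cfg_scale * (img_cond - uncond)
# in combine_denoised_for_edit_model above; ordinary checkpoints use plain two-way CFG.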
- is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - - assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - - batch_size = len(conds_list) - repeats = [len(conds_list[i]) for i in range(batch_size)] - - if shared.sd_model.model.conditioning_key == "crossattn-adm": - image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} - else: - image_uncond = image_cond - if isinstance(uncond, dict): - make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} - else: - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} - - if not is_edit_model: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) - else: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) - - denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) - cfg_denoiser_callback(denoiser_params) - x_in = denoiser_params.x - image_cond_in = denoiser_params.image_cond - sigma_in = denoiser_params.sigma - tensor = denoiser_params.text_cond - uncond = denoiser_params.text_uncond - skip_uncond = False - - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: - skip_uncond = True - x_in = x_in[:-batch_size] - sigma_in = sigma_in[:-batch_size] - - self.padded_cond_uncond = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - empty = shared.sd_model.cond_stage_model_empty_prompt - num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] - - if num_repeats < 0: - tensor = pad_cond(tensor, -num_repeats, empty) - self.padded_cond_uncond = True - elif num_repeats > 0: - uncond = pad_cond(uncond, num_repeats, empty) - self.padded_cond_uncond = True - - if tensor.shape[1] == uncond.shape[1] or skip_uncond: - if is_edit_model: - cond_in = catenate_conds([tensor, uncond, uncond]) - elif skip_uncond: - cond_in = tensor - else: - cond_in = catenate_conds([tensor, uncond]) - - if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) - else: - x_out = torch.zeros_like(x_in) - for batch_offset in range(0, x_out.shape[0], batch_size): - a = batch_offset - b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) - else: - x_out = 
torch.zeros_like(x_in) - batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size - for batch_offset in range(0, tensor.shape[0], batch_size): - a = batch_offset - b = min(a + batch_size, tensor.shape[0]) - - if not is_edit_model: - c_crossattn = subscript_cond(tensor, a, b) - else: - c_crossattn = torch.cat([tensor[a:b]], uncond) - - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - - if not skip_uncond: - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) - - denoised_image_indexes = [x[0][0] for x in conds_list] - if skip_uncond: - fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) - x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be - - denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) - cfg_denoised_callback(denoised_params) - - devices.test_for_nans(x_out, "unet") - - if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) - elif opts.live_preview_content == "Negative prompt": - sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - - if is_edit_model: - denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) - elif skip_uncond: - denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) - else: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - - if self.mask is not None: - denoised = self.init_latent * self.mask + self.nmask * denoised - - after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) - cfg_after_cfg_callback(after_cfg_callback_params) - denoised = after_cfg_callback_params.x - - self.step += 1 - return denoised - - -class TorchHijack: - def __init__(self, sampler_noises): - # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. 
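# Note: initialize() assigns an instance of this class to k_diffusion.sampling.torch, so
# randn_like calls made inside the sampling library are served from this queue of
# pre-generated noises first (keeping ancestral/SDE samplers reproducible for a given
# seed) and only fall back to torch.randn_like, optionally generated on CPU, once the
# queue is exhausted.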
- self.sampler_noises = deque(sampler_noises) - - def __getattr__(self, item): - if item == 'randn_like': - return self.randn_like - - if hasattr(torch, item): - return getattr(torch, item) - - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") - - def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise - - if opts.randn_source == "CPU" or x.device.type == 'mps': - return torch.randn_like(x, device=devices.cpu).to(x.device) - else: - return torch.randn_like(x) - - -class KDiffusionSampler: - def __init__(self, funcname, sd_model): - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.funcname = funcname - self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler - self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser(self.model_wrap) - self.sampler_noises = None - self.stop_at = None - self.eta = None - self.config = None # set by the function calling the constructor - self.last_latent = None - self.s_min_uncond = None - - self.conditioning_key = sd_model.model.conditioning_key - - def callback_state(self, d): - step = d['i'] - latent = d["denoised"] - if opts.live_preview_content == "Combined": - sd_samplers_common.store_latent(latent) - self.last_latent = latent - - if self.stop_at is not None and step > self.stop_at: - raise sd_samplers_common.InterruptedException - - state.sampling_step = step - shared.total_tqdm.update() - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except RecursionError: - print( - 'Encountered RecursionError during sampling, returning last latent. ' - 'rho >5 with a polyexponential scheduler may cause this error. ' - 'You should try to use a smaller rho value instead.' 
- ) - return self.last_latent - except sd_samplers_common.InterruptedException: - return self.last_latent - - def number_of_needed_noises(self, p): - return p.steps - - def initialize(self, p): - self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None - self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None - self.model_wrap_cfg.step = 0 - self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.eta = p.eta if p.eta is not None else opts.eta_ancestral - self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) - - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) - - extra_params_kwargs = {} - for param_name in self.extra_params: - if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: - extra_params_kwargs[param_name] = getattr(p, param_name) - - if 'eta' in inspect.signature(self.func).parameters: - if self.eta != 1.0: - p.extra_generation_params["Eta"] = self.eta - - extra_params_kwargs['eta'] = self.eta - - return extra_params_kwargs - - def get_sigmas(self, p, steps): - discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) - if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: - discard_next_to_last_sigma = True - p.extra_generation_params["Discard penultimate sigma"] = True - - steps += 1 if discard_next_to_last_sigma else 0 - - if p.sampler_noise_scheduler_override: - sigmas = p.sampler_noise_scheduler_override(steps) - elif opts.k_sched_type != "Automatic": - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) - sigmas_kwargs = { - 'sigma_min': sigma_min, - 'sigma_max': sigma_max, - } - - sigmas_func = k_diffusion_scheduler[opts.k_sched_type] - p.extra_generation_params["Schedule type"] = opts.k_sched_type - - if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: - sigmas_kwargs['sigma_min'] = opts.sigma_min - p.extra_generation_params["Schedule min sigma"] = opts.sigma_min - if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: - sigmas_kwargs['sigma_max'] = opts.sigma_max - p.extra_generation_params["Schedule max sigma"] = opts.sigma_max - - default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. 
- - if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: - sigmas_kwargs['rho'] = opts.rho - p.extra_generation_params["Schedule rho"] = opts.rho - - sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) - else: - sigmas = self.model_wrap.get_sigmas(steps) - - if discard_next_to_last_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - - return sigmas - - def create_noise_sampler(self, x, sigmas, p): - """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" - if shared.opts.no_dpmpp_sde_batch_determinism: - return None - - from k_diffusion.sampling import BrownianTreeNoiseSampler - sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) - - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) - - sigmas = self.get_sigmas(p, steps) - - sigma_sched = sigmas[steps - t_enc - 1:] - xi = x + noise * sigma_sched[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last - extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in parameters: - extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in parameters: - extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in parameters: - extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in parameters: - extra_params_kwargs['sigmas'] = sigma_sched - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.model_wrap_cfg.init_latent = x - self.last_latent = x - extra_args = { - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - } - - samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps = steps or p.steps - - sigmas = self.get_sigmas(p, steps) - - x = x * sigmas[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() - extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in parameters: - extra_params_kwargs['n'] = steps - else: - extra_params_kwargs['sigmas'] = sigmas - - if self.config.options.get('brownian_noise', 
False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.last_latent = x - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - }, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index a54673eb..e0da3425 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -2,7 +2,7 @@ from collections import deque import torch import inspect import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common +from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra from modules.shared import opts, state import modules.shared as shared @@ -30,81 +30,14 @@ samplers_k_diffusion = [ ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('Restart (new)', 'restart_sampler', ['restart'], {'scheduler': 'karras', "second_order": True}), + ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}), ] -@torch.no_grad() -def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): - """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" - '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' - '''If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list''' - from tqdm.auto import trange - extra_args = {} if extra_args is None else extra_args - s_in = x.new_ones([x.shape[0]]) - step_id = 0 - from k_diffusion.sampling import to_d, get_sigmas_karras - def heun_step(x, old_sigma, new_sigma, second_order = True): - nonlocal step_id - denoised = model(x, old_sigma * s_in, **extra_args) - d = to_d(x, old_sigma, denoised) - if callback is not None: - callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) - dt = new_sigma - old_sigma - if new_sigma == 0 or not second_order: - # Euler method - x = x + d * dt - else: - # Heun's method - x_2 = x + d * dt - denoised_2 = model(x_2, new_sigma * s_in, **extra_args) - d_2 = to_d(x_2, new_sigma, denoised_2) - d_prime = (d + d_2) / 2 - x = x + d_prime * dt - step_id += 1 - return x - steps = sigmas.shape[0] - 1 - if restart_list is None: - if steps >= 20: - restart_steps = 9 - restart_times = 1 - if steps >= 36: - restart_steps = steps // 4 - restart_times = 2 - sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) - restart_list = {0.1: [restart_steps + 1, restart_times, 2]} - else: - restart_list = dict() - temp_list = dict() - for key, value in restart_list.items(): - temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value - restart_list = temp_list - step_list = [] - for i in range(len(sigmas) - 1): - 
step_list.append((sigmas[i], sigmas[i + 1])) - if i + 1 in restart_list: - restart_steps, restart_times, restart_max = restart_list[i + 1] - min_idx = i + 1 - max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) - if max_idx < min_idx: - sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] - while restart_times > 0: - restart_times -= 1 - step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) - last_sigma = None - for i in trange(len(step_list), disable=disable): - if last_sigma is None: - last_sigma = step_list[i][0] - elif last_sigma < step_list[i][0]: - x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (step_list[i][0] ** 2 - last_sigma ** 2) ** 0.5 - x = heun_step(x, step_list[i][0], step_list[i][1]) - last_sigma = step_list[i][1] - return x - samplers_data_k_diffusion = [ sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) for label, funcname, aliases, options in samplers_k_diffusion - if (hasattr(k_diffusion.sampling, funcname) or funcname == 'restart_sampler') + if callable(funcname) or hasattr(k_diffusion.sampling, funcname) ] sampler_extra_params = { @@ -339,7 +272,7 @@ class KDiffusionSampler: self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) self.funcname = funcname - self.func = getattr(k_diffusion.sampling, self.funcname) if funcname != "restart_sampler" else restart_sampler + self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) self.extra_params = sampler_extra_params.get(funcname, []) self.model_wrap_cfg = CFGDenoiser(self.model_wrap) self.sampler_noises = None -- cgit v1.2.1 From 79d6e9cd325353b5c6a02f8374494f781760d211 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 29 Jul 2023 08:38:00 +0300 Subject: some stylistic changes for the sampler code --- modules/sd_samplers_extra.py | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/modules/sd_samplers_extra.py b/modules/sd_samplers_extra.py index a1b5dab3..1b981ca8 100644 --- a/modules/sd_samplers_extra.py +++ b/modules/sd_samplers_extra.py @@ -1,17 +1,20 @@ import torch +import tqdm import k_diffusion.sampling + @torch.no_grad() -def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list = None): - """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)""" - '''Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}''' - '''If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list''' - from tqdm.auto import trange +def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None): + """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023) + Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]} + If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list + """ extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) step_id = 0 from k_diffusion.sampling import to_d, get_sigmas_karras - def heun_step(x, old_sigma, new_sigma, second_order = True): + + def heun_step(x, old_sigma, new_sigma, second_order=True): nonlocal 
step_id denoised = model(x, old_sigma * s_in, **extra_args) d = to_d(x, old_sigma, denoised) @@ -30,6 +33,7 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No x = x + d_prime * dt step_id += 1 return x + steps = sigmas.shape[0] - 1 if restart_list is None: if steps >= 20: @@ -41,11 +45,10 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) restart_list = {0.1: [restart_steps + 1, restart_times, 2]} else: - restart_list = dict() - temp_list = dict() - for key, value in restart_list.items(): - temp_list[int(torch.argmin(abs(sigmas - key), dim=0))] = value - restart_list = temp_list + restart_list = {} + + restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in restart_list.items()} + step_list = [] for i in range(len(sigmas) - 1): step_list.append((sigmas[i], sigmas[i + 1])) @@ -58,13 +61,14 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No while restart_times > 0: restart_times -= 1 step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])]) + last_sigma = None - for i in trange(len(step_list), disable=disable): + for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable): if last_sigma is None: - last_sigma = step_list[i][0] - elif last_sigma < step_list[i][0]: - x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (step_list[i][0] ** 2 - last_sigma ** 2) ** 0.5 - x = heun_step(x, step_list[i][0], step_list[i][1]) - last_sigma = step_list[i][1] - return x + last_sigma = old_sigma + elif last_sigma < old_sigma: + x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5 + x = heun_step(x, old_sigma, new_sigma) + last_sigma = new_sigma + return x -- cgit v1.2.1 From e18fc29bbfed09f2a33087b089a491074ec8e8af Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 29 Jul 2023 08:40:43 +0300 Subject: put the entry for the sampler in the readme section in order of addition --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index edf7d7c6..2fd6e425 100644 --- a/README.md +++ b/README.md @@ -145,7 +145,6 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al - Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers - k-diffusion - https://github.com/crowsonkb/k-diffusion.git -- Restart sampling - https://github.com/Newbeeer/diffusion_restart_sampling - GFPGAN - https://github.com/TencentARC/GFPGAN.git - CodeFormer - https://github.com/sczhou/CodeFormer - ESRGAN - https://github.com/xinntao/ESRGAN @@ -170,5 +169,6 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al - UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC - TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd - LyCORIS - KohakuBlueleaf +- Restart sampling - lambertae - https://github.com/Newbeeer/diffusion_restart_sampling - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. 
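The restart sampler credited above works by periodically jumping back to a higher noise level and re-solving that segment: whenever the step list moves to a larger sigma, fresh Gaussian noise with variance sigma_high^2 - sigma_low^2 is added before the Heun steps resume. A minimal sketch of just that re-noising rule, assuming k-diffusion-style sigmas (the helper name `renoise` is ours, not the repo's):

import torch


def renoise(x, sigma_low, sigma_high, s_noise=1.0):
    # Matches the `last_sigma < old_sigma` branch of restart_sampler: jump from a
    # mostly-denoised latent at sigma_low back up to sigma_high by adding noise whose
    # variance is the difference of the two noise levels.
    return x + torch.randn_like(x) * s_noise * (sigma_high ** 2 - sigma_low ** 2) ** 0.5


x = torch.zeros(1, 4, 8, 8)  # dummy latent in SD's 4-channel latent space
x = renoise(x, sigma_low=0.1, sigma_high=2.0)
print(x.std())  # roughly sqrt(2.0**2 - 0.1**2), i.e. about 2.0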
- (You) -- cgit v1.2.1 From 6f0abbb71a3f29d6df63fed82d5d5e196ca0d4de Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 29 Jul 2023 15:15:06 +0300 Subject: textual inversion support for SDXL --- modules/sd_hijack.py | 8 +++++--- modules/sd_hijack_clip.py | 2 +- modules/sd_models_xl.py | 9 +++++++++ modules/textual_inversion/textual_inversion.py | 19 ++++++++++++++----- 4 files changed, 29 insertions(+), 9 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index c8fdd4f1..cfa5f0eb 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -197,7 +197,7 @@ class StableDiffusionModelHijack: conditioner.embedders[i] = sd_hijack_clip.FrozenCLIPEmbedderForSDXLWithCustomWords(embedder, self) text_cond_models.append(conditioner.embedders[i]) if typename == 'FrozenOpenCLIPEmbedder2': - embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self) + embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self, textual_inversion_key='clip_g') conditioner.embedders[i] = sd_hijack_open_clip.FrozenOpenCLIPEmbedder2WithCustomWords(embedder, self) text_cond_models.append(conditioner.embedders[i]) @@ -292,10 +292,11 @@ class StableDiffusionModelHijack: class EmbeddingsWithFixes(torch.nn.Module): - def __init__(self, wrapped, embeddings): + def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'): super().__init__() self.wrapped = wrapped self.embeddings = embeddings + self.textual_inversion_key = textual_inversion_key def forward(self, input_ids): batch_fixes = self.embeddings.fixes @@ -309,7 +310,8 @@ class EmbeddingsWithFixes(torch.nn.Module): vecs = [] for fixes, tensor in zip(batch_fixes, inputs_embeds): for offset, embedding in fixes: - emb = devices.cond_cast_unet(embedding.vec) + vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec + emb = devices.cond_cast_unet(vec) emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0]) tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]]) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 16a5500e..2f9d569b 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -161,7 +161,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): position += 1 continue - emb_len = int(embedding.vec.shape[0]) + emb_len = int(embedding.vectors) if len(chunk.tokens) + emb_len > self.chunk_length: next_chunk() diff --git a/modules/sd_models_xl.py b/modules/sd_models_xl.py index 40559208..bc219508 100644 --- a/modules/sd_models_xl.py +++ b/modules/sd_models_xl.py @@ -56,6 +56,14 @@ def encode_embedding_init_text(self: sgm.modules.GeneralConditioner, init_text, return torch.cat(res, dim=1) +def tokenize(self: sgm.modules.GeneralConditioner, texts): + for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'tokenize')]: + return embedder.tokenize(texts) + + raise AssertionError('no tokenizer available') + + + def process_texts(self, texts): for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'process_texts')]: return embedder.process_texts(texts) @@ -68,6 +76,7 @@ def get_target_prompt_token_count(self, token_count): # those additions to GeneralConditioner make it possible to use it as model.cond_stage_model from SD1.5 in exist sgm.modules.GeneralConditioner.encode_embedding_init_text = encode_embedding_init_text +sgm.modules.GeneralConditioner.tokenize = tokenize 
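# Note: with this commit an SDXL textual-inversion embedding is stored as one tensor per
# text encoder, keyed 'clip_l' and 'clip_g', and each EmbeddingsWithFixes instance selects
# its key at lookup time. A minimal sketch of the expected layout, assuming the usual
# SDXL encoder widths of 768 (CLIP-L) and 1280 (OpenCLIP bigG):

import torch

sdxl_embedding = {
    "clip_l": torch.zeros(2, 768),   # two vectors for the clip_l encoder
    "clip_g": torch.zeros(2, 1280),  # two vectors for the clip_g encoder
}
vectors = sdxl_embedding["clip_g"].shape[0]  # 2, as computed in textual_inversion.py below
shape = sdxl_embedding["clip_g"].shape[-1] + sdxl_embedding["clip_l"].shape[-1]  # 2048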
sgm.modules.GeneralConditioner.process_texts = process_texts sgm.modules.GeneralConditioner.get_target_prompt_token_count = get_target_prompt_token_count diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 6166c76f..4713bc2d 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -181,29 +181,38 @@ class EmbeddingDatabase: else: return + # textual inversion embeddings if 'string_to_param' in data: param_dict = data['string_to_param'] param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11 assert len(param_dict) == 1, 'embedding file has multiple terms in it' emb = next(iter(param_dict.items()))[1] - # diffuser concepts - elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: + vec = emb.detach().to(devices.device, dtype=torch.float32) + shape = vec.shape[-1] + vectors = vec.shape[0] + elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding + vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()} + shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1] + vectors = data['clip_g'].shape[0] + elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts assert len(data.keys()) == 1, 'embedding file has multiple terms in it' emb = next(iter(data.values())) if len(emb.shape) == 1: emb = emb.unsqueeze(0) + vec = emb.detach().to(devices.device, dtype=torch.float32) + shape = vec.shape[-1] + vectors = vec.shape[0] else: raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.") - vec = emb.detach().to(devices.device, dtype=torch.float32) embedding = Embedding(vec, name) embedding.step = data.get('step', None) embedding.sd_checkpoint = data.get('sd_checkpoint', None) embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None) - embedding.vectors = vec.shape[0] - embedding.shape = vec.shape[-1] + embedding.vectors = vectors + embedding.shape = shape embedding.filename = path embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '') -- cgit v1.2.1 From b95a41ad724c8993aec4e752bd6d0c9c150be963 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 30 Jul 2023 00:01:53 +0900 Subject: rework img2img batch image save --- modules/img2img.py | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 132cd100..fc8a81eb 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -32,11 +32,6 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.") - save_normally = output_dir == '' - - p.do_not_save_grid = True - p.do_not_save_samples = not save_normally - state.job_count = len(images) * p.n_iter # extract "default" params to use in case getting png info fails @@ -111,21 +106,13 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal proc = modules.scripts.scripts_img2img.run(p, *args) if proc is None: - proc = process_images(p) - - for n, processed_image in enumerate(proc.images): - filename = image_path.stem - infotext = proc.infotext(p, n) - relpath = os.path.dirname(os.path.relpath(image, input_dir)) - - if n > 0: - filename += f"-{n}" - - if not 

From b95a41ad724c8993aec4e752bd6d0c9c150be963 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sun, 30 Jul 2023 00:01:53 +0900
Subject: rework img2img batch image save

---
 modules/img2img.py | 27 +++++++--------------------
 1 file changed, 7 insertions(+), 20 deletions(-)

diff --git a/modules/img2img.py b/modules/img2img.py
index 132cd100..fc8a81eb 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -32,11 +32,6 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
 
     print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
 
-    save_normally = output_dir == ''
-
-    p.do_not_save_grid = True
-    p.do_not_save_samples = not save_normally
-
     state.job_count = len(images) * p.n_iter
 
     # extract "default" params to use in case getting png info fails
@@ -111,21 +106,13 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
 
         proc = modules.scripts.scripts_img2img.run(p, *args)
         if proc is None:
-            proc = process_images(p)
-
-        for n, processed_image in enumerate(proc.images):
-            filename = image_path.stem
-            infotext = proc.infotext(p, n)
-            relpath = os.path.dirname(os.path.relpath(image, input_dir))
-
-            if n > 0:
-                filename += f"-{n}"
-
-            if not save_normally:
-                os.makedirs(os.path.join(output_dir, relpath), exist_ok=True)
-                if processed_image.mode == 'RGBA':
-                    processed_image = processed_image.convert("RGB")
-                save_image(processed_image, os.path.join(output_dir, relpath), None, extension=opts.samples_format, info=infotext, forced_filename=filename, save_to_dirs=False)
+            p.outpath_samples = output_dir
+            p.override_settings['save_to_dirs'] = False
+            if p.n_iter > 1 or p.batch_size > 1:
+                p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
+            else:
+                p.override_settings['samples_filename_pattern'] = f'{image_path.stem}'
+            process_images(p)
 
 
 def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
--
cgit v1.2.1


From 9857537053a13a8963b70b67757201a3e1f941f1 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sun, 30 Jul 2023 00:06:25 +0900
Subject: lint

---
 modules/img2img.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/modules/img2img.py b/modules/img2img.py
index fc8a81eb..fee6f8a5 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -10,7 +10,6 @@ from modules import sd_samplers, images as imgutil
 from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters
 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
 from modules.shared import opts, state
-from modules.images import save_image
 import modules.shared as shared
 import modules.processing as processing
 from modules.ui import plaintext_to_html
--
cgit v1.2.1


From 53ccdefc015d7a05e03b603d6c7356574c240026 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sun, 30 Jul 2023 00:34:04 +0900
Subject: don't override default if output_dir is blank

---
 modules/img2img.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/modules/img2img.py b/modules/img2img.py
index fee6f8a5..d41f34d0 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -105,12 +105,13 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
 
         proc = modules.scripts.scripts_img2img.run(p, *args)
         if proc is None:
-            p.outpath_samples = output_dir
-            p.override_settings['save_to_dirs'] = False
-            if p.n_iter > 1 or p.batch_size > 1:
-                p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
-            else:
-                p.override_settings['samples_filename_pattern'] = f'{image_path.stem}'
+            if output_dir:
+                p.outpath_samples = output_dir
+                p.override_settings['save_to_dirs'] = False
+                if p.n_iter > 1 or p.batch_size > 1:
+                    p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
+                else:
+                    p.override_settings['samples_filename_pattern'] = f'{image_path.stem}'
             process_images(p)
--
cgit v1.2.1


From fb44838176f50a07ff787d8004f5ad514b427ae8 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sun, 30 Jul 2023 14:47:24 +0900
Subject: strip output_dir

---
 modules/img2img.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/img2img.py b/modules/img2img.py
index d41f34d0..68e415ef 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -17,6 +17,7 @@ import modules.scripts
 
 
 def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
+    output_dir = output_dir.strip()
     processing.fix_seed(p)
 
     images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
--
cgit v1.2.1


From 8a40e30d08d4bd2fc9b3b8b5d697abfda7d31bc0 Mon Sep 17 00:00:00 2001
From: Robert Barron
Date: Sun, 30 Jul 2023 01:22:00 -0700
Subject: add support for whitespace after the number in constructions like
 [foo:bar: 0.5 ] and (foo : 0.5 )

---
 modules/prompt_parser.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 203ae1ac..8169a459 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -19,7 +19,7 @@ prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)*
 !emphasized: "(" prompt ")"
         | "(" prompt ":" prompt ")"
         | "[" prompt "]"
-scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER "]"
+scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER [WHITESPACE] "]"
 alternate: "[" prompt ("|" prompt)+ "]"
 WHITESPACE: /\s+/
 plain: /([^\\\[\]():|]|\\.)+/
@@ -60,11 +60,11 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
 
     class CollectSteps(lark.Visitor):
         def scheduled(self, tree):
-            tree.children[-1] = float(tree.children[-1])
-            if tree.children[-1] < 1:
-                tree.children[-1] *= steps
-            tree.children[-1] = min(steps, int(tree.children[-1]))
-            res.append(tree.children[-1])
+            tree.children[-2] = float(tree.children[-2])
+            if tree.children[-2] < 1:
+                tree.children[-2] *= steps
+            tree.children[-2] = min(steps, int(tree.children[-2]))
+            res.append(tree.children[-2])
 
         def alternate(self, tree):
             res.extend(range(1, steps+1))
@@ -75,7 +75,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
     def at_step(step, tree):
         class AtStep(lark.Transformer):
             def scheduled(self, args):
-                before, after, _, when = args
+                before, after, _, when, _ = args
                 yield before or () if step <= when else after
             def alternate(self, args):
                 yield next(args[(step - 1)%len(args)])
@@ -333,7 +333,7 @@ re_attention = re.compile(r"""
 \\|
 \(|
 \[|
-:([+-]?[.\d]+)\)|
+:\s*([+-]?[.\d]+)\s*\)|
 \)|
 ]|
 [^\\()\[\]:]+|
--
cgit v1.2.1
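
A quick standalone check of the relaxed attention regex from the patch above; only the regex fragment is reproduced here, the rest of the parser is omitted:

    import re

    # the new pattern: optional whitespace around the weight before ')'
    re_weight = re.compile(r":\s*([+-]?[.\d]+)\s*\)")

    for text in ["(foo:0.5)", "(foo : 0.5 )"]:
        m = re_weight.search(text)
        print(text, "->", m.group(1) if m else "no match")  # both print 0.5

The scheduling form ([foo:bar: 0.5 ]) is handled separately by the lark grammar change, which inserts an optional WHITESPACE token before the closing bracket, which is why the visitor now reads the number from children[-2] instead of children[-1].
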

From 085c903229af823bb9d7dcf37a0a24e4138c98aa Mon Sep 17 00:00:00 2001
From: Robert Barron
Date: Sun, 30 Jul 2023 00:26:25 -0700
Subject: xyz_grid: in the legend, remove pathnames from model filenames

---
 scripts/xyz_grid.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 1010845e..d3c78a2f 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -3,6 +3,7 @@ from copy import copy
 from itertools import permutations, chain
 import random
 import csv
+import os.path
 from io import StringIO
 from PIL import Image
 import numpy as np
@@ -182,6 +183,8 @@ def do_nothing(p, x, xs):
 def format_nothing(p, opt, x):
     return ""
 
+def format_remove_path(p, opt, x):
+    return os.path.basename(x)
 
 def str_permutations(x):
     """dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
@@ -223,7 +226,7 @@ axis_options = [
     AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list),
     AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]),
     AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]),
-    AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)),
+    AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_remove_path, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)),
     AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")),
     AxisOption("Sigma Churn", float, apply_field("s_churn")),
     AxisOption("Sigma min", float, apply_field("s_tmin")),
--
cgit v1.2.1
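
The new legend formatter is plain basename extraction; the path below is an illustrative example, not one from the patch:

    import os.path

    print(os.path.basename("models/Stable-diffusion/sd_xl_base_1.0.safetensors"))
    # -> sd_xl_base_1.0.safetensors
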

From 3bca90b249d749ed5429f76e380d2ffa52fc0d41 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 30 Jul 2023 13:48:27 +0300
Subject: hires fix checkpoint selection

---
 modules/generation_parameters_copypaste.py |  3 ++
 modules/processing.py                      | 47 +++++++++++++++++-----------
 modules/sd_models.py                       | 22 ++++++++------
 modules/shared.py                          | 19 ++++++++----
 modules/txt2img.py                         |  3 +-
 modules/ui.py                              |  8 ++++-
 6 files changed, 68 insertions(+), 34 deletions(-)

diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index a3448be9..4e286558 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -280,6 +280,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
     if "Hires sampler" not in res:
         res["Hires sampler"] = "Use same sampler"
 
+    if "Hires checkpoint" not in res:
+        res["Hires checkpoint"] = "Use same checkpoint"
+
     if "Hires prompt" not in res:
         res["Hires prompt"] = ""
 
diff --git a/modules/processing.py b/modules/processing.py
index b0992ee1..7026487a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -935,7 +935,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
     cached_hr_uc = [None, None]
     cached_hr_c = [None, None]
 
-    def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs):
+    def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_checkpoint_name: str = None, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs):
         super().__init__(**kwargs)
         self.enable_hr = enable_hr
         self.denoising_strength = denoising_strength
@@ -946,11 +946,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         self.hr_resize_y = hr_resize_y
         self.hr_upscale_to_x = hr_resize_x
         self.hr_upscale_to_y = hr_resize_y
+        self.hr_checkpoint_name = hr_checkpoint_name
+        self.hr_checkpoint_info = None
         self.hr_sampler_name = hr_sampler_name
         self.hr_prompt = hr_prompt
         self.hr_negative_prompt = hr_negative_prompt
         self.all_hr_prompts = None
         self.all_hr_negative_prompts = None
+        self.latent_scale_mode = None
 
         if firstphase_width != 0 or firstphase_height != 0:
             self.hr_upscale_to_x = self.width
@@ -973,6 +976,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 
     def init(self, all_prompts, all_seeds, all_subseeds):
         if self.enable_hr:
+            if self.hr_checkpoint_name:
+                self.hr_checkpoint_info = sd_models.get_closet_checkpoint_match(self.hr_checkpoint_name)
+
+                if self.hr_checkpoint_info is None:
+                    raise Exception(f'Could not find checkpoint with name {self.hr_checkpoint_name}')
+
+                self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title
+
             if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
                 self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
 
@@ -982,6 +993,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
                 self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt
 
+            self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
+            if self.enable_hr and self.latent_scale_mode is None:
+                if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
+                    raise Exception(f"could not find upscaler named {self.hr_upscaler}")
+
             if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
                 self.hr_resize_x = self.width
                 self.hr_resize_y = self.height
@@ -1020,14 +1036,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
                     self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
                     self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
 
-            # special case: the user has chosen to do nothing
-            if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height:
-                self.enable_hr = False
-                self.denoising_strength = None
-                self.extra_generation_params.pop("Hires upscale", None)
-                self.extra_generation_params.pop("Hires resize", None)
-                return
-
             if not state.processing_has_refined_job_count:
                 if state.job_count == -1:
                     state.job_count = self.n_iter
@@ -1045,17 +1053,22 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
     def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
         self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
 
-        latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
-        if self.enable_hr and latent_scale_mode is None:
-            if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
-                raise Exception(f"could not find upscaler named {self.hr_upscaler}")
-
         x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
         samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
 
         if not self.enable_hr:
             return samples
 
+        current = shared.sd_model.sd_checkpoint_info
+        try:
+            if self.hr_checkpoint_info is not None:
+                sd_models.reload_model_weights(info=self.hr_checkpoint_info)
+
+            return self.sample_hr_pass(samples, seeds, subseeds, subseed_strength, prompts)
+        finally:
+            sd_models.reload_model_weights(info=current)
+
+    def sample_hr_pass(self, samples, seeds, subseeds, subseed_strength, prompts):
         self.is_hr_pass = True
 
         target_width = self.hr_upscale_to_x
@@ -1073,11 +1086,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
                 info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
                 images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")
 
-        if latent_scale_mode is not None:
+        if self.latent_scale_mode is not None:
             for i in range(samples.shape[0]):
                 save_intermediate(samples, i)
 
-            samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
+            samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=self.latent_scale_mode["mode"], antialias=self.latent_scale_mode["antialias"])
 
             # Avoid making the inpainting conditioning unless necessary as
             # this does need some extra compute to decode / encode the image again.
@@ -1193,7 +1206,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         self.hr_uc = None
         self.hr_c = None
 
-        if self.enable_hr:
+        if self.enable_hr and self.hr_checkpoint_info is None:
             if shared.opts.hires_fix_use_firstpass_conds:
                 self.calculate_hr_conds()
 
diff --git a/modules/sd_models.py b/modules/sd_models.py
index acb1e817..cb67e425 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -52,6 +52,7 @@ class CheckpointInfo:
         self.shorthash = self.sha256[0:10] if self.sha256 else None
 
         self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'
+        self.short_title = self.name_for_extra if self.shorthash is None else f'{self.name_for_extra} [{self.shorthash}]'
 
         self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else [])
 
@@ -81,6 +82,7 @@ class CheckpointInfo:
             checkpoints_list.pop(self.title)
         self.title = f'{self.name} [{self.shorthash}]'
+        self.short_title = f'{self.name_for_extra} [{self.shorthash}]'
         self.register()
 
         return self.shorthash
@@ -101,14 +103,8 @@ def setup_model():
     enable_midas_autodownload()
 
 
-def checkpoint_tiles():
-    def convert(name):
-        return int(name) if name.isdigit() else name.lower()
-
-    def alphanumeric_key(key):
-        return [convert(c) for c in re.split('([0-9]+)', key)]
-
-    return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key)
+def checkpoint_tiles(use_short=False):
+    return [x.short_title if use_short else x.title for x in checkpoints_list.values()]
 
 
 def list_models():
@@ -131,11 +127,14 @@ def list_models():
     elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
         print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
 
-    for filename in sorted(model_list, key=str.lower):
+    for filename in model_list:
         checkpoint_info = CheckpointInfo(filename)
         checkpoint_info.register()
 
 
+re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")
+
+
 def get_closet_checkpoint_match(search_string):
     checkpoint_info = checkpoint_aliases.get(search_string, None)
     if checkpoint_info is not None:
@@ -145,6 +144,11 @@ def get_closet_checkpoint_match(search_string):
     if found:
         return found[0]
 
+    search_string_without_checksum = re.sub(re_strip_checksum, '', search_string)
+    found = sorted([info for info in checkpoints_list.values() if search_string_without_checksum in info.title], key=lambda x: len(x.title))
+    if found:
+        return found[0]
+
     return None
 
diff --git a/modules/shared.py b/modules/shared.py
index aa72c9c8..807fb9e3 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -220,12 +220,19 @@ class State:
             return
 
         import modules.sd_samplers
-        if opts.show_progress_grid:
-            self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
-        else:
-            self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
 
-        self.current_image_sampling_step = self.sampling_step
+        try:
+            if opts.show_progress_grid:
+                self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
+            else:
+                self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
+
+            self.current_image_sampling_step = self.sampling_step
+
+        except Exception:
+            # when switching models during generation, VAE would be on CPU, so creating an image will fail.
+            # we silently ignore this error
+            errors.record_exception()
 
     def assign_current_image(self, image):
         self.current_image = image
@@ -512,7 +519,7 @@ options_templates.update(options_section(('ui', "User interface"), {
     "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
     "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
     "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_restart(),
-    "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires sampler selection").needs_restart(),
+    "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_restart(),
     "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_restart(),
     "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_restart(),
 }))
 
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 29d94e8c..935ed418 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html
 import gradio as gr
 
 
-def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
     override_settings = create_override_settings_dict(override_settings_texts)
 
     p = processing.StableDiffusionProcessingTxt2Img(
@@ -41,6 +41,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
         hr_second_pass_steps=hr_second_pass_steps,
         hr_resize_x=hr_resize_x,
         hr_resize_y=hr_resize_y,
+        hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name,
         hr_sampler_name=sd_samplers.samplers_for_img2img[hr_sampler_index - 1].name if hr_sampler_index != 0 else None,
         hr_prompt=hr_prompt,
         hr_negative_prompt=hr_negative_prompt,
 
diff --git a/modules/ui.py b/modules/ui.py
index 07ecee7b..6d8265f2 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -476,6 +476,10 @@ def create_ui():
                             hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
 
                         with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container:
+                            checkpoint_choices = lambda: ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)
+                            hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=checkpoint_choices(), value="Use same checkpoint")
+                            create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": checkpoint_choices()}, "hr_checkpoint_refresh")
+
                             hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index")
 
                         with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
@@ -553,6 +557,7 @@ def create_ui():
                 hr_second_pass_steps,
                 hr_resize_x,
                 hr_resize_y,
+                hr_checkpoint_name,
                 hr_sampler_index,
                 hr_prompt,
                 hr_negative_prompt,
@@ -630,8 +635,9 @@ def create_ui():
             (hr_second_pass_steps, "Hires steps"),
             (hr_resize_x, "Hires resize-1"),
             (hr_resize_y, "Hires resize-2"),
+            (hr_checkpoint_name, "Hires checkpoint"),
             (hr_sampler_index, "Hires sampler"),
-            (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()),
+            (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" or d.get("Hires checkpoint", "Use same checkpoint") != "Use same checkpoint" else gr.update()),
             (hr_prompt, "Hires prompt"),
             (hr_negative_prompt, "Hires negative prompt"),
             (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
--
cgit v1.2.1
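
The checksum-stripping fallback added to get_closet_checkpoint_match can be exercised on its own; the titles and hashes below are made up for illustration:

    import re

    re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")

    titles = [
        "sd_xl_base_1.0.safetensors [31e35c80fc]",
        "v1-5-pruned-emaonly.safetensors [6ce0161689]",
    ]

    search = "sd_xl_base_1.0.safetensors [0000000000]"  # stale hash
    without_checksum = re_strip_checksum.sub('', search)
    found = sorted([t for t in titles if without_checksum in t], key=len)
    print(found[0] if found else None)  # matches despite the wrong hash

This is what lets "Hires checkpoint" values pasted from old infotexts resolve even after a checkpoint's hash changes; sorting by title length prefers the most exact match.
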

From 40cd59207b96f9e522fdc104b43279880b671ce4 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 30 Jul 2023 14:10:26 +0300
Subject: make it work with SDXL

---
 modules/processing.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 7026487a..b8af1301 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1197,8 +1197,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         if self.hr_c is not None:
             return
 
-        self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data)
-        self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)
+        hr_prompts = prompt_parser.SdConditioning(self.hr_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y)
+        hr_negative_prompts = prompt_parser.SdConditioning(self.hr_negative_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y, is_negative_prompt=True)
+
+        self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data)
+        self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)
 
     def setup_conds(self):
         super().setup_conds()
--
cgit v1.2.1


From 77761e7bad8a7cbffc9028dc0b2f63169aaf25f9 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 30 Jul 2023 14:10:33 +0300
Subject: linter

---
 modules/processing.py | 2 +-
 modules/ui.py         | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index b8af1301..21dbef16 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1055,6 +1055,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 
         x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
         samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
+        del x
 
         if not self.enable_hr:
             return samples
@@ -1137,7 +1138,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)
 
         # GC now before running the next img2img to prevent running out of memory
-        x = None
         devices.torch_gc()
 
         if not self.disable_extra_networks:
diff --git a/modules/ui.py b/modules/ui.py
index 6d8265f2..6fc9de83 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -476,9 +476,9 @@ def create_ui():
                             hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
 
                         with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container:
-                            checkpoint_choices = lambda: ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)
-                            hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=checkpoint_choices(), value="Use same checkpoint")
-                            create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": checkpoint_choices()}, "hr_checkpoint_refresh")
+
+                            hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint")
+                            create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh")
 
                             hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index")
--
cgit v1.2.1


From eec540b22798ddcf8a03d947519c36635d77d722 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 30 Jul 2023 15:04:12 +0300
Subject: repair non-latent upscaling broken for SDXL

---
 modules/processing.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/processing.py b/modules/processing.py
index 21dbef16..6fb14516 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1119,6 +1119,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             decoded_samples = torch.from_numpy(np.array(batch_images))
             decoded_samples = decoded_samples.to(shared.device)
             decoded_samples = 2. * decoded_samples - 1.
+            decoded_samples = decoded_samples.to(shared.device, dtype=devices.dtype_vae)
 
             samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
--
cgit v1.2.1


From a64fbe89288802f8b5ec8ca7bcab5aaf2c7bfea5 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 30 Jul 2023 15:12:09 +0300
Subject: make it possible to use checkpoints of different types (SD1, SDXL) in
 first and second pass of hires fix

---
 modules/processing.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 6fb14516..c4da208f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1060,16 +1060,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         if not self.enable_hr:
             return samples
 
+        if self.latent_scale_mode is None:
+            decoded_samples = decode_first_stage(self.sd_model, samples)
+        else:
+            decoded_samples = None
+
         current = shared.sd_model.sd_checkpoint_info
         try:
             if self.hr_checkpoint_info is not None:
                 sd_models.reload_model_weights(info=self.hr_checkpoint_info)
 
-            return self.sample_hr_pass(samples, seeds, subseeds, subseed_strength, prompts)
+            return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
         finally:
             sd_models.reload_model_weights(info=current)
 
-    def sample_hr_pass(self, samples, seeds, subseeds, subseed_strength, prompts):
+    def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
         self.is_hr_pass = True
 
         target_width = self.hr_upscale_to_x
@@ -1100,7 +1105,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             else:
                 image_conditioning = self.txt2img_image_conditioning(samples)
         else:
-            decoded_samples = decode_first_stage(self.sd_model, samples)
             lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
 
             batch_images = []
--
cgit v1.2.1
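
SdConditioning, used above to pass the hires target size into conditioning, relies on subclassing list so existing call sites still see a plain prompt list. A stripped-down standalone equivalent (class and field names here are illustrative, not the real implementation):

    class PromptBatch(list):
        def __init__(self, prompts, width=None, height=None):
            super().__init__(prompts)
            self.width = width    # extra metadata rides along with the list
            self.height = height

    batch = PromptBatch(["a cat", "a dog"], width=1024, height=1024)
    print(len(batch), batch[0], batch.width)  # 2 a cat 1024

This matters for SDXL because its conditioner consumes the target resolution, so the hires pass must be conditioned on the upscaled size rather than the first-pass size.
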

From cc53db6652b11e6f7bca42c3aa93bd6761ed3d3f Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 30 Jul 2023 15:30:33 +0300
Subject: this time for sure

---
 modules/processing.py | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index c4da208f..3190b964 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -538,8 +538,12 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
     return x
 
 
+class DecodedSamples(list):
+    already_decoded = True
+
+
 def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
-    samples = []
+    samples = DecodedSamples()
 
     for i in range(batch.shape[0]):
         sample = decode_first_stage(model, batch[i:i + 1])[0]
@@ -793,7 +797,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
                 samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
 
-            x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
+            if getattr(samples_ddim, 'already_decoded', False):
+                x_samples_ddim = samples_ddim
+            else:
+                x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
+
             x_samples_ddim = torch.stack(x_samples_ddim).float()
             x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
 
@@ -1161,9 +1169,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 
         sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())
 
+        decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)
+
         self.is_hr_pass = False
 
-        return samples
+        return decoded_samples
 
     def close(self):
         super().close()
--
cgit v1.2.1


From 02038036ff571e0f04a94c3e279609666e239dec Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 30 Jul 2023 16:16:31 +0300
Subject: make it so that VAE NaNs autodetection also works during first pass of
 hires fix

---
 modules/processing.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/processing.py b/modules/processing.py
index 3190b964..0677de81 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1069,7 +1069,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             return samples
 
         if self.latent_scale_mode is None:
-            decoded_samples = decode_first_stage(self.sd_model, samples)
+            decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True))
         else:
             decoded_samples = None
--
cgit v1.2.1


From 0af4127fd14360ebb12c6569d98aebf8047abbfc Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sun, 30 Jul 2023 19:36:24 +0300
Subject: delete the field that is preventing the model from being unloaded and
 is causing increased RAM usage

---
 modules/processing.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/processing.py b/modules/processing.py
index 0677de81..b09433b0 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1076,11 +1076,15 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         current = shared.sd_model.sd_checkpoint_info
         try:
             if self.hr_checkpoint_info is not None:
+                del self.sampler
                 sd_models.reload_model_weights(info=self.hr_checkpoint_info)
+                devices.torch_gc()
 
             return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
         finally:
+            del self.sampler
             sd_models.reload_model_weights(info=current)
+            devices.torch_gc()
--
cgit v1.2.1


From dca121e9035ba36b3f7484c8a31a7776d85c0960 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 31 Jul 2023 09:13:07 +0300
Subject: set the field to None instead

---
 modules/processing.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index b09433b0..35e7b87e 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1076,13 +1076,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         current = shared.sd_model.sd_checkpoint_info
         try:
             if self.hr_checkpoint_info is not None:
-                del self.sampler
+                self.sampler = None
                 sd_models.reload_model_weights(info=self.hr_checkpoint_info)
                 devices.torch_gc()
 
             return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
         finally:
-            del self.sampler
+            self.sampler = None
             sd_models.reload_model_weights(info=current)
             devices.torch_gc()
--
cgit v1.2.1


From 29d7e31d89e9d686784eacbdbfc5b15959eb4449 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 31 Jul 2023 10:43:26 +0300
Subject: repair AttributeError: 'NoneType' object has no attribute
 'conditioning_key'

---
 modules/processing.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 35e7b87e..1f0c0b3b 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1104,6 +1104,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
                 info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
                 images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")
 
+        img2img_sampler_name = self.hr_sampler_name or self.sampler_name
+
+        if self.sampler_name in ['PLMS', 'UniPC']:  # PLMS/UniPC do not support img2img so we just silently switch to DDIM
+            img2img_sampler_name = 'DDIM'
+
+        self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
+
         if self.latent_scale_mode is not None:
             for i in range(samples.shape[0]):
                 save_intermediate(samples, i)
@@ -1143,13 +1150,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 
         shared.state.nextjob()
 
-        img2img_sampler_name = self.hr_sampler_name or self.sampler_name
-
-        if self.sampler_name in ['PLMS', 'UniPC']:  # PLMS/UniPC do not support img2img so we just silently switch to DDIM
-            img2img_sampler_name = 'DDIM'
-
-        self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
-
         samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
 
         noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)
--
cgit v1.2.1
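
The already_decoded marker above uses the same list-subclass trick: a class attribute tells process_images_inner whether it received latents or finished images, without importing processing internals at the call site. A runnable toy version with stand-in decode logic:

    class DecodedSamples(list):
        already_decoded = True

    def finalize(samples):
        if getattr(samples, 'already_decoded', False):
            return samples                            # hires pass decoded already
        return [f"decoded({s})" for s in samples]     # stand-in for VAE decode

    print(finalize([0.1, 0.2]))                 # latents: get "decoded"
    print(finalize(DecodedSamples(["img"])))    # passes through unchanged

getattr with a default keeps plain lists and tensors on the old path, so only the hires-fix return value opts in to the new behavior.
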

From 4d9b096663288e2aa738723fa63950f3d41f6170 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 31 Jul 2023 10:43:31 +0300
Subject: additional memory improvements when switching between models of
 different types

---
 modules/sd_models.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/modules/sd_models.py b/modules/sd_models.py
index cb67e425..4855037a 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -582,7 +582,10 @@ def reload_model_weights(sd_model=None, info=None):
     timer.record("find config")
 
     if sd_model is None or checkpoint_config != sd_model.used_config:
-        del sd_model
+        if sd_model is not None:
+            sd_model.to(device="meta")
+
+        devices.torch_gc()
         load_model(checkpoint_info, already_loaded_state_dict=state_dict)
         return model_data.sd_model
--
cgit v1.2.1


From fb87a05fe8364c8871538355a8e24587c733a492 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Mon, 31 Jul 2023 11:23:26 +0200
Subject: Don't crash if out of local storage quota

Fixes #12206 (works around it)
---
 javascript/ui.js | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/javascript/ui.js b/javascript/ui.js
index d70a681b..abf23a78 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -152,7 +152,11 @@ function submit() {
     showSubmitButtons('txt2img', false);
 
     var id = randomId();
-    localStorage.setItem("txt2img_task_id", id);
+    try {
+        localStorage.setItem("txt2img_task_id", id);
+    } catch (e) {
+        console.warn(`Failed to save txt2img task id to localStorage: ${e}`);
+    }
 
     requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
         showSubmitButtons('txt2img', true);
@@ -171,7 +175,11 @@ function submit_img2img() {
     showSubmitButtons('img2img', false);
 
     var id = randomId();
-    localStorage.setItem("img2img_task_id", id);
+    try {
+        localStorage.setItem("img2img_task_id", id);
+    } catch (e) {
+        console.warn(`Failed to save img2img task id to localStorage: ${e}`);
+    }
 
     requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
         showSubmitButtons('img2img', true);
@@ -191,8 +199,6 @@ function restoreProgressTxt2img() {
     showRestoreProgressButton("txt2img", false);
     var id = localStorage.getItem("txt2img_task_id");
 
-    id = localStorage.getItem("txt2img_task_id");
-
     if (id) {
         requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
             showSubmitButtons('txt2img', true);
--
cgit v1.2.1


From c09bc2c60856ca1ab2243386176badf909affdbe Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 31 Jul 2023 13:20:26 +0300
Subject: fix "clamp_scalar_cpu" not implemented for 'Half'

---
 modules/processing.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/processing.py b/modules/processing.py
index 1f0c0b3b..f8f8bddc 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1069,7 +1069,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             return samples
 
         if self.latent_scale_mode is None:
-            decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True))
+            decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32)
         else:
             decoded_samples = None
--
cgit v1.2.1


From c10633f93a646b06f62bf1b24adba52f539dd6b6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 31 Jul 2023 22:01:53 +0300
Subject: fix memory leak when generation fails

---
 modules/call_queue.py | 4 +++-
 modules/errors.py     | 3 ++-
 modules/sysinfo.py    | 6 +++++-
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/modules/call_queue.py b/modules/call_queue.py
index 61aa240f..f2eb17d6 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -3,7 +3,7 @@ import html
 import threading
 import time
 
-from modules import shared, progress, errors
+from modules import shared, progress, errors, devices
 
 queue_lock = threading.Lock()
 
@@ -75,6 +75,8 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
             error_message = f'{type(e).__name__}: {e}'
             res = extra_outputs_array + [f"<div class='error'>{html.escape(error_message)}</div>"]
 
+        devices.torch_gc()
+
         shared.state.skipped = False
         shared.state.interrupted = False
         shared.state.job_count = 0
diff --git a/modules/errors.py b/modules/errors.py
index 5271a9fe..dffabe45 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -14,7 +14,8 @@ def record_exception():
     if exception_records and exception_records[-1] == e:
         return
 
-    exception_records.append((e, tb))
+    from modules import sysinfo
+    exception_records.append(sysinfo.format_exception(e, tb))
 
     if len(exception_records) > 5:
         exception_records.pop(0)
diff --git a/modules/sysinfo.py b/modules/sysinfo.py
index 5f15ac4f..cf24c6dd 100644
--- a/modules/sysinfo.py
+++ b/modules/sysinfo.py
@@ -109,11 +109,15 @@ def format_traceback(tb):
     return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]
 
 
+def format_exception(e, tb):
+    return {"exception": str(e), "traceback": format_traceback(tb)}
+
+
 def get_exceptions():
     try:
         from modules import errors
 
-        return [{"exception": str(e), "traceback": format_traceback(tb)} for e, tb in reversed(errors.exception_records)]
+        return list(reversed(errors.exception_records))
    except Exception as e:
        return str(e)
--
cgit v1.2.1

From b235022c615a7384f73c05fe240d8f4a28d103d4 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 1 Aug 2023 00:24:48 +0300
Subject: option to keep multiple models in memory

---
 modules/lowvram.py              |   3 +
 modules/sd_hijack.py            |   6 +-
 modules/sd_hijack_inpainting.py |   5 +-
 modules/sd_models.py            | 136 +++++++++++++++++++++++++++++++++-------
 modules/sd_models_xl.py         |   8 +--
 modules/shared.py               |  12 +++-
 6 files changed, 135 insertions(+), 35 deletions(-)

diff --git a/modules/lowvram.py b/modules/lowvram.py
index 3f830664..96f52b7b 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -15,6 +15,9 @@ def send_everything_to_cpu():
 
 
 def setup_for_low_vram(sd_model, use_medvram):
+    if getattr(sd_model, 'lowvram', False):
+        return
+
     sd_model.lowvram = True
 
     parents = {}
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index cfa5f0eb..7d692e3c 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -30,8 +30,10 @@ ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.Cros
 ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
 
 # silence new console spam from SD2
-ldm.modules.attention.print = lambda *args: None
-ldm.modules.diffusionmodules.model.print = lambda *args: None
+ldm.modules.attention.print = shared.ldm_print
+ldm.modules.diffusionmodules.model.print = shared.ldm_print
+ldm.util.print = shared.ldm_print
+ldm.models.diffusion.ddpm.print = shared.ldm_print
 
 optimizers = []
 current_optimizer: sd_hijack_optimizations.SdOptimization = None
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py
index c1977b19..97350f4f 100644
--- a/modules/sd_hijack_inpainting.py
+++ b/modules/sd_hijack_inpainting.py
@@ -91,7 +91,4 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F
     return x_prev, pred_x0, e_t
 
 
-def do_inpainting_hijack():
-    # p_sample_plms is needed because PLMS can't work with dicts as conditionings
-
-    ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms
+ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms
diff --git a/modules/sd_models.py b/modules/sd_models.py
index acb1e817..77195f2f 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -15,7 +15,6 @@ import ldm.modules.midas as midas
 from ldm.util import instantiate_from_config
 
 from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl
-from modules.sd_hijack_inpainting import do_inpainting_hijack
 from modules.timer import Timer
 import tomesd
 
@@ -423,6 +422,7 @@ sdxl_refiner_clip_weight = 'conditioner.embedders.0.model.ln_final.weight'
 class SdModelData:
     def __init__(self):
         self.sd_model = None
+        self.loaded_sd_models = []
         self.was_loaded_at_least_once = False
         self.lock = threading.Lock()
 
@@ -437,6 +437,7 @@ class SdModelData:
 
                 try:
                     load_model()
+
                 except Exception as e:
                     errors.display(e, "loading stable diffusion model", full_traceback=True)
                     print("", file=sys.stderr)
@@ -448,11 +449,24 @@ class SdModelData:
 
     def set_sd_model(self, v):
         self.sd_model = v
 
+        try:
+            self.loaded_sd_models.remove(v)
+        except ValueError:
+            pass
+
+        if v is not None:
+            self.loaded_sd_models.insert(0, v)
+
 
 model_data = SdModelData()
 
 
 def get_empty_cond(sd_model):
+    from modules import extra_networks, processing
+
+    p = processing.StableDiffusionProcessingTxt2Img()
+    extra_networks.activate(p, {})
+
     if hasattr(sd_model, 'conditioner'):
         d = sd_model.get_learned_conditioning([""])
         return d['crossattn']
@@ -460,19 +474,43 @@ def get_empty_cond(sd_model):
         return sd_model.cond_stage_model([""])
 
 
+def send_model_to_cpu(m):
+    from modules import lowvram
+
+    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+        lowvram.send_everything_to_cpu()
+    else:
+        m.to(devices.cpu)
+
+    devices.torch_gc()
+
+
+def send_model_to_device(m):
+    from modules import lowvram
+
+    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+        lowvram.setup_for_low_vram(m, shared.cmd_opts.medvram)
+    else:
+        m.to(shared.device)
+
+
+def send_model_to_trash(m):
+    m.to(device="meta")
+    devices.torch_gc()
+
+
 def load_model(checkpoint_info=None, already_loaded_state_dict=None):
-    from modules import lowvram, sd_hijack
+    from modules import sd_hijack
     checkpoint_info = checkpoint_info or select_checkpoint()
 
+    timer = Timer()
+
     if model_data.sd_model:
-        sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
+        send_model_to_trash(model_data.sd_model)
         model_data.sd_model = None
-        gc.collect()
         devices.torch_gc()
 
-    do_inpainting_hijack()
-
-    timer = Timer()
+    timer.record("unload existing model")
 
     if already_loaded_state_dict is not None:
         state_dict = already_loaded_state_dict
@@ -512,12 +550,9 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
 
     with sd_disable_initialization.LoadStateDictOnMeta(state_dict, devices.cpu):
         load_model_weights(sd_model, checkpoint_info, state_dict, timer)
+    timer.record("load weights from state dict")
 
-    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
-        lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
-    else:
-        sd_model.to(shared.device)
-
+    send_model_to_device(sd_model)
     timer.record("move model to device")
 
     sd_hijack.model_hijack.hijack(sd_model)
@@ -525,7 +560,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
     timer.record("hijack")
 
     sd_model.eval()
-    model_data.sd_model = sd_model
+    model_data.set_sd_model(sd_model)
     model_data.was_loaded_at_least_once = True
 
     sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)  # Reload embeddings after model load as they may or may not fit the model
@@ -546,10 +581,61 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
     return sd_model
 
 
+def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
+    """
+    Checks if the desired checkpoint from checkpoint_info is not already loaded in model_data.loaded_sd_models.
+    If it is loaded, returns that (moving it to GPU if necessary, and moving the currently loaded model to CPU if necessary).
+    If not, returns the model that can be used to load weights from checkpoint_info's file.
+    If no such model exists, returns None.
+    Additionally deletes loaded models that are over the limit set in settings (sd_checkpoints_limit).
+    """
+
+    already_loaded = None
+    for i in reversed(range(len(model_data.loaded_sd_models))):
+        loaded_model = model_data.loaded_sd_models[i]
+        if loaded_model.sd_checkpoint_info.filename == checkpoint_info.filename:
+            already_loaded = loaded_model
+            continue
+
+        if len(model_data.loaded_sd_models) > shared.opts.sd_checkpoints_limit > 0:
+            print(f"Unloading model {len(model_data.loaded_sd_models)} over the limit of {shared.opts.sd_checkpoints_limit}: {loaded_model.sd_checkpoint_info.title}")
+            model_data.loaded_sd_models.pop()
+            send_model_to_trash(loaded_model)
+            timer.record("send model to trash")
+
+        if shared.opts.sd_checkpoints_keep_in_cpu:
+            send_model_to_cpu(sd_model)
+            timer.record("send model to cpu")
+
+    if already_loaded is not None:
+        send_model_to_device(already_loaded)
+        timer.record("send model to device")
+
+        model_data.set_sd_model(already_loaded)
+        print(f"Using already loaded model {already_loaded.sd_checkpoint_info.title}: done in {timer.summary()}")
+        return model_data.sd_model
+    elif shared.opts.sd_checkpoints_limit > 1 and len(model_data.loaded_sd_models) < shared.opts.sd_checkpoints_limit:
+        print(f"Loading model {checkpoint_info.title} ({len(model_data.loaded_sd_models) + 1} out of {shared.opts.sd_checkpoints_limit})")
+
+        model_data.sd_model = None
+        load_model(checkpoint_info)
+        return model_data.sd_model
+    elif len(model_data.loaded_sd_models) > 0:
+        sd_model = model_data.loaded_sd_models.pop()
+        model_data.sd_model = sd_model
+
+        print(f"Reusing loaded model {sd_model.sd_checkpoint_info.title} to load {checkpoint_info.title}")
+        return sd_model
+    else:
+        return None
+
+
 def reload_model_weights(sd_model=None, info=None):
-    from modules import lowvram, devices, sd_hijack
+    from modules import devices, sd_hijack
     checkpoint_info = info or select_checkpoint()
 
+    timer = Timer()
+
     if not sd_model:
         sd_model = model_data.sd_model
 
@@ -558,19 +644,17 @@ def reload_model_weights(sd_model=None, info=None):
     else:
         current_checkpoint_info = sd_model.sd_checkpoint_info
         if sd_model.sd_model_checkpoint == checkpoint_info.filename:
-            return
-
-    sd_unet.apply_unet("None")
+            return sd_model
 
-    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
-        lowvram.send_everything_to_cpu()
-    else:
-        sd_model.to(devices.cpu)
+    sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
+    if sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename:
+        return sd_model
 
+    if sd_model is not None:
+        sd_unet.apply_unet("None")
+        send_model_to_cpu(sd_model)
 
     sd_hijack.model_hijack.undo_hijack(sd_model)
 
-    timer = Timer()
-
     state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
 
     checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
@@ -578,7 +662,9 @@ def reload_model_weights(sd_model=None, info=None):
     timer.record("find config")
 
     if sd_model is None or checkpoint_config != sd_model.used_config:
-        del sd_model
+        if sd_model is not None:
+            send_model_to_trash(sd_model)
+
         load_model(checkpoint_info, already_loaded_state_dict=state_dict)
         return model_data.sd_model
 
@@ -601,6 +687,8 @@ def reload_model_weights(sd_model=None, info=None):
 
     print(f"Weights loaded in {timer.summary()}.")
 
+    model_data.set_sd_model(sd_model)
+
     return sd_model
 
diff --git a/modules/sd_models_xl.py b/modules/sd_models_xl.py
index bc219508..01123321 100644
--- a/modules/sd_models_xl.py
+++ b/modules/sd_models_xl.py
@@ -98,10 +98,10 @@ def extend_sdxl(model):
     model.conditioner.wrapped = torch.nn.Module()
 
 
-sgm.modules.attention.print = lambda *args: None
-sgm.modules.diffusionmodules.model.print = lambda *args: None
-sgm.modules.diffusionmodules.openaimodel.print = lambda *args: None
-sgm.modules.encoders.modules.print = lambda *args: None
+sgm.modules.attention.print = shared.ldm_print
+sgm.modules.diffusionmodules.model.print = shared.ldm_print
+sgm.modules.diffusionmodules.openaimodel.print = shared.ldm_print
+sgm.modules.encoders.modules.print = shared.ldm_print
 
 # this gets the code to load the vanilla attention that we override
 sgm.modules.attention.SDP_IS_AVAILABLE = True
diff --git a/modules/shared.py b/modules/shared.py
index aa72c9c8..0184fcd0 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -392,6 +392,7 @@ options_templates.update(options_section(('system', "System"), {
     "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
     "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""),
     "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"),
+    "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."),
 }))
 
 options_templates.update(options_section(('training', "Training"), {
@@ -411,7 +412,9 @@ options_templates.update(options_section(('training', "Training"), {
 
 options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
-    "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+    "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
+    "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
+    "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"),
     "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
     "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
     "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
@@ -889,3 +892,10 @@ def walk_files(path, allowed_extensions=None):
                 continue
 
             yield os.path.join(root, filename)
+
+
+def ldm_print(*args, **kwargs):
+    if opts.hide_ldm_prints:
+        return
+
+    print(*args, **kwargs)
--
cgit v1.2.1
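
The loaded_sd_models bookkeeping above amounts to most-recently-used ordering plus tail eviction. A toy version with strings standing in for models (simplified: the real code also skips the checkpoint being requested and moves evicted models to the meta device rather than just dropping them):

    loaded = []

    def set_current(model):
        if model in loaded:
            loaded.remove(model)
        loaded.insert(0, model)       # most recently used goes first

    def enforce_limit(limit):
        while len(loaded) > limit > 0:
            evicted = loaded.pop()    # send_model_to_trash() in the real code
            print("unloading", evicted)

    for name in ["sd15", "sdxl", "sd15", "inpaint"]:
        set_current(name)

    enforce_limit(2)                  # unloading sdxl
    print(loaded)                     # ['inpaint', 'sd15']
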
00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 00:38:34 +0300 Subject: repair PLMS --- modules/sd_hijack.py | 4 +++- modules/sd_hijack_inpainting.py | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 7d692e3c..9722c967 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -6,7 +6,7 @@ import modules.textual_inversion.textual_inversion from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts -from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr +from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, sd_hijack_inpainting import ldm.modules.attention import ldm.modules.diffusionmodules.model @@ -35,6 +35,8 @@ ldm.modules.diffusionmodules.model.print = shared.ldm_print ldm.util.print = shared.ldm_print ldm.models.diffusion.ddpm.print = shared.ldm_print +sd_hijack_inpainting.do_inpainting_hijack() + optimizers = [] current_optimizer: sd_hijack_optimizations.SdOptimization = None diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 97350f4f..2d44b856 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -91,4 +91,5 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F return x_prev, pred_x0, e_t -ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms +def do_inpainting_hijack(): + ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms -- cgit v1.2.1 From 2f1d5b6b04fd38b1fca1b0193b800533398d91ca Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Tue, 1 Aug 2023 11:20:59 +0800 Subject: attempt to fix workspace status when doing git clone --- modules/launch_utils.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index f77b577a..c7bb9370 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -139,6 +139,12 @@ def check_run_python(code: str) -> bool: return result.returncode == 0 +def git_fix_workspace(dir): + run(f'"{git}" -C "{dir}" fetch --refetch --no-auto-gc', f"Fetching all contents for {name}", f"Couldn't fetch {name}", live=True) + run(f'"{git}" -C "{dir}" gc --aggressive --prune=now', f"Pruning {name}", f"Couldn't prune {name}", live=True) + return + + def git_clone(url, dir, name, commithash=None): # TODO clone into temporary dir and move if successful @@ -151,7 +157,23 @@ def git_clone(url, dir, name, commithash=None): return run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") - run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) + + if commithash is not None: + try: + run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) + except RuntimeError: + print(f"Unable to checkout {name} with hash {commithash}, attempting autofix...") + git_fix_workspace(dir) + run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) + else: + try: + run(f'"{git}" -C "{dir}" reset 
--hard FETCH_HEAD', f"Checking out latest commit for {name}...", f"Couldn't checkout latest commit for {name}", live=True) + except RuntimeError: + print(f"Unable to checkout {name}, attempting autofix...") + git_fix_workspace(dir) + run(f'"{git}" -C "{dir}" reset --hard FETCH_HEAD', f"Checking out latest commit for {name}...", f"Couldn't checkout latest commit for {name}", live=True) + + return run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True) -- cgit v1.2.1 From 955542a6540e3c2c27b39dc515c0ee3f8044b57b Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Tue, 1 Aug 2023 11:24:54 +0800 Subject: also check on rev-parse --- modules/launch_utils.py | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index c7bb9370..87c577e0 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -152,27 +152,25 @@ def git_clone(url, dir, name, commithash=None): if commithash is None: return - current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}", live=False).strip() - if current_hash == commithash: - return + try: + current_hash = subprocess.check_output([git, "-C", dir, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() + if current_hash == commithash: + return + except RuntimeError: + print(f"Unable to determine {name}'s hash, attempting autofix...") + git_fix_workspace(dir) + current_hash = subprocess.check_output([git, "-C", dir, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() + if current_hash == commithash: + return run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") - if commithash is not None: - try: - run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) - except RuntimeError: - print(f"Unable to checkout {name} with hash {commithash}, attempting autofix...") - git_fix_workspace(dir) - run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) - else: - try: - run(f'"{git}" -C "{dir}" reset --hard FETCH_HEAD', f"Checking out latest commit for {name}...", f"Couldn't checkout latest commit for {name}", live=True) - except RuntimeError: - print(f"Unable to checkout {name}, attempting autofix...") - git_fix_workspace(dir) - run(f'"{git}" -C "{dir}" reset --hard FETCH_HEAD', f"Checking out latest commit for {name}...", f"Couldn't checkout latest commit for {name}", live=True) - + try: + run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) + except RuntimeError: + print(f"Unable to checkout {name} with hash {commithash}, attempting autofix...") + git_fix_workspace(dir) + run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) return -- cgit v1.2.1 From c46525b70b54e4f6eaa8326d20777ecbad959a20 Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Tue, 1 Aug 2023 11:26:17 +0800 Subject: fix exception --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py 
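A condensed, hypothetical sketch of the checkout-with-autofix flow the patches above converge on, using the final git_fix_workspace(dir, name) signature from the follow-up fix; run, git and git_fix_workspace are the launch_utils helpers shown in these diffs, and run raises RuntimeError when the underlying git command fails:

    from modules.launch_utils import run, git, git_fix_workspace

    def checkout_with_autofix(dir, name, commithash):
        # hypothetical helper: try the checkout once; on failure, repair the
        # workspace (refetch + aggressive gc) and retry a single time
        checkout_cmd = f'"{git}" -C "{dir}" checkout {commithash}'
        messages = (f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
        try:
            run(checkout_cmd, *messages, live=True)
        except RuntimeError:
            print(f"Unable to checkout {name} with hash {commithash}, attempting autofix...")
            git_fix_workspace(dir, name)
            run(checkout_cmd, *messages, live=True)
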
index 87c577e0..4be25990 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -156,7 +156,7 @@ def git_clone(url, dir, name, commithash=None): current_hash = subprocess.check_output([git, "-C", dir, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() if current_hash == commithash: return - except RuntimeError: + except Exception: print(f"Unable to determine {name}'s hash, attempting autofix...") git_fix_workspace(dir) current_hash = subprocess.check_output([git, "-C", dir, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() -- cgit v1.2.1 From 8b036d8a8253996f2a9c977bea63babbe59eb348 Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Tue, 1 Aug 2023 11:26:59 +0800 Subject: fix --- modules/launch_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 4be25990..7225af08 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -139,7 +139,7 @@ def check_run_python(code: str) -> bool: return result.returncode == 0 -def git_fix_workspace(dir): +def git_fix_workspace(dir, name): run(f'"{git}" -C "{dir}" fetch --refetch --no-auto-gc', f"Fetching all contents for {name}", f"Couldn't fetch {name}", live=True) run(f'"{git}" -C "{dir}" gc --aggressive --prune=now', f"Pruning {name}", f"Couldn't prune {name}", live=True) return @@ -158,7 +158,7 @@ def git_clone(url, dir, name, commithash=None): return except Exception: print(f"Unable to determine {name}'s hash, attempting autofix...") - git_fix_workspace(dir) + git_fix_workspace(dir, name) current_hash = subprocess.check_output([git, "-C", dir, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() if current_hash == commithash: return @@ -169,7 +169,7 @@ def git_clone(url, dir, name, commithash=None): run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) except RuntimeError: print(f"Unable to checkout {name} with hash {commithash}, attempting autofix...") - git_fix_workspace(dir) + git_fix_workspace(dir, name) run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) return -- cgit v1.2.1 From 4b43480fe8b65a3bd24dc9bc03a7e910c9b0314f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 07:08:11 +0300 Subject: show metadata for SD checkpoints in the extra networks UI --- modules/sd_models.py | 26 ++++++++++++++++---------- modules/ui_extra_networks_checkpoints.py | 1 + 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index acb1e817..1af7fd78 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -14,7 +14,7 @@ import ldm.modules.midas as midas from ldm.util import instantiate_from_config -from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl +from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache from modules.sd_hijack_inpainting import do_inpainting_hijack from modules.timer import Timer import tomesd @@ -33,6 +33,8 @@ class CheckpointInfo: self.filename = filename abspath = os.path.abspath(filename) + self.is_safetensors = 
os.path.splitext(filename)[1].lower() == ".safetensors" + if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir): name = abspath.replace(shared.cmd_opts.ckpt_dir, '') elif abspath.startswith(model_path): @@ -43,6 +45,19 @@ class CheckpointInfo: if name.startswith("\\") or name.startswith("/"): name = name[1:] + def read_metadata(): + metadata = read_metadata_from_safetensors(filename) + self.modelspec_thumbnail = metadata.pop('modelspec.thumbnail', None) + + return metadata + + self.metadata = {} + if self.is_safetensors: + try: + self.metadata = cache.cached_data_for_file('safetensors-metadata', "checkpoint/" + name, filename, read_metadata) + except Exception as e: + errors.display(e, f"reading metadata for {filename}") + self.name = name self.name_for_extra = os.path.splitext(os.path.basename(filename))[0] self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0] @@ -55,15 +70,6 @@ class CheckpointInfo: self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else []) - self.metadata = {} - - _, ext = os.path.splitext(self.filename) - if ext.lower() == ".safetensors": - try: - self.metadata = read_metadata_from_safetensors(filename) - except Exception as e: - errors.display(e, f"reading checkpoint metadata: {filename}") - def register(self): checkpoints_list[self.title] = self for id in self.ids: diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index 76780cfd..2bb0a222 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -23,6 +23,7 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""), "onclick": '"' + html.escape(f"""return selectCheckpoint({quote_js(name)})""") + '"', "local_preview": f"{path}.{shared.opts.samples_format}", + "metadata": checkpoint.metadata, "sort_keys": {'default': index, **self.get_sort_keys(checkpoint.filename)}, } -- cgit v1.2.1 From 2860c3be3e29d7f0a7d8e1ada6594078aee3aebb Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 07:10:42 +0300 Subject: add filename to to the table in user metadata editor --- modules/ui_extra_networks_user_metadata.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py index 63d4b503..1cb9eb6f 100644 --- a/modules/ui_extra_networks_user_metadata.py +++ b/modules/ui_extra_networks_user_metadata.py @@ -96,6 +96,7 @@ class UserMetadataEditor: stats = os.stat(filename) params = [ + ('Filename: ', os.path.basename(filename)), ('File size: ', sysinfo.pretty_bytes(stats.st_size)), ('Modified: ', datetime.datetime.fromtimestamp(stats.st_mtime).strftime('%Y-%m-%d %H:%M')), ] -- cgit v1.2.1 From c6b826d796d10e1428506dc988a940a184f7d09b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 07:15:15 +0300 Subject: Split history: mv modules/ui.py modules/ui_checkpoint_merger.py --- modules/ui.py | 1621 --------------------------------------- modules/ui_checkpoint_merger.py | 1621 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 1621 insertions(+), 1621 deletions(-) delete mode 100644 modules/ui.py create mode 100644 modules/ui_checkpoint_merger.py diff --git a/modules/ui.py b/modules/ui.py deleted file 
mode 100644 index 07ecee7b..00000000 --- a/modules/ui.py +++ /dev/null @@ -1,1621 +0,0 @@ -import datetime -import json -import mimetypes -import os -import sys -from functools import reduce -import warnings - -import gradio as gr -import gradio.utils -import numpy as np -from PIL import Image, PngImagePlugin # noqa: F401 -from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call - -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML -from modules.paths import script_path -from modules.ui_common import create_refresh_button -from modules.ui_gradio_extensions import reload_javascript - - -from modules.shared import opts, cmd_opts - -import modules.codeformer_model -import modules.generation_parameters_copypaste as parameters_copypaste -import modules.gfpgan_model -import modules.hypernetworks.ui -import modules.scripts -import modules.shared as shared -import modules.styles -import modules.textual_inversion.ui -from modules import prompt_parser -from modules.sd_hijack import model_hijack -from modules.sd_samplers import samplers, samplers_for_img2img -from modules.textual_inversion import textual_inversion -import modules.hypernetworks.ui -from modules.generation_parameters_copypaste import image_from_url_text -import modules.extras - -create_setting_component = ui_settings.create_setting_component - -warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) - -# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI -mimetypes.init() -mimetypes.add_type('application/javascript', '.js') - -if not cmd_opts.share and not cmd_opts.listen: - # fix gradio phoning home - gradio.utils.version_check = lambda: None - gradio.utils.get_local_ip_address = lambda: '127.0.0.1' - -if cmd_opts.ngrok is not None: - import modules.ngrok as ngrok - print('ngrok authtoken detected, trying to connect...') - ngrok.connect( - cmd_opts.ngrok, - cmd_opts.port if cmd_opts.port is not None else 7860, - cmd_opts.ngrok_options - ) - - -def gr_show(visible=True): - return {"visible": visible, "__type__": "update"} - - -sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" -sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None - -# Using constants for these since the variation selector isn't visible. -# Important that they exactly match script.js for tooltip to work. 
-random_symbol = '\U0001f3b2\ufe0f' # 🎲️ -reuse_symbol = '\u267b\ufe0f' # ♻️ -paste_symbol = '\u2199\ufe0f' # ↙ -refresh_symbol = '\U0001f504' # 🔄 -save_style_symbol = '\U0001f4be' # 💾 -apply_style_symbol = '\U0001f4cb' # 📋 -clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ -extra_networks_symbol = '\U0001F3B4' # 🎴 -switch_values_symbol = '\U000021C5' # ⇅ -restore_progress_symbol = '\U0001F300' # 🌀 -detect_image_size_symbol = '\U0001F4D0' # 📐 -up_down_symbol = '\u2195\ufe0f' # ↕️ - - -plaintext_to_html = ui_common.plaintext_to_html - - -def send_gradio_gallery_to_image(x): - if len(x) == 0: - return None - return image_from_url_text(x[0]) - - -def add_style(name: str, prompt: str, negative_prompt: str): - if name is None: - return [gr_show() for x in range(4)] - - style = modules.styles.PromptStyle(name, prompt, negative_prompt) - shared.prompt_styles.styles[style.name] = style - # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we - # reserialize all styles every time we save them - shared.prompt_styles.save_styles(shared.styles_filename) - - return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] - - -def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): - from modules import processing, devices - - if not enable: - return "" - - p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) - - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" - - -def resize_from_to_html(width, height, scale_by): - target_width = int(width * scale_by) - target_height = int(height * scale_by) - - if not target_width or not target_height: - return "no image selected" - - return f"resize: from {width}x{height} to {target_width}x{target_height}" - - -def apply_styles(prompt, prompt_neg, styles): - prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) - prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) - - return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] - - -def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): - if mode in {0, 1, 3, 4}: - return [interrogation_function(ii_singles[mode]), None] - elif mode == 2: - return [interrogation_function(ii_singles[mode]["image"]), None] - elif mode == 5: - assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" - images = shared.listfiles(ii_input_dir) - print(f"Will process {len(images)} images.") - if ii_output_dir != "": - os.makedirs(ii_output_dir, exist_ok=True) - else: - ii_output_dir = ii_input_dir - - for image in images: - img = Image.open(image) - filename = os.path.basename(image) - left, _ = os.path.splitext(filename) - print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) - - return [gr.update(), None] - - -def interrogate(image): - prompt = shared.interrogator.interrogate(image.convert("RGB")) - return gr.update() if prompt is None else prompt - - -def interrogate_deepbooru(image): - prompt = deepbooru.model.tag(image) - return gr.update() if prompt is None else prompt - - -def create_seed_inputs(target_interface): - with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") - seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - - seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) - - # Components to show/hide based on the 'Extra' checkbox - seed_extras = [] - - with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: - seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") - subseed.style(container=False) - random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") - reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") - - with FormRow(visible=False) as seed_extra_row_2: - seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") - - random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) - random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) - - def change_visibility(show): - return {comp: gr_show(show) for comp in seed_extras} - - seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) - - return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox - - - -def connect_clear_prompt(button): - """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" - button.click( - _js="clear_prompt", - fn=None, - inputs=[], - outputs=[], - ) - - -def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): - """ Connects a 'reuse (sub)seed' button's click event so that it copies last used - (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength - was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" - def copy_seed(gen_info_string: str, index): - res = -1 - - try: - gen_info = json.loads(gen_info_string) - index -= gen_info.get('index_of_first_image', 0) - - if is_subseed and gen_info.get('subseed_strength', 0) > 0: - all_subseeds = gen_info.get('all_subseeds', [-1]) - res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] - else: - all_seeds = gen_info.get('all_seeds', [-1]) - res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - - except json.decoder.JSONDecodeError: - if gen_info_string: - errors.report(f"Error parsing JSON generation info: {gen_info_string}") - - return [res, gr_show(False)] - - reuse_seed.click( - fn=copy_seed, - _js="(x, y) => [x, selected_gallery_index()]", - show_progress=False, - inputs=[generation_info, dummy_component], - outputs=[seed, dummy_component] - ) - - -def update_token_counter(text, steps): - try: - text, _ = extra_networks.parse_prompt(text) - - _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) - prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) - - except Exception: - # a parsing error can happen here during typing, and we don't want to bother the user with - # messages related to it in console - prompt_schedules = [[[steps, text]]] - - flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) - prompts = [prompt_text for step, prompt_text in flat_prompts] - token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) - return f"{token_count}/{max_length}" - - -def create_toprow(is_img2img): - id_part = "img2img" if is_img2img else "txt2img" - - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - button_interrogate = None - button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - - interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - with gr.Row(elem_id=f"{id_part}_tools"): - paste = ToolButton(value=paste_symbol, elem_id="paste") - clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") - prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") - save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) - - token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[prompt, negative_prompt], - outputs=[prompt, negative_prompt], - ) - - with gr.Row(elem_id=f"{id_part}_styles_row"): - prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) - create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button - - -def setup_progressbar(*args, **kwargs): - pass - - -def apply_setting(key, value): - if value is None: - return gr.update() - - if shared.cmd_opts.freeze_settings: - return gr.update() - - # dont allow model to be swapped when model hash exists in prompt - if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: - return gr.update() - - if key == "sd_model_checkpoint": - ckpt_info = sd_models.get_closet_checkpoint_match(value) - - if ckpt_info is not None: - value = ckpt_info.title - else: - return gr.update() - - comp_args = opts.data_labels[key].component_args - if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: - return - - valtype = type(opts.data_labels[key].default) - oldval = opts.data.get(key, None) - opts.data[key] = valtype(value) if valtype != type(None) else value - if oldval != value and opts.data_labels[key].onchange is not None: - opts.data_labels[key].onchange() - - opts.save(shared.config_filename) - return getattr(opts, key) - - -def create_output_panel(tabname, outdir): - return ui_common.create_output_panel(tabname, outdir) - - -def create_sampler_and_steps_selection(choices, tabname): - if opts.samplers_in_dropdown: - with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - else: - with FormGroup(elem_id=f"sampler_selection_{tabname}"): - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - - return steps, sampler_index - - -def ordered_ui_categories(): - user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} - - for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): - yield category - - -def create_override_settings_dropdown(tabname, row): - dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) - - dropdown.change( - fn=lambda x: gr.Dropdown.update(visible=bool(x)), - inputs=[dropdown], - outputs=[dropdown], - ) - - return dropdown - - -def create_ui(): - import modules.img2img - import modules.txt2img - - reload_javascript() - - parameters_copypaste.reset() - - modules.scripts.scripts_current = modules.scripts.scripts_txt2img - modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) - - with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) - - dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') - - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact', elem_id="txt2img_settings"): - modules.scripts.scripts_txt2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - - with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "cfg": - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) - - elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") - - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="txt2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('txt2img', row) - - elif category == "scripts": - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - - else: - modules.scripts.scripts_txt2img.setup_ui_for_section(category) - - hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] - - for component in hr_resolution_preview_inputs: - event = component.release if isinstance(component, gr.Slider) else component.change - - event( - fn=calc_resolution_hires, - inputs=hr_resolution_preview_inputs, - outputs=[hr_final_resolution], - show_progress=False, - ) - event( - None, - _js="onCalcResolutionHires", - inputs=hr_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - - txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - txt2img_args = dict( - fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), - _js="submit", - inputs=[ - dummy_component, - txt2img_prompt, - txt2img_negative_prompt, - txt2img_prompt_styles, - steps, - sampler_index, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - height, - width, - enable_hr, - denoising_strength, - hr_scale, - hr_upscaler, - hr_second_pass_steps, - hr_resize_x, - hr_resize_y, - hr_sampler_index, - hr_prompt, - hr_negative_prompt, - override_settings, - - ] + custom_inputs, - - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt2img_prompt.submit(**txt2img_args) - submit.click(**txt2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressTxt2img", - inputs=[dummy_component], - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - txt_prompt_img - ], - outputs=[ - txt2img_prompt, - txt_prompt_img - ], - show_progress=False, - ) - - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - - txt2img_paste_fields = [ - (txt2img_prompt, "Prompt"), - (txt2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (hr_scale, "Hires upscale"), - (hr_upscaler, "Hires upscaler"), - (hr_second_pass_steps, "Hires steps"), - (hr_resize_x, "Hires resize-1"), - (hr_resize_y, "Hires resize-2"), - (hr_sampler_index, "Hires sampler"), - (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), - (hr_prompt, "Hires prompt"), - (hr_negative_prompt, "Hires negative prompt"), - (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - *modules.scripts.scripts_txt2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, - )) - - txt2img_preview_params = [ - 
txt2img_prompt, - txt2img_negative_prompt, - steps, - sampler_index, - cfg_scale, - seed, - width, - height, - ] - - token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) - - modules.scripts.scripts_current = modules.scripts.scripts_img2img - modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) - - with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) - - img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') - - with FormRow().style(equal_height=False): - with gr.Column(variant='compact', elem_id="img2img_settings"): - copy_image_buttons = [] - copy_image_destinations = {} - - def add_copy_image_controls(tab_name, elem): - with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): - gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") - - for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): - if name == tab_name: - gr.Button(title, interactive=False) - copy_image_destinations[name] = elem - continue - - button = gr.Button(title) - copy_image_buttons.append((button, name, elem)) - - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." + - "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"<br>Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}</p>
" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - - modules.scripts.scripts_img2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") - - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - 
outputs=scale_by_html, - show_progress=False, - ) - - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) - - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) - tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "cfg": - with FormGroup(): - with FormRow(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="img2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('img2img', row) - - elif category == "scripts": - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() - - elif category == "inpaint": - with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: - with FormRow(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") - mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") - - with FormRow(): - inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - - with FormRow(): - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - - with FormRow(): - with gr.Column(): - inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") - - with gr.Column(scale=4): - inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) - else: - modules.scripts.scripts_img2img.setup_ui_for_section(category) - - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - img2img_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - img2img_prompt_img - ], - outputs=[ - img2img_prompt, - img2img_prompt_img - ], - show_progress=False, - ) - - img2img_args = dict( - fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), - _js="submit_img2img", - inputs=[ - dummy_component, - dummy_component, - img2img_prompt, - img2img_negative_prompt, - img2img_prompt_styles, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - inpaint_color_sketch_orig, - init_img_inpaint, - init_mask_inpaint, - steps, - sampler_index, - mask_blur, - mask_alpha, - inpainting_fill, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - image_cfg_scale, - denoising_strength, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - selected_scale_tab, - height, - width, - scale_by, - resize_mode, - inpaint_full_res, - inpaint_full_res_padding, - inpainting_mask_invert, - img2img_batch_input_dir, - img2img_batch_output_dir, - img2img_batch_inpaint_mask_dir, - override_settings, - img2img_batch_use_png_info, - img2img_batch_png_info_props, - img2img_batch_png_info_dir, - ] + custom_inputs, - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - interrogate_args = dict( - _js="get_img2img_tab_index", - inputs=[ - dummy_component, - img2img_batch_input_dir, - img2img_batch_output_dir, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - init_img_inpaint, - ], - outputs=[img2img_prompt, dummy_component], - ) - - img2img_prompt.submit(**img2img_args) - submit.click(**img2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) - - detect_image_size_btn.click( - fn=lambda w, h, _: (w or gr.update(), h or gr.update()), - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, dummy_component], - outputs=[width, height], - show_progress=False, - ) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressImg2img", - inputs=[dummy_component], - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - img2img_interrogate.click( - fn=lambda *args: process_interrogate(interrogate, *args), - **interrogate_args, - ) - - img2img_deepbooru.click( - fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), - **interrogate_args, - ) - - prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] - style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] - style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] - - for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): - button.click( - fn=add_style, - _js="ask_for_style_name", - # Have to pass empty dummy component here, because the JavaScript and Python function have to accept - # the same number of parameters, but we only know the style-name after the JavaScript prompt - inputs=[dummy_component, prompt, negative_prompt], - outputs=[txt2img_prompt_styles, img2img_prompt_styles], - ) - - for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): - button.click( - fn=apply_styles, - _js=js_func, - inputs=[prompt, negative_prompt, styles], - outputs=[prompt, negative_prompt, styles], - ) - - token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) - - img2img_paste_fields = [ - (img2img_prompt, "Prompt"), - (img2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (image_cfg_scale, "Image CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (mask_blur, "Mask blur"), - *modules.scripts.scripts_img2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) - parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, - )) - - modules.scripts.scripts_current = None - - with gr.Blocks(analytics_enabled=False) as extras_interface: - ui_postprocessing.create_ui() - - with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='panel'): - image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") - - with gr.Column(variant='panel'): - html = gr.HTML() - generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") - html2 = gr.HTML() - with gr.Row(): - buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) - - for tabname, button in buttons.items(): - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, - )) - - image.change( - fn=wrap_gradio_call(modules.extras.run_pnginfo), - inputs=[image], - outputs=[html, generation_info, html2], - ) - - 
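The *_paste_fields lists above pair each component either with an infotext key or with a callable that derives a component update from the parsed infotext dict. A minimal sketch of how such a list can be consumed; apply_paste_fields is a hypothetical helper, and the real wiring lives in modules/generation_parameters_copypaste.py:

    def apply_paste_fields(paste_fields, params):
        # params: dict parsed from a generation-info string
        updates = {}
        for component, key_or_fn in paste_fields:
            if callable(key_or_fn):
                updates[component] = key_or_fn(params)  # e.g. lambda d: "Denoising strength" in d
            elif key_or_fn in params:
                updates[component] = params[key_or_fn]  # e.g. "Mask blur" -> slider value
        return updates
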
def update_interp_description(value): - interp_description_css = "
<p style='margin-bottom: 2.5em'>{}</p>
" - interp_descriptions = { - "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), - "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), - "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") - } - return interp_descriptions[value] - - with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact'): - interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - - with FormRow(elem_id="modelmerger_models"): - primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") - create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - - secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") - create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - - tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") - create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - - custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") - interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") - interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) - - with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") - save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") - - with FormRow(): - with gr.Column(): - config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - - with gr.Column(): - with FormRow(): - bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") - create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") - - with FormRow(): - discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - - with gr.Row(): - modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') - - with gr.Column(variant='compact', elem_id="modelmerger_results_container"): - with gr.Group(elem_id="modelmerger_results_panel"): - modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) - - with gr.Blocks(analytics_enabled=False) as train_interface: - with gr.Row().style(equal_height=False): - gr.HTML(value="
<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>
") - - with gr.Row(variant="compact").style(equal_height=False): - with gr.Tabs(elem_id="train_tabs"): - - with gr.Tab(label="Create embedding", id="create_embedding"): - new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") - initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") - nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") - overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") - - with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): - new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") - new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") - new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") - new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") - new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") - new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") - overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - - with gr.Tab(label="Preprocess images", id="preprocess_images"): - process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") - process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") - - with gr.Row(): - process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") - process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") - process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") - process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") - process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") - process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") - - with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") - - with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") - process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - - with gr.Column(visible=False) as process_multicrop_col: - gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') - with gr.Row(): - process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") - process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") - with gr.Row(): - process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") - process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") - with gr.Row(): - process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") - process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") - run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") - - process_split.change( - fn=lambda show: gr_show(show), - inputs=[process_split], - outputs=[process_split_extra_row], - ) - - process_focal_crop.change( - fn=lambda show: gr_show(show), - inputs=[process_focal_crop], - outputs=[process_focal_crop_row], - ) - - process_multicrop.change( - fn=lambda show: gr_show(show), - inputs=[process_multicrop], - outputs=[process_multicrop_col], - ) - - def get_textual_inversion_template_names(): - return sorted(textual_inversion.textual_inversion_templates) - - with gr.Tab(label="Train", id="train"): - gr.HTML(value="
<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>
") - with FormRow(): - train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) - create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") - - with FormRow(): - embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") - hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - - with FormRow(): - clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) - clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) - - with FormRow(): - batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") - - dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") - log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") - - with FormRow(): - template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) - create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") - - training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") - training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") - varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") - steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") - - with FormRow(): - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") - - use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") - - save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") - preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") - - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") - - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") - - with gr.Row(): - train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") - interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") - train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") - - params = script_callbacks.UiTrainTabParams(txt2img_preview_params) - - script_callbacks.ui_train_tabs_callback(params) - - with gr.Column(elem_id='ti_gallery_container'): - ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) - gr.HTML(elem_id="ti_progress", value="") - ti_outcome = gr.HTML(elem_id="ti_error", value="") - - create_embedding.click( - fn=modules.textual_inversion.ui.create_embedding, - inputs=[ - new_embedding_name, - initialization_text, - nvpt, - overwrite_old_embedding, - ], - outputs=[ - train_embedding_name, - ti_output, - ti_outcome, - ] - ) - - create_hypernetwork.click( - fn=modules.hypernetworks.ui.create_hypernetwork, - inputs=[ - new_hypernetwork_name, - new_hypernetwork_sizes, - overwrite_old_hypernetwork, - new_hypernetwork_layer_structure, - new_hypernetwork_activation_func, - new_hypernetwork_initialization_option, - new_hypernetwork_add_layer_norm, - new_hypernetwork_use_dropout, - new_hypernetwork_dropout_structure - ], - outputs=[ - train_hypernetwork_name, - ti_output, - ti_outcome, - ] - ) - - run_preprocess.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - process_src, - process_dst, - process_width, - process_height, - preprocess_txt_action, - process_keep_original_size, - process_flip, - process_split, - process_caption, - process_caption_deepbooru, - process_split_threshold, - process_overlap_ratio, - process_focal_crop, - process_focal_crop_face_weight, - process_focal_crop_entropy_weight, - process_focal_crop_edges_weight, - process_focal_crop_debug, - process_multicrop, - process_multicrop_mindim, - process_multicrop_maxdim, - process_multicrop_minarea, - process_multicrop_maxarea, - process_multicrop_objective, - process_multicrop_threshold, - ], - outputs=[ - ti_output, - ti_outcome, - ], - ) - - train_embedding.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_embedding_name, - embedding_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - save_image_with_stored_embedding, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - 
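The training click handlers above all share one wiring pattern: a hidden dummy component is listed first so the _js callback, which runs in the browser before the Python function, can substitute a value for it (here the task id), and wrap_gradio_gpu_call routes the call through the shared GPU job queue. A minimal sketch of that pattern under gradio 3.x; run_job and the inline JS are illustrative stand-ins, not helpers from this patch:

import gradio as gr

def run_job(task_id, steps):
    # stand-in for a long-running training function; task_id arrives from the JS side
    return f"task {task_id}: trained for {steps} steps"

with gr.Blocks() as demo:
    dummy_component = gr.Label(visible=False)  # placeholder that the JS return value replaces
    steps = gr.Number(value=100)
    result = gr.Text()
    train = gr.Button("Train")
    # the _js function runs first in the browser; its return list becomes the Python inputs
    train.click(fn=run_job, _js="(d, s) => ['demo-task', s]", inputs=[dummy_component, steps], outputs=[result])

demo.launch()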
- train_hypernetwork.click( - fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_hypernetwork_name, - hypernetwork_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - - interrupt_training.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - interrupt_preprocessing.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) - - settings = ui_settings.UiSettings() - settings.create_ui(loadsave, dummy_component) - - interfaces = [ - (txt2img_interface, "txt2img", "txt2img"), - (img2img_interface, "img2img", "img2img"), - (extras_interface, "Extras", "extras"), - (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "train"), - ] - - interfaces += script_callbacks.ui_tabs_callback() - interfaces += [(settings.interface, "Settings", "settings")] - - extensions_interface = ui_extensions.create_ui() - interfaces += [(extensions_interface, "Extensions", "extensions")] - - shared.tab_names = [] - for _interface, label, _ifid in interfaces: - shared.tab_names.append(label) - - with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: - settings.add_quicksettings() - - parameters_copypaste.connect_paste_params_buttons() - - with gr.Tabs(elem_id="tabs") as tabs: - tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} - sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) - - for interface, label, ifid in sorted_interfaces: - if label in shared.opts.hidden_tabs: - continue - with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): - interface.render() - - for interface, _label, ifid in interfaces: - if ifid in ["extensions", "settings"]: - continue - - loadsave.add_block(interface, ifid) - - loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) - - loadsave.setup_ui() - - if os.path.exists(os.path.join(script_path, "notification.mp3")): - gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) - - footer = shared.html("footer.html") - footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") - gr.HTML(footer, elem_id="footer") - - settings.add_functionality(demo) - - update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - - def modelmerger(*args): - try: - results = modules.extras.run_modelmerger(*args) - except Exception as e: - errors.report("Error loading/saving model file", exc_info=True) - modules.sd_models.list_models() # to remove the potentially missing models from the list - return 
[*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] - return results - - modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) - modelmerger_merge.click( - fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), - _js='modelmerger', - inputs=[ - dummy_component, - primary_model_name, - secondary_model_name, - tertiary_model_name, - interp_method, - interp_amount, - save_as_half, - custom_name, - checkpoint_format, - config_source, - bake_in_vae, - discard_weights, - save_metadata, - ], - outputs=[ - primary_model_name, - secondary_model_name, - tertiary_model_name, - settings.component_dict['sd_model_checkpoint'], - modelmerger_result, - ] - ) - - loadsave.dump_defaults() - demo.ui_loadsave = loadsave - - # Required as a workaround for change() event not triggering when loading values from ui-config.json - interp_description.value = update_interp_description(interp_method.value) - - return demo - - -def versions_html(): - import torch - import launch - - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - commit = launch.commit_hash() - tag = launch.git_tag() - - if shared.xformers_available: - import xformers - xformers_version = xformers.__version__ - else: - xformers_version = "N/A" - - return f""" -version: <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/{commit}">{tag}</a> -&#x2000;&bull;&#x2000; -python: <span title="{sys.version}">{python_version}</span> -&#x2000;&bull;&#x2000; -torch: {getattr(torch, '__long_version__',torch.__version__)} -&#x2000;&bull;&#x2000; -xformers: {xformers_version} -&#x2000;&bull;&#x2000; -gradio: {gr.__version__} -&#x2000;&bull;&#x2000; -checkpoint: <a id="sd_checkpoint_hash">N/A</a> -""" - - -def setup_ui_api(app): - from pydantic import BaseModel, Field - from typing import List - - class QuicksettingsHint(BaseModel): - name: str = Field(title="Name of the quicksettings field") - label: str = Field(title="Label of the quicksettings field") - - def quicksettings_hint(): - return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] - - app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) - - app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) - - app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) - - def download_sysinfo(attachment=False): - from fastapi.responses import PlainTextResponse - - text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" - - return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) - - app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) - app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) - diff --git a/modules/ui_checkpoint_merger.py b/modules/ui_checkpoint_merger.py new file mode 100644 index 00000000..07ecee7b --- /dev/null +++ b/modules/ui_checkpoint_merger.py @@ -0,0 +1,1621 @@ +import datetime +import json +import mimetypes +import os +import sys +from functools import reduce +import warnings + +import gradio as gr +import gradio.utils +import numpy as np +from PIL import Image, PngImagePlugin # noqa: F401 +from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call + +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo +from modules.ui_components
import FormRow, FormGroup, ToolButton, FormHTML +from modules.paths import script_path +from modules.ui_common import create_refresh_button +from modules.ui_gradio_extensions import reload_javascript + + +from modules.shared import opts, cmd_opts + +import modules.codeformer_model +import modules.generation_parameters_copypaste as parameters_copypaste +import modules.gfpgan_model +import modules.hypernetworks.ui +import modules.scripts +import modules.shared as shared +import modules.styles +import modules.textual_inversion.ui +from modules import prompt_parser +from modules.sd_hijack import model_hijack +from modules.sd_samplers import samplers, samplers_for_img2img +from modules.textual_inversion import textual_inversion +import modules.hypernetworks.ui +from modules.generation_parameters_copypaste import image_from_url_text +import modules.extras + +create_setting_component = ui_settings.create_setting_component + +warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) + +# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI +mimetypes.init() +mimetypes.add_type('application/javascript', '.js') + +if not cmd_opts.share and not cmd_opts.listen: + # fix gradio phoning home + gradio.utils.version_check = lambda: None + gradio.utils.get_local_ip_address = lambda: '127.0.0.1' + +if cmd_opts.ngrok is not None: + import modules.ngrok as ngrok + print('ngrok authtoken detected, trying to connect...') + ngrok.connect( + cmd_opts.ngrok, + cmd_opts.port if cmd_opts.port is not None else 7860, + cmd_opts.ngrok_options + ) + + +def gr_show(visible=True): + return {"visible": visible, "__type__": "update"} + + +sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" +sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None + +# Using constants for these since the variation selector isn't visible. +# Important that they exactly match script.js for tooltip to work. 
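A note on the symbol constants that follow: each emoji is spelled with explicit \u/\U escapes rather than a literal glyph, which pins down the exact code-point sequence (including the U+FE0F variation selector) that script.js matches tooltips against. A quick illustrative check, not part of the patch:

# the escape form and the composed code points are the same Python string
random_symbol = '\U0001f3b2\ufe0f'  # same literal as in the listing below
assert random_symbol == chr(0x1F3B2) + chr(0xFE0F)
assert len(random_symbol) == 2  # base emoji plus variation selector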
+random_symbol = '\U0001f3b2\ufe0f' # 🎲️ +reuse_symbol = '\u267b\ufe0f' # ♻️ +paste_symbol = '\u2199\ufe0f' # ↙ +refresh_symbol = '\U0001f504' # 🔄 +save_style_symbol = '\U0001f4be' # 💾 +apply_style_symbol = '\U0001f4cb' # 📋 +clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ +extra_networks_symbol = '\U0001F3B4' # 🎴 +switch_values_symbol = '\U000021C5' # ⇅ +restore_progress_symbol = '\U0001F300' # 🌀 +detect_image_size_symbol = '\U0001F4D0' # 📐 +up_down_symbol = '\u2195\ufe0f' # ↕️ + + +plaintext_to_html = ui_common.plaintext_to_html + + +def send_gradio_gallery_to_image(x): + if len(x) == 0: + return None + return image_from_url_text(x[0]) + + +def add_style(name: str, prompt: str, negative_prompt: str): + if name is None: + return [gr_show() for x in range(4)] + + style = modules.styles.PromptStyle(name, prompt, negative_prompt) + shared.prompt_styles.styles[style.name] = style + # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we + # reserialize all styles every time we save them + shared.prompt_styles.save_styles(shared.styles_filename) + + return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] + + +def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): + from modules import processing, devices + + if not enable: + return "" + + p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) + + with devices.autocast(): + p.init([""], [0], [0]) + + return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" + + +def resize_from_to_html(width, height, scale_by): + target_width = int(width * scale_by) + target_height = int(height * scale_by) + + if not target_width or not target_height: + return "no image selected" + + return f"resize: from {width}x{height} to {target_width}x{target_height}" + + +def apply_styles(prompt, prompt_neg, styles): + prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) + prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) + + return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] + + +def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): + if mode in {0, 1, 3, 4}: + return [interrogation_function(ii_singles[mode]), None] + elif mode == 2: + return [interrogation_function(ii_singles[mode]["image"]), None] + elif mode == 5: + assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" + images = shared.listfiles(ii_input_dir) + print(f"Will process {len(images)} images.") + if ii_output_dir != "": + os.makedirs(ii_output_dir, exist_ok=True) + else: + ii_output_dir = ii_input_dir + + for image in images: + img = Image.open(image) + filename = os.path.basename(image) + left, _ = os.path.splitext(filename) + print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) + + return [gr.update(), None] + + +def interrogate(image): + prompt = shared.interrogator.interrogate(image.convert("RGB")) + return gr.update() if prompt is None else prompt + + +def interrogate_deepbooru(image): + prompt = deepbooru.model.tag(image) + return gr.update() if prompt is None else prompt + + +def create_seed_inputs(target_interface): + with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + seed.style(container=False) + random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') + reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') + + seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) + + # Components to show/hide based on the 'Extra' checkbox + seed_extras = [] + + with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: + seed_extras.append(seed_extra_row_1) + subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") + subseed.style(container=False) + random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") + reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") + + with FormRow(visible=False) as seed_extra_row_2: + seed_extras.append(seed_extra_row_2) + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") + + random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) + random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) + + def change_visibility(show): + return {comp: gr_show(show) for comp in seed_extras} + + seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) + + return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox + + + +def connect_clear_prompt(button): + """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" + button.click( + _js="clear_prompt", + fn=None, + inputs=[], + outputs=[], + ) + + +def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): + """ Connects a 'reuse (sub)seed' button's click event so that it copies last used + (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength + was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" + def copy_seed(gen_info_string: str, index): + res = -1 + + try: + gen_info = json.loads(gen_info_string) + index -= gen_info.get('index_of_first_image', 0) + + if is_subseed and gen_info.get('subseed_strength', 0) > 0: + all_subseeds = gen_info.get('all_subseeds', [-1]) + res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] + else: + all_seeds = gen_info.get('all_seeds', [-1]) + res = all_seeds[index if 0 <= index < len(all_seeds) else 0] + + except json.decoder.JSONDecodeError: + if gen_info_string: + errors.report(f"Error parsing JSON generation info: {gen_info_string}") + + return [res, gr_show(False)] + + reuse_seed.click( + fn=copy_seed, + _js="(x, y) => [x, selected_gallery_index()]", + show_progress=False, + inputs=[generation_info, dummy_component], + outputs=[seed, dummy_component] + ) + + +def update_token_counter(text, steps): + try: + text, _ = extra_networks.parse_prompt(text) + + _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) + prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) + + except Exception: + # a parsing error can happen here during typing, and we don't want to bother the user with + # messages related to it in console + prompt_schedules = [[[steps, text]]] + + flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) + prompts = [prompt_text for step, prompt_text in flat_prompts] + token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) + return f"{token_count}/{max_length}" + + +def create_toprow(is_img2img): + id_part = "img2img" if is_img2img else "txt2img" + + with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): + with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + button_interrogate = None + button_deepbooru = None + if is_img2img: + with gr.Column(scale=1, elem_classes="interrogate-col"): + button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") + button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + + with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): + with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): + interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") + skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") + submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + + skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) + + interrupt.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + with gr.Row(elem_id=f"{id_part}_tools"): + paste = ToolButton(value=paste_symbol, elem_id="paste") + clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") + extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") + prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") + save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") + restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) + + token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) + token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") + negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) + negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") + + clear_prompt_button.click( + fn=lambda *x: x, + _js="confirm_clear_prompt", + inputs=[prompt, negative_prompt], + outputs=[prompt, negative_prompt], + ) + + with gr.Row(elem_id=f"{id_part}_styles_row"): + prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) + create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") + + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + + +def setup_progressbar(*args, **kwargs): + pass + + +def apply_setting(key, value): + if value is None: + return gr.update() + + if shared.cmd_opts.freeze_settings: + return gr.update() + + # dont allow model to be swapped when model hash exists in prompt + if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: + return gr.update() + + if key == "sd_model_checkpoint": + ckpt_info = sd_models.get_closet_checkpoint_match(value) + + if ckpt_info is not None: + value = ckpt_info.title + else: + return gr.update() + + comp_args = opts.data_labels[key].component_args + if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: + return + + valtype = type(opts.data_labels[key].default) + oldval = opts.data.get(key, None) + opts.data[key] = valtype(value) if valtype != type(None) else value + if oldval != value and opts.data_labels[key].onchange is not None: + opts.data_labels[key].onchange() + + opts.save(shared.config_filename) + return getattr(opts, key) + + +def create_output_panel(tabname, outdir): + return ui_common.create_output_panel(tabname, outdir) + + +def create_sampler_and_steps_selection(choices, tabname): + if opts.samplers_in_dropdown: + with FormRow(elem_id=f"sampler_selection_{tabname}"): + sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + else: + with FormGroup(elem_id=f"sampler_selection_{tabname}"): + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + + return steps, sampler_index + + +def ordered_ui_categories(): + user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} + + for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): + yield category + + +def create_override_settings_dropdown(tabname, row): + dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) + + dropdown.change( + fn=lambda x: gr.Dropdown.update(visible=bool(x)), + inputs=[dropdown], + outputs=[dropdown], + ) + + return dropdown + + +def create_ui(): + import modules.img2img + import modules.txt2img + + reload_javascript() + + parameters_copypaste.reset() + + modules.scripts.scripts_current = modules.scripts.scripts_txt2img + modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) + + with gr.Blocks(analytics_enabled=False) as txt2img_interface: + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + + dummy_component = gr.Label(visible=False) + txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') + + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact', elem_id="txt2img_settings"): + modules.scripts.scripts_txt2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") + + with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "cfg": + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) + + elif category == "hires_fix": + with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: + hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") + + with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: + with gr.Column(scale=80): + with gr.Row(): + hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) + with gr.Column(scale=80): + with gr.Row(): + hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="txt2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('txt2img', row) + + elif category == "scripts": + with FormGroup(elem_id="txt2img_script_container"): + custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + + else: + modules.scripts.scripts_txt2img.setup_ui_for_section(category) + + hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] + + for component in hr_resolution_preview_inputs: + event = component.release if isinstance(component, gr.Slider) else component.change + + event( + fn=calc_resolution_hires, + inputs=hr_resolution_preview_inputs, + outputs=[hr_final_resolution], + show_progress=False, + ) + event( + None, + _js="onCalcResolutionHires", + inputs=hr_resolution_preview_inputs, + outputs=[], + show_progress=False, + ) + + txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + txt2img_args = dict( + fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), + _js="submit", + inputs=[ + dummy_component, + txt2img_prompt, + txt2img_negative_prompt, + txt2img_prompt_styles, + steps, + sampler_index, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + height, + width, + enable_hr, + denoising_strength, + hr_scale, + hr_upscaler, + hr_second_pass_steps, + hr_resize_x, + hr_resize_y, + hr_sampler_index, + hr_prompt, + hr_negative_prompt, + override_settings, + + ] + custom_inputs, + + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt2img_prompt.submit(**txt2img_args) + submit.click(**txt2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressTxt2img", + inputs=[dummy_component], + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + txt_prompt_img + ], + outputs=[ + txt2img_prompt, + txt_prompt_img + ], + show_progress=False, + ) + + enable_hr.change( + fn=lambda x: gr_show(x), + inputs=[enable_hr], + outputs=[hr_options], + show_progress = False, + ) + + txt2img_paste_fields = [ + (txt2img_prompt, "Prompt"), + (txt2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (enable_hr, lambda d: "Denoising strength" in d), + (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), + (hr_scale, "Hires upscale"), + (hr_upscaler, "Hires upscaler"), + (hr_second_pass_steps, "Hires steps"), + (hr_resize_x, "Hires resize-1"), + (hr_resize_y, "Hires resize-2"), + (hr_sampler_index, "Hires sampler"), + (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), + (hr_prompt, "Hires prompt"), + (hr_negative_prompt, "Hires negative prompt"), + (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), + *modules.scripts.scripts_txt2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, + )) + + txt2img_preview_params = [ + 
txt2img_prompt, + txt2img_negative_prompt, + steps, + sampler_index, + cfg_scale, + seed, + width, + height, + ] + + token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) + + modules.scripts.scripts_current = modules.scripts.scripts_img2img + modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) + + with gr.Blocks(analytics_enabled=False) as img2img_interface: + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + + img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') + + with FormRow().style(equal_height=False): + with gr.Column(variant='compact', elem_id="img2img_settings"): + copy_image_buttons = [] + copy_image_destinations = {} + + def add_copy_image_controls(tab_name, elem): + with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): + gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") + + for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): + if name == tab_name: + gr.Button(title, interactive=False) + copy_image_destinations[name] = elem + continue + + button = gr.Button(title) + copy_image_buttons.append((button, name, elem)) + + with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('img2img', init_img) + + with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('sketch', sketch) + + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('inpaint', init_img_with_mask) + + with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) + inpaint_color_sketch_orig = gr.State(None) + add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) + + def update_orig(image, state): + if image is not None: + same_size = state is not None and state.size == image.size + has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) + edited = same_size and has_exact_match + return image if not edited or state is None else state + + inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) + + with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: + init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") + + with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: + hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' + gr.HTML( + "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." + + "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + + f"<br>Add inpaint batch mask directory to enable inpaint batch processing." + f"{hidden}</p>
" + ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Accordion("PNG info", open=False): + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + + def copy_image(img): + if isinstance(img, dict) and 'image' in img: + return img['image'] + + return img + + for button, name, elem in copy_image_buttons: + button.click( + fn=copy_image, + inputs=[elem], + outputs=[copy_image_destinations[name]], + ) + button.click( + fn=lambda: None, + _js=f"switch_to_{name.replace(' ', '_')}", + inputs=[], + outputs=[], + ) + + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + + modules.scripts.scripts_img2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") + + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") + + on_change_args = dict( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + 
outputs=scale_by_html, + show_progress=False, + ) + + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) + + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) + tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "cfg": + with FormGroup(): + with FormRow(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="img2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('img2img', row) + + elif category == "scripts": + with FormGroup(elem_id="img2img_script_container"): + custom_inputs = modules.scripts.scripts_img2img.setup_ui() + + elif category == "inpaint": + with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: + with FormRow(): + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") + mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") + + with FormRow(): + inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") + + with FormRow(): + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") + + with FormRow(): + with gr.Column(): + inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") + + with gr.Column(scale=4): + inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") + + def select_img2img_tab(tab): + return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), + + for i, elem in enumerate(img2img_tabs): + elem.select( + fn=lambda tab=i: select_img2img_tab(tab), + inputs=[], + outputs=[inpaint_controls, mask_alpha], + ) + else: + modules.scripts.scripts_img2img.setup_ui_for_section(category) + + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + img2img_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + img2img_prompt_img + ], + outputs=[ + img2img_prompt, + img2img_prompt_img + ], + show_progress=False, + ) + + img2img_args = dict( + fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), + _js="submit_img2img", + inputs=[ + dummy_component, + dummy_component, + img2img_prompt, + img2img_negative_prompt, + img2img_prompt_styles, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + inpaint_color_sketch_orig, + init_img_inpaint, + init_mask_inpaint, + steps, + sampler_index, + mask_blur, + mask_alpha, + inpainting_fill, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + image_cfg_scale, + denoising_strength, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + selected_scale_tab, + height, + width, + scale_by, + resize_mode, + inpaint_full_res, + inpaint_full_res_padding, + inpainting_mask_invert, + img2img_batch_input_dir, + img2img_batch_output_dir, + img2img_batch_inpaint_mask_dir, + override_settings, + img2img_batch_use_png_info, + img2img_batch_png_info_props, + img2img_batch_png_info_dir, + ] + custom_inputs, + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + interrogate_args = dict( + _js="get_img2img_tab_index", + inputs=[ + dummy_component, + img2img_batch_input_dir, + img2img_batch_output_dir, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + init_img_inpaint, + ], + outputs=[img2img_prompt, dummy_component], + ) + + img2img_prompt.submit(**img2img_args) + submit.click(**img2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) + + detect_image_size_btn.click( + fn=lambda w, h, _: (w or gr.update(), h or gr.update()), + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, dummy_component], + outputs=[width, height], + show_progress=False, + ) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressImg2img", + inputs=[dummy_component], + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + img2img_interrogate.click( + fn=lambda *args: process_interrogate(interrogate, *args), + **interrogate_args, + ) + + img2img_deepbooru.click( + fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), + **interrogate_args, + ) + + prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] + style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] + style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] + + for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): + button.click( + fn=add_style, + _js="ask_for_style_name", + # Have to pass empty dummy component here, because the JavaScript and Python function have to accept + # the same number of parameters, but we only know the style-name after the JavaScript prompt + inputs=[dummy_component, prompt, negative_prompt], + outputs=[txt2img_prompt_styles, img2img_prompt_styles], + ) + + for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): + button.click( + fn=apply_styles, + _js=js_func, + inputs=[prompt, negative_prompt, styles], + outputs=[prompt, negative_prompt, styles], + ) + + token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) + + img2img_paste_fields = [ + (img2img_prompt, "Prompt"), + (img2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (image_cfg_scale, "Image CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (mask_blur, "Mask blur"), + *modules.scripts.scripts_img2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) + parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, + )) + + modules.scripts.scripts_current = None + + with gr.Blocks(analytics_enabled=False) as extras_interface: + ui_postprocessing.create_ui() + + with gr.Blocks(analytics_enabled=False) as pnginfo_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='panel'): + image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") + + with gr.Column(variant='panel'): + html = gr.HTML() + generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") + html2 = gr.HTML() + with gr.Row(): + buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) + + for tabname, button in buttons.items(): + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, + )) + + image.change( + fn=wrap_gradio_call(modules.extras.run_pnginfo), + inputs=[image], + outputs=[html, generation_info, html2], + ) + + 
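Aside: the image.change handler above routes the uploaded picture through modules.extras.run_pnginfo, which reads back the generation parameters the webui embeds in PNG text chunks when saving images. A minimal standalone sketch of the same idea, assuming the "parameters" chunk key the webui writes; the file name is illustrative:

from PIL import Image

def read_generation_info(path):
    # tEXt/iTXt chunks of a PNG surface in Image.info as a dict;
    # the webui stores its infotext under the "parameters" key.
    with Image.open(path) as im:
        return im.info.get("parameters")

# print(read_generation_info("00001-12345.png"))  # hypothetical output file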
def update_interp_description(value):
+        interp_description_css = "<p style='margin-bottom: 2.5em'>{}</p>"
+        interp_descriptions = {
+            "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."),
+            "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"),
+            "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M")
+        }
+        return interp_descriptions[value]
+
+    with gr.Blocks(analytics_enabled=False) as modelmerger_interface:
+        with gr.Row().style(equal_height=False):
+            with gr.Column(variant='compact'):
+                interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description")
+
+                with FormRow(elem_id="modelmerger_models"):
+                    primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)")
+                    create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A")
+
+                    secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)")
+                    create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B")
+
+                    tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)")
+                    create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C")
+
+                custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name")
+                interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount")
+                interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method")
+                interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description])
+
+                with FormRow():
+                    checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format")
+                    save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half")
+                    save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata")
+
+                with FormRow():
+                    with gr.Column():
+                        config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method")
+
+                    with gr.Column():
+                        with FormRow():
+                            bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae")
+                            create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae")
+
+                with FormRow():
+                    discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights")
+
+                with gr.Row():
+                    modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary')
+
+            with gr.Column(variant='compact', elem_id="modelmerger_results_container"):
+                with gr.Group(elem_id="modelmerger_results_panel"):
+                    modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False)
+
+    with gr.Blocks(analytics_enabled=False) as train_interface:
+        with gr.Row().style(equal_height=False):
+            gr.HTML(value="<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>")
+
+        with gr.Row(variant="compact").style(equal_height=False):
+            with gr.Tabs(elem_id="train_tabs"):
+
+                with gr.Tab(label="Create embedding", id="create_embedding"):
+                    new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name")
+                    initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text")
+                    nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt")
+                    overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding")
+
+                    with gr.Row():
+                        with gr.Column(scale=3):
+                            gr.HTML(value="")
+
+                        with gr.Column():
+                            create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding")
+
+                with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"):
+                    new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name")
+                    new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes")
+                    new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure")
+                    new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func")
+                    new_hypernetwork_initialization_option = gr.Dropdown(value="Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option")
+                    new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm")
+                    new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout")
+                    new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") + overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") + + with gr.Tab(label="Preprocess images", id="preprocess_images"): + process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") + process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") + process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") + process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") + preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") + + with gr.Row(): + process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") + process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") + process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") + process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") + process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") + process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") + process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") + + with gr.Row(visible=False) as process_split_extra_row: + process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") + process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") + + with gr.Row(visible=False) as process_focal_crop_row: + process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") + process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") + process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") + process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") + + with gr.Column(visible=False) as process_multicrop_col: + gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') + with gr.Row(): + process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") + process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") + with gr.Row(): + process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") + process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
+                        with gr.Row():
+                            process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
+                            process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
+
+                    with gr.Row():
+                        with gr.Column(scale=3):
+                            gr.HTML(value="")
+
+                        with gr.Column():
+                            with gr.Row():
+                                interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
+                                run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
+
+                    process_split.change(
+                        fn=lambda show: gr_show(show),
+                        inputs=[process_split],
+                        outputs=[process_split_extra_row],
+                    )
+
+                    process_focal_crop.change(
+                        fn=lambda show: gr_show(show),
+                        inputs=[process_focal_crop],
+                        outputs=[process_focal_crop_row],
+                    )
+
+                    process_multicrop.change(
+                        fn=lambda show: gr_show(show),
+                        inputs=[process_multicrop],
+                        outputs=[process_multicrop_col],
+                    )
+
+                def get_textual_inversion_template_names():
+                    return sorted(textual_inversion.textual_inversion_templates)
+
+                with gr.Tab(label="Train", id="train"):
+                    gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")
+                    with FormRow():
+                        train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
+                        create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
+
+                        train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks))
+                        create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name")
+
+                    with FormRow():
+                        embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
+                        hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")
+
+                    with FormRow():
+                        clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
+                        clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)
+
+                    with FormRow():
+                        batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
+                        gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")
+
+                    dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
+                    log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")
+
+                    with FormRow():
+                        template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names())
+                        create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refresh_train_template_file")
+
+                    training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
+                    training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
+                    varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize")
+                    steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")
+
+                    with FormRow():
+                        create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
+                        save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
+
+                    use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")
+
+                    save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
+                    preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") + + shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") + tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") + + latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") + + with gr.Row(): + train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") + interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") + train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") + + params = script_callbacks.UiTrainTabParams(txt2img_preview_params) + + script_callbacks.ui_train_tabs_callback(params) + + with gr.Column(elem_id='ti_gallery_container'): + ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) + gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) + gr.HTML(elem_id="ti_progress", value="") + ti_outcome = gr.HTML(elem_id="ti_error", value="") + + create_embedding.click( + fn=modules.textual_inversion.ui.create_embedding, + inputs=[ + new_embedding_name, + initialization_text, + nvpt, + overwrite_old_embedding, + ], + outputs=[ + train_embedding_name, + ti_output, + ti_outcome, + ] + ) + + create_hypernetwork.click( + fn=modules.hypernetworks.ui.create_hypernetwork, + inputs=[ + new_hypernetwork_name, + new_hypernetwork_sizes, + overwrite_old_hypernetwork, + new_hypernetwork_layer_structure, + new_hypernetwork_activation_func, + new_hypernetwork_initialization_option, + new_hypernetwork_add_layer_norm, + new_hypernetwork_use_dropout, + new_hypernetwork_dropout_structure + ], + outputs=[ + train_hypernetwork_name, + ti_output, + ti_outcome, + ] + ) + + run_preprocess.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + process_src, + process_dst, + process_width, + process_height, + preprocess_txt_action, + process_keep_original_size, + process_flip, + process_split, + process_caption, + process_caption_deepbooru, + process_split_threshold, + process_overlap_ratio, + process_focal_crop, + process_focal_crop_face_weight, + process_focal_crop_entropy_weight, + process_focal_crop_edges_weight, + process_focal_crop_debug, + process_multicrop, + process_multicrop_mindim, + process_multicrop_maxdim, + process_multicrop_minarea, + process_multicrop_maxarea, + process_multicrop_objective, + process_multicrop_threshold, + ], + outputs=[ + ti_output, + ti_outcome, + ], + ) + + train_embedding.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_embedding_name, + embedding_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + varsize, + steps, + clip_grad_mode, + clip_grad_value, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + use_weight, + create_image_every, + save_embedding_every, + template_file, + save_image_with_stored_embedding, + preview_from_txt2img, + *txt2img_preview_params, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + 
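Aside: the Interrupt buttons wired up around here do not kill the worker; shared.state.interrupt() merely raises a flag that the training loop polls between steps (and preprocessing checks between images). A rough sketch of that cooperative-cancellation pattern, with a stand-in State class rather than the webui's real one:

import threading
import time

class State:
    def __init__(self):
        self.interrupted = False

    def interrupt(self):
        self.interrupted = True

state = State()

def train(max_steps):
    for step in range(max_steps):
        if state.interrupted:   # checked once per step, so the loop
            print(f"stopped at step {step}")  # always stops at a clean boundary
            return
        time.sleep(0.01)        # stand-in for one optimization step

worker = threading.Thread(target=train, args=(1000,))
worker.start()
state.interrupt()               # what the button's click handler does
worker.join()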
+ train_hypernetwork.click( + fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_hypernetwork_name, + hypernetwork_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + varsize, + steps, + clip_grad_mode, + clip_grad_value, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + use_weight, + create_image_every, + save_embedding_every, + template_file, + preview_from_txt2img, + *txt2img_preview_params, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + + interrupt_training.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + interrupt_preprocessing.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) + + settings = ui_settings.UiSettings() + settings.create_ui(loadsave, dummy_component) + + interfaces = [ + (txt2img_interface, "txt2img", "txt2img"), + (img2img_interface, "img2img", "img2img"), + (extras_interface, "Extras", "extras"), + (pnginfo_interface, "PNG Info", "pnginfo"), + (modelmerger_interface, "Checkpoint Merger", "modelmerger"), + (train_interface, "Train", "train"), + ] + + interfaces += script_callbacks.ui_tabs_callback() + interfaces += [(settings.interface, "Settings", "settings")] + + extensions_interface = ui_extensions.create_ui() + interfaces += [(extensions_interface, "Extensions", "extensions")] + + shared.tab_names = [] + for _interface, label, _ifid in interfaces: + shared.tab_names.append(label) + + with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: + settings.add_quicksettings() + + parameters_copypaste.connect_paste_params_buttons() + + with gr.Tabs(elem_id="tabs") as tabs: + tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} + sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) + + for interface, label, ifid in sorted_interfaces: + if label in shared.opts.hidden_tabs: + continue + with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): + interface.render() + + for interface, _label, ifid in interfaces: + if ifid in ["extensions", "settings"]: + continue + + loadsave.add_block(interface, ifid) + + loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) + + loadsave.setup_ui() + + if os.path.exists(os.path.join(script_path, "notification.mp3")): + gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) + + footer = shared.html("footer.html") + footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") + gr.HTML(footer, elem_id="footer") + + settings.add_functionality(demo) + + update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") + settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + + def modelmerger(*args): + try: + results = modules.extras.run_modelmerger(*args) + except Exception as e: + errors.report("Error loading/saving model file", exc_info=True) + modules.sd_models.list_models() # to remove the potentially missing models from the list + return 
[*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] + return results + + modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) + modelmerger_merge.click( + fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), + _js='modelmerger', + inputs=[ + dummy_component, + primary_model_name, + secondary_model_name, + tertiary_model_name, + interp_method, + interp_amount, + save_as_half, + custom_name, + checkpoint_format, + config_source, + bake_in_vae, + discard_weights, + save_metadata, + ], + outputs=[ + primary_model_name, + secondary_model_name, + tertiary_model_name, + settings.component_dict['sd_model_checkpoint'], + modelmerger_result, + ] + ) + + loadsave.dump_defaults() + demo.ui_loadsave = loadsave + + # Required as a workaround for change() event not triggering when loading values from ui-config.json + interp_description.value = update_interp_description(interp_method.value) + + return demo + + +def versions_html(): + import torch + import launch + + python_version = ".".join([str(x) for x in sys.version_info[0:3]]) + commit = launch.commit_hash() + tag = launch.git_tag() + + if shared.xformers_available: + import xformers + xformers_version = xformers.__version__ + else: + xformers_version = "N/A" + + return f""" +version: {tag} + •  +python: {python_version} + •  +torch: {getattr(torch, '__long_version__',torch.__version__)} + •  +xformers: {xformers_version} + •  +gradio: {gr.__version__} + •  +checkpoint: N/A +""" + + +def setup_ui_api(app): + from pydantic import BaseModel, Field + from typing import List + + class QuicksettingsHint(BaseModel): + name: str = Field(title="Name of the quicksettings field") + label: str = Field(title="Label of the quicksettings field") + + def quicksettings_hint(): + return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] + + app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) + + app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) + + app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) + + def download_sysinfo(attachment=False): + from fastapi.responses import PlainTextResponse + + text = sysinfo.get() + filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" + + return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) + + app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) + app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) + -- cgit v1.2.1 From b98fa1c397a677c99f375982817d44bdecc3f813 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 07:15:15 +0300 Subject: Split history: mv modules/ui.py temp --- modules/ui.py | 1621 --------------------------------------------------------- temp | 1621 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1621 insertions(+), 1621 deletions(-) delete mode 100644 modules/ui.py create mode 100644 temp diff --git a/modules/ui.py b/modules/ui.py deleted file mode 100644 index 07ecee7b..00000000 --- a/modules/ui.py +++ /dev/null @@ -1,1621 +0,0 @@ -import datetime -import json -import mimetypes -import os -import sys -from functools import reduce -import warnings - -import gradio as gr 
-import gradio.utils -import numpy as np -from PIL import Image, PngImagePlugin # noqa: F401 -from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call - -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML -from modules.paths import script_path -from modules.ui_common import create_refresh_button -from modules.ui_gradio_extensions import reload_javascript - - -from modules.shared import opts, cmd_opts - -import modules.codeformer_model -import modules.generation_parameters_copypaste as parameters_copypaste -import modules.gfpgan_model -import modules.hypernetworks.ui -import modules.scripts -import modules.shared as shared -import modules.styles -import modules.textual_inversion.ui -from modules import prompt_parser -from modules.sd_hijack import model_hijack -from modules.sd_samplers import samplers, samplers_for_img2img -from modules.textual_inversion import textual_inversion -import modules.hypernetworks.ui -from modules.generation_parameters_copypaste import image_from_url_text -import modules.extras - -create_setting_component = ui_settings.create_setting_component - -warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) - -# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI -mimetypes.init() -mimetypes.add_type('application/javascript', '.js') - -if not cmd_opts.share and not cmd_opts.listen: - # fix gradio phoning home - gradio.utils.version_check = lambda: None - gradio.utils.get_local_ip_address = lambda: '127.0.0.1' - -if cmd_opts.ngrok is not None: - import modules.ngrok as ngrok - print('ngrok authtoken detected, trying to connect...') - ngrok.connect( - cmd_opts.ngrok, - cmd_opts.port if cmd_opts.port is not None else 7860, - cmd_opts.ngrok_options - ) - - -def gr_show(visible=True): - return {"visible": visible, "__type__": "update"} - - -sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" -sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None - -# Using constants for these since the variation selector isn't visible. -# Important that they exactly match script.js for tooltip to work. 
-random_symbol = '\U0001f3b2\ufe0f' # 🎲️ -reuse_symbol = '\u267b\ufe0f' # ♻️ -paste_symbol = '\u2199\ufe0f' # ↙ -refresh_symbol = '\U0001f504' # 🔄 -save_style_symbol = '\U0001f4be' # 💾 -apply_style_symbol = '\U0001f4cb' # 📋 -clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ -extra_networks_symbol = '\U0001F3B4' # 🎴 -switch_values_symbol = '\U000021C5' # ⇅ -restore_progress_symbol = '\U0001F300' # 🌀 -detect_image_size_symbol = '\U0001F4D0' # 📐 -up_down_symbol = '\u2195\ufe0f' # ↕️ - - -plaintext_to_html = ui_common.plaintext_to_html - - -def send_gradio_gallery_to_image(x): - if len(x) == 0: - return None - return image_from_url_text(x[0]) - - -def add_style(name: str, prompt: str, negative_prompt: str): - if name is None: - return [gr_show() for x in range(4)] - - style = modules.styles.PromptStyle(name, prompt, negative_prompt) - shared.prompt_styles.styles[style.name] = style - # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we - # reserialize all styles every time we save them - shared.prompt_styles.save_styles(shared.styles_filename) - - return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] - - -def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): - from modules import processing, devices - - if not enable: - return "" - - p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) - - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" - - -def resize_from_to_html(width, height, scale_by): - target_width = int(width * scale_by) - target_height = int(height * scale_by) - - if not target_width or not target_height: - return "no image selected" - - return f"resize: from {width}x{height} to {target_width}x{target_height}" - - -def apply_styles(prompt, prompt_neg, styles): - prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) - prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) - - return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] - - -def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): - if mode in {0, 1, 3, 4}: - return [interrogation_function(ii_singles[mode]), None] - elif mode == 2: - return [interrogation_function(ii_singles[mode]["image"]), None] - elif mode == 5: - assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" - images = shared.listfiles(ii_input_dir) - print(f"Will process {len(images)} images.") - if ii_output_dir != "": - os.makedirs(ii_output_dir, exist_ok=True) - else: - ii_output_dir = ii_input_dir - - for image in images: - img = Image.open(image) - filename = os.path.basename(image) - left, _ = os.path.splitext(filename) - print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) - - return [gr.update(), None] - - -def interrogate(image): - prompt = shared.interrogator.interrogate(image.convert("RGB")) - return gr.update() if prompt is None else prompt - - -def interrogate_deepbooru(image): - prompt = deepbooru.model.tag(image) - return gr.update() if prompt is None else prompt - - -def create_seed_inputs(target_interface): - with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") - seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - - seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) - - # Components to show/hide based on the 'Extra' checkbox - seed_extras = [] - - with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: - seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") - subseed.style(container=False) - random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") - reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") - - with FormRow(visible=False) as seed_extra_row_2: - seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") - - random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) - random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) - - def change_visibility(show): - return {comp: gr_show(show) for comp in seed_extras} - - seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) - - return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox - - - -def connect_clear_prompt(button): - """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" - button.click( - _js="clear_prompt", - fn=None, - inputs=[], - outputs=[], - ) - - -def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): - """ Connects a 'reuse (sub)seed' button's click event so that it copies last used - (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength - was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" - def copy_seed(gen_info_string: str, index): - res = -1 - - try: - gen_info = json.loads(gen_info_string) - index -= gen_info.get('index_of_first_image', 0) - - if is_subseed and gen_info.get('subseed_strength', 0) > 0: - all_subseeds = gen_info.get('all_subseeds', [-1]) - res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] - else: - all_seeds = gen_info.get('all_seeds', [-1]) - res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - - except json.decoder.JSONDecodeError: - if gen_info_string: - errors.report(f"Error parsing JSON generation info: {gen_info_string}") - - return [res, gr_show(False)] - - reuse_seed.click( - fn=copy_seed, - _js="(x, y) => [x, selected_gallery_index()]", - show_progress=False, - inputs=[generation_info, dummy_component], - outputs=[seed, dummy_component] - ) - - -def update_token_counter(text, steps): - try: - text, _ = extra_networks.parse_prompt(text) - - _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) - prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) - - except Exception: - # a parsing error can happen here during typing, and we don't want to bother the user with - # messages related to it in console - prompt_schedules = [[[steps, text]]] - - flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) - prompts = [prompt_text for step, prompt_text in flat_prompts] - token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) - return f"{token_count}/{max_length}" - - -def create_toprow(is_img2img): - id_part = "img2img" if is_img2img else "txt2img" - - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - button_interrogate = None - button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - - interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - with gr.Row(elem_id=f"{id_part}_tools"): - paste = ToolButton(value=paste_symbol, elem_id="paste") - clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") - prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") - save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) - - token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[prompt, negative_prompt], - outputs=[prompt, negative_prompt], - ) - - with gr.Row(elem_id=f"{id_part}_styles_row"): - prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) - create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button - - -def setup_progressbar(*args, **kwargs): - pass - - -def apply_setting(key, value): - if value is None: - return gr.update() - - if shared.cmd_opts.freeze_settings: - return gr.update() - - # dont allow model to be swapped when model hash exists in prompt - if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: - return gr.update() - - if key == "sd_model_checkpoint": - ckpt_info = sd_models.get_closet_checkpoint_match(value) - - if ckpt_info is not None: - value = ckpt_info.title - else: - return gr.update() - - comp_args = opts.data_labels[key].component_args - if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: - return - - valtype = type(opts.data_labels[key].default) - oldval = opts.data.get(key, None) - opts.data[key] = valtype(value) if valtype != type(None) else value - if oldval != value and opts.data_labels[key].onchange is not None: - opts.data_labels[key].onchange() - - opts.save(shared.config_filename) - return getattr(opts, key) - - -def create_output_panel(tabname, outdir): - return ui_common.create_output_panel(tabname, outdir) - - -def create_sampler_and_steps_selection(choices, tabname): - if opts.samplers_in_dropdown: - with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - else: - with FormGroup(elem_id=f"sampler_selection_{tabname}"): - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - - return steps, sampler_index - - -def ordered_ui_categories(): - user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} - - for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): - yield category - - -def create_override_settings_dropdown(tabname, row): - dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) - - dropdown.change( - fn=lambda x: gr.Dropdown.update(visible=bool(x)), - inputs=[dropdown], - outputs=[dropdown], - ) - - return dropdown - - -def create_ui(): - import modules.img2img - import modules.txt2img - - reload_javascript() - - parameters_copypaste.reset() - - modules.scripts.scripts_current = modules.scripts.scripts_txt2img - modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) - - with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) - - dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') - - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact', elem_id="txt2img_settings"): - modules.scripts.scripts_txt2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - - with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "cfg": - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) - - elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") - - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="txt2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('txt2img', row) - - elif category == "scripts": - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - - else: - modules.scripts.scripts_txt2img.setup_ui_for_section(category) - - hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] - - for component in hr_resolution_preview_inputs: - event = component.release if isinstance(component, gr.Slider) else component.change - - event( - fn=calc_resolution_hires, - inputs=hr_resolution_preview_inputs, - outputs=[hr_final_resolution], - show_progress=False, - ) - event( - None, - _js="onCalcResolutionHires", - inputs=hr_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - - txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - txt2img_args = dict( - fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), - _js="submit", - inputs=[ - dummy_component, - txt2img_prompt, - txt2img_negative_prompt, - txt2img_prompt_styles, - steps, - sampler_index, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - height, - width, - enable_hr, - denoising_strength, - hr_scale, - hr_upscaler, - hr_second_pass_steps, - hr_resize_x, - hr_resize_y, - hr_sampler_index, - hr_prompt, - hr_negative_prompt, - override_settings, - - ] + custom_inputs, - - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt2img_prompt.submit(**txt2img_args) - submit.click(**txt2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressTxt2img", - inputs=[dummy_component], - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - txt_prompt_img - ], - outputs=[ - txt2img_prompt, - txt_prompt_img - ], - show_progress=False, - ) - - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - - txt2img_paste_fields = [ - (txt2img_prompt, "Prompt"), - (txt2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (hr_scale, "Hires upscale"), - (hr_upscaler, "Hires upscaler"), - (hr_second_pass_steps, "Hires steps"), - (hr_resize_x, "Hires resize-1"), - (hr_resize_y, "Hires resize-2"), - (hr_sampler_index, "Hires sampler"), - (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), - (hr_prompt, "Hires prompt"), - (hr_negative_prompt, "Hires negative prompt"), - (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - *modules.scripts.scripts_txt2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, - )) - - txt2img_preview_params = [ - 
txt2img_prompt, - txt2img_negative_prompt, - steps, - sampler_index, - cfg_scale, - seed, - width, - height, - ] - - token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) - - modules.scripts.scripts_current = modules.scripts.scripts_img2img - modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) - - with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) - - img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') - - with FormRow().style(equal_height=False): - with gr.Column(variant='compact', elem_id="img2img_settings"): - copy_image_buttons = [] - copy_image_destinations = {} - - def add_copy_image_controls(tab_name, elem): - with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): - gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") - - for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): - if name == tab_name: - gr.Button(title, interactive=False) - copy_image_destinations[name] = elem - continue - - button = gr.Button(title) - copy_image_buttons.append((button, name, elem)) - - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "<p>Process images in a directory on the same machine where the server is running." + - "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"<br>Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}</p>
" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - - modules.scripts.scripts_img2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") - - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - 
outputs=scale_by_html, - show_progress=False, - ) - - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) - - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) - tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "cfg": - with FormGroup(): - with FormRow(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="img2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('img2img', row) - - elif category == "scripts": - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() - - elif category == "inpaint": - with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: - with FormRow(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") - mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") - - with FormRow(): - inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - - with FormRow(): - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - - with FormRow(): - with gr.Column(): - inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") - - with gr.Column(scale=4): - inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) - else: - modules.scripts.scripts_img2img.setup_ui_for_section(category) - - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - img2img_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - img2img_prompt_img - ], - outputs=[ - img2img_prompt, - img2img_prompt_img - ], - show_progress=False, - ) - - img2img_args = dict( - fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), - _js="submit_img2img", - inputs=[ - dummy_component, - dummy_component, - img2img_prompt, - img2img_negative_prompt, - img2img_prompt_styles, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - inpaint_color_sketch_orig, - init_img_inpaint, - init_mask_inpaint, - steps, - sampler_index, - mask_blur, - mask_alpha, - inpainting_fill, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - image_cfg_scale, - denoising_strength, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - selected_scale_tab, - height, - width, - scale_by, - resize_mode, - inpaint_full_res, - inpaint_full_res_padding, - inpainting_mask_invert, - img2img_batch_input_dir, - img2img_batch_output_dir, - img2img_batch_inpaint_mask_dir, - override_settings, - img2img_batch_use_png_info, - img2img_batch_png_info_props, - img2img_batch_png_info_dir, - ] + custom_inputs, - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - interrogate_args = dict( - _js="get_img2img_tab_index", - inputs=[ - dummy_component, - img2img_batch_input_dir, - img2img_batch_output_dir, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - init_img_inpaint, - ], - outputs=[img2img_prompt, dummy_component], - ) - - img2img_prompt.submit(**img2img_args) - submit.click(**img2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) - - detect_image_size_btn.click( - fn=lambda w, h, _: (w or gr.update(), h or gr.update()), - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, dummy_component], - outputs=[width, height], - show_progress=False, - ) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressImg2img", - inputs=[dummy_component], - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - img2img_interrogate.click( - fn=lambda *args: process_interrogate(interrogate, *args), - **interrogate_args, - ) - - img2img_deepbooru.click( - fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), - **interrogate_args, - ) - - prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] - style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] - style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] - - for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): - button.click( - fn=add_style, - _js="ask_for_style_name", - # Have to pass empty dummy component here, because the JavaScript and Python function have to accept - # the same number of parameters, but we only know the style-name after the JavaScript prompt - inputs=[dummy_component, prompt, negative_prompt], - outputs=[txt2img_prompt_styles, img2img_prompt_styles], - ) - - for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): - button.click( - fn=apply_styles, - _js=js_func, - inputs=[prompt, negative_prompt, styles], - outputs=[prompt, negative_prompt, styles], - ) - - token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) - - img2img_paste_fields = [ - (img2img_prompt, "Prompt"), - (img2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (image_cfg_scale, "Image CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (mask_blur, "Mask blur"), - *modules.scripts.scripts_img2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) - parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, - )) - - modules.scripts.scripts_current = None - - with gr.Blocks(analytics_enabled=False) as extras_interface: - ui_postprocessing.create_ui() - - with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='panel'): - image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") - - with gr.Column(variant='panel'): - html = gr.HTML() - generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") - html2 = gr.HTML() - with gr.Row(): - buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) - - for tabname, button in buttons.items(): - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, - )) - - image.change( - fn=wrap_gradio_call(modules.extras.run_pnginfo), - inputs=[image], - outputs=[html, generation_info, html2], - ) - - 
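The PNG Info tab wired above works because the webui writes generation parameters into a PNG tEXt chunk named "parameters" when it saves images; run_pnginfo reads that chunk back, and the registered paste buttons forward it to the other tabs. A minimal sketch of the read path using only Pillow (the function name is illustrative, not part of the webui):

    from PIL import Image

    def read_generation_parameters(path):
        # PIL exposes PNG text chunks through the .text mapping on PNG files;
        # the webui stores its infotext under the "parameters" key.
        with Image.open(path) as im:
            return im.text.get("parameters", "")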
def update_interp_description(value): - interp_description_css = "
<p style='margin-bottom: 2.5em'>{}</p>
" - interp_descriptions = { - "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), - "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), - "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") - } - return interp_descriptions[value] - - with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact'): - interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - - with FormRow(elem_id="modelmerger_models"): - primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") - create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - - secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") - create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - - tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") - create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - - custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") - interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") - interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) - - with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") - save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") - - with FormRow(): - with gr.Column(): - config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - - with gr.Column(): - with FormRow(): - bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") - create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") - - with FormRow(): - discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - - with gr.Row(): - modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') - - with gr.Column(variant='compact', elem_id="modelmerger_results_container"): - with gr.Group(elem_id="modelmerger_results_panel"): - modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) - - with gr.Blocks(analytics_enabled=False) as train_interface: - with gr.Row().style(equal_height=False): - gr.HTML(value="
<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>
") - - with gr.Row(variant="compact").style(equal_height=False): - with gr.Tabs(elem_id="train_tabs"): - - with gr.Tab(label="Create embedding", id="create_embedding"): - new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") - initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") - nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") - overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") - - with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): - new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") - new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") - new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") - new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") - new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") - new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") - overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - - with gr.Tab(label="Preprocess images", id="preprocess_images"): - process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") - process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") - - with gr.Row(): - process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") - process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") - process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") - process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") - process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") - process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") - - with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") - - with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") - process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - - with gr.Column(visible=False) as process_multicrop_col: - gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') - with gr.Row(): - process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") - process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") - with gr.Row(): - process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") - process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") - with gr.Row(): - process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") - process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") - run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") - - process_split.change( - fn=lambda show: gr_show(show), - inputs=[process_split], - outputs=[process_split_extra_row], - ) - - process_focal_crop.change( - fn=lambda show: gr_show(show), - inputs=[process_focal_crop], - outputs=[process_focal_crop_row], - ) - - process_multicrop.change( - fn=lambda show: gr_show(show), - inputs=[process_multicrop], - outputs=[process_multicrop_col], - ) - - def get_textual_inversion_template_names(): - return sorted(textual_inversion.textual_inversion_templates) - - with gr.Tab(label="Train", id="train"): - gr.HTML(value="
<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>
") - with FormRow(): - train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) - create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") - - with FormRow(): - embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") - hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - - with FormRow(): - clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) - clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) - - with FormRow(): - batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") - - dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") - log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") - - with FormRow(): - template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) - create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") - - training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") - training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") - varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") - steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") - - with FormRow(): - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") - - use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") - - save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") - preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") - - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") - - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") - - with gr.Row(): - train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") - interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") - train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") - - params = script_callbacks.UiTrainTabParams(txt2img_preview_params) - - script_callbacks.ui_train_tabs_callback(params) - - with gr.Column(elem_id='ti_gallery_container'): - ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) - gr.HTML(elem_id="ti_progress", value="") - ti_outcome = gr.HTML(elem_id="ti_error", value="") - - create_embedding.click( - fn=modules.textual_inversion.ui.create_embedding, - inputs=[ - new_embedding_name, - initialization_text, - nvpt, - overwrite_old_embedding, - ], - outputs=[ - train_embedding_name, - ti_output, - ti_outcome, - ] - ) - - create_hypernetwork.click( - fn=modules.hypernetworks.ui.create_hypernetwork, - inputs=[ - new_hypernetwork_name, - new_hypernetwork_sizes, - overwrite_old_hypernetwork, - new_hypernetwork_layer_structure, - new_hypernetwork_activation_func, - new_hypernetwork_initialization_option, - new_hypernetwork_add_layer_norm, - new_hypernetwork_use_dropout, - new_hypernetwork_dropout_structure - ], - outputs=[ - train_hypernetwork_name, - ti_output, - ti_outcome, - ] - ) - - run_preprocess.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - process_src, - process_dst, - process_width, - process_height, - preprocess_txt_action, - process_keep_original_size, - process_flip, - process_split, - process_caption, - process_caption_deepbooru, - process_split_threshold, - process_overlap_ratio, - process_focal_crop, - process_focal_crop_face_weight, - process_focal_crop_entropy_weight, - process_focal_crop_edges_weight, - process_focal_crop_debug, - process_multicrop, - process_multicrop_mindim, - process_multicrop_maxdim, - process_multicrop_minarea, - process_multicrop_maxarea, - process_multicrop_objective, - process_multicrop_threshold, - ], - outputs=[ - ti_output, - ti_outcome, - ], - ) - - train_embedding.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_embedding_name, - embedding_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - save_image_with_stored_embedding, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - 
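Both training buttons use the same calling convention seen throughout this file: a hidden dummy component pads the inputs so the _js function and the Python fn accept the same number of arguments, and the JS side may rewrite values (here, injecting the task id) before Python runs. A self-contained sketch of that contract, with illustrative names rather than the project's code:

    import gradio as gr

    with gr.Blocks() as demo:
        dummy = gr.Label(visible=False)  # pads arity so JS and Python signatures match
        name = gr.Textbox(label="Name")
        out = gr.Textbox(label="Result")

        def start(task_id, value):
            # task_id arrives already filled in by the JS side
            return f"{task_id}: started for {value}"

        gr.Button("Start").click(
            fn=start,
            _js="(id, v) => ['task-' + Date.now(), v]",  # rewrite args client-side
            inputs=[dummy, name],
            outputs=[out],
        )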
- train_hypernetwork.click( - fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_hypernetwork_name, - hypernetwork_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - - interrupt_training.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - interrupt_preprocessing.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) - - settings = ui_settings.UiSettings() - settings.create_ui(loadsave, dummy_component) - - interfaces = [ - (txt2img_interface, "txt2img", "txt2img"), - (img2img_interface, "img2img", "img2img"), - (extras_interface, "Extras", "extras"), - (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "train"), - ] - - interfaces += script_callbacks.ui_tabs_callback() - interfaces += [(settings.interface, "Settings", "settings")] - - extensions_interface = ui_extensions.create_ui() - interfaces += [(extensions_interface, "Extensions", "extensions")] - - shared.tab_names = [] - for _interface, label, _ifid in interfaces: - shared.tab_names.append(label) - - with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: - settings.add_quicksettings() - - parameters_copypaste.connect_paste_params_buttons() - - with gr.Tabs(elem_id="tabs") as tabs: - tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} - sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) - - for interface, label, ifid in sorted_interfaces: - if label in shared.opts.hidden_tabs: - continue - with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): - interface.render() - - for interface, _label, ifid in interfaces: - if ifid in ["extensions", "settings"]: - continue - - loadsave.add_block(interface, ifid) - - loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) - - loadsave.setup_ui() - - if os.path.exists(os.path.join(script_path, "notification.mp3")): - gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) - - footer = shared.html("footer.html") - footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") - gr.HTML(footer, elem_id="footer") - - settings.add_functionality(demo) - - update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - - def modelmerger(*args): - try: - results = modules.extras.run_modelmerger(*args) - except Exception as e: - errors.report("Error loading/saving model file", exc_info=True) - modules.sd_models.list_models() # to remove the potentially missing models from the list - return 
[*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] - return results - - modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) - modelmerger_merge.click( - fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), - _js='modelmerger', - inputs=[ - dummy_component, - primary_model_name, - secondary_model_name, - tertiary_model_name, - interp_method, - interp_amount, - save_as_half, - custom_name, - checkpoint_format, - config_source, - bake_in_vae, - discard_weights, - save_metadata, - ], - outputs=[ - primary_model_name, - secondary_model_name, - tertiary_model_name, - settings.component_dict['sd_model_checkpoint'], - modelmerger_result, - ] - ) - - loadsave.dump_defaults() - demo.ui_loadsave = loadsave - - # Required as a workaround for change() event not triggering when loading values from ui-config.json - interp_description.value = update_interp_description(interp_method.value) - - return demo - - -def versions_html(): - import torch - import launch - - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - commit = launch.commit_hash() - tag = launch.git_tag() - - if shared.xformers_available: - import xformers - xformers_version = xformers.__version__ - else: - xformers_version = "N/A" - - return f""" -version: {tag} - •  -python: {python_version} - •  -torch: {getattr(torch, '__long_version__',torch.__version__)} - •  -xformers: {xformers_version} - •  -gradio: {gr.__version__} - •  -checkpoint: N/A -""" - - -def setup_ui_api(app): - from pydantic import BaseModel, Field - from typing import List - - class QuicksettingsHint(BaseModel): - name: str = Field(title="Name of the quicksettings field") - label: str = Field(title="Label of the quicksettings field") - - def quicksettings_hint(): - return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] - - app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) - - app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) - - app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) - - def download_sysinfo(attachment=False): - from fastapi.responses import PlainTextResponse - - text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" - - return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) - - app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) - app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) - diff --git a/temp b/temp new file mode 100644 index 00000000..07ecee7b --- /dev/null +++ b/temp @@ -0,0 +1,1621 @@ +import datetime +import json +import mimetypes +import os +import sys +from functools import reduce +import warnings + +import gradio as gr +import gradio.utils +import numpy as np +from PIL import Image, PngImagePlugin # noqa: F401 +from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call + +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML +from modules.paths import script_path 
+from modules.ui_common import create_refresh_button +from modules.ui_gradio_extensions import reload_javascript + + +from modules.shared import opts, cmd_opts + +import modules.codeformer_model +import modules.generation_parameters_copypaste as parameters_copypaste +import modules.gfpgan_model +import modules.hypernetworks.ui +import modules.scripts +import modules.shared as shared +import modules.styles +import modules.textual_inversion.ui +from modules import prompt_parser +from modules.sd_hijack import model_hijack +from modules.sd_samplers import samplers, samplers_for_img2img +from modules.textual_inversion import textual_inversion +import modules.hypernetworks.ui +from modules.generation_parameters_copypaste import image_from_url_text +import modules.extras + +create_setting_component = ui_settings.create_setting_component + +warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) + +# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI +mimetypes.init() +mimetypes.add_type('application/javascript', '.js') + +if not cmd_opts.share and not cmd_opts.listen: + # fix gradio phoning home + gradio.utils.version_check = lambda: None + gradio.utils.get_local_ip_address = lambda: '127.0.0.1' + +if cmd_opts.ngrok is not None: + import modules.ngrok as ngrok + print('ngrok authtoken detected, trying to connect...') + ngrok.connect( + cmd_opts.ngrok, + cmd_opts.port if cmd_opts.port is not None else 7860, + cmd_opts.ngrok_options + ) + + +def gr_show(visible=True): + return {"visible": visible, "__type__": "update"} + + +sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" +sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None + +# Using constants for these since the variation selector isn't visible. +# Important that they exactly match script.js for tooltip to work. 
+random_symbol = '\U0001f3b2\ufe0f' # 🎲️ +reuse_symbol = '\u267b\ufe0f' # ♻️ +paste_symbol = '\u2199\ufe0f' # ↙ +refresh_symbol = '\U0001f504' # 🔄 +save_style_symbol = '\U0001f4be' # 💾 +apply_style_symbol = '\U0001f4cb' # 📋 +clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ +extra_networks_symbol = '\U0001F3B4' # 🎴 +switch_values_symbol = '\U000021C5' # ⇅ +restore_progress_symbol = '\U0001F300' # 🌀 +detect_image_size_symbol = '\U0001F4D0' # 📐 +up_down_symbol = '\u2195\ufe0f' # ↕️ + + +plaintext_to_html = ui_common.plaintext_to_html + + +def send_gradio_gallery_to_image(x): + if len(x) == 0: + return None + return image_from_url_text(x[0]) + + +def add_style(name: str, prompt: str, negative_prompt: str): + if name is None: + return [gr_show() for x in range(4)] + + style = modules.styles.PromptStyle(name, prompt, negative_prompt) + shared.prompt_styles.styles[style.name] = style + # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we + # reserialize all styles every time we save them + shared.prompt_styles.save_styles(shared.styles_filename) + + return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] + + +def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): + from modules import processing, devices + + if not enable: + return "" + + p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) + + with devices.autocast(): + p.init([""], [0], [0]) + + return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" + + +def resize_from_to_html(width, height, scale_by): + target_width = int(width * scale_by) + target_height = int(height * scale_by) + + if not target_width or not target_height: + return "no image selected" + + return f"resize: from {width}x{height} to {target_width}x{target_height}" + + +def apply_styles(prompt, prompt_neg, styles): + prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) + prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) + + return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] + + +def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): + if mode in {0, 1, 3, 4}: + return [interrogation_function(ii_singles[mode]), None] + elif mode == 2: + return [interrogation_function(ii_singles[mode]["image"]), None] + elif mode == 5: + assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" + images = shared.listfiles(ii_input_dir) + print(f"Will process {len(images)} images.") + if ii_output_dir != "": + os.makedirs(ii_output_dir, exist_ok=True) + else: + ii_output_dir = ii_input_dir + + for image in images: + img = Image.open(image) + filename = os.path.basename(image) + left, _ = os.path.splitext(filename) + print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) + + return [gr.update(), None] + + +def interrogate(image): + prompt = shared.interrogator.interrogate(image.convert("RGB")) + return gr.update() if prompt is None else prompt + + +def interrogate_deepbooru(image): + prompt = deepbooru.model.tag(image) + return gr.update() if prompt is None else prompt + + +def create_seed_inputs(target_interface): + with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + seed.style(container=False) + random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') + reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') + + seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) + + # Components to show/hide based on the 'Extra' checkbox + seed_extras = [] + + with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: + seed_extras.append(seed_extra_row_1) + subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") + subseed.style(container=False) + random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") + reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") + + with FormRow(visible=False) as seed_extra_row_2: + seed_extras.append(seed_extra_row_2) + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") + + random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) + random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) + + def change_visibility(show): + return {comp: gr_show(show) for comp in seed_extras} + + seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) + + return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox + + + +def connect_clear_prompt(button): + """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" + button.click( + _js="clear_prompt", + fn=None, + inputs=[], + outputs=[], + ) + + +def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): + """ Connects a 'reuse (sub)seed' button's click event so that it copies last used + (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength + was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" + def copy_seed(gen_info_string: str, index): + res = -1 + + try: + gen_info = json.loads(gen_info_string) + index -= gen_info.get('index_of_first_image', 0) + + if is_subseed and gen_info.get('subseed_strength', 0) > 0: + all_subseeds = gen_info.get('all_subseeds', [-1]) + res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] + else: + all_seeds = gen_info.get('all_seeds', [-1]) + res = all_seeds[index if 0 <= index < len(all_seeds) else 0] + + except json.decoder.JSONDecodeError: + if gen_info_string: + errors.report(f"Error parsing JSON generation info: {gen_info_string}") + + return [res, gr_show(False)] + + reuse_seed.click( + fn=copy_seed, + _js="(x, y) => [x, selected_gallery_index()]", + show_progress=False, + inputs=[generation_info, dummy_component], + outputs=[seed, dummy_component] + ) + + +def update_token_counter(text, steps): + try: + text, _ = extra_networks.parse_prompt(text) + + _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) + prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) + + except Exception: + # a parsing error can happen here during typing, and we don't want to bother the user with + # messages related to it in console + prompt_schedules = [[[steps, text]]] + + flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) + prompts = [prompt_text for step, prompt_text in flat_prompts] + token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) + return f"{token_count}/{max_length}" + + +def create_toprow(is_img2img): + id_part = "img2img" if is_img2img else "txt2img" + + with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): + with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + button_interrogate = None + button_deepbooru = None + if is_img2img: + with gr.Column(scale=1, elem_classes="interrogate-col"): + button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") + button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + + with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): + with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): + interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") + skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") + submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + + skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) + + interrupt.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + with gr.Row(elem_id=f"{id_part}_tools"): + paste = ToolButton(value=paste_symbol, elem_id="paste") + clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") + extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") + prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") + save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") + restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) + + token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) + token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") + negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) + negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") + + clear_prompt_button.click( + fn=lambda *x: x, + _js="confirm_clear_prompt", + inputs=[prompt, negative_prompt], + outputs=[prompt, negative_prompt], + ) + + with gr.Row(elem_id=f"{id_part}_styles_row"): + prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) + create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") + + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + + +def setup_progressbar(*args, **kwargs): + pass + + +def apply_setting(key, value): + if value is None: + return gr.update() + + if shared.cmd_opts.freeze_settings: + return gr.update() + + # dont allow model to be swapped when model hash exists in prompt + if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: + return gr.update() + + if key == "sd_model_checkpoint": + ckpt_info = sd_models.get_closet_checkpoint_match(value) + + if ckpt_info is not None: + value = ckpt_info.title + else: + return gr.update() + + comp_args = opts.data_labels[key].component_args + if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: + return + + valtype = type(opts.data_labels[key].default) + oldval = opts.data.get(key, None) + opts.data[key] = valtype(value) if valtype != type(None) else value + if oldval != value and opts.data_labels[key].onchange is not None: + opts.data_labels[key].onchange() + + opts.save(shared.config_filename) + return getattr(opts, key) + + +def create_output_panel(tabname, outdir): + return ui_common.create_output_panel(tabname, outdir) + + +def create_sampler_and_steps_selection(choices, tabname): + if opts.samplers_in_dropdown: + with FormRow(elem_id=f"sampler_selection_{tabname}"): + sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + else: + with FormGroup(elem_id=f"sampler_selection_{tabname}"): + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + + return steps, sampler_index + + +def ordered_ui_categories(): + user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} + + for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): + yield category + + +def create_override_settings_dropdown(tabname, row): + dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) + + dropdown.change( + fn=lambda x: gr.Dropdown.update(visible=bool(x)), + inputs=[dropdown], + outputs=[dropdown], + ) + + return dropdown + + +def create_ui(): + import modules.img2img + import modules.txt2img + + reload_javascript() + + parameters_copypaste.reset() + + modules.scripts.scripts_current = modules.scripts.scripts_txt2img + modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) + + with gr.Blocks(analytics_enabled=False) as txt2img_interface: + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + + dummy_component = gr.Label(visible=False) + txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') + + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact', elem_id="txt2img_settings"): + modules.scripts.scripts_txt2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") + + with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "cfg": + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) + + elif category == "hires_fix": + with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: + hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") + + with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: + with gr.Column(scale=80): + with gr.Row(): + hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) + with gr.Column(scale=80): + with gr.Row(): + hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="txt2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('txt2img', row) + + elif category == "scripts": + with FormGroup(elem_id="txt2img_script_container"): + custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + + else: + modules.scripts.scripts_txt2img.setup_ui_for_section(category) + + hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] + + for component in hr_resolution_preview_inputs: + event = component.release if isinstance(component, gr.Slider) else component.change + + event( + fn=calc_resolution_hires, + inputs=hr_resolution_preview_inputs, + outputs=[hr_final_resolution], + show_progress=False, + ) + event( + None, + _js="onCalcResolutionHires", + inputs=hr_resolution_preview_inputs, + outputs=[], + show_progress=False, + ) + + txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + txt2img_args = dict( + fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), + _js="submit", + inputs=[ + dummy_component, + txt2img_prompt, + txt2img_negative_prompt, + txt2img_prompt_styles, + steps, + sampler_index, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + height, + width, + enable_hr, + denoising_strength, + hr_scale, + hr_upscaler, + hr_second_pass_steps, + hr_resize_x, + hr_resize_y, + hr_sampler_index, + hr_prompt, + hr_negative_prompt, + override_settings, + + ] + custom_inputs, + + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt2img_prompt.submit(**txt2img_args) + submit.click(**txt2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressTxt2img", + inputs=[dummy_component], + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + txt_prompt_img + ], + outputs=[ + txt2img_prompt, + txt_prompt_img + ], + show_progress=False, + ) + + enable_hr.change( + fn=lambda x: gr_show(x), + inputs=[enable_hr], + outputs=[hr_options], + show_progress = False, + ) + + txt2img_paste_fields = [ + (txt2img_prompt, "Prompt"), + (txt2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (enable_hr, lambda d: "Denoising strength" in d), + (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), + (hr_scale, "Hires upscale"), + (hr_upscaler, "Hires upscaler"), + (hr_second_pass_steps, "Hires steps"), + (hr_resize_x, "Hires resize-1"), + (hr_resize_y, "Hires resize-2"), + (hr_sampler_index, "Hires sampler"), + (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), + (hr_prompt, "Hires prompt"), + (hr_negative_prompt, "Hires negative prompt"), + (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), + *modules.scripts.scripts_txt2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, + )) + + txt2img_preview_params = [ + 
txt2img_prompt, + txt2img_negative_prompt, + steps, + sampler_index, + cfg_scale, + seed, + width, + height, + ] + + token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) + + modules.scripts.scripts_current = modules.scripts.scripts_img2img + modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) + + with gr.Blocks(analytics_enabled=False) as img2img_interface: + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + + img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') + + with FormRow().style(equal_height=False): + with gr.Column(variant='compact', elem_id="img2img_settings"): + copy_image_buttons = [] + copy_image_destinations = {} + + def add_copy_image_controls(tab_name, elem): + with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): + gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") + + for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): + if name == tab_name: + gr.Button(title, interactive=False) + copy_image_destinations[name] = elem + continue + + button = gr.Button(title) + copy_image_buttons.append((button, name, elem)) + + with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('img2img', init_img) + + with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('sketch', sketch) + + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('inpaint', init_img_with_mask) + + with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) + inpaint_color_sketch_orig = gr.State(None) + add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) + + def update_orig(image, state): + if image is not None: + same_size = state is not None and state.size == image.size + has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) + edited = same_size and has_exact_match + return image if not edited or state is None else state + + inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) + + with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: + init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") + + with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: + hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' + gr.HTML( + "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." + + "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + + f"<br>Add inpaint batch mask directory to enable inpaint batch processing." + f"{hidden}</p>
" + ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Accordion("PNG info", open=False): + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + + def copy_image(img): + if isinstance(img, dict) and 'image' in img: + return img['image'] + + return img + + for button, name, elem in copy_image_buttons: + button.click( + fn=copy_image, + inputs=[elem], + outputs=[copy_image_destinations[name]], + ) + button.click( + fn=lambda: None, + _js=f"switch_to_{name.replace(' ', '_')}", + inputs=[], + outputs=[], + ) + + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + + modules.scripts.scripts_img2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") + + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") + + on_change_args = dict( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + 
outputs=scale_by_html, + show_progress=False, + ) + + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) + + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) + tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "cfg": + with FormGroup(): + with FormRow(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="img2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('img2img', row) + + elif category == "scripts": + with FormGroup(elem_id="img2img_script_container"): + custom_inputs = modules.scripts.scripts_img2img.setup_ui() + + elif category == "inpaint": + with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: + with FormRow(): + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") + mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") + + with FormRow(): + inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") + + with FormRow(): + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") + + with FormRow(): + with gr.Column(): + inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") + + with gr.Column(scale=4): + inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") + + def select_img2img_tab(tab): + return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), + + for i, elem in enumerate(img2img_tabs): + elem.select( + fn=lambda tab=i: select_img2img_tab(tab), + inputs=[], + outputs=[inpaint_controls, mask_alpha], + ) + else: + modules.scripts.scripts_img2img.setup_ui_for_section(category) + + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + img2img_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + img2img_prompt_img + ], + outputs=[ + img2img_prompt, + img2img_prompt_img + ], + show_progress=False, + ) + + img2img_args = dict( + fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), + _js="submit_img2img", + inputs=[ + dummy_component, + dummy_component, + img2img_prompt, + img2img_negative_prompt, + img2img_prompt_styles, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + inpaint_color_sketch_orig, + init_img_inpaint, + init_mask_inpaint, + steps, + sampler_index, + mask_blur, + mask_alpha, + inpainting_fill, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + image_cfg_scale, + denoising_strength, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + selected_scale_tab, + height, + width, + scale_by, + resize_mode, + inpaint_full_res, + inpaint_full_res_padding, + inpainting_mask_invert, + img2img_batch_input_dir, + img2img_batch_output_dir, + img2img_batch_inpaint_mask_dir, + override_settings, + img2img_batch_use_png_info, + img2img_batch_png_info_props, + img2img_batch_png_info_dir, + ] + custom_inputs, + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + interrogate_args = dict( + _js="get_img2img_tab_index", + inputs=[ + dummy_component, + img2img_batch_input_dir, + img2img_batch_output_dir, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + init_img_inpaint, + ], + outputs=[img2img_prompt, dummy_component], + ) + + img2img_prompt.submit(**img2img_args) + submit.click(**img2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) + + detect_image_size_btn.click( + fn=lambda w, h, _: (w or gr.update(), h or gr.update()), + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, dummy_component], + outputs=[width, height], + show_progress=False, + ) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressImg2img", + inputs=[dummy_component], + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + img2img_interrogate.click( + fn=lambda *args: process_interrogate(interrogate, *args), + **interrogate_args, + ) + + img2img_deepbooru.click( + fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), + **interrogate_args, + ) + + prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] + style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] + style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] + + for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): + button.click( + fn=add_style, + _js="ask_for_style_name", + # Have to pass empty dummy component here, because the JavaScript and Python function have to accept + # the same number of parameters, but we only know the style-name after the JavaScript prompt + inputs=[dummy_component, prompt, negative_prompt], + outputs=[txt2img_prompt_styles, img2img_prompt_styles], + ) + + for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): + button.click( + fn=apply_styles, + _js=js_func, + inputs=[prompt, negative_prompt, styles], + outputs=[prompt, negative_prompt, styles], + ) + + token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) + + img2img_paste_fields = [ + (img2img_prompt, "Prompt"), + (img2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (image_cfg_scale, "Image CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (mask_blur, "Mask blur"), + *modules.scripts.scripts_img2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) + parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, + )) + + modules.scripts.scripts_current = None + + with gr.Blocks(analytics_enabled=False) as extras_interface: + ui_postprocessing.create_ui() + + with gr.Blocks(analytics_enabled=False) as pnginfo_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='panel'): + image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") + + with gr.Column(variant='panel'): + html = gr.HTML() + generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") + html2 = gr.HTML() + with gr.Row(): + buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) + + for tabname, button in buttons.items(): + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, + )) + + image.change( + fn=wrap_gradio_call(modules.extras.run_pnginfo), + inputs=[image], + outputs=[html, generation_info, html2], + ) + + 
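The PNG Info tab's paste buttons above only register a ParamBinding; the actual click handlers are attached later, once every destination tab exists, by parameters_copypaste.connect_paste_params_buttons() (called further down in create_ui). A minimal sketch of that deferred-registration pattern, assuming a simplified ParamBinding record (the real dataclass in modules/generation_parameters_copypaste.py carries additional fields such as override_settings_component and paste_field_names):

    from dataclasses import dataclass
    from typing import Any, Optional

    @dataclass
    class ParamBinding:
        # simplified sketch; field names mirror the keyword arguments used above
        paste_button: Any                             # gr.Button that triggers the paste
        tabname: str                                  # destination tab: "txt2img", "img2img", ...
        source_text_component: Optional[Any] = None   # textbox whose infotext gets parsed
        source_image_component: Optional[Any] = None  # image whose embedded PNG info is read instead

    registered_param_bindings = []

    def register_paste_params_button(binding: ParamBinding):
        # registration is deferred: bindings are collected while the UI is being
        # built and wired to click events only after all tabs have been created
        registered_param_bindings.append(binding)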
def update_interp_description(value): + interp_description_css = "
<p style='margin-bottom: 2.5em'>{}</p>
" + interp_descriptions = { + "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), + "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), + "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") + } + return interp_descriptions[value] + + with gr.Blocks(analytics_enabled=False) as modelmerger_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact'): + interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") + + with FormRow(elem_id="modelmerger_models"): + primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") + create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") + + secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") + create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") + + tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") + create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") + + custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") + interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") + interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") + interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) + + with FormRow(): + checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") + save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") + + with FormRow(): + with gr.Column(): + config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") + + with gr.Column(): + with FormRow(): + bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") + create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") + + with FormRow(): + discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") + + with gr.Row(): + modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') + + with gr.Column(variant='compact', elem_id="modelmerger_results_container"): + with gr.Group(elem_id="modelmerger_results_panel"): + modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) + + with gr.Blocks(analytics_enabled=False) as train_interface: + with gr.Row().style(equal_height=False): + gr.HTML(value="
<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>
") + + with gr.Row(variant="compact").style(equal_height=False): + with gr.Tabs(elem_id="train_tabs"): + + with gr.Tab(label="Create embedding", id="create_embedding"): + new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") + initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") + nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") + overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") + + with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): + new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") + new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") + new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") + new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") + new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") + new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") + new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") + new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") + overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") + + with gr.Tab(label="Preprocess images", id="preprocess_images"): + process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") + process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") + process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") + process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") + preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") + + with gr.Row(): + process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") + process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") + process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") + process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") + process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") + process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") + process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") + + with gr.Row(visible=False) as process_split_extra_row: + process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") + process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") + + with gr.Row(visible=False) as process_focal_crop_row: + process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") + process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") + process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") + process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") + + with gr.Column(visible=False) as process_multicrop_col: + gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') + with gr.Row(): + process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") + process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") + with gr.Row(): + process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") + process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") + with gr.Row(): + process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") + process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + with gr.Row(): + interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") + run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") + + process_split.change( + fn=lambda show: gr_show(show), + inputs=[process_split], + outputs=[process_split_extra_row], + ) + + process_focal_crop.change( + fn=lambda show: gr_show(show), + inputs=[process_focal_crop], + outputs=[process_focal_crop_row], + ) + + process_multicrop.change( + fn=lambda show: gr_show(show), + inputs=[process_multicrop], + outputs=[process_multicrop_col], + ) + + def get_textual_inversion_template_names(): + return sorted(textual_inversion.textual_inversion_templates) + + with gr.Tab(label="Train", id="train"): + gr.HTML(value="
<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>
") + with FormRow(): + train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) + create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") + + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") + + with FormRow(): + embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") + hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") + + with FormRow(): + clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) + clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) + + with FormRow(): + batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") + gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") + + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") + log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") + + with FormRow(): + template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) + create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") + + training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") + training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") + varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") + steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") + + with FormRow(): + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") + save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + + use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") + + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") + preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") + + shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") + tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") + + latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") + + with gr.Row(): + train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") + interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") + train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") + + params = script_callbacks.UiTrainTabParams(txt2img_preview_params) + + script_callbacks.ui_train_tabs_callback(params) + + with gr.Column(elem_id='ti_gallery_container'): + ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) + gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) + gr.HTML(elem_id="ti_progress", value="") + ti_outcome = gr.HTML(elem_id="ti_error", value="") + + create_embedding.click( + fn=modules.textual_inversion.ui.create_embedding, + inputs=[ + new_embedding_name, + initialization_text, + nvpt, + overwrite_old_embedding, + ], + outputs=[ + train_embedding_name, + ti_output, + ti_outcome, + ] + ) + + create_hypernetwork.click( + fn=modules.hypernetworks.ui.create_hypernetwork, + inputs=[ + new_hypernetwork_name, + new_hypernetwork_sizes, + overwrite_old_hypernetwork, + new_hypernetwork_layer_structure, + new_hypernetwork_activation_func, + new_hypernetwork_initialization_option, + new_hypernetwork_add_layer_norm, + new_hypernetwork_use_dropout, + new_hypernetwork_dropout_structure + ], + outputs=[ + train_hypernetwork_name, + ti_output, + ti_outcome, + ] + ) + + run_preprocess.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + process_src, + process_dst, + process_width, + process_height, + preprocess_txt_action, + process_keep_original_size, + process_flip, + process_split, + process_caption, + process_caption_deepbooru, + process_split_threshold, + process_overlap_ratio, + process_focal_crop, + process_focal_crop_face_weight, + process_focal_crop_entropy_weight, + process_focal_crop_edges_weight, + process_focal_crop_debug, + process_multicrop, + process_multicrop_mindim, + process_multicrop_maxdim, + process_multicrop_minarea, + process_multicrop_maxarea, + process_multicrop_objective, + process_multicrop_threshold, + ], + outputs=[ + ti_output, + ti_outcome, + ], + ) + + train_embedding.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_embedding_name, + embedding_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + varsize, + steps, + clip_grad_mode, + clip_grad_value, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + use_weight, + create_image_every, + save_embedding_every, + template_file, + save_image_with_stored_embedding, + preview_from_txt2img, + *txt2img_preview_params, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + 
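Both training buttons above (and the hypernetwork one that follows) route through wrap_gradio_gpu_call, which runs the job under the GPU queue and, on failure, pads the return value with extra_outputs so gradio still receives the expected number of outputs. A rough sketch of the idea, assuming a heavily simplified wrapper (the real implementation in modules/call_queue.py also updates shared.state and the pending-task queue for progress tracking):

    import html
    import traceback

    def wrap_gradio_gpu_call(func, extra_outputs=None):
        # simplified sketch: convert exceptions into displayable outputs
        # instead of letting the traceback disappear into the server log
        def wrapped(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                error = html.escape(traceback.format_exc())
                # pad with extra_outputs so the arity still matches the
                # outputs= list declared on the click event
                return tuple(list(extra_outputs or []) + [f"<pre>{error}</pre>"])
        return wrapped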
+ train_hypernetwork.click( + fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_hypernetwork_name, + hypernetwork_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + varsize, + steps, + clip_grad_mode, + clip_grad_value, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + use_weight, + create_image_every, + save_embedding_every, + template_file, + preview_from_txt2img, + *txt2img_preview_params, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + + interrupt_training.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + interrupt_preprocessing.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) + + settings = ui_settings.UiSettings() + settings.create_ui(loadsave, dummy_component) + + interfaces = [ + (txt2img_interface, "txt2img", "txt2img"), + (img2img_interface, "img2img", "img2img"), + (extras_interface, "Extras", "extras"), + (pnginfo_interface, "PNG Info", "pnginfo"), + (modelmerger_interface, "Checkpoint Merger", "modelmerger"), + (train_interface, "Train", "train"), + ] + + interfaces += script_callbacks.ui_tabs_callback() + interfaces += [(settings.interface, "Settings", "settings")] + + extensions_interface = ui_extensions.create_ui() + interfaces += [(extensions_interface, "Extensions", "extensions")] + + shared.tab_names = [] + for _interface, label, _ifid in interfaces: + shared.tab_names.append(label) + + with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: + settings.add_quicksettings() + + parameters_copypaste.connect_paste_params_buttons() + + with gr.Tabs(elem_id="tabs") as tabs: + tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} + sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) + + for interface, label, ifid in sorted_interfaces: + if label in shared.opts.hidden_tabs: + continue + with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): + interface.render() + + for interface, _label, ifid in interfaces: + if ifid in ["extensions", "settings"]: + continue + + loadsave.add_block(interface, ifid) + + loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) + + loadsave.setup_ui() + + if os.path.exists(os.path.join(script_path, "notification.mp3")): + gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) + + footer = shared.html("footer.html") + footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") + gr.HTML(footer, elem_id="footer") + + settings.add_functionality(demo) + + update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") + settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + + def modelmerger(*args): + try: + results = modules.extras.run_modelmerger(*args) + except Exception as e: + errors.report("Error loading/saving model file", exc_info=True) + modules.sd_models.list_models() # to remove the potentially missing models from the list + return 
[*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] + return results + + modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) + modelmerger_merge.click( + fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), + _js='modelmerger', + inputs=[ + dummy_component, + primary_model_name, + secondary_model_name, + tertiary_model_name, + interp_method, + interp_amount, + save_as_half, + custom_name, + checkpoint_format, + config_source, + bake_in_vae, + discard_weights, + save_metadata, + ], + outputs=[ + primary_model_name, + secondary_model_name, + tertiary_model_name, + settings.component_dict['sd_model_checkpoint'], + modelmerger_result, + ] + ) + + loadsave.dump_defaults() + demo.ui_loadsave = loadsave + + # Required as a workaround for change() event not triggering when loading values from ui-config.json + interp_description.value = update_interp_description(interp_method.value) + + return demo + + +def versions_html(): + import torch + import launch + + python_version = ".".join([str(x) for x in sys.version_info[0:3]]) + commit = launch.commit_hash() + tag = launch.git_tag() + + if shared.xformers_available: + import xformers + xformers_version = xformers.__version__ + else: + xformers_version = "N/A" + + return f""" +version: {tag} + •  +python: {python_version} + •  +torch: {getattr(torch, '__long_version__',torch.__version__)} + •  +xformers: {xformers_version} + •  +gradio: {gr.__version__} + •  +checkpoint: N/A +""" + + +def setup_ui_api(app): + from pydantic import BaseModel, Field + from typing import List + + class QuicksettingsHint(BaseModel): + name: str = Field(title="Name of the quicksettings field") + label: str = Field(title="Label of the quicksettings field") + + def quicksettings_hint(): + return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] + + app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) + + app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) + + app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) + + def download_sysinfo(attachment=False): + from fastapi.responses import PlainTextResponse + + text = sysinfo.get() + filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" + + return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) + + app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) + app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) + -- cgit v1.2.1 From 00429544905aaea124fee29b87e3a3b9469c64b0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 07:15:16 +0300 Subject: Split history: mv temp modules/ui.py --- modules/ui.py | 1621 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ temp | 1621 --------------------------------------------------------- 2 files changed, 1621 insertions(+), 1621 deletions(-) create mode 100644 modules/ui.py delete mode 100644 temp diff --git a/modules/ui.py b/modules/ui.py new file mode 100644 index 00000000..07ecee7b --- /dev/null +++ b/modules/ui.py @@ -0,0 +1,1621 @@ +import datetime +import json +import mimetypes +import os +import sys +from functools import reduce +import warnings + +import gradio as gr +import 
gradio.utils +import numpy as np +from PIL import Image, PngImagePlugin # noqa: F401 +from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call + +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML +from modules.paths import script_path +from modules.ui_common import create_refresh_button +from modules.ui_gradio_extensions import reload_javascript + + +from modules.shared import opts, cmd_opts + +import modules.codeformer_model +import modules.generation_parameters_copypaste as parameters_copypaste +import modules.gfpgan_model +import modules.hypernetworks.ui +import modules.scripts +import modules.shared as shared +import modules.styles +import modules.textual_inversion.ui +from modules import prompt_parser +from modules.sd_hijack import model_hijack +from modules.sd_samplers import samplers, samplers_for_img2img +from modules.textual_inversion import textual_inversion +import modules.hypernetworks.ui +from modules.generation_parameters_copypaste import image_from_url_text +import modules.extras + +create_setting_component = ui_settings.create_setting_component + +warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) + +# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI +mimetypes.init() +mimetypes.add_type('application/javascript', '.js') + +if not cmd_opts.share and not cmd_opts.listen: + # fix gradio phoning home + gradio.utils.version_check = lambda: None + gradio.utils.get_local_ip_address = lambda: '127.0.0.1' + +if cmd_opts.ngrok is not None: + import modules.ngrok as ngrok + print('ngrok authtoken detected, trying to connect...') + ngrok.connect( + cmd_opts.ngrok, + cmd_opts.port if cmd_opts.port is not None else 7860, + cmd_opts.ngrok_options + ) + + +def gr_show(visible=True): + return {"visible": visible, "__type__": "update"} + + +sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" +sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None + +# Using constants for these since the variation selector isn't visible. +# Important that they exactly match script.js for tooltip to work. 
+random_symbol = '\U0001f3b2\ufe0f' # 🎲️ +reuse_symbol = '\u267b\ufe0f' # ♻️ +paste_symbol = '\u2199\ufe0f' # ↙ +refresh_symbol = '\U0001f504' # 🔄 +save_style_symbol = '\U0001f4be' # 💾 +apply_style_symbol = '\U0001f4cb' # 📋 +clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ +extra_networks_symbol = '\U0001F3B4' # 🎴 +switch_values_symbol = '\U000021C5' # ⇅ +restore_progress_symbol = '\U0001F300' # 🌀 +detect_image_size_symbol = '\U0001F4D0' # 📐 +up_down_symbol = '\u2195\ufe0f' # ↕️ + + +plaintext_to_html = ui_common.plaintext_to_html + + +def send_gradio_gallery_to_image(x): + if len(x) == 0: + return None + return image_from_url_text(x[0]) + + +def add_style(name: str, prompt: str, negative_prompt: str): + if name is None: + return [gr_show() for x in range(4)] + + style = modules.styles.PromptStyle(name, prompt, negative_prompt) + shared.prompt_styles.styles[style.name] = style + # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we + # reserialize all styles every time we save them + shared.prompt_styles.save_styles(shared.styles_filename) + + return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] + + +def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): + from modules import processing, devices + + if not enable: + return "" + + p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) + + with devices.autocast(): + p.init([""], [0], [0]) + + return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" + + +def resize_from_to_html(width, height, scale_by): + target_width = int(width * scale_by) + target_height = int(height * scale_by) + + if not target_width or not target_height: + return "no image selected" + + return f"resize: from {width}x{height} to {target_width}x{target_height}" + + +def apply_styles(prompt, prompt_neg, styles): + prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) + prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) + + return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] + + +def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): + if mode in {0, 1, 3, 4}: + return [interrogation_function(ii_singles[mode]), None] + elif mode == 2: + return [interrogation_function(ii_singles[mode]["image"]), None] + elif mode == 5: + assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" + images = shared.listfiles(ii_input_dir) + print(f"Will process {len(images)} images.") + if ii_output_dir != "": + os.makedirs(ii_output_dir, exist_ok=True) + else: + ii_output_dir = ii_input_dir + + for image in images: + img = Image.open(image) + filename = os.path.basename(image) + left, _ = os.path.splitext(filename) + print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) + + return [gr.update(), None] + + +def interrogate(image): + prompt = shared.interrogator.interrogate(image.convert("RGB")) + return gr.update() if prompt is None else prompt + + +def interrogate_deepbooru(image): + prompt = deepbooru.model.tag(image) + return gr.update() if prompt is None else prompt + + +def create_seed_inputs(target_interface): + with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") + seed.style(container=False) + random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') + reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') + + seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) + + # Components to show/hide based on the 'Extra' checkbox + seed_extras = [] + + with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: + seed_extras.append(seed_extra_row_1) + subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") + subseed.style(container=False) + random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") + reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") + + with FormRow(visible=False) as seed_extra_row_2: + seed_extras.append(seed_extra_row_2) + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") + + random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) + random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) + + def change_visibility(show): + return {comp: gr_show(show) for comp in seed_extras} + + seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) + + return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox + + + +def connect_clear_prompt(button): + """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" + button.click( + _js="clear_prompt", + fn=None, + inputs=[], + outputs=[], + ) + + +def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): + """ Connects a 'reuse (sub)seed' button's click event so that it copies last used + (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength + was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" + def copy_seed(gen_info_string: str, index): + res = -1 + + try: + gen_info = json.loads(gen_info_string) + index -= gen_info.get('index_of_first_image', 0) + + if is_subseed and gen_info.get('subseed_strength', 0) > 0: + all_subseeds = gen_info.get('all_subseeds', [-1]) + res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] + else: + all_seeds = gen_info.get('all_seeds', [-1]) + res = all_seeds[index if 0 <= index < len(all_seeds) else 0] + + except json.decoder.JSONDecodeError: + if gen_info_string: + errors.report(f"Error parsing JSON generation info: {gen_info_string}") + + return [res, gr_show(False)] + + reuse_seed.click( + fn=copy_seed, + _js="(x, y) => [x, selected_gallery_index()]", + show_progress=False, + inputs=[generation_info, dummy_component], + outputs=[seed, dummy_component] + ) + + +def update_token_counter(text, steps): + try: + text, _ = extra_networks.parse_prompt(text) + + _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) + prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) + + except Exception: + # a parsing error can happen here during typing, and we don't want to bother the user with + # messages related to it in console + prompt_schedules = [[[steps, text]]] + + flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) + prompts = [prompt_text for step, prompt_text in flat_prompts] + token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) + return f"{token_count}/{max_length}" + + +def create_toprow(is_img2img): + id_part = "img2img" if is_img2img else "txt2img" + + with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): + with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + button_interrogate = None + button_deepbooru = None + if is_img2img: + with gr.Column(scale=1, elem_classes="interrogate-col"): + button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") + button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + + with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): + with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): + interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") + skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") + submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + + skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) + + interrupt.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + with gr.Row(elem_id=f"{id_part}_tools"): + paste = ToolButton(value=paste_symbol, elem_id="paste") + clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") + extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") + prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") + save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") + restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) + + token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) + token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") + negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) + negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") + + clear_prompt_button.click( + fn=lambda *x: x, + _js="confirm_clear_prompt", + inputs=[prompt, negative_prompt], + outputs=[prompt, negative_prompt], + ) + + with gr.Row(elem_id=f"{id_part}_styles_row"): + prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) + create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") + + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + + +def setup_progressbar(*args, **kwargs): + pass + + +def apply_setting(key, value): + if value is None: + return gr.update() + + if shared.cmd_opts.freeze_settings: + return gr.update() + + # dont allow model to be swapped when model hash exists in prompt + if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: + return gr.update() + + if key == "sd_model_checkpoint": + ckpt_info = sd_models.get_closet_checkpoint_match(value) + + if ckpt_info is not None: + value = ckpt_info.title + else: + return gr.update() + + comp_args = opts.data_labels[key].component_args + if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: + return + + valtype = type(opts.data_labels[key].default) + oldval = opts.data.get(key, None) + opts.data[key] = valtype(value) if valtype != type(None) else value + if oldval != value and opts.data_labels[key].onchange is not None: + opts.data_labels[key].onchange() + + opts.save(shared.config_filename) + return getattr(opts, key) + + +def create_output_panel(tabname, outdir): + return ui_common.create_output_panel(tabname, outdir) + + +def create_sampler_and_steps_selection(choices, tabname): + if opts.samplers_in_dropdown: + with FormRow(elem_id=f"sampler_selection_{tabname}"): + sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + else: + with FormGroup(elem_id=f"sampler_selection_{tabname}"): + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) + sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + + return steps, sampler_index + + +def ordered_ui_categories(): + user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} + + for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): + yield category + + +def create_override_settings_dropdown(tabname, row): + dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) + + dropdown.change( + fn=lambda x: gr.Dropdown.update(visible=bool(x)), + inputs=[dropdown], + outputs=[dropdown], + ) + + return dropdown + + +def create_ui(): + import modules.img2img + import modules.txt2img + + reload_javascript() + + parameters_copypaste.reset() + + modules.scripts.scripts_current = modules.scripts.scripts_txt2img + modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) + + with gr.Blocks(analytics_enabled=False) as txt2img_interface: + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + + dummy_component = gr.Label(visible=False) + txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') + + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact', elem_id="txt2img_settings"): + modules.scripts.scripts_txt2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") + + with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "cfg": + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) + + elif category == "hires_fix": + with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: + hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") + + with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: + with gr.Column(scale=80): + with gr.Row(): + hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) + with gr.Column(scale=80): + with gr.Row(): + hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="txt2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('txt2img', row) + + elif category == "scripts": + with FormGroup(elem_id="txt2img_script_container"): + custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + + else: + modules.scripts.scripts_txt2img.setup_ui_for_section(category) + + hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] + + for component in hr_resolution_preview_inputs: + event = component.release if isinstance(component, gr.Slider) else component.change + + event( + fn=calc_resolution_hires, + inputs=hr_resolution_preview_inputs, + outputs=[hr_final_resolution], + show_progress=False, + ) + event( + None, + _js="onCalcResolutionHires", + inputs=hr_resolution_preview_inputs, + outputs=[], + show_progress=False, + ) + + txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + txt2img_args = dict( + fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), + _js="submit", + inputs=[ + dummy_component, + txt2img_prompt, + txt2img_negative_prompt, + txt2img_prompt_styles, + steps, + sampler_index, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + height, + width, + enable_hr, + denoising_strength, + hr_scale, + hr_upscaler, + hr_second_pass_steps, + hr_resize_x, + hr_resize_y, + hr_sampler_index, + hr_prompt, + hr_negative_prompt, + override_settings, + + ] + custom_inputs, + + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt2img_prompt.submit(**txt2img_args) + submit.click(**txt2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressTxt2img", + inputs=[dummy_component], + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + txt_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + txt_prompt_img + ], + outputs=[ + txt2img_prompt, + txt_prompt_img + ], + show_progress=False, + ) + + enable_hr.change( + fn=lambda x: gr_show(x), + inputs=[enable_hr], + outputs=[hr_options], + show_progress = False, + ) + + txt2img_paste_fields = [ + (txt2img_prompt, "Prompt"), + (txt2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (enable_hr, lambda d: "Denoising strength" in d), + (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), + (hr_scale, "Hires upscale"), + (hr_upscaler, "Hires upscaler"), + (hr_second_pass_steps, "Hires steps"), + (hr_resize_x, "Hires resize-1"), + (hr_resize_y, "Hires resize-2"), + (hr_sampler_index, "Hires sampler"), + (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), + (hr_prompt, "Hires prompt"), + (hr_negative_prompt, "Hires negative prompt"), + (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), + *modules.scripts.scripts_txt2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, + )) + + txt2img_preview_params = [ + 
txt2img_prompt, + txt2img_negative_prompt, + steps, + sampler_index, + cfg_scale, + seed, + width, + height, + ] + + token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) + + modules.scripts.scripts_current = modules.scripts.scripts_img2img + modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) + + with gr.Blocks(analytics_enabled=False) as img2img_interface: + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + + img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) + + with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: + from modules import ui_extra_networks + extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') + + with FormRow().style(equal_height=False): + with gr.Column(variant='compact', elem_id="img2img_settings"): + copy_image_buttons = [] + copy_image_destinations = {} + + def add_copy_image_controls(tab_name, elem): + with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): + gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") + + for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): + if name == tab_name: + gr.Button(title, interactive=False) + copy_image_destinations[name] = elem + continue + + button = gr.Button(title) + copy_image_buttons.append((button, name, elem)) + + with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('img2img', init_img) + + with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('sketch', sketch) + + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + add_copy_image_controls('inpaint', init_img_with_mask) + + with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) + inpaint_color_sketch_orig = gr.State(None) + add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) + + def update_orig(image, state): + if image is not None: + same_size = state is not None and state.size == image.size + has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) + edited = same_size and has_exact_match + return image if not edited or state is None else state + + inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) + + with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: + init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") + + with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: + hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
+                        gr.HTML(
+                            "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
+                            "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+                            f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
+                            f"{hidden}</p>
" + ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Accordion("PNG info", open=False): + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + + def copy_image(img): + if isinstance(img, dict) and 'image' in img: + return img['image'] + + return img + + for button, name, elem in copy_image_buttons: + button.click( + fn=copy_image, + inputs=[elem], + outputs=[copy_image_destinations[name]], + ) + button.click( + fn=lambda: None, + _js=f"switch_to_{name.replace(' ', '_')}", + inputs=[], + outputs=[], + ) + + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + + modules.scripts.scripts_img2img.prepare_ui() + + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") + + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") + + on_change_args = dict( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + 
outputs=scale_by_html, + show_progress=False, + ) + + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) + + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) + tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) + + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "cfg": + with FormGroup(): + with FormRow(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') + + elif category == "checkboxes": + with FormRow(elem_classes="checkboxes-row", variant="compact"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "override_settings": + with FormRow(elem_id="img2img_override_settings_row") as row: + override_settings = create_override_settings_dropdown('img2img', row) + + elif category == "scripts": + with FormGroup(elem_id="img2img_script_container"): + custom_inputs = modules.scripts.scripts_img2img.setup_ui() + + elif category == "inpaint": + with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: + with FormRow(): + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") + mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") + + with FormRow(): + inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") + + with FormRow(): + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") + + with FormRow(): + with gr.Column(): + inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") + + with gr.Column(scale=4): + inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") + + def select_img2img_tab(tab): + return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), + + for i, elem in enumerate(img2img_tabs): + elem.select( + fn=lambda tab=i: select_img2img_tab(tab), + inputs=[], + outputs=[inpaint_controls, mask_alpha], + ) + else: + modules.scripts.scripts_img2img.setup_ui_for_section(category) + + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) + + connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) + connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) + + img2img_prompt_img.change( + fn=modules.images.image_data, + inputs=[ + img2img_prompt_img + ], + outputs=[ + img2img_prompt, + img2img_prompt_img + ], + show_progress=False, + ) + + img2img_args = dict( + fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), + _js="submit_img2img", + inputs=[ + dummy_component, + dummy_component, + img2img_prompt, + img2img_negative_prompt, + img2img_prompt_styles, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + inpaint_color_sketch_orig, + init_img_inpaint, + init_mask_inpaint, + steps, + sampler_index, + mask_blur, + mask_alpha, + inpainting_fill, + restore_faces, + tiling, + batch_count, + batch_size, + cfg_scale, + image_cfg_scale, + denoising_strength, + seed, + subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + selected_scale_tab, + height, + width, + scale_by, + resize_mode, + inpaint_full_res, + inpaint_full_res_padding, + inpainting_mask_invert, + img2img_batch_input_dir, + img2img_batch_output_dir, + img2img_batch_inpaint_mask_dir, + override_settings, + img2img_batch_use_png_info, + img2img_batch_png_info_props, + img2img_batch_png_info_dir, + ] + custom_inputs, + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + interrogate_args = dict( + _js="get_img2img_tab_index", + inputs=[ + dummy_component, + img2img_batch_input_dir, + img2img_batch_output_dir, + init_img, + sketch, + init_img_with_mask, + inpaint_color_sketch, + init_img_inpaint, + ], + outputs=[img2img_prompt, dummy_component], + ) + + img2img_prompt.submit(**img2img_args) + submit.click(**img2img_args) + + res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) + + detect_image_size_btn.click( + fn=lambda w, h, _: (w or gr.update(), h or gr.update()), + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, dummy_component], + outputs=[width, height], + show_progress=False, + ) + + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressImg2img", + inputs=[dummy_component], + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + + img2img_interrogate.click( + fn=lambda *args: process_interrogate(interrogate, *args), + **interrogate_args, + ) + + img2img_deepbooru.click( + fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), + **interrogate_args, + ) + + prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] + style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] + style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] + + for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): + button.click( + fn=add_style, + _js="ask_for_style_name", + # Have to pass empty dummy component here, because the JavaScript and Python function have to accept + # the same number of parameters, but we only know the style-name after the JavaScript prompt + inputs=[dummy_component, prompt, negative_prompt], + outputs=[txt2img_prompt_styles, img2img_prompt_styles], + ) + + for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): + button.click( + fn=apply_styles, + _js=js_func, + inputs=[prompt, negative_prompt, styles], + outputs=[prompt, negative_prompt, styles], + ) + + token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) + negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) + + ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) + + img2img_paste_fields = [ + (img2img_prompt, "Prompt"), + (img2img_negative_prompt, "Negative prompt"), + (steps, "Steps"), + (sampler_index, "Sampler"), + (restore_faces, "Face restoration"), + (cfg_scale, "CFG scale"), + (image_cfg_scale, "Image CFG scale"), + (seed, "Seed"), + (width, "Size-1"), + (height, "Size-2"), + (batch_size, "Batch size"), + (subseed, "Variation seed"), + (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), + (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (denoising_strength, "Denoising strength"), + (mask_blur, "Mask blur"), + *modules.scripts.scripts_img2img.infotext_fields + ] + parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) + parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, + )) + + modules.scripts.scripts_current = None + + with gr.Blocks(analytics_enabled=False) as extras_interface: + ui_postprocessing.create_ui() + + with gr.Blocks(analytics_enabled=False) as pnginfo_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='panel'): + image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") + + with gr.Column(variant='panel'): + html = gr.HTML() + generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") + html2 = gr.HTML() + with gr.Row(): + buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) + + for tabname, button in buttons.items(): + parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( + paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, + )) + + image.change( + fn=wrap_gradio_call(modules.extras.run_pnginfo), + inputs=[image], + outputs=[html, generation_info, html2], + ) + + 
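The PNG info tab wired up above can recover settings because the webui embeds the generation infotext in the image itself, in a PNG tEXt chunk keyed "parameters". A minimal sketch of reading that chunk back with PIL; read_parameters is a hypothetical helper for illustration, the real logic lives in modules.extras.run_pnginfo:

from PIL import Image

def read_parameters(path):
    # PIL gathers PNG tEXt/iTXt chunks into the .text dict;
    # the webui stores its infotext under the "parameters" key.
    with Image.open(path) as im:
        return im.text.get("parameters", "")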
def update_interp_description(value):
+        interp_description_css = "<p style='margin-bottom: 2.5em'>{}</p>
" + interp_descriptions = { + "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), + "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), + "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") + } + return interp_descriptions[value] + + with gr.Blocks(analytics_enabled=False) as modelmerger_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact'): + interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") + + with FormRow(elem_id="modelmerger_models"): + primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") + create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") + + secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") + create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") + + tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") + create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") + + custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") + interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") + interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") + interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) + + with FormRow(): + checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") + save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") + + with FormRow(): + with gr.Column(): + config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") + + with gr.Column(): + with FormRow(): + bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") + create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") + + with FormRow(): + discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") + + with gr.Row(): + modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') + + with gr.Column(variant='compact', elem_id="modelmerger_results_container"): + with gr.Group(elem_id="modelmerger_results_panel"): + modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) + + with gr.Blocks(analytics_enabled=False) as train_interface: + with gr.Row().style(equal_height=False): + gr.HTML(value="

See wiki for detailed explanation.

") + + with gr.Row(variant="compact").style(equal_height=False): + with gr.Tabs(elem_id="train_tabs"): + + with gr.Tab(label="Create embedding", id="create_embedding"): + new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") + initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") + nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") + overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") + + with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): + new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") + new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") + new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") + new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") + new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") + new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") + new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") + new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") + overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") + + with gr.Tab(label="Preprocess images", id="preprocess_images"): + process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") + process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") + process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") + process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") + preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") + + with gr.Row(): + process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") + process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") + process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") + process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") + process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") + process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") + process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") + + with gr.Row(visible=False) as process_split_extra_row: + process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") + process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") + + with gr.Row(visible=False) as process_focal_crop_row: + process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") + process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") + process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") + process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") + + with gr.Column(visible=False) as process_multicrop_col: + gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') + with gr.Row(): + process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") + process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") + with gr.Row(): + process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") + process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") + with gr.Row(): + process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") + process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + with gr.Row(): + interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") + run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") + + process_split.change( + fn=lambda show: gr_show(show), + inputs=[process_split], + outputs=[process_split_extra_row], + ) + + process_focal_crop.change( + fn=lambda show: gr_show(show), + inputs=[process_focal_crop], + outputs=[process_focal_crop_row], + ) + + process_multicrop.change( + fn=lambda show: gr_show(show), + inputs=[process_multicrop], + outputs=[process_multicrop_col], + ) + + def get_textual_inversion_template_names(): + return sorted(textual_inversion.textual_inversion_templates) + + with gr.Tab(label="Train", id="train"): + gr.HTML(value="

<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>
") + with FormRow(): + train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) + create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") + + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") + + with FormRow(): + embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") + hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") + + with FormRow(): + clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) + clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) + + with FormRow(): + batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") + gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") + + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") + log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") + + with FormRow(): + template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) + create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") + + training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") + training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") + varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") + steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") + + with FormRow(): + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") + save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + + use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") + + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") + preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
+                    training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
+                    training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
+                    varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize")
+                    steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")
+
+                    with FormRow():
+                        create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
+                        save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
+
+                    use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")
+
+                    save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
+                    preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
+
+                    shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
+                    tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")
+
+                    latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method")
+
+                    with gr.Row():
+                        train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding")
+                        interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training")
+                        train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork")
+
+                params = script_callbacks.UiTrainTabParams(txt2img_preview_params)
+
+                script_callbacks.ui_train_tabs_callback(params)
+
+            with gr.Column(elem_id='ti_gallery_container'):
+                ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
+                gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
+                gr.HTML(elem_id="ti_progress", value="")
+                ti_outcome = gr.HTML(elem_id="ti_error", value="")
+
+        create_embedding.click(
+            fn=modules.textual_inversion.ui.create_embedding,
+            inputs=[
+                new_embedding_name,
+                initialization_text,
+                nvpt,
+                overwrite_old_embedding,
+            ],
+            outputs=[
+                train_embedding_name,
+                ti_output,
+                ti_outcome,
+            ]
+        )
+
+        create_hypernetwork.click(
+            fn=modules.hypernetworks.ui.create_hypernetwork,
+            inputs=[
+                new_hypernetwork_name,
+                new_hypernetwork_sizes,
+                overwrite_old_hypernetwork,
+                new_hypernetwork_layer_structure,
+                new_hypernetwork_activation_func,
+                new_hypernetwork_initialization_option,
+                new_hypernetwork_add_layer_norm,
+                new_hypernetwork_use_dropout,
+                new_hypernetwork_dropout_structure
+            ],
+            outputs=[
+                train_hypernetwork_name,
+                ti_output,
+                ti_outcome,
+            ]
+        )
+
+        run_preprocess.click(
+            fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
+            _js="start_training_textual_inversion",
+            inputs=[
+                dummy_component,
+                process_src,
+                process_dst,
+                process_width,
+                process_height,
+                preprocess_txt_action,
+                process_keep_original_size,
+                process_flip,
+                process_split,
+                process_caption,
+                process_caption_deepbooru,
+                process_split_threshold,
+                process_overlap_ratio,
+                process_focal_crop,
+                process_focal_crop_face_weight,
+                process_focal_crop_entropy_weight,
+                process_focal_crop_edges_weight,
+                process_focal_crop_debug,
+                process_multicrop,
+                process_multicrop_mindim,
+                process_multicrop_maxdim,
+                process_multicrop_minarea,
+                process_multicrop_maxarea,
+                process_multicrop_objective,
+                process_multicrop_threshold,
+            ],
+            outputs=[
+                ti_output,
+                ti_outcome,
+            ],
+        )
+
+        train_embedding.click(
+            fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
+            _js="start_training_textual_inversion",
+            inputs=[
+                dummy_component,
+                train_embedding_name,
+                embedding_learn_rate,
+                batch_size,
+                gradient_step,
+                dataset_directory,
+                log_directory,
+                training_width,
+                training_height,
+                varsize,
+                steps,
+                clip_grad_mode,
+                clip_grad_value,
+                shuffle_tags,
+                tag_drop_out,
+                latent_sampling_method,
+                use_weight,
+                create_image_every,
+                save_embedding_every,
+                template_file,
+                save_image_with_stored_embedding,
+                preview_from_txt2img,
+                *txt2img_preview_params,
+            ],
+            outputs=[
+                ti_output,
+                ti_outcome,
+            ]
+        )
+
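
# [illustrative sketch, not part of the patch] run_preprocess and train_embedding above pair
# a Python fn with a _js hook: in Gradio 3.x the JavaScript runs in the browser first and its
# return array replaces the values handed to the Python function. That is why a hidden
# dummy_component leads every inputs list: it receives the browser-generated task id.
# Minimal analogue with invented names:

import gradio as gr

with gr.Blocks() as demo:
    dummy = gr.Textbox(visible=False)
    name = gr.Textbox(label="Name")
    out = gr.Textbox(label="Result")
    start = gr.Button("Start")
    start.click(
        fn=lambda task_id, n: f"{task_id}: started {n}",
        _js="(d, n) => ['task(' + Date.now() + ')', n]",  # browser-side preprocessing
        inputs=[dummy, name],
        outputs=[out],
    )

demo.launch()
# [end sketch]
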
+        train_hypernetwork.click(
+            fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]),
+            _js="start_training_textual_inversion",
+            inputs=[
+                dummy_component,
+                train_hypernetwork_name,
+                hypernetwork_learn_rate,
+                batch_size,
+                gradient_step,
+                dataset_directory,
+                log_directory,
+                training_width,
+                training_height,
+                varsize,
+                steps,
+                clip_grad_mode,
+                clip_grad_value,
+                shuffle_tags,
+                tag_drop_out,
+                latent_sampling_method,
+                use_weight,
+                create_image_every,
+                save_embedding_every,
+                template_file,
+                preview_from_txt2img,
+                *txt2img_preview_params,
+            ],
+            outputs=[
+                ti_output,
+                ti_outcome,
+            ]
+        )
+
+        interrupt_training.click(
+            fn=lambda: shared.state.interrupt(),
+            inputs=[],
+            outputs=[],
+        )
+
+        interrupt_preprocessing.click(
+            fn=lambda: shared.state.interrupt(),
+            inputs=[],
+            outputs=[],
+        )
+
+    loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file)
+
+    settings = ui_settings.UiSettings()
+    settings.create_ui(loadsave, dummy_component)
+
+    interfaces = [
+        (txt2img_interface, "txt2img", "txt2img"),
+        (img2img_interface, "img2img", "img2img"),
+        (extras_interface, "Extras", "extras"),
+        (pnginfo_interface, "PNG Info", "pnginfo"),
+        (modelmerger_interface, "Checkpoint Merger", "modelmerger"),
+        (train_interface, "Train", "train"),
+    ]
+
+    interfaces += script_callbacks.ui_tabs_callback()
+    interfaces += [(settings.interface, "Settings", "settings")]
+
+    extensions_interface = ui_extensions.create_ui()
+    interfaces += [(extensions_interface, "Extensions", "extensions")]
+
+    shared.tab_names = []
+    for _interface, label, _ifid in interfaces:
+        shared.tab_names.append(label)
+
+    with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo:
+        settings.add_quicksettings()
+
+        parameters_copypaste.connect_paste_params_buttons()
+
+        with gr.Tabs(elem_id="tabs") as tabs:
+            tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)}
+            sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999))
+
+            for interface, label, ifid in sorted_interfaces:
+                if label in shared.opts.hidden_tabs:
+                    continue
+                with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"):
+                    interface.render()
+
+            for interface, _label, ifid in interfaces:
+                if ifid in ["extensions", "settings"]:
+                    continue
+
+                loadsave.add_block(interface, ifid)
+
+            loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs)
+
+            loadsave.setup_ui()
+
+        if os.path.exists(os.path.join(script_path, "notification.mp3")):
+            gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
+
+        footer = shared.html("footer.html")
+        footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API")
+        gr.HTML(footer, elem_id="footer")
+
+        settings.add_functionality(demo)
+
+        update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
+        settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
+        demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
+
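
# [illustrative sketch, not part of the patch] The tab-ordering rule used inside the Tabs
# block above, reduced to plain Python: labels named in opts.ui_tab_order come first in that
# order; everything else keeps its original relative order behind them (the 9999 default
# plus Python's stable sort):

ui_tab_order = ["img2img", "Train"]  # example user setting
interfaces = [(None, label, label.lower()) for label in ["txt2img", "img2img", "Extras", "Train"]]

tab_order = {k: i for i, k in enumerate(ui_tab_order)}
sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999))
print([label for _, label, _ in sorted_interfaces])  # ['img2img', 'Train', 'txt2img', 'Extras']
# [end sketch]
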
+        def modelmerger(*args):
+            try:
+                results = modules.extras.run_modelmerger(*args)
+            except Exception as e:
+                errors.report("Error loading/saving model file", exc_info=True)
+                modules.sd_models.list_models()  # to remove the potentially missing models from the list
+                return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
+            return results
+
+        modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result])
+        modelmerger_merge.click(
+            fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]),
+            _js='modelmerger',
+            inputs=[
+                dummy_component,
+                primary_model_name,
+                secondary_model_name,
+                tertiary_model_name,
+                interp_method,
+                interp_amount,
+                save_as_half,
+                custom_name,
+                checkpoint_format,
+                config_source,
+                bake_in_vae,
+                discard_weights,
+                save_metadata,
+            ],
+            outputs=[
+                primary_model_name,
+                secondary_model_name,
+                tertiary_model_name,
+                settings.component_dict['sd_model_checkpoint'],
+                modelmerger_result,
+            ]
+        )
+
+    loadsave.dump_defaults()
+    demo.ui_loadsave = loadsave
+
+    # Required as a workaround for change() event not triggering when loading values from ui-config.json
+    interp_description.value = update_interp_description(interp_method.value)
+
+    return demo
+
+
+def versions_html():
+    import torch
+    import launch
+
+    python_version = ".".join([str(x) for x in sys.version_info[0:3]])
+    commit = launch.commit_hash()
+    tag = launch.git_tag()
+
+    if shared.xformers_available:
+        import xformers
+        xformers_version = xformers.__version__
+    else:
+        xformers_version = "N/A"
+
+    return f"""
+version: {tag}
+ • 
+python: {python_version}
+ • 
+torch: {getattr(torch, '__long_version__',torch.__version__)}
+ • 
+xformers: {xformers_version}
+ • 
+gradio: {gr.__version__}
+ • 
+checkpoint: N/A
+"""
+
+
+def setup_ui_api(app):
+    from pydantic import BaseModel, Field
+    from typing import List
+
+    class QuicksettingsHint(BaseModel):
+        name: str = Field(title="Name of the quicksettings field")
+        label: str = Field(title="Label of the quicksettings field")
+
+    def quicksettings_hint():
+        return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()]
+
+    app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint])
+
+    app.add_api_route("/internal/ping", lambda: {}, methods=["GET"])
+
+    app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"])
+
+    def download_sysinfo(attachment=False):
+        from fastapi.responses import PlainTextResponse
+
+        text = sysinfo.get()
+        filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt"
+
+        return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'})
+
+    app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"])
+    app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"])
+
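
# [illustrative sketch, not part of the patch] Exercising the internal routes registered
# above from a client; the endpoint paths come from the code, the base URL and port are an
# assumption (a default local webui instance):

import requests

base = "http://127.0.0.1:7860"
print(requests.get(f"{base}/internal/ping").json())  # -> {}
print(requests.get(f"{base}/internal/quicksettings-hint").json()[:2])
with open("sysinfo.txt", "wb") as f:
    # same report the /internal/sysinfo-download route serves as an attachment
    f.write(requests.get(f"{base}/internal/sysinfo-download").content)
# [end sketch]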
script_path -from modules.ui_common import create_refresh_button -from modules.ui_gradio_extensions import reload_javascript - - -from modules.shared import opts, cmd_opts - -import modules.codeformer_model -import modules.generation_parameters_copypaste as parameters_copypaste -import modules.gfpgan_model -import modules.hypernetworks.ui -import modules.scripts -import modules.shared as shared -import modules.styles -import modules.textual_inversion.ui -from modules import prompt_parser -from modules.sd_hijack import model_hijack -from modules.sd_samplers import samplers, samplers_for_img2img -from modules.textual_inversion import textual_inversion -import modules.hypernetworks.ui -from modules.generation_parameters_copypaste import image_from_url_text -import modules.extras - -create_setting_component = ui_settings.create_setting_component - -warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) - -# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI -mimetypes.init() -mimetypes.add_type('application/javascript', '.js') - -if not cmd_opts.share and not cmd_opts.listen: - # fix gradio phoning home - gradio.utils.version_check = lambda: None - gradio.utils.get_local_ip_address = lambda: '127.0.0.1' - -if cmd_opts.ngrok is not None: - import modules.ngrok as ngrok - print('ngrok authtoken detected, trying to connect...') - ngrok.connect( - cmd_opts.ngrok, - cmd_opts.port if cmd_opts.port is not None else 7860, - cmd_opts.ngrok_options - ) - - -def gr_show(visible=True): - return {"visible": visible, "__type__": "update"} - - -sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" -sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None - -# Using constants for these since the variation selector isn't visible. -# Important that they exactly match script.js for tooltip to work. 
-random_symbol = '\U0001f3b2\ufe0f' # 🎲️ -reuse_symbol = '\u267b\ufe0f' # ♻️ -paste_symbol = '\u2199\ufe0f' # ↙ -refresh_symbol = '\U0001f504' # 🔄 -save_style_symbol = '\U0001f4be' # 💾 -apply_style_symbol = '\U0001f4cb' # 📋 -clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ -extra_networks_symbol = '\U0001F3B4' # 🎴 -switch_values_symbol = '\U000021C5' # ⇅ -restore_progress_symbol = '\U0001F300' # 🌀 -detect_image_size_symbol = '\U0001F4D0' # 📐 -up_down_symbol = '\u2195\ufe0f' # ↕️ - - -plaintext_to_html = ui_common.plaintext_to_html - - -def send_gradio_gallery_to_image(x): - if len(x) == 0: - return None - return image_from_url_text(x[0]) - - -def add_style(name: str, prompt: str, negative_prompt: str): - if name is None: - return [gr_show() for x in range(4)] - - style = modules.styles.PromptStyle(name, prompt, negative_prompt) - shared.prompt_styles.styles[style.name] = style - # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we - # reserialize all styles every time we save them - shared.prompt_styles.save_styles(shared.styles_filename) - - return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] - - -def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): - from modules import processing, devices - - if not enable: - return "" - - p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) - - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" - - -def resize_from_to_html(width, height, scale_by): - target_width = int(width * scale_by) - target_height = int(height * scale_by) - - if not target_width or not target_height: - return "no image selected" - - return f"resize: from {width}x{height} to {target_width}x{target_height}" - - -def apply_styles(prompt, prompt_neg, styles): - prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) - prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) - - return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] - - -def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): - if mode in {0, 1, 3, 4}: - return [interrogation_function(ii_singles[mode]), None] - elif mode == 2: - return [interrogation_function(ii_singles[mode]["image"]), None] - elif mode == 5: - assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" - images = shared.listfiles(ii_input_dir) - print(f"Will process {len(images)} images.") - if ii_output_dir != "": - os.makedirs(ii_output_dir, exist_ok=True) - else: - ii_output_dir = ii_input_dir - - for image in images: - img = Image.open(image) - filename = os.path.basename(image) - left, _ = os.path.splitext(filename) - print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) - - return [gr.update(), None] - - -def interrogate(image): - prompt = shared.interrogator.interrogate(image.convert("RGB")) - return gr.update() if prompt is None else prompt - - -def interrogate_deepbooru(image): - prompt = deepbooru.model.tag(image) - return gr.update() if prompt is None else prompt - - -def create_seed_inputs(target_interface): - with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") - seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - - seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) - - # Components to show/hide based on the 'Extra' checkbox - seed_extras = [] - - with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: - seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") - subseed.style(container=False) - random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") - reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") - - with FormRow(visible=False) as seed_extra_row_2: - seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") - - random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) - random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) - - def change_visibility(show): - return {comp: gr_show(show) for comp in seed_extras} - - seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) - - return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox - - - -def connect_clear_prompt(button): - """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" - button.click( - _js="clear_prompt", - fn=None, - inputs=[], - outputs=[], - ) - - -def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): - """ Connects a 'reuse (sub)seed' button's click event so that it copies last used - (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength - was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" - def copy_seed(gen_info_string: str, index): - res = -1 - - try: - gen_info = json.loads(gen_info_string) - index -= gen_info.get('index_of_first_image', 0) - - if is_subseed and gen_info.get('subseed_strength', 0) > 0: - all_subseeds = gen_info.get('all_subseeds', [-1]) - res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] - else: - all_seeds = gen_info.get('all_seeds', [-1]) - res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - - except json.decoder.JSONDecodeError: - if gen_info_string: - errors.report(f"Error parsing JSON generation info: {gen_info_string}") - - return [res, gr_show(False)] - - reuse_seed.click( - fn=copy_seed, - _js="(x, y) => [x, selected_gallery_index()]", - show_progress=False, - inputs=[generation_info, dummy_component], - outputs=[seed, dummy_component] - ) - - -def update_token_counter(text, steps): - try: - text, _ = extra_networks.parse_prompt(text) - - _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) - prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) - - except Exception: - # a parsing error can happen here during typing, and we don't want to bother the user with - # messages related to it in console - prompt_schedules = [[[steps, text]]] - - flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) - prompts = [prompt_text for step, prompt_text in flat_prompts] - token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) - return f"{token_count}/{max_length}" - - -def create_toprow(is_img2img): - id_part = "img2img" if is_img2img else "txt2img" - - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - button_interrogate = None - button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - - interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - with gr.Row(elem_id=f"{id_part}_tools"): - paste = ToolButton(value=paste_symbol, elem_id="paste") - clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") - prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") - save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) - - token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[prompt, negative_prompt], - outputs=[prompt, negative_prompt], - ) - - with gr.Row(elem_id=f"{id_part}_styles_row"): - prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) - create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button - - -def setup_progressbar(*args, **kwargs): - pass - - -def apply_setting(key, value): - if value is None: - return gr.update() - - if shared.cmd_opts.freeze_settings: - return gr.update() - - # dont allow model to be swapped when model hash exists in prompt - if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: - return gr.update() - - if key == "sd_model_checkpoint": - ckpt_info = sd_models.get_closet_checkpoint_match(value) - - if ckpt_info is not None: - value = ckpt_info.title - else: - return gr.update() - - comp_args = opts.data_labels[key].component_args - if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: - return - - valtype = type(opts.data_labels[key].default) - oldval = opts.data.get(key, None) - opts.data[key] = valtype(value) if valtype != type(None) else value - if oldval != value and opts.data_labels[key].onchange is not None: - opts.data_labels[key].onchange() - - opts.save(shared.config_filename) - return getattr(opts, key) - - -def create_output_panel(tabname, outdir): - return ui_common.create_output_panel(tabname, outdir) - - -def create_sampler_and_steps_selection(choices, tabname): - if opts.samplers_in_dropdown: - with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - else: - with FormGroup(elem_id=f"sampler_selection_{tabname}"): - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - - return steps, sampler_index - - -def ordered_ui_categories(): - user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} - - for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): - yield category - - -def create_override_settings_dropdown(tabname, row): - dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) - - dropdown.change( - fn=lambda x: gr.Dropdown.update(visible=bool(x)), - inputs=[dropdown], - outputs=[dropdown], - ) - - return dropdown - - -def create_ui(): - import modules.img2img - import modules.txt2img - - reload_javascript() - - parameters_copypaste.reset() - - modules.scripts.scripts_current = modules.scripts.scripts_txt2img - modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) - - with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) - - dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') - - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact', elem_id="txt2img_settings"): - modules.scripts.scripts_txt2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - - with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "cfg": - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) - - elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") - - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="txt2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('txt2img', row) - - elif category == "scripts": - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - - else: - modules.scripts.scripts_txt2img.setup_ui_for_section(category) - - hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] - - for component in hr_resolution_preview_inputs: - event = component.release if isinstance(component, gr.Slider) else component.change - - event( - fn=calc_resolution_hires, - inputs=hr_resolution_preview_inputs, - outputs=[hr_final_resolution], - show_progress=False, - ) - event( - None, - _js="onCalcResolutionHires", - inputs=hr_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - - txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - txt2img_args = dict( - fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), - _js="submit", - inputs=[ - dummy_component, - txt2img_prompt, - txt2img_negative_prompt, - txt2img_prompt_styles, - steps, - sampler_index, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - height, - width, - enable_hr, - denoising_strength, - hr_scale, - hr_upscaler, - hr_second_pass_steps, - hr_resize_x, - hr_resize_y, - hr_sampler_index, - hr_prompt, - hr_negative_prompt, - override_settings, - - ] + custom_inputs, - - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt2img_prompt.submit(**txt2img_args) - submit.click(**txt2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressTxt2img", - inputs=[dummy_component], - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - txt_prompt_img - ], - outputs=[ - txt2img_prompt, - txt_prompt_img - ], - show_progress=False, - ) - - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - - txt2img_paste_fields = [ - (txt2img_prompt, "Prompt"), - (txt2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (hr_scale, "Hires upscale"), - (hr_upscaler, "Hires upscaler"), - (hr_second_pass_steps, "Hires steps"), - (hr_resize_x, "Hires resize-1"), - (hr_resize_y, "Hires resize-2"), - (hr_sampler_index, "Hires sampler"), - (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), - (hr_prompt, "Hires prompt"), - (hr_negative_prompt, "Hires negative prompt"), - (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - *modules.scripts.scripts_txt2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, - )) - - txt2img_preview_params = [ - 
txt2img_prompt, - txt2img_negative_prompt, - steps, - sampler_index, - cfg_scale, - seed, - width, - height, - ] - - token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) - - modules.scripts.scripts_current = modules.scripts.scripts_img2img - modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) - - with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) - - img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') - - with FormRow().style(equal_height=False): - with gr.Column(variant='compact', elem_id="img2img_settings"): - copy_image_buttons = [] - copy_image_destinations = {} - - def add_copy_image_controls(tab_name, elem): - with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): - gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") - - for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): - if name == tab_name: - gr.Button(title, interactive=False) - copy_image_destinations[name] = elem - continue - - button = gr.Button(title) - copy_image_buttons.append((button, name, elem)) - - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "

Process images in a directory on the same machine where the server is running." + - "
Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"
Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}

" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - - modules.scripts.scripts_img2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") - - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - 
outputs=scale_by_html, - show_progress=False, - ) - - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) - - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) - - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) - tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "cfg": - with FormGroup(): - with FormRow(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="img2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('img2img', row) - - elif category == "scripts": - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() - - elif category == "inpaint": - with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: - with FormRow(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") - mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") - - with FormRow(): - inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - - with FormRow(): - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - - with FormRow(): - with gr.Column(): - inpaint_full_res = 
gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") - - with gr.Column(scale=4): - inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) - else: - modules.scripts.scripts_img2img.setup_ui_for_section(category) - - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - img2img_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - img2img_prompt_img - ], - outputs=[ - img2img_prompt, - img2img_prompt_img - ], - show_progress=False, - ) - - img2img_args = dict( - fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), - _js="submit_img2img", - inputs=[ - dummy_component, - dummy_component, - img2img_prompt, - img2img_negative_prompt, - img2img_prompt_styles, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - inpaint_color_sketch_orig, - init_img_inpaint, - init_mask_inpaint, - steps, - sampler_index, - mask_blur, - mask_alpha, - inpainting_fill, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - image_cfg_scale, - denoising_strength, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - selected_scale_tab, - height, - width, - scale_by, - resize_mode, - inpaint_full_res, - inpaint_full_res_padding, - inpainting_mask_invert, - img2img_batch_input_dir, - img2img_batch_output_dir, - img2img_batch_inpaint_mask_dir, - override_settings, - img2img_batch_use_png_info, - img2img_batch_png_info_props, - img2img_batch_png_info_dir, - ] + custom_inputs, - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - interrogate_args = dict( - _js="get_img2img_tab_index", - inputs=[ - dummy_component, - img2img_batch_input_dir, - img2img_batch_output_dir, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - init_img_inpaint, - ], - outputs=[img2img_prompt, dummy_component], - ) - - img2img_prompt.submit(**img2img_args) - submit.click(**img2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) - - detect_image_size_btn.click( - fn=lambda w, h, _: (w or gr.update(), h or gr.update()), - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, dummy_component], - outputs=[width, height], - show_progress=False, - ) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressImg2img", - inputs=[dummy_component], - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - img2img_interrogate.click( - fn=lambda *args: process_interrogate(interrogate, *args), - **interrogate_args, - ) - - img2img_deepbooru.click( - fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), - **interrogate_args, - ) - - prompts = [(txt2img_prompt, 
txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] - style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] - style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] - - for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): - button.click( - fn=add_style, - _js="ask_for_style_name", - # Have to pass empty dummy component here, because the JavaScript and Python function have to accept - # the same number of parameters, but we only know the style-name after the JavaScript prompt - inputs=[dummy_component, prompt, negative_prompt], - outputs=[txt2img_prompt_styles, img2img_prompt_styles], - ) - - for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): - button.click( - fn=apply_styles, - _js=js_func, - inputs=[prompt, negative_prompt, styles], - outputs=[prompt, negative_prompt, styles], - ) - - token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) - - img2img_paste_fields = [ - (img2img_prompt, "Prompt"), - (img2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (image_cfg_scale, "Image CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (mask_blur, "Mask blur"), - *modules.scripts.scripts_img2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) - parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, - )) - - modules.scripts.scripts_current = None - - with gr.Blocks(analytics_enabled=False) as extras_interface: - ui_postprocessing.create_ui() - - with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='panel'): - image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") - - with gr.Column(variant='panel'): - html = gr.HTML() - generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") - html2 = gr.HTML() - with gr.Row(): - buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) - - for tabname, button in buttons.items(): - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, - )) - - image.change( - fn=wrap_gradio_call(modules.extras.run_pnginfo), - inputs=[image], - outputs=[html, generation_info, html2], - ) - - 
def update_interp_description(value): - interp_description_css = "

{}

" - interp_descriptions = { - "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), - "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), - "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") - } - return interp_descriptions[value] - - with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact'): - interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - - with FormRow(elem_id="modelmerger_models"): - primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") - create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - - secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") - create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - - tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") - create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - - custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") - interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") - interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) - - with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") - save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") - - with FormRow(): - with gr.Column(): - config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - - with gr.Column(): - with FormRow(): - bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") - create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") - - with FormRow(): - discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - - with gr.Row(): - modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') - - with gr.Column(variant='compact', elem_id="modelmerger_results_container"): - with gr.Group(elem_id="modelmerger_results_panel"): - modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) - - with gr.Blocks(analytics_enabled=False) as train_interface: - with gr.Row().style(equal_height=False): - gr.HTML(value="
<p style='margin-bottom: 0.7em'>See wiki for detailed explanation.</p>
") - - with gr.Row(variant="compact").style(equal_height=False): - with gr.Tabs(elem_id="train_tabs"): - - with gr.Tab(label="Create embedding", id="create_embedding"): - new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") - initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") - nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") - overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") - - with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): - new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") - new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") - new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") - new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") - new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") - new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") - overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - - with gr.Tab(label="Preprocess images", id="preprocess_images"): - process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") - process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") - - with gr.Row(): - process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") - process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") - process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") - process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") - process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") - process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") - - with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") - - with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") - process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - - with gr.Column(visible=False) as process_multicrop_col: - gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') - with gr.Row(): - process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") - process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") - with gr.Row(): - process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") - process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") - with gr.Row(): - process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") - process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") - run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") - - process_split.change( - fn=lambda show: gr_show(show), - inputs=[process_split], - outputs=[process_split_extra_row], - ) - - process_focal_crop.change( - fn=lambda show: gr_show(show), - inputs=[process_focal_crop], - outputs=[process_focal_crop_row], - ) - - process_multicrop.change( - fn=lambda show: gr_show(show), - inputs=[process_multicrop], - outputs=[process_multicrop_col], - ) - - def get_textual_inversion_template_names(): - return sorted(textual_inversion.textual_inversion_templates) - - with gr.Tab(label="Train", id="train"): - gr.HTML(value="
<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]</p>
") - with FormRow(): - train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) - create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") - - with FormRow(): - embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") - hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - - with FormRow(): - clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) - clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) - - with FormRow(): - batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") - - dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") - log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") - - with FormRow(): - template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) - create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") - - training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") - training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") - varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") - steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") - - with FormRow(): - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") - - use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") - - save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") - preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") - - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") - - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") - - with gr.Row(): - train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") - interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") - train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") - - params = script_callbacks.UiTrainTabParams(txt2img_preview_params) - - script_callbacks.ui_train_tabs_callback(params) - - with gr.Column(elem_id='ti_gallery_container'): - ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) - gr.HTML(elem_id="ti_progress", value="") - ti_outcome = gr.HTML(elem_id="ti_error", value="") - - create_embedding.click( - fn=modules.textual_inversion.ui.create_embedding, - inputs=[ - new_embedding_name, - initialization_text, - nvpt, - overwrite_old_embedding, - ], - outputs=[ - train_embedding_name, - ti_output, - ti_outcome, - ] - ) - - create_hypernetwork.click( - fn=modules.hypernetworks.ui.create_hypernetwork, - inputs=[ - new_hypernetwork_name, - new_hypernetwork_sizes, - overwrite_old_hypernetwork, - new_hypernetwork_layer_structure, - new_hypernetwork_activation_func, - new_hypernetwork_initialization_option, - new_hypernetwork_add_layer_norm, - new_hypernetwork_use_dropout, - new_hypernetwork_dropout_structure - ], - outputs=[ - train_hypernetwork_name, - ti_output, - ti_outcome, - ] - ) - - run_preprocess.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - process_src, - process_dst, - process_width, - process_height, - preprocess_txt_action, - process_keep_original_size, - process_flip, - process_split, - process_caption, - process_caption_deepbooru, - process_split_threshold, - process_overlap_ratio, - process_focal_crop, - process_focal_crop_face_weight, - process_focal_crop_entropy_weight, - process_focal_crop_edges_weight, - process_focal_crop_debug, - process_multicrop, - process_multicrop_mindim, - process_multicrop_maxdim, - process_multicrop_minarea, - process_multicrop_maxarea, - process_multicrop_objective, - process_multicrop_threshold, - ], - outputs=[ - ti_output, - ti_outcome, - ], - ) - - train_embedding.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_embedding_name, - embedding_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - save_image_with_stored_embedding, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - 
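The train_embedding hook above and the train_hypernetwork hook just below follow the same wiring pattern: the button's click() receives a long positional inputs list that must line up exactly with the target function's signature, and the function itself is routed through wrap_gradio_gpu_call so it runs on the queued GPU path. A minimal self-contained sketch of that pattern, where wrap_gpu_call is a simplified stand-in for modules.call_queue.wrap_gradio_gpu_call and train() is a hypothetical entry point:

import gradio as gr

def wrap_gpu_call(func):
    # simplified stand-in: the real wrapper also serializes GPU jobs on a
    # queue and reports progress/errors back to the UI
    def wrapped(*args):
        try:
            return func(*args)
        except Exception as e:
            return f"Error: {e}"
    return wrapped

def train(name, learn_rate):  # hypothetical training entry point
    return f"training {name} at lr={learn_rate}"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Embedding")
    learn_rate = gr.Textbox(value="0.005", label="Learning rate")
    output = gr.Text(show_label=False)
    train_btn = gr.Button(value="Train Embedding", variant='primary')
    train_btn.click(fn=wrap_gpu_call(train), inputs=[name, learn_rate], outputs=[output])

# demo.launch()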
- train_hypernetwork.click( - fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_hypernetwork_name, - hypernetwork_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - - interrupt_training.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - interrupt_preprocessing.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) - - settings = ui_settings.UiSettings() - settings.create_ui(loadsave, dummy_component) - - interfaces = [ - (txt2img_interface, "txt2img", "txt2img"), - (img2img_interface, "img2img", "img2img"), - (extras_interface, "Extras", "extras"), - (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "train"), - ] - - interfaces += script_callbacks.ui_tabs_callback() - interfaces += [(settings.interface, "Settings", "settings")] - - extensions_interface = ui_extensions.create_ui() - interfaces += [(extensions_interface, "Extensions", "extensions")] - - shared.tab_names = [] - for _interface, label, _ifid in interfaces: - shared.tab_names.append(label) - - with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: - settings.add_quicksettings() - - parameters_copypaste.connect_paste_params_buttons() - - with gr.Tabs(elem_id="tabs") as tabs: - tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} - sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) - - for interface, label, ifid in sorted_interfaces: - if label in shared.opts.hidden_tabs: - continue - with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): - interface.render() - - for interface, _label, ifid in interfaces: - if ifid in ["extensions", "settings"]: - continue - - loadsave.add_block(interface, ifid) - - loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) - - loadsave.setup_ui() - - if os.path.exists(os.path.join(script_path, "notification.mp3")): - gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) - - footer = shared.html("footer.html") - footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") - gr.HTML(footer, elem_id="footer") - - settings.add_functionality(demo) - - update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - - def modelmerger(*args): - try: - results = modules.extras.run_modelmerger(*args) - except Exception as e: - errors.report("Error loading/saving model file", exc_info=True) - modules.sd_models.list_models() # to remove the potentially missing models from the list - return 
[*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] - return results - - modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) - modelmerger_merge.click( - fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), - _js='modelmerger', - inputs=[ - dummy_component, - primary_model_name, - secondary_model_name, - tertiary_model_name, - interp_method, - interp_amount, - save_as_half, - custom_name, - checkpoint_format, - config_source, - bake_in_vae, - discard_weights, - save_metadata, - ], - outputs=[ - primary_model_name, - secondary_model_name, - tertiary_model_name, - settings.component_dict['sd_model_checkpoint'], - modelmerger_result, - ] - ) - - loadsave.dump_defaults() - demo.ui_loadsave = loadsave - - # Required as a workaround for change() event not triggering when loading values from ui-config.json - interp_description.value = update_interp_description(interp_method.value) - - return demo - - -def versions_html(): - import torch - import launch - - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - commit = launch.commit_hash() - tag = launch.git_tag() - - if shared.xformers_available: - import xformers - xformers_version = xformers.__version__ - else: - xformers_version = "N/A" - - return f""" -version: {tag} - •  -python: {python_version} - •  -torch: {getattr(torch, '__long_version__',torch.__version__)} - •  -xformers: {xformers_version} - •  -gradio: {gr.__version__} - •  -checkpoint: N/A -""" - - -def setup_ui_api(app): - from pydantic import BaseModel, Field - from typing import List - - class QuicksettingsHint(BaseModel): - name: str = Field(title="Name of the quicksettings field") - label: str = Field(title="Label of the quicksettings field") - - def quicksettings_hint(): - return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] - - app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) - - app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) - - app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) - - def download_sysinfo(attachment=False): - from fastapi.responses import PlainTextResponse - - text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" - - return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) - - app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) - app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) - -- cgit v1.2.1 From 6d3a0c950626e887f20bfc9946b84f9685303bab Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 07:43:43 +0300 Subject: move checkpoint merger UI to its own file --- modules/ui.py | 97 +-- modules/ui_checkpoint_merger.py | 1657 ++------------------------------------- 2 files changed, 76 insertions(+), 1678 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 07ecee7b..ac2787eb 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,7 +12,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, 
extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -1083,58 +1083,7 @@ def create_ui(): outputs=[html, generation_info, html2], ) - def update_interp_description(value): - interp_description_css = "
<p style='margin-bottom: 2.5em'>{}</p>
" - interp_descriptions = { - "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), - "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), - "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") - } - return interp_descriptions[value] - - with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact'): - interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - - with FormRow(elem_id="modelmerger_models"): - primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") - create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - - secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") - create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - - tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") - create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - - custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") - interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") - interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) - - with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") - save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") - - with FormRow(): - with gr.Column(): - config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - - with gr.Column(): - with FormRow(): - bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") - create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") - - with FormRow(): - discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - - with gr.Row(): - modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') - - with gr.Column(variant='compact', elem_id="modelmerger_results_container"): - with gr.Group(elem_id="modelmerger_results_panel"): - modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) + modelmerger_ui = ui_checkpoint_merger.UiCheckpointMerger() with gr.Blocks(analytics_enabled=False) as train_interface: with gr.Row().style(equal_height=False): @@ -1464,7 +1413,7 @@ def create_ui(): (img2img_interface, "img2img", "img2img"), (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), + (modelmerger_ui.blocks, "Checkpoint Merger", "modelmerger"), (train_interface, "Train", "train"), ] @@ -1516,49 +1465,11 @@ def create_ui(): settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - def modelmerger(*args): - try: - results = modules.extras.run_modelmerger(*args) - except Exception as e: - errors.report("Error loading/saving model file", exc_info=True) - modules.sd_models.list_models() # to remove the potentially missing models from the list - return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] - return results - - modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) - modelmerger_merge.click( - fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), - _js='modelmerger', - inputs=[ - dummy_component, - primary_model_name, - secondary_model_name, - tertiary_model_name, - interp_method, - interp_amount, - save_as_half, - custom_name, - checkpoint_format, - config_source, - bake_in_vae, - discard_weights, - save_metadata, - ], - outputs=[ - primary_model_name, - secondary_model_name, - tertiary_model_name, - settings.component_dict['sd_model_checkpoint'], - modelmerger_result, - ] - ) + modelmerger_ui.setup_ui(dummy_component=dummy_component, sd_model_checkpoint_component=settings.component_dict['sd_model_checkpoint']) loadsave.dump_defaults() demo.ui_loadsave = loadsave - # Required as a workaround for change() event not triggering when loading values from ui-config.json - interp_description.value = update_interp_description(interp_method.value) - return demo diff --git a/modules/ui_checkpoint_merger.py b/modules/ui_checkpoint_merger.py index 07ecee7b..8e72258a 100644 --- a/modules/ui_checkpoint_merger.py +++ b/modules/ui_checkpoint_merger.py @@ -1,1621 +1,108 @@ -import datetime -import json -import mimetypes -import os -import sys -from functools import reduce -import warnings import gradio as gr -import gradio.utils -import numpy as np -from PIL import Image, PngImagePlugin # noqa: F401 -from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML -from modules.paths import script_path +from modules import sd_models, sd_vae, errors, extras, call_queue +from modules.ui_components import FormRow from modules.ui_common import create_refresh_button -from modules.ui_gradio_extensions import reload_javascript -from modules.shared import opts, cmd_opts +def 
update_interp_description(value): + interp_description_css = "<p style='margin-bottom: 2.5em'>{}</p>
" + interp_descriptions = { + "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), + "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), + "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") + } + return interp_descriptions[value] -import modules.codeformer_model -import modules.generation_parameters_copypaste as parameters_copypaste -import modules.gfpgan_model -import modules.hypernetworks.ui -import modules.scripts -import modules.shared as shared -import modules.styles -import modules.textual_inversion.ui -from modules import prompt_parser -from modules.sd_hijack import model_hijack -from modules.sd_samplers import samplers, samplers_for_img2img -from modules.textual_inversion import textual_inversion -import modules.hypernetworks.ui -from modules.generation_parameters_copypaste import image_from_url_text -import modules.extras -create_setting_component = ui_settings.create_setting_component - -warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) - -# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI -mimetypes.init() -mimetypes.add_type('application/javascript', '.js') - -if not cmd_opts.share and not cmd_opts.listen: - # fix gradio phoning home - gradio.utils.version_check = lambda: None - gradio.utils.get_local_ip_address = lambda: '127.0.0.1' - -if cmd_opts.ngrok is not None: - import modules.ngrok as ngrok - print('ngrok authtoken detected, trying to connect...') - ngrok.connect( - cmd_opts.ngrok, - cmd_opts.port if cmd_opts.port is not None else 7860, - cmd_opts.ngrok_options - ) - - -def gr_show(visible=True): - return {"visible": visible, "__type__": "update"} - - -sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" -sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None - -# Using constants for these since the variation selector isn't visible. -# Important that they exactly match script.js for tooltip to work. 
-random_symbol = '\U0001f3b2\ufe0f' # 🎲️ -reuse_symbol = '\u267b\ufe0f' # ♻️ -paste_symbol = '\u2199\ufe0f' # ↙ -refresh_symbol = '\U0001f504' # 🔄 -save_style_symbol = '\U0001f4be' # 💾 -apply_style_symbol = '\U0001f4cb' # 📋 -clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ -extra_networks_symbol = '\U0001F3B4' # 🎴 -switch_values_symbol = '\U000021C5' # ⇅ -restore_progress_symbol = '\U0001F300' # 🌀 -detect_image_size_symbol = '\U0001F4D0' # 📐 -up_down_symbol = '\u2195\ufe0f' # ↕️ - - -plaintext_to_html = ui_common.plaintext_to_html - - -def send_gradio_gallery_to_image(x): - if len(x) == 0: - return None - return image_from_url_text(x[0]) - - -def add_style(name: str, prompt: str, negative_prompt: str): - if name is None: - return [gr_show() for x in range(4)] - - style = modules.styles.PromptStyle(name, prompt, negative_prompt) - shared.prompt_styles.styles[style.name] = style - # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we - # reserialize all styles every time we save them - shared.prompt_styles.save_styles(shared.styles_filename) - - return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] - - -def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): - from modules import processing, devices - - if not enable: - return "" - - p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) - - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" - - -def resize_from_to_html(width, height, scale_by): - target_width = int(width * scale_by) - target_height = int(height * scale_by) - - if not target_width or not target_height: - return "no image selected" - - return f"resize: from {width}x{height} to {target_width}x{target_height}" - - -def apply_styles(prompt, prompt_neg, styles): - prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) - prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) - - return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] - - -def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): - if mode in {0, 1, 3, 4}: - return [interrogation_function(ii_singles[mode]), None] - elif mode == 2: - return [interrogation_function(ii_singles[mode]["image"]), None] - elif mode == 5: - assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" - images = shared.listfiles(ii_input_dir) - print(f"Will process {len(images)} images.") - if ii_output_dir != "": - os.makedirs(ii_output_dir, exist_ok=True) - else: - ii_output_dir = ii_input_dir - - for image in images: - img = Image.open(image) - filename = os.path.basename(image) - left, _ = os.path.splitext(filename) - print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a', encoding='utf-8')) - - return [gr.update(), None] - - -def interrogate(image): - prompt = shared.interrogator.interrogate(image.convert("RGB")) - return gr.update() if prompt is None else prompt - - -def interrogate_deepbooru(image): - prompt = deepbooru.model.tag(image) - return gr.update() if prompt is None else prompt - - -def create_seed_inputs(target_interface): - with 
FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") - seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - - seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) - - # Components to show/hide based on the 'Extra' checkbox - seed_extras = [] - - with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: - seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") - subseed.style(container=False) - random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") - reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") - - with FormRow(visible=False) as seed_extra_row_2: - seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") - - random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) - random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) - - def change_visibility(show): - return {comp: gr_show(show) for comp in seed_extras} - - seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) - - return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox - - - -def connect_clear_prompt(button): - """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" - button.click( - _js="clear_prompt", - fn=None, - inputs=[], - outputs=[], - ) - - -def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): - """ Connects a 'reuse (sub)seed' button's click event so that it copies last used - (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength - was 0, i.e. 
no variation seed was used, it copies the normal seed value instead.""" - def copy_seed(gen_info_string: str, index): - res = -1 - - try: - gen_info = json.loads(gen_info_string) - index -= gen_info.get('index_of_first_image', 0) - - if is_subseed and gen_info.get('subseed_strength', 0) > 0: - all_subseeds = gen_info.get('all_subseeds', [-1]) - res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] - else: - all_seeds = gen_info.get('all_seeds', [-1]) - res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - - except json.decoder.JSONDecodeError: - if gen_info_string: - errors.report(f"Error parsing JSON generation info: {gen_info_string}") - - return [res, gr_show(False)] - - reuse_seed.click( - fn=copy_seed, - _js="(x, y) => [x, selected_gallery_index()]", - show_progress=False, - inputs=[generation_info, dummy_component], - outputs=[seed, dummy_component] - ) - - -def update_token_counter(text, steps): +def modelmerger(*args): try: - text, _ = extra_networks.parse_prompt(text) - - _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) - prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) - - except Exception: - # a parsing error can happen here during typing, and we don't want to bother the user with - # messages related to it in console - prompt_schedules = [[[steps, text]]] - - flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) - prompts = [prompt_text for step, prompt_text in flat_prompts] - token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) - return f"{token_count}/{max_length}" - - -def create_toprow(is_img2img): - id_part = "img2img" if is_img2img else "txt2img" - - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - button_interrogate = None - button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - - interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - with gr.Row(elem_id=f"{id_part}_tools"): - paste = ToolButton(value=paste_symbol, elem_id="paste") - clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = 
ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") - prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") - save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) - - token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[prompt, negative_prompt], - outputs=[prompt, negative_prompt], - ) - - with gr.Row(elem_id=f"{id_part}_styles_row"): - prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) - create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button - - -def setup_progressbar(*args, **kwargs): - pass - - -def apply_setting(key, value): - if value is None: - return gr.update() - - if shared.cmd_opts.freeze_settings: - return gr.update() - - # dont allow model to be swapped when model hash exists in prompt - if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: - return gr.update() - - if key == "sd_model_checkpoint": - ckpt_info = sd_models.get_closet_checkpoint_match(value) - - if ckpt_info is not None: - value = ckpt_info.title - else: - return gr.update() - - comp_args = opts.data_labels[key].component_args - if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: - return - - valtype = type(opts.data_labels[key].default) - oldval = opts.data.get(key, None) - opts.data[key] = valtype(value) if valtype != type(None) else value - if oldval != value and opts.data_labels[key].onchange is not None: - opts.data_labels[key].onchange() - - opts.save(shared.config_filename) - return getattr(opts, key) - - -def create_output_panel(tabname, outdir): - return ui_common.create_output_panel(tabname, outdir) - - -def create_sampler_and_steps_selection(choices, tabname): - if opts.samplers_in_dropdown: - with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - else: - with FormGroup(elem_id=f"sampler_selection_{tabname}"): - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - - return steps, sampler_index - - -def ordered_ui_categories(): - user_order = {x.strip(): i * 
2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} - - for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): - yield category - - -def create_override_settings_dropdown(tabname, row): - dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True) - - dropdown.change( - fn=lambda x: gr.Dropdown.update(visible=bool(x)), - inputs=[dropdown], - outputs=[dropdown], - ) - - return dropdown - - -def create_ui(): - import modules.img2img - import modules.txt2img - - reload_javascript() - - parameters_copypaste.reset() - - modules.scripts.scripts_current = modules.scripts.scripts_txt2img - modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) - - with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) - - dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') - - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact', elem_id="txt2img_settings"): - modules.scripts.scripts_txt2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - - with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "cfg": - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) - - elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") - - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="txt2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('txt2img', row) - - elif category == "scripts": - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - - else: - modules.scripts.scripts_txt2img.setup_ui_for_section(category) - - hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] - - for component in hr_resolution_preview_inputs: - event = component.release if isinstance(component, gr.Slider) else component.change - - event( - fn=calc_resolution_hires, - inputs=hr_resolution_preview_inputs, - outputs=[hr_final_resolution], - show_progress=False, - ) - event( - None, - _js="onCalcResolutionHires", - inputs=hr_resolution_preview_inputs, - outputs=[], - show_progress=False, - ) - - txt2img_gallery, 
generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - txt2img_args = dict( - fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), - _js="submit", - inputs=[ - dummy_component, - txt2img_prompt, - txt2img_negative_prompt, - txt2img_prompt_styles, - steps, - sampler_index, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - height, - width, - enable_hr, - denoising_strength, - hr_scale, - hr_upscaler, - hr_second_pass_steps, - hr_resize_x, - hr_resize_y, - hr_sampler_index, - hr_prompt, - hr_negative_prompt, - override_settings, - - ] + custom_inputs, - - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt2img_prompt.submit(**txt2img_args) - submit.click(**txt2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressTxt2img", - inputs=[dummy_component], - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - txt_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - txt_prompt_img - ], - outputs=[ - txt2img_prompt, - txt_prompt_img - ], - show_progress=False, - ) - - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - - txt2img_paste_fields = [ - (txt2img_prompt, "Prompt"), - (txt2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (hr_scale, "Hires upscale"), - (hr_upscaler, "Hires upscaler"), - (hr_second_pass_steps, "Hires steps"), - (hr_resize_x, "Hires resize-1"), - (hr_resize_y, "Hires resize-2"), - (hr_sampler_index, "Hires sampler"), - (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()), - (hr_prompt, "Hires prompt"), - (hr_negative_prompt, "Hires negative prompt"), - (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - *modules.scripts.scripts_txt2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, - )) - - txt2img_preview_params = [ - 
txt2img_prompt, - txt2img_negative_prompt, - steps, - sampler_index, - cfg_scale, - seed, - width, - height, - ] - - token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) - - modules.scripts.scripts_current = modules.scripts.scripts_img2img - modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) - - with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) - - img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: - from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') - - with FormRow().style(equal_height=False): - with gr.Column(variant='compact', elem_id="img2img_settings"): - copy_image_buttons = [] - copy_image_destinations = {} - - def add_copy_image_controls(tab_name, elem): - with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"): - gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}") - - for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']): - if name == tab_name: - gr.Button(title, interactive=False) - copy_image_destinations[name] = elem - continue - - button = gr.Button(title) - copy_image_buttons.append((button, name, elem)) - - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) - - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) - - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('sketch', sketch) - - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) - add_copy_image_controls('inpaint', init_img_with_mask) - - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state - - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask") - - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." + - "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"<br>Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}</p>
" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] - - return img - - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) - - with FormRow(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - - modules.scripts.scripts_img2img.prepare_ui() - - for category in ordered_ui_categories(): - if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - - elif category == "dimensions": - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - selected_scale_tab = gr.State(value=0) - - with gr.Tabs(): - with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn") + results = extras.run_modelmerger(*args) + except Exception as e: + errors.report("Error loading/saving model file", exc_info=True) + sd_models.list_models() # to remove the potentially missing models from the list + return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] + return results - with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: - scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") - with FormRow(): - scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), 
elem_id="img2img_scale_resolution_preview") - gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") - button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") +class UiCheckpointMerger: + def __init__(self): + with gr.Blocks(analytics_enabled=False) as modelmerger_interface: + with gr.Row().style(equal_height=False): + with gr.Column(variant='compact'): + self.interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - on_change_args = dict( - fn=resize_from_to_html, - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, scale_by], - outputs=scale_by_html, - show_progress=False, - ) + with FormRow(elem_id="modelmerger_models"): + self.primary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") + create_refresh_button(self.primary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - scale_by.release(**on_change_args) - button_update_resize_to.click(**on_change_args) + self.secondary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") + create_refresh_button(self.secondary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - # the code below is meant to update the resolution label after the image in the image selection UI has changed. - # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. - # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. - for component in [init_img, sketch]: - component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + self.tertiary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") + create_refresh_button(self.tertiary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) - tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - - if opts.dimensions_and_batch_together: - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "cfg": - with FormGroup(): - with FormRow(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - - elif category == "checkboxes": - with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 
1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") - - elif category == "batch": - if not opts.dimensions_and_batch_together: - with FormRow(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - elif category == "override_settings": - with FormRow(elem_id="img2img_override_settings_row") as row: - override_settings = create_override_settings_dropdown('img2img', row) - - elif category == "scripts": - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() - - elif category == "inpaint": - with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: - with FormRow(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") - mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha") - - with FormRow(): - inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - - with FormRow(): - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - - with FormRow(): - with gr.Column(): - inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") - - with gr.Column(scale=4): - inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) - else: - modules.scripts.scripts_img2img.setup_ui_for_section(category) - - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) - - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - - img2img_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - img2img_prompt_img - ], - outputs=[ - img2img_prompt, - img2img_prompt_img - ], - show_progress=False, - ) - - img2img_args = dict( - fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), - _js="submit_img2img", - inputs=[ - dummy_component, - dummy_component, - img2img_prompt, - img2img_negative_prompt, - img2img_prompt_styles, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - inpaint_color_sketch_orig, - init_img_inpaint, - init_mask_inpaint, - steps, - sampler_index, - mask_blur, - mask_alpha, - inpainting_fill, - restore_faces, - tiling, - batch_count, - batch_size, - cfg_scale, - image_cfg_scale, - denoising_strength, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, - selected_scale_tab, - height, - width, - scale_by, - resize_mode, - inpaint_full_res, - inpaint_full_res_padding, - inpainting_mask_invert, 
- img2img_batch_input_dir, - img2img_batch_output_dir, - img2img_batch_inpaint_mask_dir, - override_settings, - img2img_batch_use_png_info, - img2img_batch_png_info_props, - img2img_batch_png_info_dir, - ] + custom_inputs, - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - interrogate_args = dict( - _js="get_img2img_tab_index", - inputs=[ - dummy_component, - img2img_batch_input_dir, - img2img_batch_output_dir, - init_img, - sketch, - init_img_with_mask, - inpaint_color_sketch, - init_img_inpaint, - ], - outputs=[img2img_prompt, dummy_component], - ) - - img2img_prompt.submit(**img2img_args) - submit.click(**img2img_args) - - res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) - - detect_image_size_btn.click( - fn=lambda w, h, _: (w or gr.update(), h or gr.update()), - _js="currentImg2imgSourceResolution", - inputs=[dummy_component, dummy_component, dummy_component], - outputs=[width, height], - show_progress=False, - ) - - restore_progress_button.click( - fn=progress.restore_progress, - _js="restoreProgressImg2img", - inputs=[dummy_component], - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ], - show_progress=False, - ) - - img2img_interrogate.click( - fn=lambda *args: process_interrogate(interrogate, *args), - **interrogate_args, - ) - - img2img_deepbooru.click( - fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), - **interrogate_args, - ) - - prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] - style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] - style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] - - for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): - button.click( - fn=add_style, - _js="ask_for_style_name", - # Have to pass empty dummy component here, because the JavaScript and Python function have to accept - # the same number of parameters, but we only know the style-name after the JavaScript prompt - inputs=[dummy_component, prompt, negative_prompt], - outputs=[txt2img_prompt_styles, img2img_prompt_styles], - ) - - for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): - button.click( - fn=apply_styles, - _js=js_func, - inputs=[prompt, negative_prompt, styles], - outputs=[prompt, negative_prompt, styles], - ) - - token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) - - ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) - - img2img_paste_fields = [ - (img2img_prompt, "Prompt"), - (img2img_negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_index, "Sampler"), - (restore_faces, "Face restoration"), - (cfg_scale, "CFG scale"), - (image_cfg_scale, "Image CFG scale"), - (seed, "Seed"), - (width, "Size-1"), - (height, "Size-2"), - (batch_size, "Batch size"), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), - (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), - 
(denoising_strength, "Denoising strength"), - (mask_blur, "Mask blur"), - *modules.scripts.scripts_img2img.infotext_fields - ] - parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) - parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, - )) - - modules.scripts.scripts_current = None - - with gr.Blocks(analytics_enabled=False) as extras_interface: - ui_postprocessing.create_ui() - - with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='panel'): - image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") - - with gr.Column(variant='panel'): - html = gr.HTML() - generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") - html2 = gr.HTML() - with gr.Row(): - buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) - - for tabname, button in buttons.items(): - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image, - )) - - image.change( - fn=wrap_gradio_call(modules.extras.run_pnginfo), - inputs=[image], - outputs=[html, generation_info, html2], - ) - - def update_interp_description(value): - interp_description_css = "
<p style='margin-bottom: 2.5em'>{}</p>
" - interp_descriptions = { - "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."), - "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"), - "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M") - } - return interp_descriptions[value] - - with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): - with gr.Column(variant='compact'): - interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") - - with FormRow(elem_id="modelmerger_models"): - primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") - create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") - - secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") - create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") - - tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") - create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - - custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") - interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") - interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) - - with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") - save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") - - with FormRow(): - with gr.Column(): - config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - - with gr.Column(): - with FormRow(): - bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") - create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") - - with FormRow(): - discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - - with gr.Row(): - modelmerger_merge = gr.Button(elem_id="modelmerger_merge", 
value="Merge", variant='primary') - - with gr.Column(variant='compact', elem_id="modelmerger_results_container"): - with gr.Group(elem_id="modelmerger_results_panel"): - modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) - - with gr.Blocks(analytics_enabled=False) as train_interface: - with gr.Row().style(equal_height=False): - gr.HTML(value="
<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>
") - - with gr.Row(variant="compact").style(equal_height=False): - with gr.Tabs(elem_id="train_tabs"): - - with gr.Tab(label="Create embedding", id="create_embedding"): - new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") - initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") - nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") - overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") - - with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): - new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") - new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") - new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") - new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") - new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") - new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. 
ex:'0, 0.01, 0'") - overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - - with gr.Tab(label="Preprocess images", id="preprocess_images"): - process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") - process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") - - with gr.Row(): - process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") - process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") - process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") - process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") - process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop") - process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") - - with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") - - with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") - process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - - with gr.Column(visible=False) as process_multicrop_col: - gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') - with gr.Row(): - process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim") - process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim") - with gr.Row(): - process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea") - process_multicrop_maxarea = 
gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea") - with gr.Row(): - process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") - process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") - run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") - - process_split.change( - fn=lambda show: gr_show(show), - inputs=[process_split], - outputs=[process_split_extra_row], - ) - - process_focal_crop.change( - fn=lambda show: gr_show(show), - inputs=[process_focal_crop], - outputs=[process_focal_crop_row], - ) - - process_multicrop.change( - fn=lambda show: gr_show(show), - inputs=[process_multicrop], - outputs=[process_multicrop_col], - ) - - def get_textual_inversion_template_names(): - return sorted(textual_inversion.textual_inversion_templates) - - with gr.Tab(label="Train", id="train"): - gr.HTML(value="
<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>
") - with FormRow(): - train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) - create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") - - with FormRow(): - embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") - hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - - with FormRow(): - clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) - clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) + self.custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") + self.interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") + self.interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") + self.interp_method.change(fn=update_interp_description, inputs=[self.interp_method], outputs=[self.interp_description]) with FormRow(): - batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") - - dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") - log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") + self.checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + self.save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") + self.save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") with FormRow(): - template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names()) - create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file") + with gr.Column(): + self.config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method") - training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") - training_height = gr.Slider(minimum=64, maximum=2048, step=8, 
label="Height", value=512, elem_id="train_training_height") - varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize") - steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") + with gr.Column(): + with FormRow(): + self.bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae") + create_refresh_button(self.bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae") with FormRow(): - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") - - use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight") - - save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") - preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") - - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") - - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") + self.discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") with gr.Row(): - train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") - interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") - train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") - - params = script_callbacks.UiTrainTabParams(txt2img_preview_params) - - script_callbacks.ui_train_tabs_callback(params) - - with gr.Column(elem_id='ti_gallery_container'): - ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) - gr.HTML(elem_id="ti_progress", value="") - ti_outcome = gr.HTML(elem_id="ti_error", value="") - - create_embedding.click( - fn=modules.textual_inversion.ui.create_embedding, - inputs=[ - new_embedding_name, - initialization_text, - nvpt, - overwrite_old_embedding, - ], - outputs=[ - train_embedding_name, - ti_output, - ti_outcome, - ] - ) - - create_hypernetwork.click( - fn=modules.hypernetworks.ui.create_hypernetwork, - inputs=[ - new_hypernetwork_name, - new_hypernetwork_sizes, - overwrite_old_hypernetwork, - new_hypernetwork_layer_structure, - new_hypernetwork_activation_func, - new_hypernetwork_initialization_option, - new_hypernetwork_add_layer_norm, - new_hypernetwork_use_dropout, - new_hypernetwork_dropout_structure - ], - outputs=[ - train_hypernetwork_name, - ti_output, - ti_outcome, - ] - ) - - run_preprocess.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), - 
_js="start_training_textual_inversion", - inputs=[ - dummy_component, - process_src, - process_dst, - process_width, - process_height, - preprocess_txt_action, - process_keep_original_size, - process_flip, - process_split, - process_caption, - process_caption_deepbooru, - process_split_threshold, - process_overlap_ratio, - process_focal_crop, - process_focal_crop_face_weight, - process_focal_crop_entropy_weight, - process_focal_crop_edges_weight, - process_focal_crop_debug, - process_multicrop, - process_multicrop_mindim, - process_multicrop_maxdim, - process_multicrop_minarea, - process_multicrop_maxarea, - process_multicrop_objective, - process_multicrop_threshold, - ], - outputs=[ - ti_output, - ti_outcome, - ], - ) - - train_embedding.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_embedding_name, - embedding_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - save_image_with_stored_embedding, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - - train_hypernetwork.click( - fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), - _js="start_training_textual_inversion", - inputs=[ - dummy_component, - train_hypernetwork_name, - hypernetwork_learn_rate, - batch_size, - gradient_step, - dataset_directory, - log_directory, - training_width, - training_height, - varsize, - steps, - clip_grad_mode, - clip_grad_value, - shuffle_tags, - tag_drop_out, - latent_sampling_method, - use_weight, - create_image_every, - save_embedding_every, - template_file, - preview_from_txt2img, - *txt2img_preview_params, - ], - outputs=[ - ti_output, - ti_outcome, - ] - ) - - interrupt_training.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - interrupt_preprocessing.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) - - settings = ui_settings.UiSettings() - settings.create_ui(loadsave, dummy_component) - - interfaces = [ - (txt2img_interface, "txt2img", "txt2img"), - (img2img_interface, "img2img", "img2img"), - (extras_interface, "Extras", "extras"), - (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_interface, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "train"), - ] - - interfaces += script_callbacks.ui_tabs_callback() - interfaces += [(settings.interface, "Settings", "settings")] - - extensions_interface = ui_extensions.create_ui() - interfaces += [(extensions_interface, "Extensions", "extensions")] - - shared.tab_names = [] - for _interface, label, _ifid in interfaces: - shared.tab_names.append(label) - - with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: - settings.add_quicksettings() - - parameters_copypaste.connect_paste_params_buttons() - - with gr.Tabs(elem_id="tabs") as tabs: - tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} - sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) - - for interface, label, ifid in sorted_interfaces: - if label in shared.opts.hidden_tabs: - 
continue - with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): - interface.render() - - for interface, _label, ifid in interfaces: - if ifid in ["extensions", "settings"]: - continue - - loadsave.add_block(interface, ifid) - - loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) - - loadsave.setup_ui() + self.modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary') - if os.path.exists(os.path.join(script_path, "notification.mp3")): - gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) + with gr.Column(variant='compact', elem_id="modelmerger_results_container"): + with gr.Group(elem_id="modelmerger_results_panel"): + self.modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) - footer = shared.html("footer.html") - footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") - gr.HTML(footer, elem_id="footer") + self.blocks = modelmerger_interface - settings.add_functionality(demo) - - update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - - def modelmerger(*args): - try: - results = modules.extras.run_modelmerger(*args) - except Exception as e: - errors.report("Error loading/saving model file", exc_info=True) - modules.sd_models.list_models() # to remove the potentially missing models from the list - return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"] - return results - - modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result]) - modelmerger_merge.click( - fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), + def setup_ui(self, dummy_component, sd_model_checkpoint_component): + self.modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[self.modelmerger_result]) + self.modelmerger_merge.click( + fn=call_queue.wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), _js='modelmerger', inputs=[ dummy_component, - primary_model_name, - secondary_model_name, - tertiary_model_name, - interp_method, - interp_amount, - save_as_half, - custom_name, - checkpoint_format, - config_source, - bake_in_vae, - discard_weights, - save_metadata, + self.primary_model_name, + self.secondary_model_name, + self.tertiary_model_name, + self.interp_method, + self.interp_amount, + self.save_as_half, + self.custom_name, + self.checkpoint_format, + self.config_source, + self.bake_in_vae, + self.discard_weights, + self.save_metadata, ], outputs=[ - primary_model_name, - secondary_model_name, - tertiary_model_name, - settings.component_dict['sd_model_checkpoint'], - modelmerger_result, + self.primary_model_name, + self.secondary_model_name, + self.tertiary_model_name, + sd_model_checkpoint_component, + self.modelmerger_result, ] ) - loadsave.dump_defaults() - demo.ui_loadsave = loadsave - - # Required as a workaround for change() event not triggering when loading values from ui-config.json - interp_description.value = update_interp_description(interp_method.value) - - return demo - - -def versions_html(): - import torch - import launch - - 
python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - commit = launch.commit_hash() - tag = launch.git_tag() - - if shared.xformers_available: - import xformers - xformers_version = xformers.__version__ - else: - xformers_version = "N/A" - - return f""" -version: {tag} - •  -python: {python_version} - •  -torch: {getattr(torch, '__long_version__',torch.__version__)} - •  -xformers: {xformers_version} - •  -gradio: {gr.__version__} - •  -checkpoint: N/A -""" - - -def setup_ui_api(app): - from pydantic import BaseModel, Field - from typing import List - - class QuicksettingsHint(BaseModel): - name: str = Field(title="Name of the quicksettings field") - label: str = Field(title="Label of the quicksettings field") - - def quicksettings_hint(): - return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] - - app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) - - app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) - - app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"]) - - def download_sysinfo(attachment=False): - from fastapi.responses import PlainTextResponse - - text = sysinfo.get() - filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt" - - return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'}) - - app.add_api_route("/internal/sysinfo", download_sysinfo, methods=["GET"]) - app.add_api_route("/internal/sysinfo-download", lambda: download_sysinfo(attachment=True), methods=["GET"]) + # Required as a workaround for change() event not triggering when loading values from ui-config.json + self.interp_description.value = update_interp_description(self.interp_method.value) -- cgit v1.2.1 From 07be13caa357b14f6afa247566d53339522b8e66 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 08:27:54 +0300 Subject: add metadata to checkpoint merger --- modules/extras.py | 39 +++++++++++++++++++++++++++++++++------ modules/sd_models.py | 2 +- modules/ui_checkpoint_merger.py | 20 ++++++++++++++++++-- 3 files changed, 52 insertions(+), 9 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index e9c0263e..2a310ae3 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -7,7 +7,7 @@ import json import torch import tqdm -from modules import shared, images, sd_models, sd_vae, sd_models_config +from modules import shared, images, sd_models, sd_vae, sd_models_config, errors from modules.ui_common import plaintext_to_html import gradio as gr import safetensors.torch @@ -72,7 +72,20 @@ def to_half(tensor, enable): return tensor -def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata): +def read_metadata(primary_model_name, secondary_model_name, tertiary_model_name): + metadata = {} + + for checkpoint_name in [primary_model_name, secondary_model_name, tertiary_model_name]: + checkpoint_info = sd_models.checkpoints_list.get(checkpoint_name, None) + if checkpoint_info is None: + continue + + metadata.update(checkpoint_info.metadata) + + return json.dumps(metadata, indent=4, ensure_ascii=False) + + +def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, 
custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata, add_merge_recipe, copy_metadata_fields, metadata_json): shared.state.begin(job="model-merge") def fail(message): @@ -241,11 +254,25 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ shared.state.textinfo = "Saving" print(f"Saving to {output_modelname}...") - metadata = None + metadata = {} + + if save_metadata and copy_metadata_fields: + if primary_model_info: + metadata.update(primary_model_info.metadata) + if secondary_model_info: + metadata.update(secondary_model_info.metadata) + if tertiary_model_info: + metadata.update(tertiary_model_info.metadata) if save_metadata: - metadata = {"format": "pt"} + try: + metadata.update(json.loads(metadata_json)) + except Exception as e: + errors.display(e, "readin metadata from json") + + metadata["format"] = "pt" + if save_metadata and add_merge_recipe: merge_recipe = { "type": "webui", # indicate this model was merged with webui's built-in merger "primary_model_hash": primary_model_info.sha256, @@ -261,7 +288,6 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ "is_inpainting": result_is_inpainting_model, "is_instruct_pix2pix": result_is_instruct_pix2pix_model } - metadata["sd_merge_recipe"] = json.dumps(merge_recipe) sd_merge_models = {} @@ -281,11 +307,12 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ if tertiary_model_info: add_model_metadata(tertiary_model_info) + metadata["sd_merge_recipe"] = json.dumps(merge_recipe) metadata["sd_merge_models"] = json.dumps(sd_merge_models) _, extension = os.path.splitext(output_modelname) if extension.lower() == ".safetensors": - safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata) + safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata if len(metadata)>0 else None) else: torch.save(theta_0, output_modelname) diff --git a/modules/sd_models.py b/modules/sd_models.py index 1af7fd78..8f72f21d 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -85,7 +85,7 @@ class CheckpointInfo: if self.shorthash not in self.ids: self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] - checkpoints_list.pop(self.title) + checkpoints_list.pop(self.title, None) self.title = f'{self.name} [{self.shorthash}]' self.register() diff --git a/modules/ui_checkpoint_merger.py b/modules/ui_checkpoint_merger.py index 8e72258a..4863d861 100644 --- a/modules/ui_checkpoint_merger.py +++ b/modules/ui_checkpoint_merger.py @@ -51,7 +51,6 @@ class UiCheckpointMerger: with FormRow(): self.checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") self.save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") - self.save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") with FormRow(): with gr.Column(): @@ -65,16 +64,30 @@ class UiCheckpointMerger: with FormRow(): self.discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights") - with gr.Row(): + with gr.Accordion("Metadata", open=False) as metadata_editor: + with FormRow(): + self.save_metadata = gr.Checkbox(value=True, label="Save metadata", elem_id="modelmerger_save_metadata") + self.add_merge_recipe = gr.Checkbox(value=True, label="Add merge recipe metadata", 
elem_id="modelmerger_add_recipe") + self.copy_metadata_fields = gr.Checkbox(value=True, label="Copy metadata from merged models", elem_id="modelmerger_copy_metadata") + + self.metadata_json = gr.TextArea('{}', label="Metadata in JSON format") + self.read_metadata = gr.Button("Read metadata from selected checkpoints") + + with FormRow(): self.modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary') with gr.Column(variant='compact', elem_id="modelmerger_results_container"): with gr.Group(elem_id="modelmerger_results_panel"): self.modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False) + self.metadata_editor = metadata_editor self.blocks = modelmerger_interface def setup_ui(self, dummy_component, sd_model_checkpoint_component): + self.checkpoint_format.change(lambda fmt: gr.update(visible=fmt == 'safetensors'), inputs=[self.checkpoint_format], outputs=[self.metadata_editor], show_progress=False) + + self.read_metadata.click(extras.read_metadata, inputs=[self.primary_model_name, self.secondary_model_name, self.tertiary_model_name], outputs=[self.metadata_json]) + self.modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[self.modelmerger_result]) self.modelmerger_merge.click( fn=call_queue.wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]), @@ -93,6 +106,9 @@ class UiCheckpointMerger: self.bake_in_vae, self.discard_weights, self.save_metadata, + self.add_merge_recipe, + self.copy_metadata_fields, + self.metadata_json, ], outputs=[ self.primary_model_name, -- cgit v1.2.1 From 401ba1b879ecb36b556d677ba66881ee9cbe7fbf Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 09:22:53 +0300 Subject: XYZ plot do not fail if an exception occurs --- scripts/xyz_grid.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index d3c78a2f..103bf104 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -11,7 +11,7 @@ import numpy as np import modules.scripts as scripts import gradio as gr -from modules import images, sd_samplers, processing, sd_models, sd_vae, sd_samplers_kdiffusion +from modules import images, sd_samplers, processing, sd_models, sd_vae, sd_samplers_kdiffusion, errors from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img from modules.shared import opts, state import modules.shared as shared @@ -651,7 +651,12 @@ class Script(scripts.Script): y_opt.apply(pc, y, ys) z_opt.apply(pc, z, zs) - res = process_images(pc) + try: + res = process_images(pc) + except Exception as e: + errors.display(e, "generating image for xyz plot") + + res = Processed(p, [], p.seed, "") # Sets subgrid infotexts subgrid_index = 1 + iz -- cgit v1.2.1 From 6a0d498c8ec5287a75e2a3bc8a4fa79e82e64c18 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 12:50:23 +0300 Subject: support tooltip kwarg for gradio elements --- javascript/hints.js | 11 +++++++++++ modules/scripts.py | 14 ++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/javascript/hints.js b/javascript/hints.js index 4167cb28..6de9372e 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -190,3 +190,14 @@ onUiUpdate(function(mutationRecords) { tooltipCheckTimer = setTimeout(processTooltipCheckNodes, 1000); } }); + +onUiLoaded(function() { + for (var comp of window.gradio_config.components) { + if (comp.props.webui_tooltip && comp.props.elem_id) { + var elem = 
gradioApp().getElementById(comp.props.elem_id); + if (elem) { + elem.title = comp.props.webui_tooltip; + } + } + } +}); diff --git a/modules/scripts.py b/modules/scripts.py index 5b4edcac..edf7347e 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -646,6 +646,8 @@ def add_classes_to_gradio_component(comp): def IOComponent_init(self, *args, **kwargs): + self.webui_tooltip = kwargs.pop('tooltip', None) + if scripts_current is not None: scripts_current.before_component(self, **kwargs) @@ -663,8 +665,20 @@ def IOComponent_init(self, *args, **kwargs): return res +def Block_get_config(self): + config = original_Block_get_config(self) + + webui_tooltip = getattr(self, 'webui_tooltip', None) + if webui_tooltip: + config["webui_tooltip"] = webui_tooltip + + return config + + original_IOComponent_init = gr.components.IOComponent.__init__ +original_Block_get_config = gr.components.Block.get_config gr.components.IOComponent.__init__ = IOComponent_init +gr.components.Block.get_config = Block_get_config def BlockContext_init(self, *args, **kwargs): -- cgit v1.2.1 From 390bffa81b747a7eb38ac7a0cd6dfb9fcc388151 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 1 Aug 2023 17:13:15 +0300 Subject: repair merge error --- modules/sd_models.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 40a450df..3c451a4b 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -15,7 +15,6 @@ import ldm.modules.midas as midas from ldm.util import instantiate_from_config from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache -from modules.sd_hijack_inpainting import do_inpainting_hijack from modules.timer import Timer import tomesd -- cgit v1.2.1 From 10ff071e3368a028ddb6d68d3c3bfd028094aabb Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 2 Aug 2023 18:37:16 +0300 Subject: update doggettx cross attention optimization to not use an unreasonable amount of memory in some edge cases -- suggestion by MorkTheOrk --- modules/sd_hijack_optimizations.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index b5f85ba5..0e810eec 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -256,9 +256,9 @@ def split_cross_attention_forward(self, x, context=None, mask=None, **kwargs): raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). 
' f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free') - slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + slice_size = q.shape[1] // steps for i in range(0, q.shape[1], slice_size): - end = i + slice_size + end = min(i + slice_size, q.shape[1]) s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) s2 = s1.softmax(dim=-1, dtype=q.dtype) -- cgit v1.2.1 From ccb92339348f6973de39cde062982a51a4cd0818 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 2 Aug 2023 18:53:09 +0300 Subject: add yet another torch_gc to reclaim some of VRAM after the initial stage of img2img --- modules/processing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/processing.py b/modules/processing.py index b0992ee1..0b66cd2a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1348,6 +1348,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image = image.to(shared.device, dtype=devices.dtype_vae) self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image)) + devices.torch_gc() if self.resize_mode == 3: self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") -- cgit v1.2.1 From 84b6fcd02ca6d6ab48c4b6be4bb8724b1c2e7014 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 3 Aug 2023 00:00:23 +0300 Subject: add NV option for Random number generator source setting, which allows to generate same pictures on CPU/AMD/Mac as on NVidia videocards. --- modules/devices.py | 39 ++++++++++++++- modules/processing.py | 6 +-- modules/rng_philox.py | 100 ++++++++++++++++++++++++++++++++++++++ modules/sd_samplers_kdiffusion.py | 5 +- modules/shared.py | 2 +- 5 files changed, 142 insertions(+), 10 deletions(-) create mode 100644 modules/rng_philox.py diff --git a/modules/devices.py b/modules/devices.py index 57e51da3..b58776d8 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -3,7 +3,7 @@ import contextlib from functools import lru_cache import torch -from modules import errors +from modules import errors, rng_philox if sys.platform == "darwin": from modules import mac_specific @@ -90,23 +90,58 @@ def cond_cast_float(input): return input.float() if unet_needs_upcast else input +nv_rng = None + + def randn(seed, shape): from modules.shared import opts - torch.manual_seed(seed) + manual_seed(seed) + + if opts.randn_source == "NV": + return torch.asarray(nv_rng.randn(shape), device=device) + if opts.randn_source == "CPU" or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) + return torch.randn(shape, device=device) +def randn_like(x): + from modules.shared import opts + + if opts.randn_source == "NV": + return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype) + + if opts.randn_source == "CPU" or x.device.type == 'mps': + return torch.randn_like(x, device=cpu).to(x.device) + + return torch.randn_like(x) + + def randn_without_seed(shape): from modules.shared import opts + if opts.randn_source == "NV": + return torch.asarray(nv_rng.randn(shape), device=device) + if opts.randn_source == "CPU" or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) + return torch.randn(shape, device=device) +def manual_seed(seed): + from modules.shared import opts + + if opts.randn_source == "NV": + global nv_rng + nv_rng = rng_philox.Generator(seed) + return + + torch.manual_seed(seed) + + def autocast(disable=False): 
From ccb92339348f6973de39cde062982a51a4cd0818 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 2 Aug 2023 18:53:09 +0300
Subject: add yet another torch_gc to reclaim some of VRAM after the initial
 stage of img2img

---
 modules/processing.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/processing.py b/modules/processing.py
index b0992ee1..0b66cd2a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1348,6 +1348,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
             image = image.to(shared.device, dtype=devices.dtype_vae)
 
         self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
+        devices.torch_gc()
 
         if self.resize_mode == 3:
             self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
-- 
cgit v1.2.1


From 84b6fcd02ca6d6ab48c4b6be4bb8724b1c2e7014 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 3 Aug 2023 00:00:23 +0300
Subject: add NV option for Random number generator source setting, which
 allows to generate same pictures on CPU/AMD/Mac as on NVidia videocards.

---
 modules/devices.py                |  39 ++++++++++++++-
 modules/processing.py             |   6 +--
 modules/rng_philox.py             | 100 ++++++++++++++++++++++++++++++++++++++
 modules/sd_samplers_kdiffusion.py |   5 +-
 modules/shared.py                 |   2 +-
 5 files changed, 142 insertions(+), 10 deletions(-)
 create mode 100644 modules/rng_philox.py

diff --git a/modules/devices.py b/modules/devices.py
index 57e51da3..b58776d8 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -3,7 +3,7 @@ import contextlib
 from functools import lru_cache
 
 import torch
-from modules import errors
+from modules import errors, rng_philox
 
 if sys.platform == "darwin":
     from modules import mac_specific
@@ -90,23 +90,58 @@ def cond_cast_float(input):
     return input.float() if unet_needs_upcast else input
 
 
+nv_rng = None
+
+
 def randn(seed, shape):
     from modules.shared import opts
 
-    torch.manual_seed(seed)
+    manual_seed(seed)
+
+    if opts.randn_source == "NV":
+        return torch.asarray(nv_rng.randn(shape), device=device)
+
     if opts.randn_source == "CPU" or device.type == 'mps':
         return torch.randn(shape, device=cpu).to(device)
+
     return torch.randn(shape, device=device)
 
 
+def randn_like(x):
+    from modules.shared import opts
+
+    if opts.randn_source == "NV":
+        return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype)
+
+    if opts.randn_source == "CPU" or x.device.type == 'mps':
+        return torch.randn_like(x, device=cpu).to(x.device)
+
+    return torch.randn_like(x)
+
+
 def randn_without_seed(shape):
     from modules.shared import opts
 
+    if opts.randn_source == "NV":
+        return torch.asarray(nv_rng.randn(shape), device=device)
+
     if opts.randn_source == "CPU" or device.type == 'mps':
         return torch.randn(shape, device=cpu).to(device)
+
     return torch.randn(shape, device=device)
 
 
+def manual_seed(seed):
+    from modules.shared import opts
+
+    if opts.randn_source == "NV":
+        global nv_rng
+        nv_rng = rng_philox.Generator(seed)
+        return
+
+    torch.manual_seed(seed)
+
+
 def autocast(disable=False):
     from modules import shared
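Taken together, the devices.py changes above route every noise request through a single seed-dispatch policy. A condensed sketch of that policy, with the option value passed in explicitly instead of read from `modules.shared.opts` (the `make_noise` helper is hypothetical, for illustration only):

```
import torch

def make_noise(seed, shape, randn_source="GPU", device=torch.device("cpu")):
    if randn_source == "NV":
        # Philox generator that mimics torch.randn on CUDA (see rng_philox.py below);
        # assumes this runs inside the webui tree so the module is importable
        from modules import rng_philox
        return torch.asarray(rng_philox.Generator(seed).randn(shape), device=device)
    if randn_source == "CPU" or device.type == "mps":
        # seed and sample on the CPU, then move -- reproducible across GPU vendors
        torch.manual_seed(seed)
        return torch.randn(shape, device=torch.device("cpu")).to(device)
    # default: seed and sample directly on the target device
    torch.manual_seed(seed)
    return torch.randn(shape, device=device)
```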
+ """ + + for _ in range(rounds - 1): + philox4_round(counter, key) + + key[0] = key[0] + philox_w[0] + key[1] = key[1] + philox_w[1] + + philox4_round(counter, key) + return counter + + +def box_muller(x, y): + """Returns just the first out of two numbers generated by Box–Muller transform algorithm.""" + u = x.astype(np.float32) * two_pow32_inv + two_pow32_inv / 2 + v = y.astype(np.float32) * two_pow32_inv_2pi + two_pow32_inv_2pi / 2 + + s = np.sqrt(-2.0 * np.log(u)) + + r1 = s * np.sin(v) + return r1.astype(np.float32) + + +class Generator: + """RNG that produces same outputs as torch.randn(..., device='cuda') on CPU""" + + def __init__(self, seed): + self.seed = seed + self.offset = 0 + + def randn(self, shape): + """Generate a sequence of n standard normal random variables using the Philox 4x32 random number generator and the Box-Muller transform.""" + + n = 1 + for x in shape: + n *= x + + counter = np.zeros((4, n), dtype=np.uint32) + counter[0] = self.offset + counter[2] = np.arange(n, dtype=np.uint32) # up to 2^32 numbers can be generated - if you want more you'd need to spill into counter[3] + self.offset += 1 + + key = uint32(np.array([[self.seed] * n], dtype=np.uint64)) + + g = philox4_32(counter, key) + + return box_muller(g[0], g[1]).reshape(shape) # discard g[2] and g[3] diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index e0da3425..d72c1b5f 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -260,10 +260,7 @@ class TorchHijack: if noise.shape == x.shape: return noise - if opts.randn_source == "CPU" or x.device.type == 'mps': - return torch.randn_like(x, device=devices.cpu).to(x.device) - else: - return torch.randn_like(x) + return devices.randn_like(x) class KDiffusionSampler: diff --git a/modules/shared.py b/modules/shared.py index aa72c9c8..7103b4ca 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -428,7 +428,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), - "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"), + "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { -- cgit v1.2.1 From a1825ee741bb21b35561d58db8cb316d7f5d0c79 Mon Sep 17 00:00:00 2001 From: Splendide Imaginarius <119545140+Splendide-Imaginarius@users.noreply.github.com> Date: Thu, 3 Aug 2023 02:03:35 +0000 Subject: Make StableDiffusionProcessingImg2Img.mask_blur a property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes breakage 
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index e0da3425..d72c1b5f 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -260,10 +260,7 @@ class TorchHijack:
         if noise.shape == x.shape:
             return noise
 
-        if opts.randn_source == "CPU" or x.device.type == 'mps':
-            return torch.randn_like(x, device=devices.cpu).to(x.device)
-        else:
-            return torch.randn_like(x)
+        return devices.randn_like(x)
 
 
 class KDiffusionSampler:
diff --git a/modules/shared.py b/modules/shared.py
index aa72c9c8..7103b4ca 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -428,7 +428,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
     "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
     "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
-    "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"),
+    "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
 }))
 
 options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
-- 
cgit v1.2.1


From a1825ee741bb21b35561d58db8cb316d7f5d0c79 Mon Sep 17 00:00:00 2001
From: Splendide Imaginarius <119545140+Splendide-Imaginarius@users.noreply.github.com>
Date: Thu, 3 Aug 2023 02:03:35 +0000
Subject: Make StableDiffusionProcessingImg2Img.mask_blur a property
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes breakage when mask_blur is set after construction.

See https://github.com/Coyote-A/ultimate-upscale-for-automatic1111/issues/111#issuecomment-1652091424

Thanks to Алексей Трофимов and eunnone for reporting the issue.
---
 modules/processing.py | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)
 mode change 100644 => 100755 modules/processing.py

diff --git a/modules/processing.py b/modules/processing.py
old mode 100644
new mode 100755
index b0992ee1..44d20fb7
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1232,11 +1232,10 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.image_mask = mask
         self.latent_mask = None
         self.mask_for_overlay = None
-        if mask_blur is not None:
-            mask_blur_x = mask_blur
-            mask_blur_y = mask_blur
         self.mask_blur_x = mask_blur_x
         self.mask_blur_y = mask_blur_y
+        if mask_blur is not None:
+            self.mask_blur = mask_blur
         self.inpainting_fill = inpainting_fill
         self.inpaint_full_res = inpaint_full_res
         self.inpaint_full_res_padding = inpaint_full_res_padding
@@ -1246,6 +1245,22 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.nmask = None
         self.image_conditioning = None
 
+    @property
+    def mask_blur(self):
+        if self.mask_blur_x == self.mask_blur_y:
+            return self.mask_blur_x
+        return None
+
+    @mask_blur.setter
+    def mask_blur(self, value):
+        self.mask_blur_x = value
+        self.mask_blur_y = value
+
+    @mask_blur.deleter
+    def mask_blur(self):
+        del self.mask_blur_x
+        del self.mask_blur_y
+
    def init(self, all_prompts, all_seeds, all_subseeds):
         self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
         crop_region = None
-- 
cgit v1.2.1
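The property makes `mask_blur` assignable at any point in the object's lifetime, not just in the constructor, while keeping the per-axis fields as the source of truth; a stripped-down sketch of the same pattern:

```
class Proc:
    def __init__(self, mask_blur_x=4, mask_blur_y=4):
        self.mask_blur_x = mask_blur_x
        self.mask_blur_y = mask_blur_y

    @property
    def mask_blur(self):
        # only meaningful when both axes agree
        return self.mask_blur_x if self.mask_blur_x == self.mask_blur_y else None

    @mask_blur.setter
    def mask_blur(self, value):
        self.mask_blur_x = value
        self.mask_blur_y = value

p = Proc()
p.mask_blur = 8                       # now works after construction, too
print(p.mask_blur_x, p.mask_blur_y)   # 8 8
p.mask_blur_y = 2
print(p.mask_blur)                    # None -- axes differ
```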
From fca42949a3593c5a2f646e30cc99be2c02566aa2 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 3 Aug 2023 07:18:55 +0300
Subject: rework torchsde._brownian.brownian_interval replacement to use
 device.randn_local and respect the NV setting.

---
 modules/devices.py            | 44 +++++++++++++++++++++++++++++++++++++------
 modules/sd_samplers_common.py | 12 ++++++------
 2 files changed, 44 insertions(+), 12 deletions(-)

diff --git a/modules/devices.py b/modules/devices.py
index b58776d8..00a00b18 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -71,14 +71,17 @@ def enable_tf32():
         torch.backends.cudnn.allow_tf32 = True
 
-
 errors.run(enable_tf32, "Enabling TF32")
 
-cpu = torch.device("cpu")
-device = device_interrogate = device_gfpgan = device_esrgan = device_codeformer = None
-dtype = torch.float16
-dtype_vae = torch.float16
-dtype_unet = torch.float16
+cpu: torch.device = torch.device("cpu")
+device: torch.device = None
+device_interrogate: torch.device = None
+device_gfpgan: torch.device = None
+device_esrgan: torch.device = None
+device_codeformer: torch.device = None
+dtype: torch.dtype = torch.float16
+dtype_vae: torch.dtype = torch.float16
+dtype_unet: torch.dtype = torch.float16
+
 unet_needs_upcast = False
 
@@ -94,6 +97,10 @@ nv_rng = None
 
 
 def randn(seed, shape):
+    """Generate a tensor with random numbers from a normal distribution using seed.
+
+    Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed."""
+
     from modules.shared import opts
 
     manual_seed(seed)
@@ -107,7 +114,27 @@ def randn(seed, shape):
     return torch.randn(shape, device=device)
 
 
+def randn_local(seed, shape):
+    """Generate a tensor with random numbers from a normal distribution using seed.
+
+    Does not change the global random number generator. You can only generate the seed's first tensor using this function."""
+
+    from modules.shared import opts
+
+    if opts.randn_source == "NV":
+        rng = rng_philox.Generator(seed)
+        return torch.asarray(rng.randn(shape), device=device)
+
+    local_device = cpu if opts.randn_source == "CPU" or device.type == 'mps' else device
+    local_generator = torch.Generator(local_device).manual_seed(int(seed))
+    return torch.randn(shape, device=local_device, generator=local_generator).to(device)
+
+
 def randn_like(x):
+    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.
+
+    Use either randn() or manual_seed() to initialize the generator."""
+
     from modules.shared import opts
 
     if opts.randn_source == "NV":
@@ -120,6 +147,10 @@ def randn_like(x):
 
 
 def randn_without_seed(shape):
+    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.
+
+    Use either randn() or manual_seed() to initialize the generator."""
+
     from modules.shared import opts
 
     if opts.randn_source == "NV":
@@ -132,6 +163,7 @@ def randn_without_seed(shape):
 
 
 def manual_seed(seed):
+    """Set up a global random number generator using the specified seed."""
     from modules.shared import opts
 
     if opts.randn_source == "NV":
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 763829f1..5deda761 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -2,10 +2,8 @@ from collections import namedtuple
 import numpy as np
 import torch
 from PIL import Image
-from modules import devices, processing, images, sd_vae_approx, sd_samplers, sd_vae_taesd
-
+from modules import devices, processing, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared
 from modules.shared import opts, state
-import modules.shared as shared
 
 SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
 
@@ -85,11 +83,13 @@ class InterruptedException(BaseException):
     pass
 
 
-if opts.randn_source == "CPU":
+def replace_torchsde_browinan():
     import torchsde._brownian.brownian_interval
 
     def torchsde_randn(size, dtype, device, seed):
-        generator = torch.Generator(devices.cpu).manual_seed(int(seed))
-        return torch.randn(size, dtype=dtype, device=devices.cpu, generator=generator).to(device)
+        return devices.randn_local(seed, size).to(device=device, dtype=dtype)
 
     torchsde._brownian.brownian_interval._randn = torchsde_randn
+
+
+replace_torchsde_browinan()
-- 
cgit v1.2.1
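The point of `randn_local` is that it draws from a throwaway generator, so hooks like the torchsde replacement above cannot perturb the main sampling stream. A minimal sketch of that property using plain torch (CPU path only, simplified from the code above):

```
import torch

def randn_local(seed, shape, device=torch.device("cpu")):
    # a private generator: seeding and sampling it leaves the global RNG alone
    generator = torch.Generator(device).manual_seed(int(seed))
    return torch.randn(shape, device=device, generator=generator)

torch.manual_seed(123)
_ = randn_local(999, (4,))   # uses its own generator
a = torch.randn(4)           # global stream is unaffected...

torch.manual_seed(123)
b = torch.randn(4)           # ...so this reproduces it exactly
print(torch.equal(a, b))     # True
```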
From 0904df84e296c10fe0ccb37a3b689e6b96d87e41 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 3 Aug 2023 07:53:03 +0300
Subject: minor performance improvements for philox

---
 modules/rng_philox.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/modules/rng_philox.py b/modules/rng_philox.py
index b5c02483..5532cf9d 100644
--- a/modules/rng_philox.py
+++ b/modules/rng_philox.py
@@ -26,7 +26,7 @@ two_pow32_inv_2pi = np.array([2.3283064e-10 * 6.2831855], dtype=np.float32)
 
 def uint32(x):
     """Converts (N,) np.uint64 array into (2, N) np.uint32 array."""
-    return np.moveaxis(x.view(np.uint32).reshape(-1, 2), 0, 1)
+    return x.view(np.uint32).reshape(-1, 2).transpose(1, 0)
 
 
 def philox4_round(counter, key):
@@ -65,8 +65,8 @@ def philox4_32(counter, key, rounds=10):
 
 def box_muller(x, y):
     """Returns just the first out of two numbers generated by Box–Muller transform algorithm."""
-    u = x.astype(np.float32) * two_pow32_inv + two_pow32_inv / 2
-    v = y.astype(np.float32) * two_pow32_inv_2pi + two_pow32_inv_2pi / 2
+    u = x * two_pow32_inv + two_pow32_inv / 2
+    v = y * two_pow32_inv_2pi + two_pow32_inv_2pi / 2
 
     s = np.sqrt(-2.0 * np.log(u))
 
@@ -93,7 +93,9 @@ class Generator:
         counter[2] = np.arange(n, dtype=np.uint32)  # up to 2^32 numbers can be generated - if you want more you'd need to spill into counter[3]
         self.offset += 1
 
-        key = uint32(np.array([[self.seed] * n], dtype=np.uint64))
+        key = np.empty(n, dtype=np.uint64)
+        key.fill(self.seed)
+        key = uint32(key)
 
         g = philox4_32(counter, key)
 
-- 
cgit v1.2.1


From f56a309432996936a41040ea723d76a8cb488f87 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Thu, 3 Aug 2023 10:25:49 +0900
Subject: fix missing TI hash

---
 modules/sd_hijack_clip.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 2f9d569b..8f29057a 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -245,6 +245,8 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
                     hashes.append(f"{name}: {shorthash}")
 
         if hashes:
+            if self.hijack.extra_generation_params.get("TI hashes"):
+                hashes.append(self.hijack.extra_generation_params.get("TI hashes"))
             self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)
 
         if getattr(self.wrapped, 'return_pooled', False):
-- 
cgit v1.2.1


From 20549a50cb3c41868ce561c6658bfaa0d20ac7ba Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 3 Aug 2023 22:46:57 +0300
Subject: add style editor dialog

rework toprow for img2img and txt2img to use a class with fields
fix the console error when editing checkpoint user metadata
---
 modules/sd_models.py                           |   2 +-
 modules/styles.py                              |   5 +-
 modules/ui.py                                  | 230 ++++++++++---------------
 modules/ui_common.py                           |  32 +++-
 modules/ui_extra_networks_checkpoints.py       |   2 +-
 modules/ui_extra_networks_hypernets.py         |   2 +-
 modules/ui_extra_networks_textual_inversion.py |   2 +-
 modules/ui_prompt_styles.py                    | 110 ++++++++++++
 style.css                                      |  13 ++
 9 files changed, 248 insertions(+), 150 deletions(-)
 create mode 100644 modules/ui_prompt_styles.py

diff --git a/modules/sd_models.py b/modules/sd_models.py
index 8f72f21d..1d93d893 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -68,7 +68,7 @@ class CheckpointInfo:
 
         self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'
 
-        self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else [])
+        self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else [])
 
     def register(self):
         checkpoints_list[self.title] = self
diff --git a/modules/styles.py b/modules/styles.py
index ec0e1bc5..0740fe1b 100644
--- a/modules/styles.py
+++ b/modules/styles.py
@@ -106,10 +106,7 @@ class StyleDatabase:
         if os.path.exists(path):
             shutil.copy(path, f"{path}.bak")
 
-        fd = os.open(path, os.O_RDWR | os.O_CREAT)
-        with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file:
-            # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple,
-            # and collections.NamedTuple has explicit documentation for accessing _fields.
Same goes for _asdict() + with open(path, "w", encoding="utf-8-sig", newline='') as file: writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) writer.writeheader() writer.writerows(style._asdict() for k, style in self.styles.items()) diff --git a/modules/ui.py b/modules/ui.py index ac2787eb..c059dcec 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,7 +12,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -92,19 +92,6 @@ def send_gradio_gallery_to_image(x): return image_from_url_text(x[0]) -def add_style(name: str, prompt: str, negative_prompt: str): - if name is None: - return [gr_show() for x in range(4)] - - style = modules.styles.PromptStyle(name, prompt, negative_prompt) - shared.prompt_styles.styles[style.name] = style - # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we - # reserialize all styles every time we save them - shared.prompt_styles.save_styles(shared.styles_filename) - - return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)] - - def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): from modules import processing, devices @@ -129,13 +116,6 @@ def resize_from_to_html(width, height, scale_by): return f"resize: from {width}x{height} to {target_width}x{target_height}" -def apply_styles(prompt, prompt_neg, styles): - prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) - prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) - - return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])] - - def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles): if mode in {0, 1, 3, 4}: return [interrogation_function(ii_singles[mode]), None] @@ -267,71 +247,67 @@ def update_token_counter(text, steps): return f"{token_count}/{max_length}" -def create_toprow(is_img2img): - id_part = "img2img" if is_img2img else "txt2img" +class Toprow: + def __init__(self, is_img2img): + id_part = "img2img" if is_img2img else "txt2img" + self.id_part = id_part - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", 
elem_classes=["prompt"]) - - button_interrogate = None - button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) + with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): + with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + self.prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) + with gr.Row(): + with gr.Column(scale=80): + with gr.Row(): + self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + self.button_interrogate = None + self.button_deepbooru = None + if is_img2img: + with gr.Column(scale=1, elem_classes="interrogate-col"): + self.button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") + self.button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + + with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): + with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): + self.interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") + self.skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") + self.submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + + self.skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) - with gr.Row(elem_id=f"{id_part}_tools"): - paste = ToolButton(value=paste_symbol, elem_id="paste") - clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") - prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") - save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) - - token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[prompt, negative_prompt], - outputs=[prompt, 
negative_prompt], - ) + self.interrupt.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) - with gr.Row(elem_id=f"{id_part}_styles_row"): - prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) - create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") + with gr.Row(elem_id=f"{id_part}_tools"): + self.paste = ToolButton(value=paste_symbol, elem_id="paste") + self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") + self.extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") + self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) + + self.token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) + self.token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") + self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) + self.negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") + + self.clear_prompt_button.click( + fn=lambda *x: x, + _js="confirm_clear_prompt", + inputs=[self.prompt, self.negative_prompt], + outputs=[self.prompt, self.negative_prompt], + ) - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + self.ui_styles = ui_prompt_styles.UiPromptStyles(id_part, self.prompt, self.negative_prompt) def setup_progressbar(*args, **kwargs): @@ -419,14 +395,14 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + toprow = txt2img_toprow = Toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: from modules import ui_extra_networks - extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img') + extra_networks_ui = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'txt2img') with gr.Row().style(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): @@ -532,9 +508,9 @@ def create_ui(): _js="submit", inputs=[ dummy_component, - txt2img_prompt, - txt2img_negative_prompt, - txt2img_prompt_styles, + toprow.prompt, + toprow.negative_prompt, + toprow.ui_styles.dropdown, steps, sampler_index, restore_faces, @@ -569,12 +545,12 @@ def create_ui(): show_progress=False, ) - txt2img_prompt.submit(**txt2img_args) - submit.click(**txt2img_args) + toprow.prompt.submit(**txt2img_args) + toprow.submit.click(**txt2img_args) 
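The Toprow refactor in this commit swaps a long positional return tuple for named attributes, which is what makes the `toprow.prompt`/`toprow.submit` call sites above readable. A schematic before/after sketch of the design choice (`make_button` is a hypothetical stand-in for the gradio constructors, not part of the codebase):

```
def make_button(label):
    return label  # stand-in for gr.Button(...)

# before: every caller must unpack everything, in exactly the right order
def create_toprow_old():
    return make_button("Generate"), make_button("Skip"), make_button("Interrupt")

# after: callers take only what they need, by attribute
class Toprow:
    def __init__(self):
        self.submit = make_button("Generate")
        self.skip = make_button("Skip")
        self.interrupt = make_button("Interrupt")

_, _, interrupt = create_toprow_old()  # positional, fragile to reordering
toprow = Toprow()
print(toprow.interrupt)                # named, refactor-safe
```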
res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False) - restore_progress_button.click( + toprow.restore_progress_button.click( fn=progress.restore_progress, _js="restoreProgressTxt2img", inputs=[dummy_component], @@ -593,7 +569,7 @@ def create_ui(): txt_prompt_img ], outputs=[ - txt2img_prompt, + toprow.prompt, txt_prompt_img ], show_progress=False, @@ -607,8 +583,8 @@ def create_ui(): ) txt2img_paste_fields = [ - (txt2img_prompt, "Prompt"), - (txt2img_negative_prompt, "Negative prompt"), + (toprow.prompt, "Prompt"), + (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_index, "Sampler"), (restore_faces, "Face restoration"), @@ -621,7 +597,7 @@ def create_ui(): (subseed_strength, "Variation seed strength"), (seed_resize_from_w, "Seed resize from-1"), (seed_resize_from_h, "Seed resize from-2"), - (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d), (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), @@ -639,12 +615,12 @@ def create_ui(): ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, + paste_button=toprow.paste, tabname="txt2img", source_text_component=toprow.prompt, source_image_component=None, )) txt2img_preview_params = [ - txt2img_prompt, - txt2img_negative_prompt, + toprow.prompt, + toprow.negative_prompt, steps, sampler_index, cfg_scale, @@ -653,8 +629,8 @@ def create_ui(): height, ] - token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter]) + toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) + toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) @@ -662,13 +638,13 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + toprow = img2img_toprow = Toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: from modules import ui_extra_networks - extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img') + extra_networks_ui_img2img = 
ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'img2img') with FormRow().style(equal_height=False): with gr.Column(variant='compact', elem_id="img2img_settings"): @@ -889,7 +865,7 @@ def create_ui(): img2img_prompt_img ], outputs=[ - img2img_prompt, + toprow.prompt, img2img_prompt_img ], show_progress=False, @@ -901,9 +877,9 @@ def create_ui(): inputs=[ dummy_component, dummy_component, - img2img_prompt, - img2img_negative_prompt, - img2img_prompt_styles, + toprow.prompt, + toprow.negative_prompt, + toprow.ui_styles.dropdown, init_img, sketch, init_img_with_mask, @@ -962,11 +938,11 @@ def create_ui(): inpaint_color_sketch, init_img_inpaint, ], - outputs=[img2img_prompt, dummy_component], + outputs=[toprow.prompt, dummy_component], ) - img2img_prompt.submit(**img2img_args) - submit.click(**img2img_args) + toprow.prompt.submit(**img2img_args) + toprow.submit.click(**img2img_args) res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False) @@ -978,7 +954,7 @@ def create_ui(): show_progress=False, ) - restore_progress_button.click( + toprow.restore_progress_button.click( fn=progress.restore_progress, _js="restoreProgressImg2img", inputs=[dummy_component], @@ -991,46 +967,24 @@ def create_ui(): show_progress=False, ) - img2img_interrogate.click( + toprow.button_interrogate.click( fn=lambda *args: process_interrogate(interrogate, *args), **interrogate_args, ) - img2img_deepbooru.click( + toprow.button_deepbooru.click( fn=lambda *args: process_interrogate(interrogate_deepbooru, *args), **interrogate_args, ) - prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] - style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles] - style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] - - for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): - button.click( - fn=add_style, - _js="ask_for_style_name", - # Have to pass empty dummy component here, because the JavaScript and Python function have to accept - # the same number of parameters, but we only know the style-name after the JavaScript prompt - inputs=[dummy_component, prompt, negative_prompt], - outputs=[txt2img_prompt_styles, img2img_prompt_styles], - ) - - for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): - button.click( - fn=apply_styles, - _js=js_func, - inputs=[prompt, negative_prompt, styles], - outputs=[prompt, negative_prompt, styles], - ) - - token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) - negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter]) + toprow.token_button.click(fn=update_token_counter, inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) + toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) img2img_paste_fields = [ - (img2img_prompt, "Prompt"), - (img2img_negative_prompt, "Negative prompt"), + (toprow.prompt, "Prompt"), + (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_index, "Sampler"), (restore_faces, "Face restoration"), @@ -1044,7 +998,7 @@ def create_ui(): 
(subseed_strength, "Variation seed strength"), (seed_resize_from_w, "Seed resize from-1"), (seed_resize_from_h, "Seed resize from-2"), - (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), + (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), *modules.scripts.scripts_img2img.infotext_fields @@ -1052,7 +1006,7 @@ def create_ui(): parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, + paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None, )) modules.scripts.scripts_current = None diff --git a/modules/ui_common.py b/modules/ui_common.py index 11eb2a4b..ba75fa73 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -223,20 +223,44 @@ Requested path was: {f} def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id): + refresh_components = refresh_component if isinstance(refresh_component, list) else [refresh_component] + + label = None + for comp in refresh_components: + label = getattr(comp, 'label', None) + if label is not None: + break + def refresh(): refresh_method() args = refreshed_args() if callable(refreshed_args) else refreshed_args for k, v in args.items(): - setattr(refresh_component, k, v) + for comp in refresh_components: + setattr(comp, k, v) - return gr.update(**(args or {})) + return [gr.update(**(args or {})) for _ in refresh_components] - refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id) + refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id, tooltip=f"{label}: refresh" if label else "Refresh") refresh_button.click( fn=refresh, inputs=[], - outputs=[refresh_component] + outputs=[*refresh_components] ) return refresh_button + +def setup_dialog(button_show, dialog, *, button_close=None): + """Sets up the UI so that the dialog (gr.Box) is invisible, and is only shown when buttons_show is clicked, in a fullscreen modal window.""" + + dialog.visible = False + + button_show.click( + fn=lambda: gr.update(visible=True), + inputs=[], + outputs=[dialog], + ).then(fn=None, _js="function(){ popup(gradioApp().getElementById('" + dialog.elem_id + "')); }") + + if button_close: + button_close.click(fn=None, _js="closePopup") + diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index 2bb0a222..891d8f2c 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -12,7 +12,7 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): def refresh(self): shared.refresh_checkpoints() - def create_item(self, name, index=None): + def create_item(self, name, index=None, enable_filter=True): checkpoint: sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name) path, ext = os.path.splitext(checkpoint.filename) return { diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py index e53ccb42..514a4562 100644 --- a/modules/ui_extra_networks_hypernets.py +++ 
b/modules/ui_extra_networks_hypernets.py @@ -11,7 +11,7 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): def refresh(self): shared.reload_hypernetworks() - def create_item(self, name, index=None): + def create_item(self, name, index=None, enable_filter=True): full_path = shared.hypernetworks[name] path, ext = os.path.splitext(full_path) diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py index d1794e50..73134698 100644 --- a/modules/ui_extra_networks_textual_inversion.py +++ b/modules/ui_extra_networks_textual_inversion.py @@ -12,7 +12,7 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): def refresh(self): sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) - def create_item(self, name, index=None): + def create_item(self, name, index=None, enable_filter=True): embedding = sd_hijack.model_hijack.embedding_db.word_embeddings.get(name) path, ext = os.path.splitext(embedding.filename) diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py new file mode 100644 index 00000000..85eb3a64 --- /dev/null +++ b/modules/ui_prompt_styles.py @@ -0,0 +1,110 @@ +import gradio as gr + +from modules import shared, ui_common, ui_components, styles + +styles_edit_symbol = '\U0001f58c\uFE0F' # 🖌️ +styles_materialize_symbol = '\U0001f4cb' # 📋 + + +def select_style(name): + style = shared.prompt_styles.styles.get(name) + existing = style is not None + empty = not name + + prompt = style.prompt if style else gr.update() + negative_prompt = style.negative_prompt if style else gr.update() + + return prompt, negative_prompt, gr.update(visible=existing), gr.update(visible=not empty) + + +def save_style(name, prompt, negative_prompt): + if not name: + return gr.update(visible=False) + + style = styles.PromptStyle(name, prompt, negative_prompt) + shared.prompt_styles.styles[style.name] = style + shared.prompt_styles.save_styles(shared.styles_filename) + + return gr.update(visible=True) + + +def delete_style(name): + if name == "": + return + + shared.prompt_styles.styles.pop(name, None) + shared.prompt_styles.save_styles(shared.styles_filename) + + return '', '', '' + + +def materialize_styles(prompt, negative_prompt, styles): + prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) + negative_prompt = shared.prompt_styles.apply_negative_styles_to_prompt(negative_prompt, styles) + + return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=negative_prompt), gr.Dropdown.update(value=[])] + + +def refresh_styles(): + return gr.update(choices=list(shared.prompt_styles.styles)), gr.update(choices=list(shared.prompt_styles.styles)) + + +class UiPromptStyles: + def __init__(self, tabname, main_ui_prompt, main_ui_negative_prompt): + self.tabname = tabname + + with gr.Row(elem_id=f"{tabname}_styles_row"): + self.dropdown = gr.Dropdown(label="Styles", show_label=False, elem_id=f"{tabname}_styles", choices=list(shared.prompt_styles.styles), value=[], multiselect=True, tooltip="Styles") + edit_button = ui_components.ToolButton(value=styles_edit_symbol, elem_id=f"{tabname}_styles_edit_button", tooltip="Edit styles") + + with gr.Box(elem_id=f"{tabname}_styles_dialog", elem_classes="popup-dialog") as styles_dialog: + with gr.Row(): + self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. 
Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.") + ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles") + self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.") + + with gr.Row(): + self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3) + + with gr.Row(): + self.neg_prompt = gr.Textbox(label="Negative prompt", show_label=True, elem_id=f"{tabname}_edit_style_neg_prompt", lines=3) + + with gr.Row(): + self.save = gr.Button('Save', variant='primary', elem_id=f'{tabname}_edit_style_save', visible=False) + self.delete = gr.Button('Delete', variant='primary', elem_id=f'{tabname}_edit_style_delete', visible=False) + self.close = gr.Button('Close', variant='secondary', elem_id=f'{tabname}_edit_style_close') + + self.selection.change( + fn=select_style, + inputs=[self.selection], + outputs=[self.prompt, self.neg_prompt, self.delete, self.save], + show_progress=False, + ) + + self.save.click( + fn=save_style, + inputs=[self.selection, self.prompt, self.neg_prompt], + outputs=[self.delete], + show_progress=False, + ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False) + + self.delete.click( + fn=delete_style, + _js='function(name){ if(name == "") return ""; return confirm("Delete style " + name + "?") ? name : ""; }', + inputs=[self.selection], + outputs=[self.selection, self.prompt, self.neg_prompt], + show_progress=False, + ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False) + + self.materialize.click( + fn=materialize_styles, + inputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown], + outputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown], + show_progress=False, + ).then(fn=None, _js="function(){update_"+tabname+"_tokens(); closePopup();}", show_progress=False) + + ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close) + + + + diff --git a/style.css b/style.css index 6c92d6e7..cf8470e4 100644 --- a/style.css +++ b/style.css @@ -972,3 +972,16 @@ div.block.gradio-box.edit-user-metadata { .edit-user-metadata-buttons{ margin-top: 1.5em; } + + + + +div.block.gradio-box.popup-dialog, .popup-dialog { + width: 56em; + background: var(--body-background-fill); + padding: 2em !important; +} + +div.block.gradio-box.popup-dialog > div:last-child, .popup-dialog > div:last-child{ + margin-top: 1em; +} -- cgit v1.2.1 From af528552d6006268e352f422d533a69a6a718a5d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 3 Aug 2023 23:18:50 +0300 Subject: fix linter issues --- modules/ui.py | 57 +++++++++++++++++++++++++-------------------------------- webui.py | 4 ++-- 2 files changed, 27 insertions(+), 34 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index c059dcec..70364a8b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,30 +12,23 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, 
ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button from modules.ui_gradio_extensions import reload_javascript - from modules.shared import opts, cmd_opts -import modules.codeformer_model import modules.generation_parameters_copypaste as parameters_copypaste -import modules.gfpgan_model -import modules.hypernetworks.ui -import modules.scripts +import modules.hypernetworks.ui as hypernetworks_ui +import modules.textual_inversion.ui as textual_inversion_ui +import modules.textual_inversion.textual_inversion as textual_inversion import modules.shared as shared -import modules.styles -import modules.textual_inversion.ui from modules import prompt_parser from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img -from modules.textual_inversion import textual_inversion -import modules.hypernetworks.ui from modules.generation_parameters_copypaste import image_from_url_text -import modules.extras create_setting_component = ui_settings.create_setting_component @@ -391,11 +384,11 @@ def create_ui(): parameters_copypaste.reset() - modules.scripts.scripts_current = modules.scripts.scripts_txt2img - modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) + scripts.scripts_current = scripts.scripts_txt2img + scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - toprow = txt2img_toprow = Toprow(is_img2img=False) + toprow = Toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) @@ -406,7 +399,7 @@ def create_ui(): with gr.Row().style(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): - modules.scripts.scripts_txt2img.prepare_ui() + scripts.scripts_txt2img.prepare_ui() for category in ordered_ui_categories(): if category == "sampler": @@ -474,10 +467,10 @@ def create_ui(): elif category == "scripts": with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + custom_inputs = scripts.scripts_txt2img.setup_ui() else: - modules.scripts.scripts_txt2img.setup_ui_for_section(category) + scripts.scripts_txt2img.setup_ui_for_section(category) hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] @@ -611,7 +604,7 @@ def create_ui(): (hr_prompt, "Hires prompt"), (hr_negative_prompt, "Hires negative prompt"), (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - *modules.scripts.scripts_txt2img.infotext_fields + *scripts.scripts_txt2img.infotext_fields ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( @@ -634,11 +627,11 @@ def create_ui(): ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) - 
modules.scripts.scripts_current = modules.scripts.scripts_img2img - modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) + scripts.scripts_current = scripts.scripts_img2img + scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - toprow = img2img_toprow = Toprow(is_img2img=True) + toprow = Toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) @@ -740,7 +733,7 @@ def create_ui(): with FormRow(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - modules.scripts.scripts_img2img.prepare_ui() + scripts.scripts_img2img.prepare_ui() for category in ordered_ui_categories(): if category == "sampler": @@ -821,7 +814,7 @@ def create_ui(): elif category == "scripts": with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() + custom_inputs = scripts.scripts_img2img.setup_ui() elif category == "inpaint": with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls: @@ -852,7 +845,7 @@ def create_ui(): outputs=[inpaint_controls, mask_alpha], ) else: - modules.scripts.scripts_img2img.setup_ui_for_section(category) + scripts.scripts_img2img.setup_ui_for_section(category) img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) @@ -1001,7 +994,7 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), - *modules.scripts.scripts_img2img.infotext_fields + *scripts.scripts_img2img.infotext_fields ] parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings) @@ -1009,7 +1002,7 @@ def create_ui(): paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None, )) - modules.scripts.scripts_current = None + scripts.scripts_current = None with gr.Blocks(analytics_enabled=False) as extras_interface: ui_postprocessing.create_ui() @@ -1063,7 +1056,7 @@ def create_ui(): new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") + new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=hypernetworks_ui.keys, elem_id="train_new_hypernetwork_activation_func") new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. 
Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") @@ -1208,7 +1201,7 @@ def create_ui(): ti_outcome = gr.HTML(elem_id="ti_error", value="") create_embedding.click( - fn=modules.textual_inversion.ui.create_embedding, + fn=textual_inversion_ui.create_embedding, inputs=[ new_embedding_name, initialization_text, @@ -1223,7 +1216,7 @@ def create_ui(): ) create_hypernetwork.click( - fn=modules.hypernetworks.ui.create_hypernetwork, + fn=hypernetworks_ui.create_hypernetwork, inputs=[ new_hypernetwork_name, new_hypernetwork_sizes, @@ -1243,7 +1236,7 @@ def create_ui(): ) run_preprocess.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), + fn=wrap_gradio_gpu_call(textual_inversion_ui.preprocess, extra_outputs=[gr.update()]), _js="start_training_textual_inversion", inputs=[ dummy_component, @@ -1279,7 +1272,7 @@ def create_ui(): ) train_embedding.click( - fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), + fn=wrap_gradio_gpu_call(textual_inversion_ui.train_embedding, extra_outputs=[gr.update()]), _js="start_training_textual_inversion", inputs=[ dummy_component, @@ -1313,7 +1306,7 @@ def create_ui(): ) train_hypernetwork.click( - fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]), + fn=wrap_gradio_gpu_call(hypernetworks_ui.train_hypernetwork, extra_outputs=[gr.update()]), _js="start_training_textual_inversion", inputs=[ dummy_component, diff --git a/webui.py b/webui.py index 2dc4f1aa..8d84e5a4 100644 --- a/webui.py +++ b/webui.py @@ -58,10 +58,10 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.codeformer_model as codeformer -import modules.face_restoration import modules.gfpgan_model as gfpgan +from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states +import modules.face_restoration import modules.img2img import modules.lowvram -- cgit v1.2.1 From 09c1be96748584b08b6299024bb7b64bafb09d09 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 3 Aug 2023 23:31:04 +0300 Subject: put some of the shared functionality into toprow write a comment for the toprow --- modules/ui.py | 39 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 70364a8b..03306ba9 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -25,6 +25,7 @@ import modules.hypernetworks.ui as hypernetworks_ui import modules.textual_inversion.ui as textual_inversion_ui import modules.textual_inversion.textual_inversion as textual_inversion import modules.shared as shared +import modules.images from modules import prompt_parser from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img @@ -241,6 +242,8 @@ def 
update_token_counter(text, steps): class Toprow: + """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation""" + def __init__(self, is_img2img): id_part = "img2img" if is_img2img else "txt2img" self.id_part = id_part @@ -251,12 +254,14 @@ class Toprow: with gr.Column(scale=80): with gr.Row(): self.prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + self.prompt_img = gr.File(label="", elem_id=f"{id_part}_prompt_image", file_count="single", type="binary", visible=False) with gr.Row(): with gr.Column(scale=80): with gr.Row(): self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + self.button_interrogate = None self.button_deepbooru = None if is_img2img: @@ -302,6 +307,13 @@ class Toprow: self.ui_styles = ui_prompt_styles.UiPromptStyles(id_part, self.prompt, self.negative_prompt) + self.prompt_img.change( + fn=modules.images.image_data, + inputs=[self.prompt_img], + outputs=[self.prompt, self.prompt_img], + show_progress=False, + ) + def setup_progressbar(*args, **kwargs): pass @@ -391,7 +403,6 @@ def create_ui(): toprow = Toprow(is_img2img=False) dummy_component = gr.Label(visible=False) - txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks: from modules import ui_extra_networks @@ -556,18 +567,6 @@ def create_ui(): show_progress=False, ) - txt_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - txt_prompt_img - ], - outputs=[ - toprow.prompt, - txt_prompt_img - ], - show_progress=False, - ) - enable_hr.change( fn=lambda x: gr_show(x), inputs=[enable_hr], @@ -633,8 +632,6 @@ def create_ui(): with gr.Blocks(analytics_enabled=False) as img2img_interface: toprow = Toprow(is_img2img=True) - img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) - with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks: from modules import ui_extra_networks extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'img2img') @@ -852,18 +849,6 @@ def create_ui(): connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - img2img_prompt_img.change( - fn=modules.images.image_data, - inputs=[ - img2img_prompt_img - ], - outputs=[ - toprow.prompt, - img2img_prompt_img - ], - show_progress=False, - ) - img2img_args = dict( fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), _js="submit_img2img", -- cgit v1.2.1 From 24f21583cdba2ae6cc51773b956c6ce068d3dfe4 Mon Sep 17 00:00:00 2001 From: AnyISalIn Date: Fri, 4 Aug 2023 11:43:27 +0800 Subject: fix: prevent cache model.state_dict() after model hijack Signed-off-by: AnyISalIn --- modules/sd_models.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 1d93d893..ba15b451 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -303,12 
+303,13 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer sd_models_xl.extend_sdxl(model) model.load_state_dict(state_dict, strict=False) - del state_dict timer.record("apply weights to model") if shared.opts.sd_checkpoint_cache > 0: # cache newly loaded model - checkpoints_loaded[checkpoint_info] = model.state_dict().copy() + checkpoints_loaded[checkpoint_info] = state_dict + + del state_dict if shared.cmd_opts.opt_channelslast: model.to(memory_format=torch.channels_last) -- cgit v1.2.1 From bbfff771d7337707bf501b27f98da2f7a7c06f73 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 29 Jul 2023 01:07:35 +0900 Subject: --disable-all-extensions --disable-extra-extensions --- modules/cmd_args.py | 2 ++ modules/extensions.py | 10 +++++++--- modules/ui_extensions.py | 18 +++++++++++------- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index dd5fadc4..1262f1a4 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -111,3 +111,5 @@ parser.add_argument('--subpath', type=str, help='customize the subpath for gradi parser.add_argument('--add-stop-route', action='store_true', help='add /_stop route to stop server') parser.add_argument('--api-server-stop', action='store_true', help='enable server stop/restart/kill via api') parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn') +parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False) +parser.add_argument("--disable-extra-extensions", action='store_true', help=" prevent all extensions except built-in from running regardless of any other settings", default=False) diff --git a/modules/extensions.py b/modules/extensions.py index 3ad5ed53..e4633af4 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -11,9 +11,9 @@ os.makedirs(extensions_dir, exist_ok=True) def active(): - if shared.opts.disable_all_extensions == "all": + if shared.cmd_opts.disable_all_extensions or shared.opts.disable_all_extensions == "all": return [] - elif shared.opts.disable_all_extensions == "extra": + elif shared.cmd_opts.disable_extra_extensions or shared.opts.disable_all_extensions == "extra": return [x for x in extensions if x.enabled and x.is_builtin] else: return [x for x in extensions if x.enabled] @@ -141,8 +141,12 @@ def list_extensions(): if not os.path.isdir(extensions_dir): return - if shared.opts.disable_all_extensions == "all": + if shared.cmd_opts.disable_all_extensions: + print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***") + elif shared.opts.disable_all_extensions == "all": print("*** \"Disable all extensions\" option was set, will not load any extensions ***") + elif shared.cmd_opts.disable_extra_extensions: + print("*** \"--disable-extra-extensions\" arg was used, will only load built-in extensions ***") elif shared.opts.disable_all_extensions == "extra": print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***") diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index f3e4fba7..bd28bfcf 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -164,7 +164,7 @@ def extension_table(): ext_status = ext.status style = "" - if shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.opts.disable_all_extensions == "all": + if 
shared.cmd_opts.disable_extra_extensions and not ext.is_builtin or shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.cmd_opts.disable_all_extensions or shared.opts.disable_all_extensions == "all": style = STYLE_PRIMARY version_link = ext.version @@ -537,12 +537,16 @@ def create_ui(): extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False) html = "" - if shared.opts.disable_all_extensions != "none": - html = """ - - "Disable all extensions" was set, change it to "none" to load all extensions again - - """ + + if shared.cmd_opts.disable_all_extensions or shared.cmd_opts.disable_extra_extensions or shared.opts.disable_all_extensions != "none": + if shared.cmd_opts.disable_all_extensions: + msg = '"--disable-all-extensions" was used, remove it to load all extensions again' + elif shared.opts.disable_all_extensions != "none": + msg = '"Disable all extensions" was set, change it to "none" to load all extensions again' + elif shared.cmd_opts.disable_extra_extensions: + msg = '"--disable-extra-extensions" was used, remove it to load all extensions again' + html = f'{msg}' + info = gr.HTML(html) extensions_table = gr.HTML('Loading...') ui.load(fn=extension_table, inputs=[], outputs=[extensions_table]) -- cgit v1.2.1 From 8b37734244088eb3c5b6390c5c3f23f577c5136a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 00:10:14 -0400 Subject: XYZ: Support hires sampler, cleanup --- scripts/xyz_grid.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 103bf104..2b2504b3 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -67,14 +67,6 @@ def apply_order(p, x, xs): p.prompt = prompt_tmp + p.prompt -def apply_sampler(p, x, xs): - sampler_name = sd_samplers.samplers_map.get(x.lower(), None) - if sampler_name is None: - raise RuntimeError(f"Unknown sampler: {x}") - - p.sampler_name = sampler_name - - def confirm_samplers(p, xs): for x in xs: if x.lower() not in sd_samplers.samplers_map: @@ -224,8 +216,9 @@ axis_options = [ AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")), AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), - AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), - AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), + AxisOptionTxt2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), + AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), + AxisOptionImg2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_remove_path, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)), AxisOption("Negative Guidance minimum sigma", float, 
apply_field("s_min_uncond")), AxisOption("Sigma Churn", float, apply_field("s_churn")), -- cgit v1.2.1 From f7813fad1cf6907ef77e88a14bc3cf488d85be59 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 00:19:30 -0400 Subject: XYZ: Use default label format for hires sampler If both sampler and hires sampler are used this makes the distinction more clear. --- scripts/xyz_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 2b2504b3..4b8a3369 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -217,7 +217,7 @@ axis_options = [ AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOptionTxt2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), - AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), + AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), AxisOptionImg2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_remove_path, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)), AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")), -- cgit v1.2.1 From f4d9297127eb8b804ddcc77abe1e2bd9a725176a Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 4 Aug 2023 13:27:25 +0900 Subject: use samplers_for_img2img for Hires sampler --- scripts/xyz_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 4b8a3369..d37b428f 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -217,7 +217,7 @@ axis_options = [ AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOptionTxt2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), - AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), + AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), AxisOptionImg2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_remove_path, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)), AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")), -- cgit v1.2.1 From 3bd2c68eb4df7630b88b3b05e1e11b7a4ad40185 Mon Sep 17 00:00:00 2001 From: catboxanon 
<122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 00:51:49 -0400 Subject: Add exponential scheduler for DPM-Solver++(2M) SDE Better quality results than Karras. Related discussion: https://gist.github.com/crowsonkb/3ed16fba35c73ece7cf4b9a2095f2b78 --- modules/sd_samplers_kdiffusion.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index d72c1b5f..8bb639f5 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -30,6 +30,7 @@ samplers_k_diffusion = [ ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), + ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}), ] @@ -375,6 +376,9 @@ class KDiffusionSampler: sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) + elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential': + m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device) else: sigmas = self.model_wrap.get_sigmas(steps) -- cgit v1.2.1 From 7f1d087cba681ddd12cd54152090b176f26bd25c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 4 Aug 2023 12:01:28 +0900 Subject: sort VAE --- modules/sd_vae.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/sd_vae.py b/modules/sd_vae.py index e4ff2994..84271db0 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -50,6 +50,7 @@ def get_filename(filepath): def refresh_vae_list(): + global vae_dict vae_dict.clear() paths = [ @@ -83,6 +84,8 @@ def refresh_vae_list(): name = get_filename(filepath) vae_dict[name] = filepath + vae_dict = dict(sorted(vae_dict.items(), key=lambda item: shared.natural_sort_key(item[0]))) + def find_vae_near_checkpoint(checkpoint_file): checkpoint_path = os.path.basename(checkpoint_file).rsplit('.', 1)[0] -- cgit v1.2.1 From 362789a3793025c698fa42372fd66c3c4f2d6413 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 4 Aug 2023 07:50:17 +0300 Subject: gradio 3.39 --- extensions-builtin/Lora/ui_edit_user_metadata.py | 2 +- modules/gradio_extensons.py | 60 ++++++++++++++++++++++++ modules/scripts.py | 60 ------------------------ modules/shared.py | 3 +- modules/ui.py | 24 +++++----- modules/ui_checkpoint_merger.py | 2 +- modules/ui_common.py | 2 +- modules/ui_components.py | 2 +- modules/ui_extensions.py | 8 ++-- modules/ui_postprocessing.py | 2 +- requirements.txt | 2 +- requirements_versions.txt | 2 +- style.css | 12 ++++- 13 files changed, 95 insertions(+), 86 deletions(-) create mode 100644 modules/gradio_extensons.py diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py index 2ca997f7..390d9dde 100644 --- 
a/extensions-builtin/Lora/ui_edit_user_metadata.py +++ b/extensions-builtin/Lora/ui_edit_user_metadata.py @@ -167,7 +167,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor) random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False) with gr.Column(scale=1, min_width=120): - generate_random_prompt = gr.Button('Generate').style(full_width=True, size="lg") + generate_random_prompt = gr.Button('Generate', size="lg", scale=1) self.edit_notes = gr.TextArea(label='Notes', lines=4) diff --git a/modules/gradio_extensons.py b/modules/gradio_extensons.py new file mode 100644 index 00000000..5af7fd8e --- /dev/null +++ b/modules/gradio_extensons.py @@ -0,0 +1,60 @@ +import gradio as gr + +from modules import scripts + +def add_classes_to_gradio_component(comp): + """ + this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others + """ + + comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] + + if getattr(comp, 'multiselect', False): + comp.elem_classes.append('multiselect') + + +def IOComponent_init(self, *args, **kwargs): + self.webui_tooltip = kwargs.pop('tooltip', None) + + if scripts.scripts_current is not None: + scripts.scripts_current.before_component(self, **kwargs) + + scripts.script_callbacks.before_component_callback(self, **kwargs) + + res = original_IOComponent_init(self, *args, **kwargs) + + add_classes_to_gradio_component(self) + + scripts.script_callbacks.after_component_callback(self, **kwargs) + + if scripts.scripts_current is not None: + scripts.scripts_current.after_component(self, **kwargs) + + return res + + +def Block_get_config(self): + config = original_Block_get_config(self) + + webui_tooltip = getattr(self, 'webui_tooltip', None) + if webui_tooltip: + config["webui_tooltip"] = webui_tooltip + + return config + + +def BlockContext_init(self, *args, **kwargs): + res = original_BlockContext_init(self, *args, **kwargs) + + add_classes_to_gradio_component(self) + + return res + + +original_IOComponent_init = gr.components.IOComponent.__init__ +original_Block_get_config = gr.blocks.Block.get_config +original_BlockContext_init = gr.blocks.BlockContext.__init__ + +gr.components.IOComponent.__init__ = IOComponent_init +gr.blocks.Block.get_config = Block_get_config +gr.blocks.BlockContext.__init__ = BlockContext_init diff --git a/modules/scripts.py b/modules/scripts.py index edf7347e..f7d060aa 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -631,63 +631,3 @@ def reload_script_body_only(): reload_scripts = load_scripts # compatibility alias - - -def add_classes_to_gradio_component(comp): - """ - this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others - """ - - comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] - - if getattr(comp, 'multiselect', False): - comp.elem_classes.append('multiselect') - - - -def IOComponent_init(self, *args, **kwargs): - self.webui_tooltip = kwargs.pop('tooltip', None) - - if scripts_current is not None: - scripts_current.before_component(self, **kwargs) - - script_callbacks.before_component_callback(self, **kwargs) - - res = original_IOComponent_init(self, *args, **kwargs) - - add_classes_to_gradio_component(self) - - script_callbacks.after_component_callback(self, **kwargs) - - if scripts_current is not None: - scripts_current.after_component(self, **kwargs) - - return res - - -def Block_get_config(self): - config = 
original_Block_get_config(self) - - webui_tooltip = getattr(self, 'webui_tooltip', None) - if webui_tooltip: - config["webui_tooltip"] = webui_tooltip - - return config - - -original_IOComponent_init = gr.components.IOComponent.__init__ -original_Block_get_config = gr.components.Block.get_config -gr.components.IOComponent.__init__ = IOComponent_init -gr.components.Block.get_config = Block_get_config - - -def BlockContext_init(self, *args, **kwargs): - res = original_BlockContext_init(self, *args, **kwargs) - - add_classes_to_gradio_component(self) - - return res - - -original_BlockContext_init = gr.blocks.BlockContext.__init__ -gr.blocks.BlockContext.__init__ = BlockContext_init diff --git a/modules/shared.py b/modules/shared.py index 7103b4ca..cec030f7 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -385,7 +385,8 @@ options_templates.update(options_section(('face-restoration', "Face restoration" })) options_templates.update(options_section(('system', "System"), { - "show_warnings": OptionInfo(False, "Show warnings in console."), + "show_warnings": OptionInfo(False, "Show warnings in console.").needs_restart(), + "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_restart(), "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), diff --git a/modules/ui.py b/modules/ui.py index 03306ba9..822a7660 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,6 +12,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call +from modules import gradio_extensons # noqa: F401 from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path @@ -34,6 +35,7 @@ from modules.generation_parameters_copypaste import image_from_url_text create_setting_component = ui_settings.create_setting_component warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) +warnings.filterwarnings("default" if opts.show_gradio_deprecation_warnings else "ignore", category=gr.deprecation.GradioDeprecationWarning) # this is a fix for Windows users. 
Without it, javascript files will be served with text/html content-type and the browser will not show any UI mimetypes.init() @@ -146,7 +148,6 @@ def interrogate_deepbooru(image): def create_seed_inputs(target_interface): with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") - seed.style(container=False) random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') @@ -158,7 +159,6 @@ def create_seed_inputs(target_interface): with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: seed_extras.append(seed_extra_row_1) subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") - subseed.style(container=False) random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") @@ -408,7 +408,7 @@ def create_ui(): from modules import ui_extra_networks extra_networks_ui = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'txt2img') - with gr.Row().style(equal_height=False): + with gr.Row(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): scripts.scripts_txt2img.prepare_ui() @@ -636,7 +636,7 @@ def create_ui(): from modules import ui_extra_networks extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'img2img') - with FormRow().style(equal_height=False): + with FormRow(equal_height=False): with gr.Column(variant='compact', elem_id="img2img_settings"): copy_image_buttons = [] copy_image_destinations = {} @@ -658,19 +658,19 @@ def create_ui(): img2img_selected_tab = gr.State(0) with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height) + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) add_copy_image_controls('img2img', init_img) with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height) add_copy_image_controls('sketch', sketch) with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", 
image_mode="RGBA").style(height=opts.img2img_editor_height) + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height) add_copy_image_controls('inpaint', init_img_with_mask) with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height) + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height) inpaint_color_sketch_orig = gr.State(None) add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) @@ -993,7 +993,7 @@ def create_ui(): ui_postprocessing.create_ui() with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row().style(equal_height=False): + with gr.Row(equal_height=False): with gr.Column(variant='panel'): image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") @@ -1018,10 +1018,10 @@ def create_ui(): modelmerger_ui = ui_checkpoint_merger.UiCheckpointMerger() with gr.Blocks(analytics_enabled=False) as train_interface: - with gr.Row().style(equal_height=False): + with gr.Row(equal_height=False): gr.HTML(value="
<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>
") - with gr.Row(variant="compact").style(equal_height=False): + with gr.Row(variant="compact", equal_height=False): with gr.Tabs(elem_id="train_tabs"): with gr.Tab(label="Create embedding", id="create_embedding"): @@ -1181,7 +1181,7 @@ def create_ui(): with gr.Column(elem_id='ti_gallery_container'): ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) + gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery', columns=4) gr.HTML(elem_id="ti_progress", value="") ti_outcome = gr.HTML(elem_id="ti_error", value="") diff --git a/modules/ui_checkpoint_merger.py b/modules/ui_checkpoint_merger.py index 4863d861..f9c5dd6b 100644 --- a/modules/ui_checkpoint_merger.py +++ b/modules/ui_checkpoint_merger.py @@ -29,7 +29,7 @@ def modelmerger(*args): class UiCheckpointMerger: def __init__(self): with gr.Blocks(analytics_enabled=False) as modelmerger_interface: - with gr.Row().style(equal_height=False): + with gr.Row(equal_height=False): with gr.Column(variant='compact'): self.interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description") diff --git a/modules/ui_common.py b/modules/ui_common.py index ba75fa73..eefe0c0e 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -134,7 +134,7 @@ Requested path was: {f} with gr.Column(variant='panel', elem_id=f"{tabname}_results"): with gr.Group(elem_id=f"{tabname}_gallery_container"): - result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(columns=4) + result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4) generation_info = None with gr.Column(): diff --git a/modules/ui_components.py b/modules/ui_components.py index 64451df7..8f8a7088 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -35,7 +35,7 @@ class FormColumn(FormComponent, gr.Column): class FormGroup(FormComponent, gr.Group): - """Same as gr.Row but fits inside gradio forms""" + """Same as gr.Group but fits inside gradio forms""" def get_block_name(self): return "group" diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index bd28bfcf..15a8b0bf 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -533,8 +533,8 @@ def create_ui(): apply = gr.Button(value=apply_label, variant="primary") check = gr.Button(value="Check for updates") extensions_disable_all = gr.Radio(label="Disable all extensions", choices=["none", "extra", "all"], value=shared.opts.disable_all_extensions, elem_id="extensions_disable_all") - extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False) - extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False) + extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False, container=False) + extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False, container=False) html = "" @@ -569,7 +569,7 @@ def create_ui(): with gr.Row(): refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary") extensions_index_url = os.environ.get('WEBUI_EXTENSIONS_INDEX', "https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui-extensions/master/index.json") - available_extensions_index = gr.Text(value=extensions_index_url, label="Extension index URL").style(container=False) + available_extensions_index = 
gr.Text(value=extensions_index_url, label="Extension index URL", container=False) extension_to_install = gr.Text(elem_id="extension_to_install", visible=False) install_extension_button = gr.Button(elem_id="install_extension_button", visible=False) @@ -578,7 +578,7 @@ def create_ui(): sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order",'update time', 'create time', "stars"], type="index") with gr.Row(): - search_extensions_text = gr.Text(label="Search").style(container=False) + search_extensions_text = gr.Text(label="Search", container=False) install_result = gr.HTML() available_extensions_table = gr.HTML() diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index c7dc1154..802e1ce7 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -6,7 +6,7 @@ import modules.generation_parameters_copypaste as parameters_copypaste def create_ui(): tab_index = gr.State(value=0) - with gr.Row().style(equal_height=False, variant='compact'): + with gr.Row(equal_height=False, variant='compact'): with gr.Column(variant='compact'): with gr.Tabs(elem_id="mode_extras"): with gr.TabItem('Single Image', id="single_image", elem_id="extras_single_tab") as tab_single: diff --git a/requirements.txt b/requirements.txt index b3f8a7f4..afdc6ee2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ blendmodes clean-fid einops gfpgan -gradio==3.32.0 +gradio==3.39.0 inflection jsonmerge kornia diff --git a/requirements_versions.txt b/requirements_versions.txt index d07ab456..82b8732d 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -7,7 +7,7 @@ clean-fid==0.1.35 einops==0.4.1 fastapi==0.94.0 gfpgan==1.3.8 -gradio==3.32.0 +gradio==3.39.0 httpcore==0.15 inflection==0.5.1 jsonmerge==1.8.0 diff --git a/style.css b/style.css index cf8470e4..86b4f61e 100644 --- a/style.css +++ b/style.css @@ -8,6 +8,7 @@ --checkbox-label-gap: 0.25em 0.1em; --section-header-text-size: 12pt; --block-background-fill: transparent; + } .block.padded:not(.gradio-accordion) { @@ -42,7 +43,8 @@ div.form{ .block.gradio-radio, .block.gradio-checkboxgroup, .block.gradio-number, -.block.gradio-colorpicker +.block.gradio-colorpicker, +div.gradio-group { border-width: 0 !important; box-shadow: none !important; @@ -133,6 +135,11 @@ a{ cursor: pointer; } +div.styler{ + border: none; + background: var(--background-fill-primary); +} + /* general styled components */ @@ -164,7 +171,7 @@ a{ .checkboxes-row > div{ flex: 0; white-space: nowrap; - min-width: auto; + min-width: auto !important; } button.custom-button{ @@ -388,6 +395,7 @@ div#extras_scale_to_tab div.form{ #quicksettings > div, #quicksettings > fieldset{ max-width: 24em; min-width: 24em; + width: 24em; padding: 0; border: none; box-shadow: none; -- cgit v1.2.1 From 073c0ebba380acbd73be8262feba41212165ff84 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 4 Aug 2023 08:04:06 +0300 Subject: add gradio version warning --- modules/errors.py | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ webui.py | 41 +++++++---------------------------------- 2 files changed, 57 insertions(+), 34 deletions(-) diff --git a/modules/errors.py b/modules/errors.py index dffabe45..192cd8ff 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -84,3 +84,53 @@ def run(code, task): code() except Exception as e: display(task, e) + + +def check_versions(): + from packaging import version + from modules import shared + + import torch 
+ import gradio + + expected_torch_version = "2.0.0" + expected_xformers_version = "0.0.20" + expected_gradio_version = "3.39.0" + + if version.parse(torch.__version__) < version.parse(expected_torch_version): + print_error_explanation(f""" +You are running torch {torch.__version__}. +The program is tested to work with torch {expected_torch_version}. +To reinstall the desired version, run with commandline flag --reinstall-torch. +Beware that this will cause a lot of large files to be downloaded, as well as +there are reports of issues with training tab on the latest version. + +Use --skip-version-check commandline argument to disable this check. + """.strip()) + + if shared.xformers_available: + import xformers + + if version.parse(xformers.__version__) < version.parse(expected_xformers_version): + print_error_explanation(f""" +You are running xformers {xformers.__version__}. +The program is tested to work with xformers {expected_xformers_version}. +To reinstall the desired version, run with commandline flag --reinstall-xformers. + +Use --skip-version-check commandline argument to disable this check. + """.strip()) + + if gradio.__version__ != expected_gradio_version: + print_error_explanation(f""" +You are running gradio {gradio.__version__}. +The program is designed to work with gradio {expected_gradio_version}. +Using a different version of gradio is extremely likely to break the program. + +Reasons why you have the mismatched gradio version can be: + - you use --skip-install flag. + - you use webui.py to start the program instead of launch.py. + - an extension installs the incompatible gradio version. + +Use --skip-version-check commandline argument to disable this check. + """.strip()) + diff --git a/webui.py b/webui.py index 8d84e5a4..1803ea8a 100644 --- a/webui.py +++ b/webui.py @@ -14,7 +14,6 @@ from typing import Iterable from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware -from packaging import version import logging @@ -50,6 +49,7 @@ startup_timer.record("setup paths") import ldm.modules.encoders.modules # noqa: F401 startup_timer.record("import ldm") + from modules import extra_networks from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401 @@ -58,9 +58,14 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) +from modules import shared + +if not shared.cmd_opts.skip_version_check: + errors.check_versions() + import modules.codeformer_model as codeformer import modules.gfpgan_model as gfpgan -from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states +from modules import sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states import modules.face_restoration import modules.img2img @@ -130,37 +135,6 @@ def fix_asyncio_event_loop_policy(): asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) -def check_versions(): - if shared.cmd_opts.skip_version_check: - return - - expected_torch_version = "2.0.0" - - if version.parse(torch.__version__) < version.parse(expected_torch_version): - errors.print_error_explanation(f""" -You are running torch {torch.__version__}. -The program is tested to work with torch {expected_torch_version}. -To reinstall the desired version, run with commandline flag --reinstall-torch. 
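As an aside on the consolidated check_versions helper above: the torch and xformers branches rely on packaging.version, which parses PEP 440 strings, so CUDA builds such as "2.0.1+cu118" still order correctly against a plain minimum (the local "+cu118" suffix never drops a build below its own release). A minimal runnable sketch of that comparison, with illustrative names rather than code from the patch:

```python
from packaging import version

def meets_minimum(installed: str, expected: str) -> bool:
    # PEP 440 ordering: release numbers compare numerically, and a local
    # suffix like "+cu118" never ranks a build below its own release.
    return version.parse(installed) >= version.parse(expected)

print(meets_minimum("2.0.1+cu118", "2.0.0"))  # True
print(meets_minimum("1.13.1", "2.0.0"))       # False
```

Note that the gradio branch above deliberately uses an exact `!=` comparison instead of ordering, since any mismatched gradio version is treated as breaking.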
-Beware that this will cause a lot of large files to be downloaded, as well as -there are reports of issues with training tab on the latest version. - -Use --skip-version-check commandline argument to disable this check. - """.strip()) - - expected_xformers_version = "0.0.20" - if shared.xformers_available: - import xformers - - if version.parse(xformers.__version__) < version.parse(expected_xformers_version): - errors.print_error_explanation(f""" -You are running xformers {xformers.__version__}. -The program is tested to work with xformers {expected_xformers_version}. -To reinstall the desired version, run with commandline flag --reinstall-xformers. - -Use --skip-version-check commandline argument to disable this check. - """.strip()) - - def restore_config_state_file(): config_state_file = shared.opts.restore_config_state_file if config_state_file == "": @@ -248,7 +222,6 @@ def initialize(): fix_asyncio_event_loop_policy() validate_tls_options() configure_sigint_handler() - check_versions() modelloader.cleanup_models() configure_opts_onchange() -- cgit v1.2.1 From 75336dfc84cae280036bc52a6805eb10d9ae30ba Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Fri, 4 Aug 2023 13:38:52 +0800 Subject: add TAESD for i2i and t2i --- modules/processing.py | 13 +++++------ modules/sd_samplers_common.py | 38 ++++++++++++++++++++++++++----- modules/sd_vae_approx.py | 2 +- modules/sd_vae_taesd.py | 52 ++++++++++++++++++++++++++++++++++++------- modules/shared.py | 2 ++ 5 files changed, 86 insertions(+), 21 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 8f34c8b4..099d86b7 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -573,9 +573,10 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False): def decode_first_stage(model, x): - x = model.decode_first_stage(x.to(devices.dtype_vae)) - - return x + from modules.sd_samplers_common import samples_to_images_tensor, approximation_indexes + x = x.to(devices.dtype_vae) + approx_index = approximation_indexes.get(opts.sd_vae_decode_method, 0) + return samples_to_images_tensor(x, approx_index, model) def get_fixed_seed(seed): @@ -1344,10 +1345,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less") image = torch.from_numpy(batch_images) - image = 2. * image - 1. 
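For orientation while reading this commit's hunks: two image value ranges are in play, [0, 1] for UI-facing and TAESD tensors and [-1, 1] for the SD first-stage VAE, which is why `* 2 - 1` and `* 0.5 + 0.5` recur; TAESD latents are additionally scaled by about a factor of 1.5 relative to SD's, hence the `* 1.5` before the TAESD decoder and `/ 1.5` after its encoder. A minimal runnable sketch of the image-range round trip (illustrative helper names, not code from the patch):

```python
import torch

def to_vae_range(img01: torch.Tensor) -> torch.Tensor:
    # [0, 1] -> [-1, 1], the range the first-stage encoder expects
    return img01 * 2 - 1

def from_vae_range(img11: torch.Tensor) -> torch.Tensor:
    # [-1, 1] -> [0, 1], the range used for display and saving
    return img11 * 0.5 + 0.5

x = torch.rand(1, 3, 64, 64)
assert torch.allclose(from_vae_range(to_vae_range(x)), x, atol=1e-6)
```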
- image = image.to(shared.device, dtype=devices.dtype_vae) - - self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image)) + from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes + self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model) devices.torch_gc() if self.resize_mode == 3: diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 5deda761..5a45e8eb 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -23,19 +23,29 @@ def setup_img2img_steps(p, steps=None): approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3} -def single_sample_to_image(sample, approximation=None): +def samples_to_images_tensor(sample, approximation=None, model=None): + '''latents -> images [-1, 1]''' if approximation is None: approximation = approximation_indexes.get(opts.show_progress_type, 0) if approximation == 2: - x_sample = sd_vae_approx.cheap_approximation(sample) * 0.5 + 0.5 + x_sample = sd_vae_approx.cheap_approximation(sample) elif approximation == 1: - x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() * 0.5 + 0.5 + x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype)).detach() elif approximation == 3: x_sample = sample * 1.5 - x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() + x_sample = sd_vae_taesd.decoder_model()(x_sample.to(devices.device, devices.dtype)).detach() + x_sample = x_sample * 2 - 1 else: - x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5 + if model is None: + model = shared.sd_model + x_sample = model.decode_first_stage(sample) + + return x_sample + + +def single_sample_to_image(sample, approximation=None): + x_sample = samples_to_images_tensor(sample.unsqueeze(0), approximation)[0] * 0.5 + 0.5 x_sample = torch.clamp(x_sample, min=0.0, max=1.0) x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) @@ -52,6 +62,24 @@ def samples_to_image_grid(samples, approximation=None): return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples]) +def images_tensor_to_samples(image, approximation=None, model=None): + '''image[0, 1] -> latent''' + if approximation is None: + approximation = approximation_indexes.get(opts.sd_vae_encode_method, 0) + + if approximation == 3: + image = image.to(devices.device, devices.dtype) + x_latent = sd_vae_taesd.encoder_model()(image) / 1.5 + else: + if model is None: + model = shared.sd_model + image = image.to(shared.device, dtype=devices.dtype_vae) + image = image * 2 - 1 + x_latent = model.get_first_stage_encoding(model.encode_first_stage(image)) + + return x_latent + + def store_latent(decoded): state.current_latent = decoded diff --git a/modules/sd_vae_approx.py b/modules/sd_vae_approx.py index 86bd658a..3965e223 100644 --- a/modules/sd_vae_approx.py +++ b/modules/sd_vae_approx.py @@ -81,6 +81,6 @@ def cheap_approximation(sample): coefs = torch.tensor(coeffs).to(sample.device) - x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs) + x_sample = torch.einsum("...lxy,lr -> ...rxy", sample, coefs) return x_sample diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py index 5bf7c76e..808eb362 100644 --- a/modules/sd_vae_taesd.py +++ b/modules/sd_vae_taesd.py @@ -44,7 +44,17 @@ def decoder(): ) -class TAESD(nn.Module): +def encoder(): + return nn.Sequential( + conv(3, 64), Block(64, 64), + conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64), + conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64), + conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64), + conv(64, 4), + ) + + +class TAESDDecoder(nn.Module): latent_magnitude = 3 latent_shift = 0.5 @@ -55,21 +65,28 @@ class TAESD(nn.Module): self.decoder.load_state_dict( torch.load(decoder_path, map_location='cpu' if devices.device.type != 'cuda' else None)) - @staticmethod - def unscale_latents(x): - """[0, 1] -> raw latents""" - return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude) + +class TAESDEncoder(nn.Module): + latent_magnitude = 3 + latent_shift = 0.5 + + def __init__(self, encoder_path="taesd_encoder.pth"): + """Initialize pretrained TAESD on the given device from the given checkpoints.""" + super().__init__() + self.encoder = encoder() + self.encoder.load_state_dict( + torch.load(encoder_path, map_location='cpu' if devices.device.type != 'cuda' else None)) def download_model(model_path, model_url): if not os.path.exists(model_path): os.makedirs(os.path.dirname(model_path), exist_ok=True) - print(f'Downloading TAESD decoder to: {model_path}') + print(f'Downloading TAESD model to: {model_path}') torch.hub.download_url_to_file(model_url, model_path) -def model(): +def decoder_model(): model_name = "taesdxl_decoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_decoder.pth" loaded_model = sd_vae_taesd_models.get(model_name) @@ -78,7 +95,7 @@ def model(): download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name) if os.path.exists(model_path): - loaded_model = TAESD(model_path) + loaded_model = TAESDDecoder(model_path) loaded_model.eval() loaded_model.to(devices.device, devices.dtype) sd_vae_taesd_models[model_name] = loaded_model @@ -86,3 +103,22 @@ def model(): raise FileNotFoundError('TAESD model not found') return loaded_model.decoder + + +def encoder_model(): + model_name = 
"taesdxl_encoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_encoder.pth" + loaded_model = sd_vae_taesd_models.get(model_name) + + if loaded_model is None: + model_path = os.path.join(paths_internal.models_path, "VAE-taesd", model_name) + download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name) + + if os.path.exists(model_path): + loaded_model = TAESDEncoder(model_path) + loaded_model.eval() + loaded_model.to(devices.device, devices.dtype) + sd_vae_taesd_models[model_name] = loaded_model + else: + raise FileNotFoundError('TAESD model not found') + + return loaded_model.encoder diff --git a/modules/shared.py b/modules/shared.py index cec030f7..61ba9347 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -430,6 +430,8 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), + "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img or inpaint mask)"), + "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { -- cgit v1.2.1 From c134a480164bef017cd4b33fae57a31a86556beb Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Fri, 4 Aug 2023 13:40:20 +0800 Subject: Fix code style --- modules/sd_samplers_common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 5a45e8eb..d444cac1 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -2,7 +2,7 @@ from collections import namedtuple import numpy as np import torch from PIL import Image -from modules import devices, processing, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared +from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared from modules.shared import opts, state SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options']) @@ -40,7 +40,7 @@ def samples_to_images_tensor(sample, approximation=None, model=None): if model is None: model = shared.sd_model x_sample = model.decode_first_stage(sample) - + return x_sample -- cgit v1.2.1 From f0c1063a707a4a43823b0ed00e2a8eeb22a9ed0a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 4 Aug 2023 09:09:09 +0300 Subject: resolve some of circular import issues for kohaku --- modules/hypernetworks/hypernetwork.py | 5 ++--- modules/processing.py | 7 +------ modules/sd_hijack.py | 6 +++--- modules/sd_samplers_common.py | 10 ++++++++-- modules/textual_inversion/textual_inversion.py | 4 +++- 5 files changed, 17 insertions(+), 15 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 
c4821d21..70f1cbd2 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -10,7 +10,7 @@ import torch import tqdm from einops import rearrange, repeat from ldm.util import default -from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors +from modules import devices, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors from modules.textual_inversion import textual_inversion, logging from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum @@ -469,8 +469,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): - # images allows training previews to have infotext. Importing it at the top causes a circular import problem. - from modules import images + from modules import images, processing save_hypernetwork_every = save_hypernetwork_every or 0 create_image_every = create_image_every or 0 diff --git a/modules/processing.py b/modules/processing.py index 8f34c8b4..8086a2b0 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -30,6 +30,7 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType +decode_first_stage = sd_samplers_common.decode_first_stage # some of those options should not be changed at all because they would break the model, so I removed them from options. 
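Two patterns do the work in this commit: imports moved into function bodies (as in train_hypernetwork above), so a module that participates in a cycle is only resolved at call time, and a module-level alias (`decode_first_stage = sd_samplers_common.decode_first_stage`) so the relocated function stays importable from its old home. A self-contained sketch of the alias pattern using throwaway in-memory modules (illustrative names, not the real webui modules):

```python
import sys
import types

# Stand-in for the function's new home (sd_samplers_common in the patch).
new_home = types.ModuleType("new_home")
new_home.decode_first_stage = lambda model, x: f"decoded {x}"
sys.modules["new_home"] = new_home

# Stand-in for the old home (processing in the patch): re-export the moved
# callable under its old name so existing import sites keep working.
old_home = types.ModuleType("old_home")
old_home.decode_first_stage = new_home.decode_first_stage
sys.modules["old_home"] = old_home

from old_home import decode_first_stage  # old-style import still resolves
print(decode_first_stage(None, "latents"))  # decoded latents
```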
opt_C = 4 @@ -572,12 +573,6 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False): return samples -def decode_first_stage(model, x): - x = model.decode_first_stage(x.to(devices.dtype_vae)) - - return x - - def get_fixed_seed(seed): if seed is None or seed == '' or seed == -1: return int(random.randrange(4294967294)) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index cfa5f0eb..609fd56c 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -2,7 +2,6 @@ import torch from torch.nn.functional import silu from types import MethodType -import modules.textual_inversion.textual_inversion from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts @@ -164,12 +163,13 @@ class StableDiffusionModelHijack: clip = None optimization_method = None - embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase() - def __init__(self): + import modules.textual_inversion.textual_inversion + self.extra_generation_params = {} self.comments = [] + self.embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase() self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir) def apply_optimizations(self, option=None): diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 5deda761..b3d344e7 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -2,7 +2,7 @@ from collections import namedtuple import numpy as np import torch from PIL import Image -from modules import devices, processing, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared +from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared from modules.shared import opts, state SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options']) @@ -35,7 +35,7 @@ def single_sample_to_image(sample, approximation=None): x_sample = sample * 1.5 x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() else: - x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5 + x_sample = decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5 x_sample = torch.clamp(x_sample, min=0.0, max=1.0) x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) @@ -44,6 +44,12 @@ def single_sample_to_image(sample, approximation=None): return Image.fromarray(x_sample) +def decode_first_stage(model, x): + x = model.decode_first_stage(x.to(devices.dtype_vae)) + + return x + + def sample_to_image(samples, index=0, approximation=None): return single_sample_to_image(samples[index], approximation) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 4713bc2d..aa79dc09 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -13,7 +13,7 @@ import numpy as np from PIL import Image, PngImagePlugin from torch.utils.tensorboard import SummaryWriter -from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes +from modules import shared, devices, sd_hijack, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes import modules.textual_inversion.dataset from modules.textual_inversion.learn_schedule import LearnRateScheduler @@ -387,6 +387,8 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): + from modules import processing + save_embedding_every = save_embedding_every or 0 create_image_every = create_image_every or 0 template_file = textual_inversion_templates.get(template_filename, None) -- cgit v1.2.1 From 1f6bfdea80f58f292aeebb9a001689a118d71c01 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Fri, 4 Aug 2023 14:38:52 +0800 Subject: move the modified decode into smapler_common --- modules/sd_samplers_common.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 2cfa4ac6..7269514f 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -55,9 +55,9 @@ def single_sample_to_image(sample, approximation=None): def decode_first_stage(model, x): - x = model.decode_first_stage(x.to(devices.dtype_vae)) - - return x + x = x.to(devices.dtype_vae) + approx_index = approximation_indexes.get(opts.sd_vae_decode_method, 0) + return samples_to_images_tensor(x, approx_index, model) def sample_to_image(samples, index=0, approximation=None): -- cgit v1.2.1 From ac8dfd9386785127c2a71ee2c1ae4f950a46f4fd Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 03:52:22 -0400 Subject: Toggle extras checkbox for infotext paste --- modules/ui.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/ui.py b/modules/ui.py index 822a7660..fde79a8a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -603,6 +603,7 @@ def create_ui(): (hr_prompt, "Hires prompt"), (hr_negative_prompt, "Hires negative prompt"), (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), + (seed_checkbox, 
lambda d: any(x in ["Variation seed", "Variation seed strength", "Seed resize from-1", "Seed resize from-2"] for x in d)), *scripts.scripts_txt2img.infotext_fields ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) @@ -979,6 +980,7 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), + (seed_checkbox, lambda d: any(x in ["Variation seed", "Variation seed strength", "Seed resize from-1", "Seed resize from-2"] for x in d)), *scripts.scripts_img2img.infotext_fields ] parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) -- cgit v1.2.1 From d89a915b74cd999e13e559d6a702c7da9404db5e Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 04:03:37 -0400 Subject: Only enable hr fix if hr scale or upscale in infotext on paste --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 822a7660..4628bc89 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -591,8 +591,8 @@ def create_ui(): (seed_resize_from_h, "Seed resize from-2"), (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), + (enable_hr, lambda d: "Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d)), + (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d))), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), (hr_second_pass_steps, "Hires steps"), -- cgit v1.2.1 From 82b415c9c141d8616e1e9ccb55e47a1884d652ba Mon Sep 17 00:00:00 2001 From: daxijiu <127850313+daxijiu@users.noreply.github.com> Date: Fri, 4 Aug 2023 16:03:49 +0800 Subject: fix some content are ignore by localization in setting "Face restoration model" and "Select which Real-ESRGAN models" and in extras "upscaler 1 & 2" are ignore by localization --- javascript/localization.js | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/javascript/localization.js b/javascript/localization.js index eb22b8a7..0c9032f9 100644 --- a/javascript/localization.js +++ b/javascript/localization.js @@ -11,11 +11,11 @@ var ignore_ids_for_localization = { train_hypernetwork: 'OPTION', txt2img_styles: 'OPTION', img2img_styles: 'OPTION', - setting_random_artist_categories: 'SPAN', - setting_face_restoration_model: 'SPAN', - setting_realesrgan_enabled_models: 'SPAN', - extras_upscaler_1: 'SPAN', - extras_upscaler_2: 'SPAN', + setting_random_artist_categories: 'OPTION', + setting_face_restoration_model: 'OPTION', + setting_realesrgan_enabled_models: 'OPTION', + extras_upscaler_1: 'OPTION', + extras_upscaler_2: 'OPTION', }; var re_num = /^[.\d]+$/; -- cgit v1.2.1 From 67312653d766fbbebaecae87b26d5e49789970d3 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 04:40:56 -0400 Subject: Cleanup hr infotext paste check --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 4628bc89..cc298031 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -592,7 
+592,7 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d)), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d))), + (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d))), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), (hr_second_pass_steps, "Hires steps"), -- cgit v1.2.1 From 7c5480eb969a421786089c6f8bc664a7444e75ee Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 04:42:35 -0400 Subject: Cleanup hr infotext paste check mk2 --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index cc298031..8b38d213 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -591,7 +591,7 @@ def create_ui(): (seed_resize_from_h, "Seed resize from-2"), (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), - (enable_hr, lambda d: "Denoising strength" in d and any(x in ["Hires upscale", "Hires upscaler"] for x in d)), + (enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d)), (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d))), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), -- cgit v1.2.1 From f5994e84a213e40f9f6f0eb24df1d4fe38a45a2e Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 04:57:01 -0400 Subject: Cleanup extras checkbox infotext paste check --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index fde79a8a..2f3f74b5 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -585,6 +585,7 @@ def create_ui(): (width, "Size-1"), (height, "Size-2"), (batch_size, "Batch size"), + (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d), (subseed, "Variation seed"), (subseed_strength, "Variation seed strength"), (seed_resize_from_w, "Seed resize from-1"), @@ -603,7 +604,6 @@ def create_ui(): (hr_prompt, "Hires prompt"), (hr_negative_prompt, "Hires negative prompt"), (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), - (seed_checkbox, lambda d: any(x in ["Variation seed", "Variation seed strength", "Seed resize from-1", "Seed resize from-2"] for x in d)), *scripts.scripts_txt2img.infotext_fields ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings) @@ -973,6 +973,7 @@ def create_ui(): (width, "Size-1"), (height, "Size-2"), (batch_size, "Batch size"), + (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d), (subseed, "Variation seed"), (subseed_strength, "Variation seed strength"), (seed_resize_from_w, "Seed resize from-1"), @@ -980,7 +981,6 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), 
(denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), - (seed_checkbox, lambda d: any(x in ["Variation seed", "Variation seed strength", "Seed resize from-1", "Seed resize from-2"] for x in d)), *scripts.scripts_img2img.infotext_fields ] parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings) -- cgit v1.2.1 From df9fd1d3ae5e01a9de67ba4aa34e96e9f789704a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 05:31:38 -0400 Subject: Fix inpaint mask for Gradio 3.39.0 --- modules/img2img.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 68e415ef..4ae2ba72 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -129,9 +129,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s mask = None elif mode == 2: # inpaint image, mask = init_img_with_mask["image"], init_img_with_mask["mask"] - alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1') - mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1') - mask = ImageChops.lighter(alpha_mask, mask).convert('L') + mask = mask.convert('RGBA').split()[3].convert('L').point(lambda x: 255 if x > 0 else 0) image = image.convert("RGB") elif mode == 3: # inpaint sketch image = inpaint_color_sketch -- cgit v1.2.1 From e219211ff6ffcdb4094334dbc4bff9a2d33af55c Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 05:35:47 -0400 Subject: Remove unused import in img2img --- modules/img2img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/img2img.py b/modules/img2img.py index 4ae2ba72..ed21e82c 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -3,7 +3,7 @@ from contextlib import closing from pathlib import Path import numpy as np -from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError +from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError import gradio as gr from modules import sd_samplers, images as imgutil -- cgit v1.2.1 From 2dc2bc4ab53837e326a9c70ae250031ff6e8c929 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 05:40:13 -0400 Subject: Fix string quotes --- modules/img2img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/img2img.py b/modules/img2img.py index ed21e82c..b50678a6 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -129,7 +129,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s mask = None elif mode == 2: # inpaint image, mask = init_img_with_mask["image"], init_img_with_mask["mask"] - mask = mask.convert('RGBA').split()[3].convert('L').point(lambda x: 255 if x > 0 else 0) + mask = mask.convert("RGBA").split()[3].convert("L").point(lambda x: 255 if x > 0 else 0) image = image.convert("RGB") elif mode == 3: # inpaint sketch image = inpaint_color_sketch -- cgit v1.2.1 From cd4e053e5e73b90a129d5ebe5a0334a07765598f Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 05:43:53 -0400 Subject: Simplify img2img mask conversion, fix threshold --- modules/img2img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/img2img.py b/modules/img2img.py index b50678a6..85d92064 100644 --- a/modules/img2img.py 
+++ b/modules/img2img.py @@ -129,7 +129,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s mask = None elif mode == 2: # inpaint image, mask = init_img_with_mask["image"], init_img_with_mask["mask"] - mask = mask.convert("RGBA").split()[3].convert("L").point(lambda x: 255 if x > 0 else 0) + mask = mask.split()[-1].convert('L').point(lambda x: 255 if x > 128 else 0) image = image.convert("RGB") elif mode == 3: # inpaint sketch image = inpaint_color_sketch -- cgit v1.2.1 From 99f5f8e76b31c86d3091b92414a1586c29508086 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 05:47:25 -0400 Subject: Fix string quotes --- modules/img2img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/img2img.py b/modules/img2img.py index 85d92064..d8e1c534 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -129,7 +129,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s mask = None elif mode == 2: # inpaint image, mask = init_img_with_mask["image"], init_img_with_mask["mask"] - mask = mask.split()[-1].convert('L').point(lambda x: 255 if x > 128 else 0) + mask = mask.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0) image = image.convert("RGB") elif mode == 3: # inpaint sketch image = inpaint_color_sketch -- cgit v1.2.1 From 094c416a801b16c7d8e1944e2e9fae2c9e98bf12 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Fri, 4 Aug 2023 17:53:16 +0800 Subject: change all encode --- modules/processing.py | 14 ++++++-------- modules/sd_samplers_common.py | 2 +- run.ps1 | 1 + run_local.ps1 | 3 +++ update.ps1 | 1 + 5 files changed, 12 insertions(+), 9 deletions(-) create mode 100644 run.ps1 create mode 100644 run_local.ps1 create mode 100644 update.ps1 diff --git a/modules/processing.py b/modules/processing.py index aae39866..544667a4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -16,6 +16,7 @@ from typing import Any, Dict, List import modules.sd_hijack from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors from modules.sd_hijack import model_hijack +from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.paths as paths @@ -30,7 +31,6 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType -decode_first_stage = sd_samplers_common.decode_first_stage # some of those options should not be changed at all because they would break the model, so I removed them from options. opt_C = 4 @@ -84,7 +84,7 @@ def txt2img_image_conditioning(sd_model, x, width, height): # The "masked-image" in this case will just be all zeros since the entire image is masked. image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) - image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning)) + image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method)) # Add the fake full 1s mask to the first dimension. 
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) @@ -203,7 +203,7 @@ class StableDiffusionProcessing: midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device) midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size) - conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image)) + conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) conditioning = torch.nn.functional.interpolate( self.sd_model.depth_model(midas_in), size=conditioning_image.shape[2:], @@ -216,7 +216,7 @@ class StableDiffusionProcessing: return conditioning def edit_image_conditioning(self, source_image): - conditioning_image = self.sd_model.encode_first_stage(source_image).mode() + conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) return conditioning_image @@ -255,7 +255,7 @@ class StableDiffusionProcessing: ) # Encode the new masked image using first stage of network. - conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image)) + conditioning_image = images_tensor_to_samples(conditioning_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) # Create the concatenated conditioning tensor to be fed to `c_concat` conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:]) @@ -1099,9 +1099,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): decoded_samples = torch.from_numpy(np.array(batch_images)) decoded_samples = decoded_samples.to(shared.device) - decoded_samples = 2. * decoded_samples - 1. - samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples)) + samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method)) image_conditioning = self.img2img_image_conditioning(decoded_samples, samples) @@ -1339,7 +1338,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less") image = torch.from_numpy(batch_images) - from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model) devices.torch_gc() diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 7269514f..42a29fc9 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -75,7 +75,7 @@ def images_tensor_to_samples(image, approximation=None, model=None): if approximation == 3: image = image.to(devices.device, devices.dtype) - x_latent = sd_vae_taesd.encoder_model()(image) / 1.5 + x_latent = sd_vae_taesd.encoder_model()(image) else: if model is None: model = shared.sd_model diff --git a/run.ps1 b/run.ps1 new file mode 100644 index 00000000..82c1660b --- /dev/null +++ b/run.ps1 @@ -0,0 +1 @@ +.\venv\Scripts\accelerate-launch.exe --num_cpu_threads_per_process=6 --api .\launch.py --listen --port 17415 --xformers --opt-channelslast \ No newline at end of file diff --git a/run_local.ps1 b/run_local.ps1 new file mode 100644 index 00000000..e2ac43db --- /dev/null +++ b/run_local.ps1 @@ -0,0 +1,3 @@ +.\venv\Scripts\Activate.ps1 +python .\launch.py --xformers --opt-channelslast --api +. 
$PSCommandPath \ No newline at end of file diff --git a/update.ps1 b/update.ps1 new file mode 100644 index 00000000..9960bead --- /dev/null +++ b/update.ps1 @@ -0,0 +1 @@ +git stash push && git pull --rebase && git stash pop \ No newline at end of file -- cgit v1.2.1 From 6346d8eeaa17ba0f7e41618908519f6e9bfe07e0 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Fri, 4 Aug 2023 17:53:30 +0800 Subject: Revert "change all encode" This reverts commit 094c416a801b16c7d8e1944e2e9fae2c9e98bf12. --- modules/processing.py | 14 ++++++++------ modules/sd_samplers_common.py | 2 +- run.ps1 | 1 - run_local.ps1 | 3 --- update.ps1 | 1 - 5 files changed, 9 insertions(+), 12 deletions(-) delete mode 100644 run.ps1 delete mode 100644 run_local.ps1 delete mode 100644 update.ps1 diff --git a/modules/processing.py b/modules/processing.py index 544667a4..aae39866 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -16,7 +16,6 @@ from typing import Any, Dict, List import modules.sd_hijack from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors from modules.sd_hijack import model_hijack -from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.paths as paths @@ -31,6 +30,7 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType +decode_first_stage = sd_samplers_common.decode_first_stage # some of those options should not be changed at all because they would break the model, so I removed them from options. opt_C = 4 @@ -84,7 +84,7 @@ def txt2img_image_conditioning(sd_model, x, width, height): # The "masked-image" in this case will just be all zeros since the entire image is masked. image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) - image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method)) + image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning)) # Add the fake full 1s mask to the first dimension. image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) @@ -203,7 +203,7 @@ class StableDiffusionProcessing: midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device) midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size) - conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) + conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image)) conditioning = torch.nn.functional.interpolate( self.sd_model.depth_model(midas_in), size=conditioning_image.shape[2:], @@ -216,7 +216,7 @@ class StableDiffusionProcessing: return conditioning def edit_image_conditioning(self, source_image): - conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) + conditioning_image = self.sd_model.encode_first_stage(source_image).mode() return conditioning_image @@ -255,7 +255,7 @@ class StableDiffusionProcessing: ) # Encode the new masked image using first stage of network. 
- conditioning_image = images_tensor_to_samples(conditioning_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) + conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image)) # Create the concatenated conditioning tensor to be fed to `c_concat` conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:]) @@ -1099,8 +1099,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): decoded_samples = torch.from_numpy(np.array(batch_images)) decoded_samples = decoded_samples.to(shared.device) + decoded_samples = 2. * decoded_samples - 1. - samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method)) + samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples)) image_conditioning = self.img2img_image_conditioning(decoded_samples, samples) @@ -1338,6 +1339,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less") image = torch.from_numpy(batch_images) + from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model) devices.torch_gc() diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 42a29fc9..7269514f 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -75,7 +75,7 @@ def images_tensor_to_samples(image, approximation=None, model=None): if approximation == 3: image = image.to(devices.device, devices.dtype) - x_latent = sd_vae_taesd.encoder_model()(image) + x_latent = sd_vae_taesd.encoder_model()(image) / 1.5 else: if model is None: model = shared.sd_model diff --git a/run.ps1 b/run.ps1 deleted file mode 100644 index 82c1660b..00000000 --- a/run.ps1 +++ /dev/null @@ -1 +0,0 @@ -.\venv\Scripts\accelerate-launch.exe --num_cpu_threads_per_process=6 --api .\launch.py --listen --port 17415 --xformers --opt-channelslast \ No newline at end of file diff --git a/run_local.ps1 b/run_local.ps1 deleted file mode 100644 index e2ac43db..00000000 --- a/run_local.ps1 +++ /dev/null @@ -1,3 +0,0 @@ -.\venv\Scripts\Activate.ps1 -python .\launch.py --xformers --opt-channelslast --api -. 
$PSCommandPath \ No newline at end of file diff --git a/update.ps1 b/update.ps1 deleted file mode 100644 index 9960bead..00000000 --- a/update.ps1 +++ /dev/null @@ -1 +0,0 @@ -git stash push && git pull --rebase && git stash pop \ No newline at end of file -- cgit v1.2.1 From 073342c8878adc208be1eaab2705ba865d7b3ea1 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Fri, 4 Aug 2023 17:55:52 +0800 Subject: remove unneeded scale --- modules/sd_samplers_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 7269514f..42a29fc9 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -75,7 +75,7 @@ def images_tensor_to_samples(image, approximation=None, model=None): if approximation == 3: image = image.to(devices.device, devices.dtype) - x_latent = sd_vae_taesd.encoder_model()(image) / 1.5 + x_latent = sd_vae_taesd.encoder_model()(image) else: if model is None: model = shared.sd_model -- cgit v1.2.1 From 21000f13a169263a7da2c66b300130d7a6339c7d Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Fri, 4 Aug 2023 18:23:14 +0800 Subject: replace get_first_stage_encoding --- modules/processing.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index aae39866..aa6d4d2a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -16,6 +16,7 @@ import modules.sd_hijack from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors from modules.sd_hijack import model_hijack +from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.paths as paths @@ -30,7 +31,6 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType -decode_first_stage = sd_samplers_common.decode_first_stage # some of those options should not be changed at all because they would break the model, so I removed them from options. opt_C = 4 @@ -84,7 +84,7 @@ def txt2img_image_conditioning(sd_model, x, width, height): # The "masked-image" in this case will just be all zeros since the entire image is masked. image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) - image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning)) + image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method)) # Add the fake full 1s mask to the first dimension. image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) @@ -203,7 +203,7 @@ class StableDiffusionProcessing: midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device) midas_in = repeat(midas_in, "1 ... 
-> n ...", n=self.batch_size) - conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image)) + conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) conditioning = torch.nn.functional.interpolate( self.sd_model.depth_model(midas_in), size=conditioning_image.shape[2:], @@ -216,7 +216,7 @@ class StableDiffusionProcessing: return conditioning def edit_image_conditioning(self, source_image): - conditioning_image = self.sd_model.encode_first_stage(source_image).mode() + conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) return conditioning_image @@ -1099,9 +1099,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): decoded_samples = torch.from_numpy(np.array(batch_images)) decoded_samples = decoded_samples.to(shared.device) - decoded_samples = 2. * decoded_samples - 1. - samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples)) + samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method)) image_conditioning = self.img2img_image_conditioning(decoded_samples, samples) @@ -1339,7 +1338,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less") image = torch.from_numpy(batch_images) - from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model) devices.torch_gc() -- cgit v1.2.1 From daee41e0d64e51adaebbd0d6ba4ba85e0b59d0ae Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 06:45:12 -0400 Subject: Fix Gradio 3.39.0 textbox overflow --- style.css | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/style.css b/style.css index 86b4f61e..14e6c011 100644 --- a/style.css +++ b/style.css @@ -140,6 +140,10 @@ div.styler{ background: var(--background-fill-primary); } +.block.gradio-textbox{ + overflow: visible !important; +} + /* general styled components */ -- cgit v1.2.1 From 3ca3c7f1c62f525539926fa883d8f5d54de85de6 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 07:20:32 -0400 Subject: Add styling for script components --- style.css | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/style.css b/style.css index 86b4f61e..d9699668 100644 --- a/style.css +++ b/style.css @@ -190,6 +190,13 @@ button.custom-button{ text-align: center; } +.gradio-group[id$="_script_container"] > div .gradio-group > div > div:is(.gradio-accordion, .form) { + border: 1px solid var(--block-border-color) !important; + border-radius: 8px !important; + margin: 2px 0; + padding: 8px 8px; +} + /* txt2img/img2img specific */ -- cgit v1.2.1 From fadbab378183c654f3af35865022acbac877de24 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 4 Aug 2023 14:56:27 +0300 Subject: Curse you, gradio!!! 
fixes broken refresh button #12309 --- modules/ui_common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_common.py b/modules/ui_common.py index eefe0c0e..1dda1627 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -239,13 +239,13 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele for comp in refresh_components: setattr(comp, k, v) - return [gr.update(**(args or {})) for _ in refresh_components] + return (gr.update(**(args or {})) for _ in refresh_components) if len(refresh_components) > 1 else gr.update(**(args or {})) refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id, tooltip=f"{label}: refresh" if label else "Refresh") refresh_button.click( fn=refresh, inputs=[], - outputs=[*refresh_components] + outputs=refresh_components ) return refresh_button -- cgit v1.2.1 From 682ff8936df018330e0d2a259794a262dc3251b2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 4 Aug 2023 18:51:15 +0300 Subject: glorious, glorious wonderful clear milky white butter smooth color for inpainting you are the best, gradio how I yearned for this day i always believed in you i knew you had it in you this day marks a new beginning thank you, everyone thank you --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 586174b8..6cf3dff8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -667,11 +667,11 @@ def create_ui(): add_copy_image_controls('sketch', sketch) with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height) + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color='#ffffff') add_copy_image_controls('inpaint', init_img_with_mask) with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height) + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color='#ffffff') inpaint_color_sketch_orig = gr.State(None) add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) -- cgit v1.2.1 From 9213d5cb3b5614b85f4752a0ba54f0cdf1282857 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 12:26:37 -0400 Subject: Open raw sysinfo link in new page --- modules/ui_settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_settings.py b/modules/ui_settings.py index a6076bf3..6dde4b6a 100644 --- a/modules/ui_settings.py +++ b/modules/ui_settings.py @@ -158,7 +158,7 @@ class UiSettings: loadsave.create_ui() with gr.TabItem("Sysinfo", id="sysinfo", elem_id="settings_tab_sysinfo"): - gr.HTML('Download system info
(or open as text in a new page)', elem_id="sysinfo_download") + gr.HTML('Download system info
(or open as text in a new page)', elem_id="sysinfo_download") with gr.Row(): with gr.Column(scale=1): -- cgit v1.2.1 From b6596cdb19414cdb31a762e4c4ffbdce17d2d6e9 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 13:26:37 -0400 Subject: Prompt parser: account for empty field in alternating words syntax --- modules/prompt_parser.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 8169a459..32d214e3 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -20,7 +20,7 @@ prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)* | "(" prompt ":" prompt ")" | "[" prompt "]" scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER [WHITESPACE] "]" -alternate: "[" prompt ("|" prompt)+ "]" +alternate: "[" prompt ("|" [prompt])+ "]" WHITESPACE: /\s+/ plain: /([^\\\[\]():|]|\\.)+/ %import common.SIGNED_NUMBER -> NUMBER @@ -53,6 +53,10 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): [[3, '((a][:b:c '], [10, '((a][:b:c d']] >>> g("[a|(b:1.1)]") [[1, 'a'], [2, '(b:1.1)'], [3, 'a'], [4, '(b:1.1)'], [5, 'a'], [6, '(b:1.1)'], [7, 'a'], [8, '(b:1.1)'], [9, 'a'], [10, '(b:1.1)']] + >>> g("[fe|]male") + [[1, 'female'], [2, 'male'], [3, 'female'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'female'], [8, 'male'], [9, 'female'], [10, 'male']] + >>> g("[fe|||]male") + [[1, 'female'], [2, 'male'], [3, 'male'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'male'], [8, 'male'], [9, 'female'], [10, 'male']] """ def collect_steps(steps, tree): @@ -78,7 +82,8 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): before, after, _, when, _ = args yield before or () if step <= when else after def alternate(self, args): - yield next(args[(step - 1)%len(args)]) + args = ["" if not arg else arg for arg in args] + yield args[(step - 1) % len(args)] def start(self, args): def flatten(x): if type(x) == str: -- cgit v1.2.1 From 45601766409e531d2b4ee512bf1433600f140183 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 4 Aug 2023 22:05:40 +0300 Subject: added VAE selection to checkpoint user metadata --- modules/extra_networks.py | 19 +++++++ modules/sd_vae.py | 13 ++++- modules/ui_extra_networks.py | 13 +---- modules/ui_extra_networks_checkpoints.py | 3 ++ .../ui_extra_networks_checkpoints_user_metadata.py | 60 ++++++++++++++++++++++ 5 files changed, 96 insertions(+), 12 deletions(-) create mode 100644 modules/ui_extra_networks_checkpoints_user_metadata.py diff --git a/modules/extra_networks.py b/modules/extra_networks.py index 6ae07e91..fa28ac75 100644 --- a/modules/extra_networks.py +++ b/modules/extra_networks.py @@ -1,3 +1,5 @@ +import json +import os import re from collections import defaultdict @@ -177,3 +179,20 @@ def parse_prompts(prompts): return res, extra_data + +def get_user_metadata(filename): + if filename is None: + return {} + + basename, ext = os.path.splitext(filename) + metadata_filename = basename + '.json' + + metadata = {} + try: + if os.path.isfile(metadata_filename): + with open(metadata_filename, "r", encoding="utf8") as file: + metadata = json.load(file) + except Exception as e: + errors.display(e, f"reading extra network user metadata from {metadata_filename}") + + return metadata diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 84271db0..0bd5e19b 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,6 +1,6 @@ import os import collections -from modules import 
paths, shared, devices, script_callbacks, sd_models +from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks import glob from copy import deepcopy @@ -16,6 +16,7 @@ checkpoint_info = None checkpoints_loaded = collections.OrderedDict() + def get_base_vae(model): if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model: return base_vae @@ -100,6 +101,16 @@ def resolve_vae(checkpoint_file): if shared.cmd_opts.vae_path is not None: return shared.cmd_opts.vae_path, 'from commandline argument' + metadata = extra_networks.get_user_metadata(checkpoint_file) + vae_metadata = metadata.get("vae", None) + if vae_metadata is not None and vae_metadata != "Automatic": + if vae_metadata == "None": + return None, None + + vae_from_metadata = vae_dict.get(vae_metadata, None) + if vae_from_metadata is not None: + return vae_from_metadata, "from user metadata" + is_automatic = shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index f2752f10..c6390db7 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -2,7 +2,7 @@ import os.path import urllib.parse from pathlib import Path -from modules import shared, ui_extra_networks_user_metadata, errors +from modules import shared, ui_extra_networks_user_metadata, errors, extra_networks from modules.images import read_info_from_image, save_image_with_geninfo from modules.ui import up_down_symbol import gradio as gr @@ -101,16 +101,7 @@ class ExtraNetworksPage: def read_user_metadata(self, item): filename = item.get("filename", None) - basename, ext = os.path.splitext(filename) - metadata_filename = basename + '.json' - - metadata = {} - try: - if os.path.isfile(metadata_filename): - with open(metadata_filename, "r", encoding="utf8") as file: - metadata = json.load(file) - except Exception as e: - errors.display(e, f"reading extra network user metadata from {metadata_filename}") + metadata = extra_networks.get_user_metadata(filename) desc = metadata.get("description", None) if desc is not None: diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index 891d8f2c..77885022 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -3,6 +3,7 @@ import os from modules import shared, ui_extra_networks, sd_models from modules.ui_extra_networks import quote_js +from modules.ui_extra_networks_checkpoints_user_metadata import CheckpointUserMetadataEditor class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): @@ -34,3 +35,5 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): def allowed_directories_for_previews(self): return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None] + def create_user_metadata_editor(self, ui, tabname): + return CheckpointUserMetadataEditor(ui, tabname, self) diff --git a/modules/ui_extra_networks_checkpoints_user_metadata.py b/modules/ui_extra_networks_checkpoints_user_metadata.py new file mode 100644 index 00000000..2c69aab8 --- /dev/null +++ b/modules/ui_extra_networks_checkpoints_user_metadata.py @@ -0,0 +1,60 @@ +import gradio as gr + +from modules import ui_extra_networks_user_metadata, sd_vae +from modules.ui_common import create_refresh_button + + +class CheckpointUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor): + def 
__init__(self, ui, tabname, page): + super().__init__(ui, tabname, page) + + self.select_vae = None + + def save_user_metadata(self, name, desc, notes, vae): + user_metadata = self.get_user_metadata(name) + user_metadata["description"] = desc + user_metadata["notes"] = notes + user_metadata["vae"] = vae + + self.write_user_metadata(name, user_metadata) + + def put_values_into_components(self, name): + user_metadata = self.get_user_metadata(name) + values = super().put_values_into_components(name) + + return [ + *values[0:5], + user_metadata.get('vae', ''), + ] + + def create_editor(self): + self.create_default_editor_elems() + + with gr.Row(): + self.select_vae = gr.Dropdown(choices=["Automatic", "None"] + list(sd_vae.vae_dict), value="None", label="Preferred VAE", elem_id="checpoint_edit_user_metadata_preferred_vae") + create_refresh_button(self.select_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["Automatic", "None"] + list(sd_vae.vae_dict)}, "checpoint_edit_user_metadata_refresh_preferred_vae") + + self.edit_notes = gr.TextArea(label='Notes', lines=4) + + self.create_default_buttons() + + viewed_components = [ + self.edit_name, + self.edit_description, + self.html_filedata, + self.html_preview, + self.edit_notes, + self.select_vae, + ] + + self.button_edit\ + .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=viewed_components)\ + .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box]) + + edited_components = [ + self.edit_description, + self.edit_notes, + self.select_vae, + ] + + self.setup_save_handler(self.button_save, self.save_user_metadata, edited_components) -- cgit v1.2.1 From 1d60a609a9d7a7f79517dc0c87d4b834b89db252 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 5 Aug 2023 09:25:21 +0900 Subject: configurable mask color and default brush color --- modules/shared.py | 3 +++ modules/ui.py | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index cec030f7..1eb00b8f 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -492,6 +492,9 @@ options_templates.update(options_section(('ui', "User interface"), { "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_restart(), "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_restart(), "img2img_editor_height": OptionInfo(720, "img2img: height of image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_restart(), + "img2img_sketch_default_brush_color": OptionInfo("#000000", "sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch) (requires reload"), + "img2img_inpaint_mask_brush_color": OptionInfo("#000000", "inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask) (requires reload"), + "img2img_inpaint_sketch_default_brush_color": OptionInfo("#000000", "inpaint sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch) (requires reload"), "return_grid": OptionInfo(True, "Show grid in results for web"), "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), "return_mask_composite": OptionInfo(False, "For 
inpainting, include masked composite in results for web"), diff --git a/modules/ui.py b/modules/ui.py index 6cf3dff8..843a75ef 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -663,15 +663,15 @@ def create_ui(): add_copy_image_controls('img2img', init_img) with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height) + sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) add_copy_image_controls('sketch', sketch) with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color='#ffffff') + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) add_copy_image_controls('inpaint', init_img_with_mask) with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color='#ffffff') + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) inpaint_color_sketch_orig = gr.State(None) add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) -- cgit v1.2.1 From bcff763b6e71c0d1291c283071048535cb8ab247 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 4 Aug 2023 22:59:47 -0400 Subject: Add tab and carriage return to invalid filename chars --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index 38aa933d..ba3c43a4 100644 --- a/modules/images.py +++ b/modules/images.py @@ -318,7 +318,7 @@ def resize_image(resize_mode, im, width, height, upscaler_name=None): return res -invalid_filename_chars = '<>:"/\\|?*\n' +invalid_filename_chars = '<>:"/\\|?*\n\r\t' invalid_filename_prefix = ' ' invalid_filename_postfix = ' .' 
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+') -- cgit v1.2.1 From aa744cadc8e357e696a608c8d0c77a7bfc1c9f39 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sat, 5 Aug 2023 12:35:40 +0800 Subject: add infotext --- modules/generation_parameters_copypaste.py | 8 ++++++++ modules/processing.py | 3 +++ 2 files changed, 11 insertions(+) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index a3448be9..0713dbf0 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -304,6 +304,12 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "Schedule rho" not in res: res["Schedule rho"] = 0 + if "VAE Encoder" not in res: + res["VAE Encoder"] = "Full" + + if "VAE Decoder" not in res: + res["VAE Decoder"] = "Full" + return res @@ -329,6 +335,8 @@ infotext_to_setting_name_mapping = [ ('RNG', 'randn_source'), ('NGMS', 's_min_uncond'), ('Pad conds', 'pad_cond_uncond'), + ('VAE Encoder', 'sd_vae_encode_method'), + ('VAE Decoder', 'sd_vae_decode_method'), ] diff --git a/modules/processing.py b/modules/processing.py index aa6d4d2a..a9ee7507 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -788,6 +788,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) + p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) @@ -1100,6 +1101,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): decoded_samples = torch.from_numpy(np.array(batch_images)) decoded_samples = decoded_samples.to(shared.device) + self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method)) image_conditioning = self.img2img_image_conditioning(decoded_samples, samples) @@ -1338,6 +1340,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less") image = torch.from_numpy(batch_images) + self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model) devices.torch_gc() -- cgit v1.2.1 From e7140a36c07c7590334eaaea07a3c79d7e044db9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 07:36:25 +0300 Subject: change default color to white --- modules/shared.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 1eb00b8f..fca6ad63 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -492,9 +492,9 @@ options_templates.update(options_section(('ui', "User interface"), { "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: 
localization.list_localizations(cmd_opts.localizations_dir)).needs_restart(), "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_restart(), "img2img_editor_height": OptionInfo(720, "img2img: height of image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_restart(), - "img2img_sketch_default_brush_color": OptionInfo("#000000", "sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch) (requires reload"), - "img2img_inpaint_mask_brush_color": OptionInfo("#000000", "inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask) (requires reload"), - "img2img_inpaint_sketch_default_brush_color": OptionInfo("#000000", "inpaint sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch) (requires reload"), + "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_restart(), + "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_restart(), + "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "inpaint sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_restart(), "return_grid": OptionInfo(True, "Show grid in results for web"), "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), -- cgit v1.2.1 From d8371d0b3c90252bfb4de619a2e6f80296845554 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sat, 5 Aug 2023 12:37:46 +0800 Subject: update info --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 61ba9347..3491ad79 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -430,7 +430,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img or inpaint mask)"), + "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-dix or inpaint mask)"), "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), })) -- cgit v1.2.1 From d2b842ce079949f2eafa8a4a6e2374f0e5acac34 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 
<16777216c@gmail.com> Date: Sat, 5 Aug 2023 07:46:22 +0300 Subject: move img2img settings to their own section --- modules/shared.py | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index fca6ad63..55199cb9 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -417,12 +417,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), - "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), - "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), - "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", ui_components.FormColorPicker, {}), - "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."), + "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_restart(), "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), @@ -439,6 +434,22 @@ options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), })) + +options_templates.update(options_section(('img2img', "img2img"), { + "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), + "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), + "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), + "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), + "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_restart(), + "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_restart(), + "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_restart(), + "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_restart(), + "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), + "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), +})) + + options_templates.update(options_section(('optimizations', "Optimizations"), { "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), @@ -458,7 +469,7 @@ options_templates.update(options_section(('compatibility', "Compatibility"), { "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), })) -options_templates.update(options_section(('interrogate', "Interrogate Options"), { +options_templates.update(options_section(('interrogate', "Interrogate"), { 
"interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), @@ -491,13 +502,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { options_templates.update(options_section(('ui', "User interface"), { "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_restart(), "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_restart(), - "img2img_editor_height": OptionInfo(720, "img2img: height of image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_restart(), - "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_restart(), - "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_restart(), - "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "inpaint sketch brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_restart(), "return_grid": OptionInfo(True, "Show grid in results for web"), - "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), - "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), @@ -521,6 +526,7 @@ options_templates.update(options_section(('ui', "User interface"), { "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_restart(), })) + options_templates.update(options_section(('infotext', "Infotext"), { "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), -- cgit v1.2.1 From a6b245e46f28efe013637e5e9b0600b88df79dc9 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sat, 5 Aug 2023 12:49:35 +0800 Subject: dix --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 3491ad79..df454d4a 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -430,7 +430,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce 
the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-dix or inpaint mask)"), + "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), })) -- cgit v1.2.1 From 56888644a67298253260eda84ceb2d6cd0ce5099 Mon Sep 17 00:00:00 2001 From: Splendide Imaginarius <119545140+Splendide-Imaginarius@users.noreply.github.com> Date: Sat, 5 Aug 2023 04:54:23 +0000 Subject: Reduce mask blur kernel size to 2.5 sigmas MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This more closely matches the old behavior of PIL's Gaussian blur, and fixes breakage when tiling. See https://github.com/Coyote-A/ultimate-upscale-for-automatic1111/issues/111#issuecomment-1663504109 Thanks to Алексей Трофимов and eunnone for reporting the issue. --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 44d20fb7..63cd025c 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -1275,13 +1275,13 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.mask_blur_x > 0: np_mask = np.array(image_mask) - kernel_size = 2 * int(4 * self.mask_blur_x + 0.5) + 1 + kernel_size = 2 * int(2.5 * self.mask_blur_x + 0.5) + 1 np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), self.mask_blur_x) image_mask = Image.fromarray(np_mask) if self.mask_blur_y > 0: np_mask = np.array(image_mask) - kernel_size = 2 * int(4 * self.mask_blur_y + 0.5) + 1 + kernel_size = 2 * int(2.5 * self.mask_blur_y + 0.5) + 1 np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y) image_mask = Image.fromarray(np_mask) -- cgit v1.2.1 From 8197f24dbcfe41d77e337da42bed22944154465f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 08:07:13 +0300 Subject: remove the extra networks button --- modules/ui.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 61a6b4ad..1af6b4c8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -289,7 +289,6 @@ class Toprow: with gr.Row(elem_id=f"{id_part}_tools"): self.paste = ToolButton(value=paste_symbol, elem_id="paste") self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt") - self.extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) self.token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) -- cgit v1.2.1 From b85ec2b9b66492ff9bf3d40a4d9b424390067f0f Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sat, 5 Aug 2023 13:14:00 +0800 Subject: Fix some merge mistakes --- modules/processing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index b9900ded..43cb763f 100644 --- a/modules/processing.py +++ 
b/modules/processing.py @@ -1136,7 +1136,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): batch_images.append(image) decoded_samples = torch.from_numpy(np.array(batch_images)) - decoded_samples = decoded_samples.to(shared.device) + decoded_samples = decoded_samples.to(shared.device, dtype=devices.dtype_vae) self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method)) @@ -1374,6 +1374,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less") image = torch.from_numpy(batch_images) + image = image.to(shared.device, dtype=devices.dtype_vae) self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model) devices.torch_gc() -- cgit v1.2.1 From 7a64601428378c30e92efc00af7729db1b22dfb0 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 5 Aug 2023 14:21:28 +0900 Subject: need Reload UI not Restart --- .../scripts/extra_options_section.py | 4 +- modules/shared.py | 44 +++++++++++----------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py index a05e10d8..7bb0a1bb 100644 --- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py +++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py @@ -43,6 +43,6 @@ class ExtraOptionsSection(scripts.Script): shared.options_templates.update(shared.options_section(('ui', "User interface"), { - "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_restart(), - "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion") + "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_reload_ui(), + "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion").needs_restart() })) diff --git a/modules/shared.py b/modules/shared.py index 8245250a..fb317972 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -286,7 +286,9 @@ class OptionInfo: self.comment_after += " (requires restart)" return self - + def needs_reload_ui(self): + self.comment_after += " (requires Reload UI)" + return self def options_section(section_identifier, options_dict): @@ -392,8 +394,8 @@ options_templates.update(options_section(('face-restoration', "Face restoration" })) options_templates.update(options_section(('system', "System"), { - "show_warnings": OptionInfo(False, "Show warnings in console.").needs_restart(), - "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_restart(), + "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), + "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio 
deprecation warnings in console.").needs_reload_ui(), "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), @@ -427,7 +429,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), - "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_restart(), + "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(), "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), @@ -451,10 +453,10 @@ options_templates.update(options_section(('img2img', "img2img"), { "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), - "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_restart(), - "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_restart(), - "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_restart(), - "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_restart(), + "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), + "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch 
initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), + "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), + "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), })) @@ -503,15 +505,15 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), - "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_restart(), + "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), })) options_templates.update(options_section(('ui', "User interface"), { - "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_restart(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_restart(), + "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), + "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_reload_ui(), "return_grid": OptionInfo(True, "Show grid in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), @@ -521,19 +523,19 @@ options_templates.update(options_section(('ui', "User interface"), { "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), - "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_restart(), - "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_restart(), + "samplers_in_dropdown": OptionInfo(True, 
"Use dropdown for sampler selection instead of radio group").needs_reload_ui(), + "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_restart(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(), - "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_restart(), - "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_restart(), - "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_restart(), - "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_restart(), + "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), + "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), + "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), + "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), + "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), + "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), + "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), })) @@ -564,7 +566,7 @@ options_templates.update(options_section(('ui', "Live previews"), { })) options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_restart(), + "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise 
multiplier; higher = more unpredictable results"), "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), -- cgit v1.2.1 From e053e21af6563e9b42347b484f307fdcabb85f3c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 08:48:03 +0300 Subject: put localStorage stuff into its own file --- .eslintrc.js | 4 ++++ javascript/localStorage.js | 26 ++++++++++++++++++++++++++ javascript/ui.js | 24 ++++++++---------------- 3 files changed, 38 insertions(+), 16 deletions(-) create mode 100644 javascript/localStorage.js diff --git a/.eslintrc.js b/.eslintrc.js index f33aca09..e3b4fb76 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -87,5 +87,9 @@ module.exports = { modalNextImage: "readonly", // token-counters.js setupTokenCounters: "readonly", + // localStorage.js + localSet: "readonly", + localGet: "readonly", + localRemove: "readonly" } }; diff --git a/javascript/localStorage.js b/javascript/localStorage.js new file mode 100644 index 00000000..dc1a36c3 --- /dev/null +++ b/javascript/localStorage.js @@ -0,0 +1,26 @@ + +function localSet(k, v) { + try { + localStorage.setItem(k, v); + } catch (e) { + console.warn(`Failed to save ${k} to localStorage: ${e}`); + } +} + +function localGet(k, def) { + try { + return localStorage.getItem(k) ?? def; + } catch (e) { + console.warn(`Failed to load ${k} from localStorage: ${e}`); + } + + return def; +} + +function localRemove(k) { + try { + return localStorage.removeItem(k); + } catch (e) { + console.warn(`Failed to remove ${k} from localStorage: ${e}`); + } +} diff --git a/javascript/ui.js b/javascript/ui.js index abf23a78..bade3089 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -152,15 +152,11 @@ function submit() { showSubmitButtons('txt2img', false); var id = randomId(); - try { - localStorage.setItem("txt2img_task_id", id); - } catch (e) { - console.warn(`Failed to save txt2img task id to localStorage: ${e}`); - } + localSet("txt2img_task_id", id); requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() { showSubmitButtons('txt2img', true); - localStorage.removeItem("txt2img_task_id"); + localRemove("txt2img_task_id"); showRestoreProgressButton('txt2img', false); }); @@ -175,15 +171,11 @@ function submit_img2img() { showSubmitButtons('img2img', false); var id = randomId(); - try { - localStorage.setItem("img2img_task_id", id); - } catch (e) { - console.warn(`Failed to save img2img task id to localStorage: ${e}`); - } + localSet("img2img_task_id", id); requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() { showSubmitButtons('img2img', true); - localStorage.removeItem("img2img_task_id"); + localRemove("img2img_task_id"); showRestoreProgressButton('img2img', false); }); @@ -197,7 +189,7 @@ function submit_img2img() { function restoreProgressTxt2img() { showRestoreProgressButton("txt2img", false); - var id = localStorage.getItem("txt2img_task_id"); + var id = localGet("txt2img_task_id"); if (id) { requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() { showSubmitButtons('txt2img', true); @@ -211,7 +203,7 @@ function restoreProgressTxt2img() { function
restoreProgressImg2img() { showRestoreProgressButton("img2img", false); - var id = localStorage.getItem("img2img_task_id"); + var id = localGet("img2img_task_id"); if (id) { requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() { @@ -224,8 +216,8 @@ function restoreProgressImg2img() { onUiLoaded(function() { - showRestoreProgressButton('txt2img', localStorage.getItem("txt2img_task_id")); - showRestoreProgressButton('img2img', localStorage.getItem("img2img_task_id")); + showRestoreProgressButton('txt2img', localGet("txt2img_task_id")); + showRestoreProgressButton('img2img', localGet("img2img_task_id")); }); -- cgit v1.2.1 From c74c708ed8c422bf7ca1f388a3ee772c7d1e4ddd Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 09:15:18 +0300 Subject: add checkbox to show/hide dirs for extra networks --- javascript/extraNetworks.js | 29 +++++++++++++++++++++++++++++ modules/ui_extra_networks.py | 5 +++-- style.css | 5 ++++- 3 files changed, 36 insertions(+), 3 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 44d02349..897ebeba 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -1,3 +1,20 @@ +function toggleCss(key, css, enable) { + var style = document.getElementById(key); + if (enable && !style) { + style = document.createElement('style'); + style.id = key; + style.type = 'text/css'; + document.head.appendChild(style); + } + if (style && !enable) { + document.head.removeChild(style); + } + if (style) { + style.innerHTML = ''; + style.appendChild(document.createTextNode(css)); + } +} + function setupExtraNetworksForTab(tabname) { gradioApp().querySelector('#' + tabname + '_extra_tabs').classList.add('extra-networks'); @@ -7,12 +24,15 @@ function setupExtraNetworksForTab(tabname) { var sort = gradioApp().getElementById(tabname + '_extra_sort'); var sortOrder = gradioApp().getElementById(tabname + '_extra_sortorder'); var refresh = gradioApp().getElementById(tabname + '_extra_refresh'); + var showDirsDiv = gradioApp().getElementById(tabname + '_extra_show_dirs'); + var showDirs = gradioApp().querySelector('#' + tabname + '_extra_show_dirs input'); sort.dataset.sortkey = 'sortDefault'; tabs.appendChild(searchDiv); tabs.appendChild(sort); tabs.appendChild(sortOrder); tabs.appendChild(refresh); + tabs.appendChild(showDirsDiv); var applyFilter = function() { var searchTerm = search.value.toLowerCase(); @@ -78,6 +98,15 @@ function setupExtraNetworksForTab(tabname) { }); extraNetworksApplyFilter[tabname] = applyFilter; + + var showDirsUpdate = function() { + var css = '#' + tabname + '_extra_tabs .extra-network-subdirs { display: none; }'; + toggleCss(tabname + '_extra_show_dirs_style', css, !showDirs.checked); + localSet('extra-networks-show-dirs', showDirs.checked ?
1 : 0); + }; + showDirs.checked = localGet('extra-networks-show-dirs', 1) == 1; + showDirs.addEventListener("change", showDirsUpdate); + showDirsUpdate(); } function applyExtraNetworkFilter(tabname) { diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 3a73c89e..e0b932b9 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -375,15 +375,16 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") button_sortorder = ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False) button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False) + checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False) ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False) ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False) for tab in unrelated_tabs: - tab.select(fn=lambda: [gr.update(visible=False) for _ in range(5)], inputs=[], outputs=[edit_search, edit_search, dropdown_sort, button_sortorder, button_refresh], show_progress=False) + tab.select(fn=lambda: [gr.update(visible=False) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False) for tab in related_tabs: - tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, edit_search, dropdown_sort, button_sortorder, button_refresh], show_progress=False) + tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False) def pages_html(): if not ui.pages_contents: diff --git a/style.css b/style.css index 52919f71..dc4d37b9 100644 --- a/style.css +++ b/style.css @@ -801,9 +801,12 @@ footer { margin: 0 0.15em; } .extra-networks .tab-nav .search, -.extra-networks .tab-nav .sort{ +.extra-networks .tab-nav .sort, +.extra-networks .tab-nav .show-dirs +{ margin: 0.3em; align-self: center; + width: auto; } .extra-networks .tab-nav .search { -- cgit v1.2.1 From ad510b2cd312b554f641084a375038ead4c5149c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 09:17:36 +0300 Subject: fix refresh button for styles --- modules/ui_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_common.py b/modules/ui_common.py index 1dda1627..303af9cd 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -239,7 +239,7 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele for comp in refresh_components: setattr(comp, k, v) - return (gr.update(**(args or {})) for _ in refresh_components) if len(refresh_components) > 1 else gr.update(**(args or {})) + return [gr.update(**(args or {})) for _ in refresh_components] if len(refresh_components) > 1 else gr.update(**(args or {})) refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id, tooltip=f"{label}: refresh" if label else "Refresh") refresh_button.click( -- cgit v1.2.1 From 
3f451f3042cc69a751cebc329c11f9f145da5186 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 10:36:26 +0300 Subject: do not add VAE Encoder/Decoder to infotext if it's the default --- modules/processing.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 43cb763f..a9d66005 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -795,7 +795,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if getattr(samples_ddim, 'already_decoded', False): x_samples_ddim = samples_ddim else: - p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method + if opts.sd_vae_decode_method != 'Full': + p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method + x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = torch.stack(x_samples_ddim).float() @@ -1138,7 +1140,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): decoded_samples = torch.from_numpy(np.array(batch_images)) decoded_samples = decoded_samples.to(shared.device, dtype=devices.dtype_vae) - self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method + if opts.sd_vae_encode_method != 'Full': + self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method)) image_conditioning = self.img2img_image_conditioning(decoded_samples, samples) @@ -1375,7 +1378,10 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): image = torch.from_numpy(batch_images) image = image.to(shared.device, dtype=devices.dtype_vae) - self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method + + if opts.sd_vae_encode_method != 'Full': + self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method + self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model) devices.torch_gc() -- cgit v1.2.1 From 36ca80d0046f85529682dc966a2bf822b00d8f2b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 10:43:06 +0300 Subject: put VAE into a separate settings page --- modules/shared.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 367f815e..c6adda73 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -425,9 +425,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), - "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), - "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), - "sd_vae_as_default": 
OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(), "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), @@ -435,10 +432,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), - "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { @@ -448,6 +442,14 @@ options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), })) +options_templates.update(options_section(('vae', "VAE"), { + "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), + "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), + "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), + "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), + "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or
inpaint mask)"), + "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), +})) options_templates.update(options_section(('img2img', "img2img"), { "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), @@ -463,7 +465,6 @@ options_templates.update(options_section(('img2img', "img2img"), { "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), })) - options_templates.update(options_section(('optimizations', "Optimizations"), { "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), -- cgit v1.2.1 From 60183eebc37a69545e41cb6b00189609b85129b0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 11:18:13 +0300 Subject: add description to VAE setting page --- modules/shared.py | 20 ++++++++++++++++++++ style.css | 7 +++++++ 2 files changed, 27 insertions(+) diff --git a/modules/shared.py b/modules/shared.py index c6adda73..92adc563 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -259,6 +259,7 @@ class OptionInfo: self.onchange = onchange self.section = section self.refresh = refresh + self.do_not_save = False self.comment_before = comment_before """HTML text that will be added after label in UI""" @@ -291,6 +292,13 @@ class OptionInfo: return self +class OptionHTML(OptionInfo): + def __init__(self, text): + super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) + + self.do_not_save = True + + def options_section(section_identifier, options_dict): for v in options_dict.values(): v.section = section_identifier @@ -443,6 +451,12 @@ options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { })) options_templates.update(options_section(('vae', "VAE"), { + "sd_vae_explanation": OptionHTML(""" +VAE is a neural network that transforms a standard RGB +image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling +(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. +For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
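As a concrete illustration of the encode/decode split this explanation describes, here is a rough sketch of the latent roundtrip using the diffusers library's AutoencoderKL. This is an assumed stand-in for illustration only: webui itself routes encoding through images_tensor_to_samples() and decoding through decode_latent_batch(), and the checkpoint name below is just a plausible choice.

import torch
from diffusers import AutoencoderKL  # assumed external dependency for this sketch

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").eval()  # assumed VAE checkpoint

@torch.no_grad()
def encode(image):
    # image: float RGB in [-1, 1], shape (1, 3, H, W); latents are 8x smaller spatially
    return vae.encode(image).latent_dist.sample() * 0.18215  # SD1.x latent scaling factor

@torch.no_grad()
def decode(latent):
    return vae.decode(latent / 0.18215).sample

image = torch.randn(1, 3, 512, 512)  # stand-in for a real input image
latent = encode(image)               # (1, 4, 64, 64): the tensor samplers actually work on
restored = decode(latent)            # back to (1, 3, 512, 512) RGB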
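Separately, the OptionHTML/do_not_save machinery added in this same commit is easier to follow in isolation. Below is a minimal, self-contained sketch of the builder pattern these hunks rely on; it is a simplified re-implementation for illustration, and the real OptionInfo in modules/shared.py carries more fields such as component, refresh and onchange.

class OptionInfo:
    def __init__(self, default, label):
        self.default = default
        self.label = label
        self.section = None
        self.comment_after = ""   # text appended after the label in the settings UI
        self.do_not_save = False  # display-only entries are never persisted

    def info(self, text):
        # chainable annotation: append a note and return self
        self.comment_after += f" ({text})"
        return self

    def needs_reload_ui(self):
        self.comment_after += " (requires Reload UI)"
        return self


class OptionHTML(OptionInfo):
    def __init__(self, text):
        super().__init__(str(text).strip(), label='')
        self.do_not_save = True  # skipped by the save/set paths shown above


def options_section(section_identifier, options_dict):
    # tag every option with the (key, title) pair of its settings page
    for v in options_dict.values():
        v.section = section_identifier
    return options_dict


templates = {}
templates.update(options_section(('vae', "VAE"), {
    "sd_vae_explanation": OptionHTML("VAE transforms images to and from latent space."),
    "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM").info("0 = disable"),
}))
assert templates["sd_vae_explanation"].do_not_save
assert templates["sd_vae_checkpoint_cache"].section == ('vae', "VAE")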
+"""), "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), @@ -619,6 +633,9 @@ class Options: assert not cmd_opts.freeze_settings, "changing settings is disabled" info = opts.data_labels.get(key, None) + if info.do_not_save: + return + comp_args = info.component_args if info else None if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: raise RuntimeError(f"not possible to set {key} because it is restricted") @@ -648,6 +665,9 @@ class Options: if oldval == value: return False + if self.data_labels[key].do_not_save: + return False + try: setattr(self, key, value) except RuntimeError: diff --git a/style.css b/style.css index dc4d37b9..8c1f273c 100644 --- a/style.css +++ b/style.css @@ -494,6 +494,13 @@ table.popup-table .link{ font-size: 18pt; } +#settings .settings-info{ + max-width: 48em; + border: 1px dotted #777; + margin: 0; + padding: 1em; +} + /* live preview */ .progressDiv{ -- cgit v1.2.1 From 1d7dcdb6c38c7bca945b3fa8a5d4a1f93446f22a Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 5 Aug 2023 19:07:39 +0900 Subject: Option to not save incomplete images --- modules/processing.py | 19 +++++++++++-------- modules/shared.py | 1 + 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 43cb763f..bf4f938b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -103,6 +103,10 @@ def txt2img_image_conditioning(sd_model, x, width, height): return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) +def save_images_if_interrupted(): + return not (opts.dont_save_interrupted_images and (state.interrupted or state.skipped)) + + class StableDiffusionProcessing: """ The first set of parameters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing @@ -821,6 +825,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: def infotext(index=0, use_main_prompt=False): return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts) + save_images_if_interrupt = save_images_if_interrupted() + for i, x_sample in enumerate(x_samples_ddim): p.batch_index = i @@ -828,7 +834,7 @@ x_sample = x_sample.astype(np.uint8) if p.restore_faces: - if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration: + if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration and save_images_if_interrupt: images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration") devices.torch_gc() @@ -842,16 +848,15 @@ pp = scripts.PostprocessImageArgs(image) p.scripts.postprocess_image(p, pp) image = pp.image - if p.color_corrections is not None and i < len(p.color_corrections): - if
opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction: + if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction and save_images_if_interrupt: image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images) images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction") image = apply_color_correction(p.color_corrections[i], image) image = apply_overlay(image, p.paste_to, i, p.overlay_images) - if opts.samples_save and not p.do_not_save_samples: + if opts.samples_save and not p.do_not_save_samples and save_images_if_interrupt: images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p) text = infotext(i) @@ -859,8 +864,7 @@ if opts.enable_pnginfo: image.info["parameters"] = text output_images.append(image) - - if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]): + if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]) and save_images_if_interrupt: image_mask = p.mask_for_overlay.convert('RGB') image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') @@ -896,7 +900,6 @@ grid.info["parameters"] = text output_images.insert(0, grid) index_of_first_image = 1 - if opts.grid_save: images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True) @@ -1091,7 +1094,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def save_intermediate(image, index): """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" - if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix: + if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix or not save_images_if_interrupted(): return if not isinstance(image, Image.Image): diff --git a/modules/shared.py b/modules/shared.py index 516ad7e8..a7de686c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -356,6 +356,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), + "dont_save_interrupted_images": OptionInfo(False, "Don't save incomplete images").info("Don't save images that have been interrupted mid-generation; they will still show up in webui output."), })) options_templates.update(options_section(('saving-paths', "Paths for saving"), { -- cgit v1.2.1 From 8ece321df34af982164a8a38bfa67c2f26484bc8 Mon Sep 17 00:00:00 2001 From: dhwz Date: Sat, 5 Aug 2023 16:03:06 +0200 Subject: add new gradio themes --- modules/shared.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/modules/shared.py
b/modules/shared.py index 92adc563..14cef51f 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -51,16 +51,35 @@ restricted_opts = { # https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json gradio_hf_hub_themes = [ + "gradio/base", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", - "freddyaboulton/dracula_revamped", "gradio/dracula_test", "abidlabs/dracula_test", + "abidlabs/Lime", "abidlabs/pakistan", + "Ama434/neutral-barlow", "dawood/microsoft_windows", - "ysharma/steampunk" + "finlaymacklon/smooth_slate", + "Franklisi/darkmode", + "freddyaboulton/dracula_revamped", + "freddyaboulton/test-blue", + "gstaff/xkcd", + "Insuz/Mocha", + "Insuz/SimpleIndigo", + "JohnSmith9982/small_and_pretty", + "nota-ai/theme", + "nuttea/Softblue", + "ParityError/Anime", + "reilnuud/polite", + "remilia/Ghostly", + "rottenlittlecreature/Moon_Goblin", + "step-3-profit/Midnight-Deep", + "Taithrah/Minimal", + "ysharma/huggingface", + "ysharma/steampunk" ] -- cgit v1.2.1 From 1f7fc4d7a350c38121af84527873bf8a40643fe3 Mon Sep 17 00:00:00 2001 From: dhwz Date: Sat, 5 Aug 2023 16:07:57 +0200 Subject: fix whitespace --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 14cef51f..9530d80e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -79,7 +79,7 @@ gradio_hf_hub_themes = [ "step-3-profit/Midnight-Deep", "Taithrah/Minimal", "ysharma/huggingface", - "ysharma/steampunk" + "ysharma/steampunk" ] -- cgit v1.2.1 From c6278c15a81bf65efb65ded50368972a920cc198 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 5 Aug 2023 17:11:37 +0300 Subject: add explanation for gradio themes --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 9530d80e..a99b500b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -549,7 +549,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { options_templates.update(options_section(('ui', "User interface"), { "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_reload_ui(), + "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), "return_grid": OptionInfo(True, "Show grid in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), -- cgit v1.2.1 From b315c207564a950d3ff846385ebae288352af8f8 Mon Sep 17 00:00:00 2001 From: bannsec <7632864+bannsec@users.noreply.github.com> Date: Sat, 5 Aug 2023 14:07:35 -0400 Subject: Update README.md Correct install instructions on linux and provide additional required apt packages Fixes #12351 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b796d150..cd11f856 100644 --- a/README.md +++ b/README.md @@ -115,7 +115,7 @@ Alternatively, use online services (like Google Colab): 1. 
Install the dependencies: ```bash # Debian-based: -sudo apt install wget git python3 python3-venv +sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0 # Red Hat-based: sudo dnf install wget git python3 # Arch-based: @@ -123,7 +123,7 @@ sudo pacman -S wget git python3 ``` 2. Navigate to the directory you would like the webui to be installed and execute the following command: ```bash -bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) +wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh ``` 3. Run `webui.sh`. 4. Check `webui-user.sh` for options. -- cgit v1.2.1 From 496cef956b3337c4745b1224e244706bd747195a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 5 Aug 2023 21:14:13 -0400 Subject: Allow s_noise override to actually be used --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index a9d66005..1601a14c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -149,7 +149,7 @@ class StableDiffusionProcessing: self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option - self.s_noise = s_noise or opts.s_noise + self.s_noise = opts.data.get('s_noise', s_noise) self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts} self.override_settings_restore_afterwards = override_settings_restore_afterwards self.is_using_inpainting_conditioning = False -- cgit v1.2.1 From dfc01c68cd204fd091b3cf5b855d5c0f77a6526a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 5 Aug 2023 21:23:58 -0400 Subject: Increase s_churn max value --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index a99b500b..1a6cc86d 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -606,7 +606,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), -- cgit v1.2.1 From c11104fed5ffee7b9a22674889580028296c5e55 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 5 Aug 2023 21:42:03 
-0400 Subject: Add s_tmax --- modules/processing.py | 2 +- modules/shared.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index a9d66005..a5cd2a47 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -148,7 +148,7 @@ class StableDiffusionProcessing: self.s_min_uncond = s_min_uncond or opts.s_min_uncond self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin - self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option + self.s_tmax = opts.data.get('s_tmax', 0) or float('inf') # not representable as a standard ui option self.s_noise = s_noise or opts.s_noise self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts} self.override_settings_restore_afterwards = override_settings_restore_afterwards diff --git a/modules/shared.py b/modules/shared.py index a99b500b..b1c0c0e9 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -608,6 +608,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf"), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), -- cgit v1.2.1 From 85c2c138d2f5f853c79bde3775d69628e86c65ca Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 5 Aug 2023 21:51:46 -0400 Subject: Attempt to read s_tmax from arg first if option not found --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index a5cd2a47..2003532d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -148,7 +148,7 @@ class StableDiffusionProcessing: self.s_min_uncond = s_min_uncond or opts.s_min_uncond self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin - self.s_tmax = opts.data.get('s_tmax', 0) or float('inf') # not representable as a standard ui option + self.s_tmax = opts.data.get('s_tmax', s_tmax or 0) or float('inf') # not representable as a standard ui option self.s_noise = s_noise or opts.s_noise self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts} self.override_settings_restore_afterwards = override_settings_restore_afterwards -- cgit v1.2.1 From 31506f07718803190e67cbbd8180af313d9e2a08 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 5 Aug 2023 22:37:25 -0400 Subject: Add sigma params to infotext --- modules/generation_parameters_copypaste.py | 4 ++++ modules/sd_samplers_kdiffusion.py | 34 +++++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 
1 deletion(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 593abfef..e71c9601 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -328,6 +328,10 @@ infotext_to_setting_name_mapping = [ ('Noise multiplier', 'initial_noise_multiplier'), ('Eta', 'eta_ancestral'), ('Eta DDIM', 'eta_ddim'), + ('Sigma churn', 's_churn'), + ('Sigma tmin', 's_tmin'), + ('Sigma tmax', 's_tmax'), + ('Sigma noise', 's_noise'), ('Discard penultimate sigma', 'always_discard_next_to_last_sigma'), ('UniPC variant', 'uni_pc_variant'), ('UniPC skip type', 'uni_pc_skip_type'), diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 8bb639f5..6f46da7c 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -4,6 +4,7 @@ import inspect import k_diffusion.sampling from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra +from modules.processing import StableDiffusionProcessing from modules.shared import opts, state import modules.shared as shared from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback @@ -280,6 +281,14 @@ class KDiffusionSampler: self.last_latent = None self.s_min_uncond = None + # NOTE: These are also defined in the StableDiffusionProcessing class. + # They should have been here to begin with but we're going to + # leave that class __init__ signature alone. + self.s_churn = 0.0 + self.s_tmin = 0.0 + self.s_tmax = float('inf') + self.s_noise = 1.0 + self.conditioning_key = sd_model.model.conditioning_key def callback_state(self, d): @@ -314,7 +323,7 @@ class KDiffusionSampler: def number_of_needed_noises(self, p): return p.steps - def initialize(self, p): + def initialize(self, p: StableDiffusionProcessing): self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None self.model_wrap_cfg.step = 0 @@ -335,6 +344,29 @@ class KDiffusionSampler: extra_params_kwargs['eta'] = self.eta + if len(self.extra_params) > 0: + s_churn = p.override_settings.get('s_churn', p.s_churn) + s_tmin = p.override_settings.get('s_tmin', p.s_tmin) + s_tmax = p.override_settings.get('s_tmax', p.s_tmax) + s_noise = p.override_settings.get('s_noise', p.s_noise) + + if s_churn != self.s_churn: + extra_params_kwargs['s_churn'] = s_churn + p.s_churn = s_churn + p.extra_generation_params['Sigma churn'] = s_churn + if s_tmin != self.s_tmin: + extra_params_kwargs['s_tmin'] = s_tmin + p.s_tmin = s_tmin + p.extra_generation_params['Sigma tmin'] = s_tmin + if s_tmax != self.s_tmax: + extra_params_kwargs['s_tmax'] = s_tmax + p.s_tmax = s_tmax + p.extra_generation_params['Sigma tmax'] = s_tmax + if s_noise != self.s_noise: + extra_params_kwargs['s_noise'] = s_noise + p.s_noise = s_noise + p.extra_generation_params['Sigma noise'] = s_noise + return extra_params_kwargs def get_sigmas(self, p, steps): -- cgit v1.2.1 From f18a03219039ebaa48ce96adc5b50e0e5ed5d84f Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 5 Aug 2023 23:05:25 -0400 Subject: Correct s_noise fix --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 1601a14c..bb268db7 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -110,7 +110,7 @@ class StableDiffusionProcessing: cached_uc = [None, None] cached_c = [None, None] - def
__init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -149,7 +149,7 @@ class StableDiffusionProcessing: self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option - self.s_noise = opts.data.get('s_noise', s_noise) + self.s_noise = s_noise if s_noise is not None else opts.data.get('s_noise', 1.0) self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts} self.override_settings_restore_afterwards = override_settings_restore_afterwards self.is_using_inpainting_conditioning = False -- cgit v1.2.1 From d86d12e9117772f041682124badc7baac7c57911 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 6 Aug 2023 06:21:36 +0300 Subject: rework saving incomplete images --- modules/processing.py | 18 +++++++++--------- modules/shared.py | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 8f26621b..aef8fafd 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -103,10 +103,6 @@ def txt2img_image_conditioning(sd_model, x, width, height): return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) -def save_images_if_interrupted(): - return not (opts.dont_save_interrupted_images and (state.interrupted or state.skipped)) - - class 
StableDiffusionProcessing: """ The first set of parameters: sd_models -> do_not_reload_embeddings represents the minimum required to create a StableDiffusionProcessing @@ -372,6 +368,10 @@ class StableDiffusionProcessing: def parse_extra_network_prompts(self): self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts) + def save_samples(self) -> bool: + """Returns whether generated images need to be written to disk""" + return opts.samples_save and not self.do_not_save_samples and (opts.save_incomplete_images or not state.interrupted and not state.skipped) + class Processed: def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""): @@ -827,7 +827,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: def infotext(index=0, use_main_prompt=False): return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts) - save_images_if_interrupt = save_images_if_interrupted() + save_samples = p.save_samples() for i, x_sample in enumerate(x_samples_ddim): p.batch_index = i @@ -836,7 +836,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: x_sample = x_sample.astype(np.uint8) if p.restore_faces: - if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration and save_images_if_interrupt: + if save_samples and opts.save_images_before_face_restoration: images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration") devices.torch_gc() @@ -851,14 +851,14 @@ class StableDiffusionProcessing: p.scripts.postprocess_image(p, pp) image = pp.image if p.color_corrections is not None and i < len(p.color_corrections): - if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction and save_images_if_interrupt: + if save_samples and opts.save_images_before_color_correction: image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images) images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction") image = apply_color_correction(p.color_corrections[i], image) image = apply_overlay(image, p.paste_to, i, p.overlay_images) - if opts.samples_save and not p.do_not_save_samples and save_images_if_interrupt: + if save_samples: images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p) text = infotext(i) @@ -1096,7 +1096,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def save_intermediate(image, index): """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" - if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix or not save_images_if_interrupted(): + if not self.save_samples() or not opts.save_images_before_highres_fix: return if not isinstance(image, Image.Image): diff --git a/modules/shared.py b/modules/shared.py index 2bd49ff1..3276d45e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -385,7 +385,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "temp_dir":
OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), - "dont_save_interrupted_images": OptionInfo(False, "Don't save incomplete images").info("Don't save images that has been interrupted in mid-generation, they will still show up in webui output."), + "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that have been interrupted mid-generation; even if not saved, they will still show up in webui output."), })) options_templates.update(options_section(('saving-paths', "Paths for saving"), { -- cgit v1.2.1 From ee96a6a58852e6db30afac129570efb88e991957 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 6 Aug 2023 06:32:41 +0300 Subject: do the same for s_tmax #12345 --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 8743ac0a..6625f2de 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -148,8 +148,8 @@ class StableDiffusionProcessing: self.s_min_uncond = s_min_uncond or opts.s_min_uncond self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin - self.s_tmax = opts.data.get('s_tmax', s_tmax or 0) or float('inf') # not representable as a standard ui option - self.s_noise = s_noise if s_noise is not None else opts.data.get('s_noise', 1.0) + self.s_tmax = (s_tmax if s_tmax is not None else opts.s_tmax) or float('inf') + self.s_noise = s_noise if s_noise is not None else opts.s_noise self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts} self.override_settings_restore_afterwards = override_settings_restore_afterwards self.is_using_inpainting_conditioning = False -- cgit v1.2.1 From e9c591b10194a866f1e508899047aca6681c90dc Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 6 Aug 2023 12:06:46 +0900 Subject: Gradio theme cache --- modules/shared.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index a99b500b..5e17a4be 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -550,6 +550,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { options_templates.update(options_section(('ui', "User interface"), { "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of the themes from the gallery.").needs_reload_ui(), + "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), "return_grid": OptionInfo(True, "Show grid in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), @@ -863,13 +864,17 @@ def reload_gradio_theme(theme_name=None): gradio_theme = gr.themes.Default(**default_theme_args) else: try: - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + theme_cache_path = os.path.join(script_path, 'tmp', 'gradio_themes',
f'{theme_name.replace("/", "_")}.json') + if opts.gradio_themes_cache and os.path.exists(theme_cache_path): + gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) + else: + gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + gradio_theme.dump(theme_cache_path) except Exception as e: errors.display(e, "changing gradio theme") gradio_theme = gr.themes.Default(**default_theme_args) - class TotalTQDM: def __init__(self): self._tqdm = None -- cgit v1.2.1 From ce4be668fe1d2192378a7b4d3173cfd18031a017 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 5 Aug 2023 23:42:20 -0400 Subject: Read kdiffusion sigma params from opts --- modules/sd_samplers_kdiffusion.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 6f46da7c..0a0a3474 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -345,25 +345,25 @@ class KDiffusionSampler: extra_params_kwargs['eta'] = self.eta if len(self.extra_params) > 0: - s_churn = p.override_settings.get('s_churn', p.s_churn) - s_tmin = p.override_settings.get('s_tmin', p.s_tmin) - s_tmax = p.override_settings.get('s_tmax', p.s_tmax) - s_noise = p.override_settings.get('s_noise', p.s_noise) + s_churn = getattr(opts, 's_churn', p.s_churn) + s_tmin = getattr(opts, 's_tmin', p.s_tmin) + s_tmax = getattr(opts, 's_tmax', p.s_tmax) + s_noise = getattr(opts, 's_noise', p.s_noise) if s_churn != self.s_churn: extra_params_kwargs['s_churn'] = s_churn p.s_churn = s_churn p.extra_generation_params['Sigma churn'] = s_churn if s_tmin != self.s_tmin: - extra_params_kwargs['s_tmin'] = s_churn + extra_params_kwargs['s_tmin'] = s_tmin p.s_tmin = s_tmin p.extra_generation_params['Sigma tmin'] = s_tmin if s_tmax != self.s_tmax: - extra_params_kwargs['s_tmax'] = s_churn + extra_params_kwargs['s_tmax'] = s_tmax p.s_tmax = s_tmax p.extra_generation_params['Sigma tmax'] = s_tmax if s_noise != self.s_noise: - extra_params_kwargs['s_noise'] = s_churn + extra_params_kwargs['s_noise'] = s_noise p.s_noise = s_noise p.extra_generation_params['Sigma noise'] = s_noise -- cgit v1.2.1 From 8f31b139b8898921853f686c313ba54c57ad7cb8 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 5 Aug 2023 23:50:33 -0400 Subject: Assume 0 = inf for s_tmax --- modules/sd_samplers_kdiffusion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 0a0a3474..db71a549 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -347,7 +347,7 @@ class KDiffusionSampler: if len(self.extra_params) > 0: s_churn = getattr(opts, 's_churn', p.s_churn) s_tmin = getattr(opts, 's_tmin', p.s_tmin) - s_tmax = getattr(opts, 's_tmax', p.s_tmax) + s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf s_noise = getattr(opts, 's_noise', p.s_noise) if s_churn != self.s_churn: -- cgit v1.2.1 From 5cae08f2c32b63989aa04fba32e5c56d9ff4fa6e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 6 Aug 2023 06:55:19 +0300 Subject: fix rework saving incomplete images --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 6625f2de..7d21fb12 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -866,7 +866,7 @@ def process_images_inner(p: 
StableDiffusionProcessing) -> Processed: if opts.enable_pnginfo: image.info["parameters"] = text output_images.append(image) - if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]) and save_images_if_interrupt: + if save_samples and hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]): image_mask = p.mask_for_overlay.convert('RGB') image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') -- cgit v1.2.1 From aa42c0ff8e51c1dde50a313aa2c12b357b287b50 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 6 Aug 2023 07:41:17 +0300 Subject: repair broken live previews if using VAE with half --- modules/sd_samplers_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 42a29fc9..39586b40 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -39,7 +39,7 @@ def samples_to_images_tensor(sample, approximation=None, model=None): else: if model is None: model = shared.sd_model - x_sample = model.decode_first_stage(sample) + x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype)) return x_sample -- cgit v1.2.1 From f9950da3e30e6c8e2993d1d69d6e5c26c6a56485 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 6 Aug 2023 12:39:28 +0300 Subject: create dir for gradio themes cache if it's missing --- modules/shared.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 525371cc..8e1b8063 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -866,10 +866,12 @@ def reload_gradio_theme(theme_name=None): gradio_theme = gr.themes.Default(**default_theme_args) else: try: - theme_cache_path = os.path.join(script_path, 'tmp', 'gradio_themes', f'{theme_name.replace("/", "_")}.json') + theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') + theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') if opts.gradio_themes_cache and os.path.exists(theme_cache_path): gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) else: + os.makedirs(theme_cache_dir, exist_ok=True) gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) gradio_theme.dump(theme_cache_path) except Exception as e: -- cgit v1.2.1 From 57e8a11d17a6646fdf551320f5f714fba752987a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 6 Aug 2023 13:25:51 +0300 Subject: enable cond cache by default --- modules/processing.py | 31 ++++++++++++++++++------------- modules/shared.py | 2 +- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 7d21fb12..31745006 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -295,7 +295,7 @@ class StableDiffusionProcessing: self.sampler = None self.c = None self.uc = None - if not opts.experimental_persistent_cond_cache: + if not opts.persistent_cond_cache: StableDiffusionProcessing.cached_c = [None, None] StableDiffusionProcessing.cached_uc = [None, None] @@ -319,6 +319,21 @@ class StableDiffusionProcessing: self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) 
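The theme cache in the two patches above (the second one adds the directory that was missing on first use) follows the usual cache-aside shape. A standalone sketch, assuming a fetch callable and using plain JSON in place of Gradio's ThemeClass:

import json
import os

def load_theme_cached(name, cache_dir, fetch_from_hub, use_cache=True):
    # serve the local copy when allowed and present; otherwise fetch,
    # then write the result back so the next start works offline
    path = os.path.join(cache_dir, f"{name.replace('/', '_')}.json")
    if use_cache and os.path.exists(path):
        with open(path, encoding="utf8") as file:
            return json.load(file)

    os.makedirs(cache_dir, exist_ok=True)  # the follow-up fix: dir may not exist yet
    theme = fetch_from_hub(name)
    with open(path, "w", encoding="utf8") as file:
        json.dump(theme, file)
    return theme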
for x in self.all_prompts] self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts] + def cached_params(self, required_prompts, steps, extra_network_data): + """Returns parameters that invalidate the cond cache if changed""" + + return ( + required_prompts, + steps, + opts.CLIP_stop_at_last_layers, + shared.sd_model.sd_checkpoint_info, + extra_network_data, + opts.sdxl_crop_left, + opts.sdxl_crop_top, + self.width, + self.height, + ) + def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data): """ Returns the result of calling function(shared.sd_model, required_prompts, steps) @@ -332,17 +347,7 @@ class StableDiffusionProcessing: caches is a list with items described above. """ - cached_params = ( - required_prompts, - steps, - opts.CLIP_stop_at_last_layers, - shared.sd_model.sd_checkpoint_info, - extra_network_data, - opts.sdxl_crop_left, - opts.sdxl_crop_top, - self.width, - self.height, - ) + cached_params = self.cached_params(required_prompts, steps, extra_network_data) for cache in caches: if cache[0] is not None and cached_params == cache[0]: @@ -1184,7 +1189,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): super().close() self.hr_c = None self.hr_uc = None - if not opts.experimental_persistent_cond_cache: + if not opts.persistent_cond_cache: StableDiffusionProcessingTxt2Img.cached_hr_uc = [None, None] StableDiffusionProcessingTxt2Img.cached_hr_c = [None, None] diff --git a/modules/shared.py b/modules/shared.py index 8e1b8063..078e8135 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -506,7 +506,7 @@ options_templates.update(options_section(('optimizations', "Optimizations"), { "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "experimental_persistent_cond_cache": OptionInfo(False, "persistent cond cache").info("Experimental, keep cond caches across jobs, reduce overhead."), + "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), })) options_templates.update(options_section(('compatibility', "Compatibility"), { -- cgit v1.2.1 From e866c354626e772779a6307488b7b41f9d101399 Mon Sep 17 00:00:00 2001 From: Diego Casorran Date: Sun, 6 Aug 2023 12:25:04 +0000 Subject: add explicit content-type header for image/webp --- modules/ui.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/ui.py b/modules/ui.py index 1af6b4c8..4ffb9b82 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -41,6 +41,9 @@ warnings.filterwarnings("default" if opts.show_gradio_deprecation_warnings else mimetypes.init() mimetypes.add_type('application/javascript', '.js') +# Likewise, add explicit content-type header for certain missing image types +mimetypes.add_type('image/webp', '.webp') + if not cmd_opts.share and not cmd_opts.listen: # fix gradio phoning home gradio.utils.version_check = lambda: None -- cgit v1.2.1 From 
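The persistent cond cache enabled above is a one-slot cache: `cached_params(...)` builds a key tuple out of everything that can change the conds, and the slot lives at class level so it survives across jobs. A toy sketch with a deliberately simplified key:

cached_c = [None, None]  # [key, value]; class-level in the real code

def get_conds_with_caching(function, prompts, steps, cache):
    key = (tuple(prompts), steps)  # the real key also folds in checkpoint, clip skip, size, ...
    if cache[0] is not None and cache[0] == key:
        return cache[1]
    cache[0] = key
    cache[1] = function(prompts, steps)
    return cache[1]

calls = []
get_conds_with_caching(lambda p, s: calls.append(1) or "conds", ["a cat"], 20, cached_c)
get_conds_with_caching(lambda p, s: calls.append(1) or "conds", ["a cat"], 20, cached_c)
assert len(calls) == 1  # the second call is served from the cache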
f1975b0213f5be400889ec04b3891d1cb571fe20 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 6 Aug 2023 17:01:07 +0300 Subject: initial refiner support --- modules/processing.py | 4 ++++ modules/sd_models.py | 18 +++++++++++++++++- modules/sd_samplers_common.py | 19 ++++++++++++++++++- modules/sd_samplers_compvis.py | 12 +++++++++++- modules/sd_samplers_kdiffusion.py | 30 ++++++++++++++++++++++++------ modules/shared.py | 2 ++ 6 files changed, 76 insertions(+), 9 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 31745006..f4748d6d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -666,6 +666,10 @@ def process_images(p: StableDiffusionProcessing) -> Processed: stored_opts = {k: opts.data[k] for k in p.override_settings.keys()} try: + # after running refiner, the refiner model is not unloaded - webui swaps back to main model here + if shared.sd_model.sd_checkpoint_info.title != opts.sd_model_checkpoint: + sd_models.reload_model_weights() + # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint if sd_models.checkpoint_aliases.get(p.override_settings.get('sd_model_checkpoint')) is None: p.override_settings.pop('sd_model_checkpoint', None) diff --git a/modules/sd_models.py b/modules/sd_models.py index f6051604..981aa93d 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -289,11 +289,27 @@ def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer): return res +class SkipWritingToConfig: + """This context manager prevents load_model_weights from writing checkpoint name to the config when it loads weight.""" + + skip = False + previous = None + + def __enter__(self): + self.previous = SkipWritingToConfig.skip + SkipWritingToConfig.skip = True + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + SkipWritingToConfig.skip = self.previous + + def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer): sd_model_hash = checkpoint_info.calculate_shorthash() timer.record("calculate hash") - shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title + if not SkipWritingToConfig.skip: + shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title if state_dict is None: state_dict = get_checkpoint_state_dict(checkpoint_info, timer) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 39586b40..3f3e83e3 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -2,7 +2,7 @@ from collections import namedtuple import numpy as np import torch from PIL import Image -from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared +from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared, sd_models from modules.shared import opts, state SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options']) @@ -127,3 +127,20 @@ def replace_torchsde_browinan(): replace_torchsde_browinan() + + +def apply_refiner(sampler): + completed_ratio = sampler.step / sampler.steps + if completed_ratio > shared.opts.sd_refiner_switch_at and shared.sd_model.sd_checkpoint_info.title != shared.opts.sd_refiner_checkpoint: + refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(shared.opts.sd_refiner_checkpoint) + if refiner_checkpoint_info is None: + raise Exception(f'Could not find checkpoint with name {shared.opts.sd_refiner_checkpoint}') + + with sd_models.SkipWritingToConfig(): + 
sd_models.reload_model_weights(info=refiner_checkpoint_info) + + devices.torch_gc() + + sampler.update_inner_model() + + sampler.p.setup_conds() diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py index 4a8396f9..5df926d3 100644 --- a/modules/sd_samplers_compvis.py +++ b/modules/sd_samplers_compvis.py @@ -19,7 +19,8 @@ samplers_data_compvis = [ class VanillaStableDiffusionSampler: def __init__(self, constructor, sd_model): - self.sampler = constructor(sd_model) + self.p = None + self.sampler = constructor(shared.sd_model) self.is_ddim = hasattr(self.sampler, 'p_sample_ddim') self.is_plms = hasattr(self.sampler, 'p_sample_plms') self.is_unipc = isinstance(self.sampler, modules.models.diffusion.uni_pc.UniPCSampler) @@ -32,6 +33,7 @@ class VanillaStableDiffusionSampler: self.nmask = None self.init_latent = None self.sampler_noises = None + self.steps = None self.step = 0 self.stop_at = None self.eta = None @@ -44,6 +46,7 @@ class VanillaStableDiffusionSampler: return 0 def launch_sampling(self, steps, func): + self.steps = steps state.sampling_steps = steps state.sampling_step = 0 @@ -61,10 +64,15 @@ class VanillaStableDiffusionSampler: return res + def update_inner_model(self): + self.sampler.model = shared.sd_model + def before_sample(self, x, ts, cond, unconditional_conditioning): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException + sd_samplers_common.apply_refiner(self) + if self.stop_at is not None and self.step > self.stop_at: raise sd_samplers_common.InterruptedException @@ -134,6 +142,8 @@ class VanillaStableDiffusionSampler: self.update_step(x) def initialize(self, p): + self.p = p + if self.is_ddim: self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim else: diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index db71a549..be1bd35e 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -2,7 +2,7 @@ from collections import deque import torch import inspect import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra +from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra, sd_models from modules.processing import StableDiffusionProcessing from modules.shared import opts, state @@ -87,15 +87,25 @@ class CFGDenoiser(torch.nn.Module): negative prompt. 
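The switch condition in apply_refiner above reduces to a ratio check plus a guard against switching twice. A rough sketch, assuming 20 steps and sd_refiner_switch_at = 0.8:

def should_switch_to_refiner(step, steps, switch_at, current_title, refiner_title):
    # past the configured fraction of steps, and only if the refiner
    # is not already the loaded checkpoint
    completed_ratio = step / steps
    return completed_ratio > switch_at and current_title != refiner_title

for step in range(20):
    if should_switch_to_refiner(step, 20, 0.8, "base", "refiner"):
        print("switching at step", step)  # fires first at step 17 (17/20 = 0.85)
        break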
""" - def __init__(self, model): + def __init__(self): super().__init__() - self.inner_model = model + self.model_wrap = None self.mask = None self.nmask = None self.init_latent = None + self.steps = None self.step = 0 self.image_cfg_scale = None self.padded_cond_uncond = False + self.p = None + + @property + def inner_model(self): + if self.model_wrap is None: + denoiser = k_diffusion.external.CompVisVDenoiser if shared.sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser + self.model_wrap = denoiser(shared.sd_model, quantize=shared.opts.enable_quantization) + + return self.model_wrap def combine_denoised(self, x_out, conds_list, uncond, cond_scale): denoised_uncond = x_out[-uncond.shape[0]:] @@ -113,10 +123,15 @@ class CFGDenoiser(torch.nn.Module): return denoised + def update_inner_model(self): + self.model_wrap = None + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException + sd_samplers_common.apply_refiner(self) + # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, # so is_edit_model is set to False to support AND composition. is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 @@ -267,13 +282,13 @@ class TorchHijack: class KDiffusionSampler: def __init__(self, funcname, sd_model): - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) + self.p = None self.funcname = funcname self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser(self.model_wrap) + self.model_wrap_cfg = CFGDenoiser() + self.model_wrap = self.model_wrap_cfg.inner_model self.sampler_noises = None self.stop_at = None self.eta = None @@ -305,6 +320,7 @@ class KDiffusionSampler: shared.total_tqdm.update() def launch_sampling(self, steps, func): + self.model_wrap_cfg.steps = steps state.sampling_steps = steps state.sampling_step = 0 @@ -324,6 +340,8 @@ class KDiffusionSampler: return p.steps def initialize(self, p: StableDiffusionProcessing): + self.p = p + self.model_wrap_cfg.p = p self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None self.model_wrap_cfg.step = 0 diff --git a/modules/shared.py b/modules/shared.py index 078e8135..ed8395dc 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -461,6 +461,8 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), + "sd_refiner_checkpoint": OptionInfo(None, "Refiner checkpoint", gr.Dropdown, lambda: {"choices": 
list_checkpoint_tiles()}, refresh=refresh_checkpoints).info("switch to another model in the middle of generation"), + "sd_refiner_switch_at": OptionInfo(1.0, "Refiner switch at", gr.Slider, {"minimum": 0.01, "maximum": 1.0, "step": 0.01}).info("fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { -- cgit v1.2.1 From 956e69bf3a9387b6a6cda6823d729e3d3f13c3e1 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 6 Aug 2023 17:07:08 +0300 Subject: lint! --- modules/sd_samplers_kdiffusion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index be1bd35e..3aee4e3a 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -2,7 +2,7 @@ from collections import deque import torch import inspect import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra, sd_models +from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra from modules.processing import StableDiffusionProcessing from modules.shared import opts, state -- cgit v1.2.1 From 5a0db84b6c7322082c7532df11a29a95a59a612b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 6 Aug 2023 17:53:33 +0300 Subject: add infotext add proper support for recalculating conds in k-diffusion samplers remove support for compvis samplers --- modules/generation_parameters_copypaste.py | 2 ++ modules/processing.py | 10 ++++++++++ modules/sd_samplers_common.py | 29 ++++++++++++++++++++--------- modules/sd_samplers_compvis.py | 2 -- modules/sd_samplers_kdiffusion.py | 24 ++++++++++++++++-------- 5 files changed, 48 insertions(+), 19 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index e71c9601..6711ca16 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -344,6 +344,8 @@ infotext_to_setting_name_mapping = [ ('Pad conds', 'pad_cond_uncond'), ('VAE Encoder', 'sd_vae_encode_method'), ('VAE Decoder', 'sd_vae_decode_method'), + ('Refiner', 'sd_refiner_checkpoint'), + ('Refiner switch at', 'sd_refiner_switch_at'), ] diff --git a/modules/processing.py b/modules/processing.py index f4748d6d..ec66fd8e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -370,6 +370,9 @@ class StableDiffusionProcessing: self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data) self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, self.steps * self.step_multiplier, [self.cached_c], self.extra_network_data) + def get_conds(self): + return self.c, self.uc + def parse_extra_network_prompts(self): self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts) @@ -1251,6 +1254,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): with devices.autocast(): extra_networks.activate(self, self.extra_network_data) + def get_conds(self): + if self.is_hr_pass: + return self.hr_c, self.hr_uc + + return super().get_conds() + + def parse_extra_network_prompts(self): res = super().parse_extra_network_prompts() diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index
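The infotext mappings added above are what let a pasted line like `Refiner switch at: 0.8` land in the matching setting override instead of being dropped. Schematically, with a much simplified parser:

infotext_to_setting_name_mapping = [
    ('Sigma churn', 's_churn'),
    ('Refiner', 'sd_refiner_checkpoint'),
    ('Refiner switch at', 'sd_refiner_switch_at'),
]

def overrides_from_infotext(params):
    # anything written to extra_generation_params during generation comes
    # back through this table when the infotext is pasted
    return {setting: params[name] for name, setting in infotext_to_setting_name_mapping if name in params}

print(overrides_from_infotext({'Refiner switch at': '0.8'}))
# -> {'sd_refiner_switch_at': '0.8'}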
3f3e83e3..92bf0ca1 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -131,16 +131,27 @@ replace_torchsde_browinan() def apply_refiner(sampler): completed_ratio = sampler.step / sampler.steps - if completed_ratio > shared.opts.sd_refiner_switch_at and shared.sd_model.sd_checkpoint_info.title != shared.opts.sd_refiner_checkpoint: - refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(shared.opts.sd_refiner_checkpoint) - if refiner_checkpoint_info is None: - raise Exception(f'Could not find checkpoint with name {shared.opts.sd_refiner_checkpoint}') - with sd_models.SkipWritingToConfig(): - sd_models.reload_model_weights(info=refiner_checkpoint_info) + if completed_ratio <= shared.opts.sd_refiner_switch_at: + return False + + if shared.sd_model.sd_checkpoint_info.title == shared.opts.sd_refiner_checkpoint: + return False + + refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(shared.opts.sd_refiner_checkpoint) + if refiner_checkpoint_info is None: + raise Exception(f'Could not find checkpoint with name {shared.opts.sd_refiner_checkpoint}') + + sampler.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title + sampler.p.extra_generation_params['Refiner switch at'] = shared.opts.sd_refiner_switch_at + + with sd_models.SkipWritingToConfig(): + sd_models.reload_model_weights(info=refiner_checkpoint_info) + + devices.torch_gc() + sampler.p.setup_conds() + sampler.update_inner_model() - devices.torch_gc() + return True - sampler.update_inner_model() - sampler.p.setup_conds() diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py index 5df926d3..2eeec18a 100644 --- a/modules/sd_samplers_compvis.py +++ b/modules/sd_samplers_compvis.py @@ -71,8 +71,6 @@ class VanillaStableDiffusionSampler: if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException - sd_samplers_common.apply_refiner(self) - if self.stop_at is not None and self.step > self.stop_at: raise sd_samplers_common.InterruptedException diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 3aee4e3a..46da0a97 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -87,8 +87,9 @@ class CFGDenoiser(torch.nn.Module): negative prompt. """ - def __init__(self): + def __init__(self, sampler): super().__init__() + self.sampler = sampler self.model_wrap = None self.mask = None self.nmask = None @@ -126,11 +127,17 @@ class CFGDenoiser(torch.nn.Module): def update_inner_model(self): self.model_wrap = None + c, uc = self.p.get_conds() + self.sampler.sampler_extra_args['cond'] = c + self.sampler.sampler_extra_args['uncond'] = uc + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException - sd_samplers_common.apply_refiner(self) + if sd_samplers_common.apply_refiner(self): + cond = self.sampler.sampler_extra_args['cond'] + uncond = self.sampler.sampler_extra_args['uncond'] # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, # so is_edit_model is set to False to support AND composition. 
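The reworked apply_refiner now returns True so the caller can pull the freshly computed conds back out of sampler_extra_args; this works because the k-diffusion loop keeps a reference to that same dict across steps. A toy demonstration of the mechanism:

def sampling_loop(denoise, steps, extra_args):
    for step in range(steps):
        denoise(step, **extra_args)  # kwargs are re-read from the dict every step

def make_denoiser(extra_args):
    def denoise(step, cond, uncond):
        print(step, cond, uncond)
        if step == 1:  # pretend the refiner kicks in here
            extra_args['cond'] = 'refiner cond'
            extra_args['uncond'] = 'refiner uncond'
    return denoise

args = {'cond': 'base cond', 'uncond': 'base uncond'}
sampling_loop(make_denoiser(args), 3, args)
# steps 0 and 1 still see the base conds; step 2 already sees the refiner conds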
@@ -282,12 +289,12 @@ class TorchHijack: class KDiffusionSampler: def __init__(self, funcname, sd_model): - self.p = None self.funcname = funcname self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser() + self.sampler_extra_args = {} + self.model_wrap_cfg = CFGDenoiser(self) self.model_wrap = self.model_wrap_cfg.inner_model self.sampler_noises = None self.stop_at = None @@ -476,7 +483,7 @@ class KDiffusionSampler: self.model_wrap_cfg.init_latent = x self.last_latent = x - extra_args = { + self.sampler_extra_args = { 'cond': conditioning, 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, @@ -484,7 +491,7 @@ class KDiffusionSampler: 's_min_uncond': self.s_min_uncond } - samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) if self.model_wrap_cfg.padded_cond_uncond: p.extra_generation_params["Pad conds"] = True @@ -514,13 +521,14 @@ class KDiffusionSampler: extra_params_kwargs['noise_sampler'] = noise_sampler self.last_latent = x - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ + self.sampler_extra_args = { 'cond': conditioning, 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, 's_min_uncond': self.s_min_uncond - }, disable=False, callback=self.callback_state, **extra_params_kwargs)) + } + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) if self.model_wrap_cfg.padded_cond_uncond: p.extra_generation_params["Pad conds"] = True -- cgit v1.2.1 From 976963ab6dc46141cceba9a007546c53f35e033a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 6 Aug 2023 12:30:23 -0400 Subject: Clean up k-diffusion sigma params --- modules/shared.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 078e8135..57e9158e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -609,13 +609,13 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}), 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}), 
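For context on what these four sigma parameters do: inside k-diffusion's Euler/Heun/DPM2 loops they control a per-step noise injection, roughly as below (a paraphrase from memory, simplified, not the library's exact code):

import torch

def churned_sigma(x, sigmas, i, s_churn=0.0, s_tmin=0.0, s_tmax=float('inf'), s_noise=1.0):
    # within [s_tmin, s_tmax], temporarily raise the current sigma and add
    # matching noise scaled by s_noise; gamma == 0 keeps the step deterministic
    gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
    sigma_hat = sigmas[i] * (gamma + 1)
    if gamma > 0:
        x = x + torch.randn_like(x) * s_noise * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
    return x, sigma_hat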
'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), - 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise schedule"), - 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a more steep noise schedule (decreases faster)"), + 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), + 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), -- cgit v1.2.1 From 7bcfb4654f677801602c80c0823eb0ad11f5b4b6 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 6 Aug 2023 12:41:21 -0400 Subject: Add info to k-diffusion sigma params --- modules/shared.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 57e9158e..f0fb9dc7 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -608,10 +608,10 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unpredictable results"), "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}), - 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}), + 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), + 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), + 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to
Euler, Heun, and DPM2"), + 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), -- cgit v1.2.1 From c96e4750d895a47290dc7f96e030197069c75fa4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 7 Aug 2023 08:07:09 +0300 Subject: SD VAE rework 2 - the setting for preferring opts.sd_vae has been inverted and reworded - resolve_vae function made easier to read and now returns an object rather than a tuple - if the checkbox for overriding per-model preferences is checked, opts.sd_vae overrides checkpoint user metadata - changing VAE in user metadata for currently loaded model immediately applies the selection --- modules/sd_models.py | 2 +- modules/sd_vae.py | 71 +++++++++++++++++----- modules/shared.py | 6 +- .../ui_extra_networks_checkpoints_user_metadata.py | 8 ++- webui.py | 2 +- 5 files changed, 69 insertions(+), 20 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index f6051604..d65735e3 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -356,7 +356,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer sd_vae.delete_base_vae() sd_vae.clear_loaded_vae() - vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename) + vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename).tuple() sd_vae.load_vae(model, vae_file, vae_source) timer.record("load VAE") diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 0bd5e19b..38bcb840 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,5 +1,7 @@ import os import collections +from dataclasses import dataclass + from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks import glob from copy import deepcopy @@ -97,37 +99,74 @@ def find_vae_near_checkpoint(checkpoint_file): return None -def resolve_vae(checkpoint_file): - if shared.cmd_opts.vae_path is not None: - return shared.cmd_opts.vae_path, 'from commandline argument' +@dataclass +class VaeResolution: + vae: str = None + source: str = None + resolved: bool = True + + def tuple(self): + return self.vae, self.source + + +def is_automatic(): + return shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config + + +def resolve_vae_from_setting() -> VaeResolution: + if shared.opts.sd_vae == "None": + return VaeResolution() + + vae_from_options = vae_dict.get(shared.opts.sd_vae, None) + if vae_from_options is not None: + return VaeResolution(vae_from_options, 'specified in settings') + + if not is_automatic(): + print(f"Couldn't find VAE named {shared.opts.sd_vae}; using None instead") + return VaeResolution(resolved=False) + + +def resolve_vae_from_user_metadata(checkpoint_file) -> VaeResolution: metadata = extra_networks.get_user_metadata(checkpoint_file) vae_metadata = metadata.get("vae", None) if vae_metadata is not None and 
vae_metadata != "Automatic": if vae_metadata == "None": - return None, None + return VaeResolution() vae_from_metadata = vae_dict.get(vae_metadata, None) if vae_from_metadata is not None: - return vae_from_metadata, "from user metadata" + return VaeResolution(vae_from_metadata, "from user metadata") + + return VaeResolution(resolved=False) - is_automatic = shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config +def resolve_vae_near_checkpoint(checkpoint_file) -> VaeResolution: vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file) if vae_near_checkpoint is not None and (shared.opts.sd_vae_as_default or is_automatic()): - return vae_near_checkpoint, 'found near the checkpoint' + return VaeResolution(vae_near_checkpoint, 'found near the checkpoint') - if shared.opts.sd_vae == "None": - return None, None + return VaeResolution(resolved=False) - vae_from_options = vae_dict.get(shared.opts.sd_vae, None) - if vae_from_options is not None: - return vae_from_options, 'specified in settings' + +def resolve_vae(checkpoint_file) -> VaeResolution: + if shared.cmd_opts.vae_path is not None: + return VaeResolution(shared.cmd_opts.vae_path, 'from commandline argument') + + if shared.opts.sd_vae_overrides_per_model_preferences and not is_automatic(): + return resolve_vae_from_setting() + + res = resolve_vae_from_user_metadata(checkpoint_file) + if res.resolved: + return res + + res = resolve_vae_near_checkpoint(checkpoint_file) + if res.resolved: + return res + + res = resolve_vae_from_setting() - if not is_automatic: - print(f"Couldn't find VAE named {shared.opts.sd_vae}; using None instead") + return res def load_vae_dict(filename, map_location): @@ -201,7 +240,7 @@ def reload_vae_weights(sd_model=None, vae_file=unspecified): checkpoint_file = checkpoint_info.filename if vae_file == unspecified: - vae_file, vae_source = resolve_vae(checkpoint_file) + vae_file, vae_source = resolve_vae(checkpoint_file).tuple() else: vae_source = "from function argument" diff --git a/modules/shared.py b/modules/shared.py index 078e8135..da53f2d9 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -479,7 +479,7 @@ For img2img, VAE is used to process user's input image before the sampling, and """), "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), - "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), + "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have the same name as the checkpoint"), "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full",
"TAESD"]}).info("method to decode latent to image"), @@ -733,6 +733,10 @@ class Options: with open(filename, "r", encoding="utf8") as file: self.data = json.load(file) + # 1.6.0 VAE defaults + if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: + self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') + # 1.1.1 quicksettings list migration if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] diff --git a/modules/ui_extra_networks_checkpoints_user_metadata.py b/modules/ui_extra_networks_checkpoints_user_metadata.py index 2c69aab8..25df0a80 100644 --- a/modules/ui_extra_networks_checkpoints_user_metadata.py +++ b/modules/ui_extra_networks_checkpoints_user_metadata.py @@ -1,6 +1,6 @@ import gradio as gr -from modules import ui_extra_networks_user_metadata, sd_vae +from modules import ui_extra_networks_user_metadata, sd_vae, shared from modules.ui_common import create_refresh_button @@ -18,6 +18,10 @@ class CheckpointUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataE self.write_user_metadata(name, user_metadata) + def update_vae(self, name): + if name == shared.sd_model.sd_checkpoint_info.name_for_extra: + sd_vae.reload_vae_weights() + def put_values_into_components(self, name): user_metadata = self.get_user_metadata(name) values = super().put_values_into_components(name) @@ -58,3 +62,5 @@ class CheckpointUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataE ] self.setup_save_handler(self.button_save, self.save_user_metadata, edited_components) + self.button_save.click(fn=self.update_vae, inputs=[self.edit_name_input]) + diff --git a/webui.py b/webui.py index 1803ea8a..a5b11575 100644 --- a/webui.py +++ b/webui.py @@ -211,7 +211,7 @@ def configure_sigint_handler(): def configure_opts_onchange(): shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()), call=False) shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) - shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) + shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed) shared.opts.onchange("gradio_theme", shared.reload_gradio_theme) shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: modules.sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False) -- cgit v1.2.1 From 6e7828e1d271c644840047c3db60e669a232402a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 7 Aug 2023 08:16:20 +0300 Subject: apply unet overrides after switching model --- modules/sd_models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index d65735e3..53c1df54 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -699,6 +699,7 @@ def reload_model_weights(sd_model=None, info=None): print(f"Weights loaded in {timer.summary()}.") model_data.set_sd_model(sd_model) + sd_unet.apply_unet() return sd_model -- cgit v1.2.1 From 0ea20a0d526a531f3d329b62625900a4a18f364e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 7 Aug 2023 08:38:18 +0300 Subject: rework 
#12230 to not have duplicate code --- modules/launch_utils.py | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 7225af08..5be30a18 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -145,6 +145,21 @@ def git_fix_workspace(dir, name): return +def run_git(dir, name, command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live, autofix=True): + try: + return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live) + except RuntimeError: + pass + + if not autofix: + return None + + print(f"{errdesc}, attempting autofix...") + git_fix_workspace(dir, name) + + return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live) + + def git_clone(url, dir, name, commithash=None): # TODO clone into temporary dir and move if successful if commithash is None: return - try: - current_hash = subprocess.check_output([git, "-C", dir, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() - if current_hash == commithash: - return - except Exception: - print(f"Unable to determine {name}'s hash, attempting autofix...") - git_fix_workspace(dir, name) - current_hash = subprocess.check_output([git, "-C", dir, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip() - if current_hash == commithash: - return + current_hash = run_git(dir, name, 'rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}", live=False).strip() + if current_hash == commithash: + return - run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") + run_git(dir, name, 'fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") - try: - run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) - except RuntimeError: - print(f"Unable to checkout {name} with hash {commithash}, attempting autofix...") - git_fix_workspace(dir, name) - run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) + run_git(dir, name, f'checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) return -- cgit v1.2.1 From 7d8f55ec7c2d875254e63d92131ecc555378257e Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Mon, 7 Aug 2023 01:45:10 -0400 Subject: Remove style method --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 4ffb9b82..5150dae4 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -408,7 +408,7 @@ def create_ui(): extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs") extra_tabs.__enter__() - with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row().style(equal_height=False): + with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row(equal_height=False): with gr.Column(variant='compact', elem_id="txt2img_settings"): scripts.scripts_txt2img.prepare_ui() -- cgit v1.2.1 From 4c72377bbf227276914c4012b339f0b3da8b366b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 7 Aug 2023 09:42:13 +0300 Subject: Options in main UI
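run_git above wraps every git call in the same try-once, autofix, try-again shape. Distilled to its skeleton (schematic, not the real helper):

def run_with_autofix(attempt, fix, autofix=True):
    # first try; on failure optionally repair the workspace and try once more,
    # letting a second failure propagate to the caller
    try:
        return attempt()
    except RuntimeError:
        pass
    if not autofix:
        return None
    fix()  # e.g. git_fix_workspace(dir, name)
    return attempt()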
From 4c72377bbf227276914c4012b339f0b3da8b366b Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 7 Aug 2023 09:42:13 +0300
Subject: Options in main UI update

- correctly read values from pasted infotext
- setting for column count
- infotext paste: do not add a field to override settings if some other
  component is already handling it
---
 .../scripts/extra_options_section.py              | 39 +++++++++++++++++-----
 modules/generation_parameters_copypaste.py        |  5 +++
 modules/shared.py                                 |  2 +-
 3 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index 7bb0a1bb..d5c29bf2 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -1,5 +1,7 @@
+import math
+
 import gradio as gr
-from modules import scripts, shared, ui_components, ui_settings
+from modules import scripts, shared, ui_components, ui_settings, generation_parameters_copypaste
 from modules.ui_components import FormColumn
 
 
@@ -19,15 +21,33 @@ class ExtraOptionsSection(scripts.Script):
     def ui(self, is_img2img):
         self.comps = []
         self.setting_names = []
+        self.infotext_fields = []
+
+        mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping}
 
         with gr.Blocks() as interface:
-            with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group(), gr.Row():
-                for setting_name in shared.opts.extra_options:
-                    with FormColumn():
-                        comp = ui_settings.create_setting_component(setting_name)
+            with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group():
+
+                row_count = math.ceil(len(shared.opts.extra_options) / shared.opts.extra_options_cols)
+
+                for row in range(row_count):
+                    with gr.Row():
+                        for col in range(shared.opts.extra_options_cols):
+                            index = row * shared.opts.extra_options_cols + col
+                            if index >= len(shared.opts.extra_options):
+                                break
+
+                            setting_name = shared.opts.extra_options[index]
 
-                        self.comps.append(comp)
-                        self.setting_names.append(setting_name)
+                            with FormColumn():
+                                comp = ui_settings.create_setting_component(setting_name)
+
+                            self.comps.append(comp)
+                            self.setting_names.append(setting_name)
+
+                            setting_infotext_name = mapping.get(setting_name)
+                            if setting_infotext_name is not None:
+                                self.infotext_fields.append((comp, setting_infotext_name))
 
         def get_settings_values():
             return [ui_settings.get_value_for_setting(key) for key in self.setting_names]
@@ -44,5 +64,8 @@ class ExtraOptionsSection(scripts.Script):
 
 shared.options_templates.update(shared.options_section(('ui', "User interface"), {
     "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_reload_ui(),
-    "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion").needs_restart()
+    "extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(),
+    "extra_options_accordion": shared.OptionInfo(False, "Options in main UI - place into an accordion").needs_reload_ui()
 }))
+
+
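A standalone sketch of the column arithmetic the new loop above uses: math.ceil fixes the row count, and row * cols + col indexes into the flat option list (option names below are made up):

    import math

    options = ["opt_a", "opt_b", "opt_c", "opt_d", "opt_e"]  # stands in for shared.opts.extra_options
    cols = 2                                                 # stands in for shared.opts.extra_options_cols

    rows = math.ceil(len(options) / cols)  # 3
    grid = [[options[row * cols + col]
             for col in range(cols) if row * cols + col < len(options)]
            for row in range(rows)]

    print(grid)  # [['opt_a', 'opt_b'], ['opt_c', 'opt_d'], ['opt_e']]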
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index e71c9601..5758e6f3 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -414,10 +414,15 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
         return res
 
     if override_settings_component is not None:
+        already_handled_fields = {key: 1 for _, key in paste_fields}
+
         def paste_settings(params):
             vals = {}
 
             for param_name, setting_name in infotext_to_setting_name_mapping:
+                if param_name in already_handled_fields:
+                    continue
+
                 v = params.get(param_name, None)
                 if v is None:
                     continue
diff --git a/modules/shared.py b/modules/shared.py
index 115e5276..4d854928 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -612,7 +612,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
     's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'),
     's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"),
     's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'),
-    'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
+    'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
     'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"),
     'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"),
     'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"),
-- cgit v1.2.1


From 250a95b6fecc59385a7b370f0ec63bd4e45f4ca8 Mon Sep 17 00:00:00 2001
From: 王怀宗
Date: Mon, 7 Aug 2023 18:08:07 +0800
Subject: fix: enable before_ui_callback when api only mode (fixes #7984)

---
 webui.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/webui.py b/webui.py
index a5b11575..86a62a92 100644
--- a/webui.py
+++ b/webui.py
@@ -341,6 +341,7 @@ def api_only():
     setup_middleware(app)
     api = create_api(app)
 
+    modules.script_callbacks.before_ui_callback()
     modules.script_callbacks.app_started_callback(None, app)
 
     print(f"Startup time: {startup_timer.summary()}.")
-- cgit v1.2.1


From aea0fa9fd52daa1a4b3de7c7124257cf7f3e5291 Mon Sep 17 00:00:00 2001
From: Diego Casorran
Date: Mon, 7 Aug 2023 14:53:42 +0200
Subject: Allow to open images in new browser tab by MMB.

Signed-off-by: Diego Casorran
---
 javascript/imageviewer.js | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 677e95c1..c21d396e 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -136,6 +136,11 @@ function setupImageForLightbox(e) {
     var event = isFirefox ?
'mousedown' : 'click'; e.addEventListener(event, function(evt) { + if (evt.button == 1) { + open(evt.target.src); + evt.preventDefault(); + return; + } if (!opts.js_modal_lightbox || evt.button != 0) return; modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed); -- cgit v1.2.1 From 01997f45ba089af24b03a5f614147bb0f9d8d824 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 7 Aug 2023 18:49:23 +0300 Subject: fix extra_options_section misbehaving when there's just one extra_options element --- .../extra-options-section/scripts/extra_options_section.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py index d5c29bf2..588b64d2 100644 --- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py +++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py @@ -50,7 +50,8 @@ class ExtraOptionsSection(scripts.Script): self.infotext_fields.append((comp, setting_infotext_name)) def get_settings_values(): - return [ui_settings.get_value_for_setting(key) for key in self.setting_names] + res = [ui_settings.get_value_for_setting(key) for key in self.setting_names] + return res[0] if len(res) == 1 else res interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False) -- cgit v1.2.1 From b0f7f4a99125b55fed8acabb458fac6299e98fb1 Mon Sep 17 00:00:00 2001 From: Olivier Lacan Date: Mon, 7 Aug 2023 12:42:46 -0700 Subject: Pin fastapi to > 0.90.1 to fix crash See https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/11642#issuecomment-1643298659 This resolves a crashing bug for me on Python 3.10 and it appears to do so as well for others. 
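The constraint the patch adds below is fastapi>=0.90.1. As a hedged aside (not part of the patch), one way to confirm an environment already satisfies that floor, assuming only fastapi.__version__ and the third-party packaging helper:

    import fastapi
    from packaging.version import Version  # hypothetical check, not from the repository

    if Version(fastapi.__version__) < Version("0.90.1"):
        raise RuntimeError(f"fastapi {fastapi.__version__} is older than 0.90.1; upgrade to avoid the startup crash")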
--- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 9a47d6d0..d83092f0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,7 @@ basicsr blendmodes clean-fid einops +fastapi>=0.90.1 gfpgan gradio==3.39.0 inflection -- cgit v1.2.1 From 8c200c21564992b7af1d2d692524051158e533db Mon Sep 17 00:00:00 2001 From: Uminosachi <49424133+Uminosachi@users.noreply.github.com> Date: Tue, 8 Aug 2023 10:48:03 +0900 Subject: Fix mismatch between shared.sd_model & shared.opts --- modules/sd_models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index 53c1df54..cded27d4 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -623,6 +623,8 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer): timer.record("send model to device") model_data.set_sd_model(already_loaded) + shared.opts.data["sd_model_checkpoint"] = already_loaded.sd_checkpoint_info.title + shared.opts.data["sd_checkpoint_hash"] = already_loaded.sd_checkpoint_info.sha256 print(f"Using already loaded model {already_loaded.sd_checkpoint_info.title}: done in {timer.summary()}") return model_data.sd_model elif shared.opts.sd_checkpoints_limit > 1 and len(model_data.loaded_sd_models) < shared.opts.sd_checkpoints_limit: -- cgit v1.2.1 From c75bda867be5345bf959daf23bdc19eadc90841a Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 8 Aug 2023 11:22:35 +0900 Subject: setting: Automatically open webui in browser on startup --- modules/shared.py | 1 + webui.py | 15 +++++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index aa72c9c8..5a7be85b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -385,6 +385,7 @@ options_templates.update(options_section(('face-restoration', "Face restoration" })) options_templates.update(options_section(('system', "System"), { + "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), "show_warnings": OptionInfo(False, "Show warnings in console."), "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), diff --git a/webui.py b/webui.py index 2dc4f1aa..844e2548 100644 --- a/webui.py +++ b/webui.py @@ -398,6 +398,13 @@ def webui(): gradio_auth_creds = list(get_gradio_auth_creds()) or None + auto_launch_browser = False + if os.getenv('SD_WEBUI_RESTARTING') != '1': + if shared.opts.auto_launch_browser == "Remote" or cmd_opts.autolaunch: + auto_launch_browser = True + elif shared.opts.auto_launch_browser == "Local": + auto_launch_browser = not any([cmd_opts.listen, cmd_opts.share, cmd_opts.ngrok]) + app, local_url, share_url = shared.demo.launch( share=cmd_opts.share, server_name=server_name, @@ -407,7 +414,7 @@ def webui(): ssl_verify=cmd_opts.disable_tls_verify, debug=cmd_opts.gradio_debug, auth=gradio_auth_creds, - inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING') != '1', + inbrowser=auto_launch_browser, prevent_thread_lock=True, allowed_paths=cmd_opts.gradio_allowed_path, app_kwargs={ @@ -417,9 +424,6 @@ def webui(): root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else "", ) - # after initial launch, disable --autolaunch for subsequent restarts - 
cmd_opts.autolaunch = False - startup_timer.record("gradio launch") # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for @@ -464,6 +468,9 @@ def webui(): shared.demo.close() break + # disable auto launch webui in browser for subsequent UI Reload + os.environ.setdefault('SD_WEBUI_RESTARTING', '1') + print('Restarting UI...') shared.demo.close() time.sleep(0.5) -- cgit v1.2.1 From 7e88f57aaa923eabfa6e99b6a283e69d65b12e2b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 8 Aug 2023 18:32:17 +0300 Subject: Split history: mv modules/sd_samplers_kdiffusion.py modules/sd_samplers_cfg_denoiser.py --- modules/sd_samplers_cfg_denoiser.py | 511 ++++++++++++++++++++++++++++++++++++ modules/sd_samplers_kdiffusion.py | 511 ------------------------------------ 2 files changed, 511 insertions(+), 511 deletions(-) create mode 100644 modules/sd_samplers_cfg_denoiser.py delete mode 100644 modules/sd_samplers_kdiffusion.py diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py new file mode 100644 index 00000000..db71a549 --- /dev/null +++ b/modules/sd_samplers_cfg_denoiser.py @@ -0,0 +1,511 @@ +from collections import deque +import torch +import inspect +import k_diffusion.sampling +from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra + +from modules.processing import StableDiffusionProcessing +from modules.shared import opts, state +import modules.shared as shared +from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback +from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback +from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback + +samplers_k_diffusion = [ + ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), + ('Euler', 'sample_euler', ['k_euler'], {}), + ('LMS', 'sample_lms', ['k_lms'], {}), + ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), + ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), + ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), + ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), + ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), + ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), + ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), + ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), + ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), + ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), + ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 
'karras', "brownian_noise": True}), + ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), + ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}), +] + + +samplers_data_k_diffusion = [ + sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) + for label, funcname, aliases, options in samplers_k_diffusion + if callable(funcname) or hasattr(k_diffusion.sampling, funcname) +] + +sampler_extra_params = { + 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], +} + +k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} +k_diffusion_scheduler = { + 'Automatic': None, + 'karras': k_diffusion.sampling.get_sigmas_karras, + 'exponential': k_diffusion.sampling.get_sigmas_exponential, + 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential +} + + +def catenate_conds(conds): + if not isinstance(conds[0], dict): + return torch.cat(conds) + + return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} + + +def subscript_cond(cond, a, b): + if not isinstance(cond, dict): + return cond[a:b] + + return {key: vec[a:b] for key, vec in cond.items()} + + +def pad_cond(tensor, repeats, empty): + if not isinstance(tensor, dict): + return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) + + tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) + return tensor + + +class CFGDenoiser(torch.nn.Module): + """ + Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) + that can take a noisy picture and produce a noise-free picture using two guidances (prompts) + instead of one. Originally, the second prompt is just an empty string, but we use non-empty + negative prompt. + """ + + def __init__(self, model): + super().__init__() + self.inner_model = model + self.mask = None + self.nmask = None + self.init_latent = None + self.step = 0 + self.image_cfg_scale = None + self.padded_cond_uncond = False + + def combine_denoised(self, x_out, conds_list, uncond, cond_scale): + denoised_uncond = x_out[-uncond.shape[0]:] + denoised = torch.clone(denoised_uncond) + + for i, conds in enumerate(conds_list): + for cond_index, weight in conds: + denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) + + return denoised + + def combine_denoised_for_edit_model(self, x_out, cond_scale): + out_cond, out_img_cond, out_uncond = x_out.chunk(3) + denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) + + return denoised + + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): + if state.interrupted or state.skipped: + raise sd_samplers_common.InterruptedException + + # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, + # so is_edit_model is set to False to support AND composition. 
+ is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 + + conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) + uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) + + assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + + batch_size = len(conds_list) + repeats = [len(conds_list[i]) for i in range(batch_size)] + + if shared.sd_model.model.conditioning_key == "crossattn-adm": + image_uncond = torch.zeros_like(image_cond) + make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} + else: + image_uncond = image_cond + if isinstance(uncond, dict): + make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} + else: + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} + + if not is_edit_model: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) + else: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) + + denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) + cfg_denoiser_callback(denoiser_params) + x_in = denoiser_params.x + image_cond_in = denoiser_params.image_cond + sigma_in = denoiser_params.sigma + tensor = denoiser_params.text_cond + uncond = denoiser_params.text_uncond + skip_uncond = False + + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: + skip_uncond = True + x_in = x_in[:-batch_size] + sigma_in = sigma_in[:-batch_size] + + self.padded_cond_uncond = False + if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: + empty = shared.sd_model.cond_stage_model_empty_prompt + num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] + + if num_repeats < 0: + tensor = pad_cond(tensor, -num_repeats, empty) + self.padded_cond_uncond = True + elif num_repeats > 0: + uncond = pad_cond(uncond, num_repeats, empty) + self.padded_cond_uncond = True + + if tensor.shape[1] == uncond.shape[1] or skip_uncond: + if is_edit_model: + cond_in = catenate_conds([tensor, uncond, uncond]) + elif skip_uncond: + cond_in = tensor + else: + cond_in = catenate_conds([tensor, uncond]) + + if shared.batch_cond_uncond: + x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) + else: + x_out = torch.zeros_like(x_in) + for batch_offset in range(0, x_out.shape[0], batch_size): + a = batch_offset + b = a + batch_size + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) + else: + x_out = 
torch.zeros_like(x_in) + batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size + for batch_offset in range(0, tensor.shape[0], batch_size): + a = batch_offset + b = min(a + batch_size, tensor.shape[0]) + + if not is_edit_model: + c_crossattn = subscript_cond(tensor, a, b) + else: + c_crossattn = torch.cat([tensor[a:b]], uncond) + + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) + + if not skip_uncond: + x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) + + denoised_image_indexes = [x[0][0] for x in conds_list] + if skip_uncond: + fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) + x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be + + denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) + cfg_denoised_callback(denoised_params) + + devices.test_for_nans(x_out, "unet") + + if opts.live_preview_content == "Prompt": + sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) + elif opts.live_preview_content == "Negative prompt": + sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) + + if is_edit_model: + denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) + elif skip_uncond: + denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) + else: + denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + + if self.mask is not None: + denoised = self.init_latent * self.mask + self.nmask * denoised + + after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) + cfg_after_cfg_callback(after_cfg_callback_params) + denoised = after_cfg_callback_params.x + + self.step += 1 + return denoised + + +class TorchHijack: + def __init__(self, sampler_noises): + # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based + # implementation. + self.sampler_noises = deque(sampler_noises) + + def __getattr__(self, item): + if item == 'randn_like': + return self.randn_like + + if hasattr(torch, item): + return getattr(torch, item) + + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") + + def randn_like(self, x): + if self.sampler_noises: + noise = self.sampler_noises.popleft() + if noise.shape == x.shape: + return noise + + return devices.randn_like(x) + + +class KDiffusionSampler: + def __init__(self, funcname, sd_model): + denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser + + self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) + self.funcname = funcname + self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) + self.extra_params = sampler_extra_params.get(funcname, []) + self.model_wrap_cfg = CFGDenoiser(self.model_wrap) + self.sampler_noises = None + self.stop_at = None + self.eta = None + self.config = None # set by the function calling the constructor + self.last_latent = None + self.s_min_uncond = None + + # NOTE: These are also defined in the StableDiffusionProcessing class. + # They should have been here to begin with but we're going to + # leave that class __init__ signature alone. 
+ self.s_churn = 0.0 + self.s_tmin = 0.0 + self.s_tmax = float('inf') + self.s_noise = 1.0 + + self.conditioning_key = sd_model.model.conditioning_key + + def callback_state(self, d): + step = d['i'] + latent = d["denoised"] + if opts.live_preview_content == "Combined": + sd_samplers_common.store_latent(latent) + self.last_latent = latent + + if self.stop_at is not None and step > self.stop_at: + raise sd_samplers_common.InterruptedException + + state.sampling_step = step + shared.total_tqdm.update() + + def launch_sampling(self, steps, func): + state.sampling_steps = steps + state.sampling_step = 0 + + try: + return func() + except RecursionError: + print( + 'Encountered RecursionError during sampling, returning last latent. ' + 'rho >5 with a polyexponential scheduler may cause this error. ' + 'You should try to use a smaller rho value instead.' + ) + return self.last_latent + except sd_samplers_common.InterruptedException: + return self.last_latent + + def number_of_needed_noises(self, p): + return p.steps + + def initialize(self, p: StableDiffusionProcessing): + self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None + self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None + self.model_wrap_cfg.step = 0 + self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) + self.eta = p.eta if p.eta is not None else opts.eta_ancestral + self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) + + k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) + + extra_params_kwargs = {} + for param_name in self.extra_params: + if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: + extra_params_kwargs[param_name] = getattr(p, param_name) + + if 'eta' in inspect.signature(self.func).parameters: + if self.eta != 1.0: + p.extra_generation_params["Eta"] = self.eta + + extra_params_kwargs['eta'] = self.eta + + if len(self.extra_params) > 0: + s_churn = getattr(opts, 's_churn', p.s_churn) + s_tmin = getattr(opts, 's_tmin', p.s_tmin) + s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf + s_noise = getattr(opts, 's_noise', p.s_noise) + + if s_churn != self.s_churn: + extra_params_kwargs['s_churn'] = s_churn + p.s_churn = s_churn + p.extra_generation_params['Sigma churn'] = s_churn + if s_tmin != self.s_tmin: + extra_params_kwargs['s_tmin'] = s_tmin + p.s_tmin = s_tmin + p.extra_generation_params['Sigma tmin'] = s_tmin + if s_tmax != self.s_tmax: + extra_params_kwargs['s_tmax'] = s_tmax + p.s_tmax = s_tmax + p.extra_generation_params['Sigma tmax'] = s_tmax + if s_noise != self.s_noise: + extra_params_kwargs['s_noise'] = s_noise + p.s_noise = s_noise + p.extra_generation_params['Sigma noise'] = s_noise + + return extra_params_kwargs + + def get_sigmas(self, p, steps): + discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) + if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: + discard_next_to_last_sigma = True + p.extra_generation_params["Discard penultimate sigma"] = True + + steps += 1 if discard_next_to_last_sigma else 0 + + if p.sampler_noise_scheduler_override: + sigmas = p.sampler_noise_scheduler_override(steps) + elif opts.k_sched_type != "Automatic": + m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) + sigmas_kwargs = { + 
'sigma_min': sigma_min, + 'sigma_max': sigma_max, + } + + sigmas_func = k_diffusion_scheduler[opts.k_sched_type] + p.extra_generation_params["Schedule type"] = opts.k_sched_type + + if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: + sigmas_kwargs['sigma_min'] = opts.sigma_min + p.extra_generation_params["Schedule min sigma"] = opts.sigma_min + if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: + sigmas_kwargs['sigma_max'] = opts.sigma_max + p.extra_generation_params["Schedule max sigma"] = opts.sigma_max + + default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. + + if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: + sigmas_kwargs['rho'] = opts.rho + p.extra_generation_params["Schedule rho"] = opts.rho + + sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) + elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + + sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) + elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential': + m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device) + else: + sigmas = self.model_wrap.get_sigmas(steps) + + if discard_next_to_last_sigma: + sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) + + return sigmas + + def create_noise_sampler(self, x, sigmas, p): + """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" + if shared.opts.no_dpmpp_sde_batch_determinism: + return None + + from k_diffusion.sampling import BrownianTreeNoiseSampler + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] + return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) + + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) + + sigmas = self.get_sigmas(p, steps) + + sigma_sched = sigmas[steps - t_enc - 1:] + xi = x + noise * sigma_sched[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: + ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last + extra_params_kwargs['sigma_min'] = sigma_sched[-2] + if 'sigma_max' in parameters: + extra_params_kwargs['sigma_max'] = sigma_sched[0] + if 'n' in parameters: + extra_params_kwargs['n'] = len(sigma_sched) - 1 + if 'sigma_sched' in parameters: + extra_params_kwargs['sigma_sched'] = sigma_sched + if 'sigmas' in parameters: + extra_params_kwargs['sigmas'] = sigma_sched + + if self.config.options.get('brownian_noise', False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.model_wrap_cfg.init_latent = x + self.last_latent = x + extra_args = { + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + } 
+ + samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + + def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps = steps or p.steps + + sigmas = self.get_sigmas(p, steps) + + x = x * sigmas[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: + extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() + extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() + if 'n' in parameters: + extra_params_kwargs['n'] = steps + else: + extra_params_kwargs['sigmas'] = sigmas + + if self.config.options.get('brownian_noise', False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.last_latent = x + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + }, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py deleted file mode 100644 index db71a549..00000000 --- a/modules/sd_samplers_kdiffusion.py +++ /dev/null @@ -1,511 +0,0 @@ -from collections import deque -import torch -import inspect -import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra - -from modules.processing import StableDiffusionProcessing -from modules.shared import opts, state -import modules.shared as shared -from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback -from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback -from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback - -samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), - ('Euler', 'sample_euler', ['k_euler'], {}), - ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 
'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), - ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}), -] - - -samplers_data_k_diffusion = [ - sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) - for label, funcname, aliases, options in samplers_k_diffusion - if callable(funcname) or hasattr(k_diffusion.sampling, funcname) -] - -sampler_extra_params = { - 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], -} - -k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} -k_diffusion_scheduler = { - 'Automatic': None, - 'karras': k_diffusion.sampling.get_sigmas_karras, - 'exponential': k_diffusion.sampling.get_sigmas_exponential, - 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential -} - - -def catenate_conds(conds): - if not isinstance(conds[0], dict): - return torch.cat(conds) - - return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} - - -def subscript_cond(cond, a, b): - if not isinstance(cond, dict): - return cond[a:b] - - return {key: vec[a:b] for key, vec in cond.items()} - - -def pad_cond(tensor, repeats, empty): - if not isinstance(tensor, dict): - return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) - - tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) - return tensor - - -class CFGDenoiser(torch.nn.Module): - """ - Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) - that can take a noisy picture and produce a noise-free picture using two guidances (prompts) - instead of one. Originally, the second prompt is just an empty string, but we use non-empty - negative prompt. 
- """ - - def __init__(self, model): - super().__init__() - self.inner_model = model - self.mask = None - self.nmask = None - self.init_latent = None - self.step = 0 - self.image_cfg_scale = None - self.padded_cond_uncond = False - - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): - denoised_uncond = x_out[-uncond.shape[0]:] - denoised = torch.clone(denoised_uncond) - - for i, conds in enumerate(conds_list): - for cond_index, weight in conds: - denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) - - return denoised - - def combine_denoised_for_edit_model(self, x_out, cond_scale): - out_cond, out_img_cond, out_uncond = x_out.chunk(3) - denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) - - return denoised - - def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, - # so is_edit_model is set to False to support AND composition. - is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - - assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - - batch_size = len(conds_list) - repeats = [len(conds_list[i]) for i in range(batch_size)] - - if shared.sd_model.model.conditioning_key == "crossattn-adm": - image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} - else: - image_uncond = image_cond - if isinstance(uncond, dict): - make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} - else: - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} - - if not is_edit_model: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) - else: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) - - denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) - cfg_denoiser_callback(denoiser_params) - x_in = denoiser_params.x - image_cond_in = denoiser_params.image_cond - sigma_in = denoiser_params.sigma - tensor = denoiser_params.text_cond - uncond = denoiser_params.text_uncond - skip_uncond = False - - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: - skip_uncond = True 
- x_in = x_in[:-batch_size] - sigma_in = sigma_in[:-batch_size] - - self.padded_cond_uncond = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - empty = shared.sd_model.cond_stage_model_empty_prompt - num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] - - if num_repeats < 0: - tensor = pad_cond(tensor, -num_repeats, empty) - self.padded_cond_uncond = True - elif num_repeats > 0: - uncond = pad_cond(uncond, num_repeats, empty) - self.padded_cond_uncond = True - - if tensor.shape[1] == uncond.shape[1] or skip_uncond: - if is_edit_model: - cond_in = catenate_conds([tensor, uncond, uncond]) - elif skip_uncond: - cond_in = tensor - else: - cond_in = catenate_conds([tensor, uncond]) - - if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) - else: - x_out = torch.zeros_like(x_in) - for batch_offset in range(0, x_out.shape[0], batch_size): - a = batch_offset - b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) - else: - x_out = torch.zeros_like(x_in) - batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size - for batch_offset in range(0, tensor.shape[0], batch_size): - a = batch_offset - b = min(a + batch_size, tensor.shape[0]) - - if not is_edit_model: - c_crossattn = subscript_cond(tensor, a, b) - else: - c_crossattn = torch.cat([tensor[a:b]], uncond) - - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - - if not skip_uncond: - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) - - denoised_image_indexes = [x[0][0] for x in conds_list] - if skip_uncond: - fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) - x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be - - denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) - cfg_denoised_callback(denoised_params) - - devices.test_for_nans(x_out, "unet") - - if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) - elif opts.live_preview_content == "Negative prompt": - sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - - if is_edit_model: - denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) - elif skip_uncond: - denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) - else: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - - if self.mask is not None: - denoised = self.init_latent * self.mask + self.nmask * denoised - - after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) - cfg_after_cfg_callback(after_cfg_callback_params) - denoised = after_cfg_callback_params.x - - self.step += 1 - return denoised - - -class TorchHijack: - def __init__(self, sampler_noises): - # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. 
- self.sampler_noises = deque(sampler_noises) - - def __getattr__(self, item): - if item == 'randn_like': - return self.randn_like - - if hasattr(torch, item): - return getattr(torch, item) - - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") - - def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise - - return devices.randn_like(x) - - -class KDiffusionSampler: - def __init__(self, funcname, sd_model): - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.funcname = funcname - self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) - self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser(self.model_wrap) - self.sampler_noises = None - self.stop_at = None - self.eta = None - self.config = None # set by the function calling the constructor - self.last_latent = None - self.s_min_uncond = None - - # NOTE: These are also defined in the StableDiffusionProcessing class. - # They should have been here to begin with but we're going to - # leave that class __init__ signature alone. - self.s_churn = 0.0 - self.s_tmin = 0.0 - self.s_tmax = float('inf') - self.s_noise = 1.0 - - self.conditioning_key = sd_model.model.conditioning_key - - def callback_state(self, d): - step = d['i'] - latent = d["denoised"] - if opts.live_preview_content == "Combined": - sd_samplers_common.store_latent(latent) - self.last_latent = latent - - if self.stop_at is not None and step > self.stop_at: - raise sd_samplers_common.InterruptedException - - state.sampling_step = step - shared.total_tqdm.update() - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except RecursionError: - print( - 'Encountered RecursionError during sampling, returning last latent. ' - 'rho >5 with a polyexponential scheduler may cause this error. ' - 'You should try to use a smaller rho value instead.' 
- ) - return self.last_latent - except sd_samplers_common.InterruptedException: - return self.last_latent - - def number_of_needed_noises(self, p): - return p.steps - - def initialize(self, p: StableDiffusionProcessing): - self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None - self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None - self.model_wrap_cfg.step = 0 - self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.eta = p.eta if p.eta is not None else opts.eta_ancestral - self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) - - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) - - extra_params_kwargs = {} - for param_name in self.extra_params: - if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: - extra_params_kwargs[param_name] = getattr(p, param_name) - - if 'eta' in inspect.signature(self.func).parameters: - if self.eta != 1.0: - p.extra_generation_params["Eta"] = self.eta - - extra_params_kwargs['eta'] = self.eta - - if len(self.extra_params) > 0: - s_churn = getattr(opts, 's_churn', p.s_churn) - s_tmin = getattr(opts, 's_tmin', p.s_tmin) - s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf - s_noise = getattr(opts, 's_noise', p.s_noise) - - if s_churn != self.s_churn: - extra_params_kwargs['s_churn'] = s_churn - p.s_churn = s_churn - p.extra_generation_params['Sigma churn'] = s_churn - if s_tmin != self.s_tmin: - extra_params_kwargs['s_tmin'] = s_tmin - p.s_tmin = s_tmin - p.extra_generation_params['Sigma tmin'] = s_tmin - if s_tmax != self.s_tmax: - extra_params_kwargs['s_tmax'] = s_tmax - p.s_tmax = s_tmax - p.extra_generation_params['Sigma tmax'] = s_tmax - if s_noise != self.s_noise: - extra_params_kwargs['s_noise'] = s_noise - p.s_noise = s_noise - p.extra_generation_params['Sigma noise'] = s_noise - - return extra_params_kwargs - - def get_sigmas(self, p, steps): - discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) - if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: - discard_next_to_last_sigma = True - p.extra_generation_params["Discard penultimate sigma"] = True - - steps += 1 if discard_next_to_last_sigma else 0 - - if p.sampler_noise_scheduler_override: - sigmas = p.sampler_noise_scheduler_override(steps) - elif opts.k_sched_type != "Automatic": - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) - sigmas_kwargs = { - 'sigma_min': sigma_min, - 'sigma_max': sigma_max, - } - - sigmas_func = k_diffusion_scheduler[opts.k_sched_type] - p.extra_generation_params["Schedule type"] = opts.k_sched_type - - if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: - sigmas_kwargs['sigma_min'] = opts.sigma_min - p.extra_generation_params["Schedule min sigma"] = opts.sigma_min - if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: - sigmas_kwargs['sigma_max'] = opts.sigma_max - p.extra_generation_params["Schedule max sigma"] = opts.sigma_max - - default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. 
- - if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: - sigmas_kwargs['rho'] = opts.rho - p.extra_generation_params["Schedule rho"] = opts.rho - - sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential': - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device) - else: - sigmas = self.model_wrap.get_sigmas(steps) - - if discard_next_to_last_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - - return sigmas - - def create_noise_sampler(self, x, sigmas, p): - """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" - if shared.opts.no_dpmpp_sde_batch_determinism: - return None - - from k_diffusion.sampling import BrownianTreeNoiseSampler - sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) - - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) - - sigmas = self.get_sigmas(p, steps) - - sigma_sched = sigmas[steps - t_enc - 1:] - xi = x + noise * sigma_sched[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last - extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in parameters: - extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in parameters: - extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in parameters: - extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in parameters: - extra_params_kwargs['sigmas'] = sigma_sched - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.model_wrap_cfg.init_latent = x - self.last_latent = x - extra_args = { - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - } - - samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps = steps or p.steps - - sigmas = self.get_sigmas(p, steps) - - x = x * sigmas[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 
'sigma_min' in parameters: - extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() - extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in parameters: - extra_params_kwargs['n'] = steps - else: - extra_params_kwargs['sigmas'] = sigmas - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.last_latent = x - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - }, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - -- cgit v1.2.1 From a3e27019e44e8f357181992e510f989ce59b992f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 8 Aug 2023 18:32:17 +0300 Subject: Split history: mv modules/sd_samplers_kdiffusion.py temp --- modules/sd_samplers_kdiffusion.py | 511 -------------------------------------- temp | 511 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 511 insertions(+), 511 deletions(-) delete mode 100644 modules/sd_samplers_kdiffusion.py create mode 100644 temp diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py deleted file mode 100644 index db71a549..00000000 --- a/modules/sd_samplers_kdiffusion.py +++ /dev/null @@ -1,511 +0,0 @@ -from collections import deque -import torch -import inspect -import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra - -from modules.processing import StableDiffusionProcessing -from modules.shared import opts, state -import modules.shared as shared -from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback -from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback -from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback - -samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), - ('Euler', 'sample_euler', ['k_euler'], {}), - ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a 
From a3e27019e44e8f357181992e510f989ce59b992f Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 8 Aug 2023 18:32:17 +0300
Subject: Split history: mv modules/sd_samplers_kdiffusion.py temp

---
 modules/sd_samplers_kdiffusion.py | 511 --------------------------------------
 temp                              | 511 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 511 insertions(+), 511 deletions(-)
 delete mode 100644 modules/sd_samplers_kdiffusion.py
 create mode 100644 temp

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
deleted file mode 100644
index db71a549..00000000
--- a/modules/sd_samplers_kdiffusion.py
+++ /dev/null
@@ -1,511 +0,0 @@
-from collections import deque
-import torch
-import inspect
-import k_diffusion.sampling
-from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra
-
-from modules.processing import StableDiffusionProcessing
-from modules.shared import opts, state
-import modules.shared as shared
-from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
-from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
-from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
-
-samplers_k_diffusion = [
-    ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}),
-    ('Euler', 'sample_euler', ['k_euler'], {}),
-    ('LMS', 'sample_lms', ['k_lms'], {}),
-    ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}),
-    ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
-    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}),
-    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}),
-    ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
-    ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}),
-    ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}),
-    ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}),
-    ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}),
-    ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
-    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
-    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
-    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}),
-    ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
-    ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
-    ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
-    ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}),
-    ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}),
-]
-
-
-samplers_data_k_diffusion = [
-    sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
-    for label, funcname, aliases, options in samplers_k_diffusion
-    if callable(funcname) or hasattr(k_diffusion.sampling, funcname)
-]
-
-sampler_extra_params = {
-    'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
-    'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
-    'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
-}
-
-k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion}
-k_diffusion_scheduler = {
-    'Automatic': None,
-    'karras': k_diffusion.sampling.get_sigmas_karras,
-    'exponential': k_diffusion.sampling.get_sigmas_exponential,
-    'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential
-}
-
-
-def catenate_conds(conds):
-    if not isinstance(conds[0], dict):
-        return torch.cat(conds)
-
-    return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()}
-
-
-def subscript_cond(cond, a, b):
-    if not isinstance(cond, dict):
-        return cond[a:b]
-
-    return {key: vec[a:b] for key, vec in cond.items()}
-
-
-def pad_cond(tensor, repeats, empty):
-    if not isinstance(tensor, dict):
-        return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1)
-
-    tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty)
-    return tensor
-
-
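
pad_cond above equalizes the token length of two conditioning tensors by appending repeats of the empty-prompt embedding along the sequence axis. A toy check of the shape arithmetic, assuming the usual 77-token, 768-dim CLIP embedding shape (the sizes are illustrative):

    import torch

    def pad_cond_plain(tensor, repeats, empty):
        # tensor-only mirror of the pad_cond branch above
        return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1)

    cond = torch.zeros(2, 77, 768)   # two prompts, 77 tokens each
    empty = torch.zeros(1, 77, 768)  # empty-prompt embedding
    padded = pad_cond_plain(cond, 2, empty)
    assert padded.shape == (2, 77 * 3, 768)
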
- """ - - def __init__(self, model): - super().__init__() - self.inner_model = model - self.mask = None - self.nmask = None - self.init_latent = None - self.step = 0 - self.image_cfg_scale = None - self.padded_cond_uncond = False - - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): - denoised_uncond = x_out[-uncond.shape[0]:] - denoised = torch.clone(denoised_uncond) - - for i, conds in enumerate(conds_list): - for cond_index, weight in conds: - denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) - - return denoised - - def combine_denoised_for_edit_model(self, x_out, cond_scale): - out_cond, out_img_cond, out_uncond = x_out.chunk(3) - denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) - - return denoised - - def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, - # so is_edit_model is set to False to support AND composition. - is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - - assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - - batch_size = len(conds_list) - repeats = [len(conds_list[i]) for i in range(batch_size)] - - if shared.sd_model.model.conditioning_key == "crossattn-adm": - image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} - else: - image_uncond = image_cond - if isinstance(uncond, dict): - make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} - else: - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} - - if not is_edit_model: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) - else: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) - - denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) - cfg_denoiser_callback(denoiser_params) - x_in = denoiser_params.x - image_cond_in = denoiser_params.image_cond - sigma_in = denoiser_params.sigma - tensor = denoiser_params.text_cond - uncond = denoiser_params.text_uncond - skip_uncond = False - - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: - skip_uncond = True 
-    def combine_denoised_for_edit_model(self, x_out, cond_scale):
-        out_cond, out_img_cond, out_uncond = x_out.chunk(3)
-        denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond)
-
-        return denoised
-
-    def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond):
-        if state.interrupted or state.skipped:
-            raise sd_samplers_common.InterruptedException
-
-        # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling,
-        # so is_edit_model is set to False to support AND composition.
-        is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0
-
-        conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
-        uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
-
-        assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
-
-        batch_size = len(conds_list)
-        repeats = [len(conds_list[i]) for i in range(batch_size)]
-
-        if shared.sd_model.model.conditioning_key == "crossattn-adm":
-            image_uncond = torch.zeros_like(image_cond)
-            make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm}
-        else:
-            image_uncond = image_cond
-            if isinstance(uncond, dict):
-                make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]}
-            else:
-                make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]}
-
-        if not is_edit_model:
-            x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
-            sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
-            image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond])
-        else:
-            x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x])
-            sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma])
-            image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)])
-
-        denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond)
-        cfg_denoiser_callback(denoiser_params)
-        x_in = denoiser_params.x
-        image_cond_in = denoiser_params.image_cond
-        sigma_in = denoiser_params.sigma
-        tensor = denoiser_params.text_cond
-        uncond = denoiser_params.text_uncond
-        skip_uncond = False
-
-        # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
-        if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
-            skip_uncond = True
-            x_in = x_in[:-batch_size]
-            sigma_in = sigma_in[:-batch_size]
-
-        self.padded_cond_uncond = False
-        if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
-            empty = shared.sd_model.cond_stage_model_empty_prompt
-            num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
-
-            if num_repeats < 0:
-                tensor = pad_cond(tensor, -num_repeats, empty)
-                self.padded_cond_uncond = True
-            elif num_repeats > 0:
-                uncond = pad_cond(uncond, num_repeats, empty)
-                self.padded_cond_uncond = True
-
-        if tensor.shape[1] == uncond.shape[1] or skip_uncond:
-            if is_edit_model:
-                cond_in = catenate_conds([tensor, uncond, uncond])
-            elif skip_uncond:
-                cond_in = tensor
-            else:
-                cond_in = catenate_conds([tensor, uncond])
-
-            if shared.batch_cond_uncond:
-                x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
-            else:
-                x_out = torch.zeros_like(x_in)
-                for batch_offset in range(0, x_out.shape[0], batch_size):
-                    a = batch_offset
-                    b = a + batch_size
-                    x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
-        else:
-            x_out = torch.zeros_like(x_in)
-            batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
-            for batch_offset in range(0, tensor.shape[0], batch_size):
-                a = batch_offset
-                b = min(a + batch_size, tensor.shape[0])
-
-                if not is_edit_model:
-                    c_crossattn = subscript_cond(tensor, a, b)
-                else:
-                    c_crossattn = torch.cat([tensor[a:b]], uncond)
-
-                x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
-
-            if not skip_uncond:
-                x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
-
-        denoised_image_indexes = [x[0][0] for x in conds_list]
-        if skip_uncond:
-            fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
-            x_out = torch.cat([x_out, fake_uncond])  # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
-
-        denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
-        cfg_denoised_callback(denoised_params)
-
-        devices.test_for_nans(x_out, "unet")
-
-        if opts.live_preview_content == "Prompt":
-            sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes]))
-        elif opts.live_preview_content == "Negative prompt":
-            sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
-
-        if is_edit_model:
-            denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
-        elif skip_uncond:
-            denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
-        else:
-            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
-
-        if self.mask is not None:
-            denoised = self.init_latent * self.mask + self.nmask * denoised
-
-        after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
-        cfg_after_cfg_callback(after_cfg_callback_params)
-        denoised = after_cfg_callback_params.x
-
-        self.step += 1
-        return denoised
-
-
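
The s_min_uncond branch inside forward() drops the unconditional pass on every other step once sigma has fallen below the threshold, trading a little guidance accuracy late in sampling for speed. The gating condition in isolation (values are illustrative):

    def should_skip_uncond(step, sigma, s_min_uncond, is_edit_model=False):
        # odd steps only, and only once the noise level is already low
        return bool(step % 2) and s_min_uncond > 0 and sigma < s_min_uncond and not is_edit_model

    assert not should_skip_uncond(step=2, sigma=0.5, s_min_uncond=1.0)  # even step
    assert should_skip_uncond(step=3, sigma=0.5, s_min_uncond=1.0)
    assert not should_skip_uncond(step=3, sigma=2.0, s_min_uncond=1.0)  # sigma still too high
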
-class TorchHijack:
-    def __init__(self, sampler_noises):
-        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
-        # implementation.
-        self.sampler_noises = deque(sampler_noises)
-
-    def __getattr__(self, item):
-        if item == 'randn_like':
-            return self.randn_like
-
-        if hasattr(torch, item):
-            return getattr(torch, item)
-
-        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
-
-    def randn_like(self, x):
-        if self.sampler_noises:
-            noise = self.sampler_noises.popleft()
-            if noise.shape == x.shape:
-                return noise
-
-        return devices.randn_like(x)
-
-
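
TorchHijack is installed as k_diffusion.sampling.torch in initialize() further down, so a sampler's torch.randn_like calls consume the pre-generated noises first and fall back to fresh noise afterwards. A stripped-down sketch of that replay behaviour without the webui devices module (NoiseReplay is a hypothetical name):

    from collections import deque

    import torch

    class NoiseReplay:
        def __init__(self, noises):
            self.noises = deque(noises)

        def randn_like(self, x):
            # serve queued noise while shapes match, then fall back to torch
            if self.noises and self.noises[0].shape == x.shape:
                return self.noises.popleft()
            return torch.randn_like(x)

    queued = torch.ones(2, 2)
    replay = NoiseReplay([queued])
    assert torch.equal(replay.randn_like(torch.zeros(2, 2)), queued)  # replayed
    assert not replay.noises                                          # queue exhausted
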
-class KDiffusionSampler:
-    def __init__(self, funcname, sd_model):
-        denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
-
-        self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
-        self.funcname = funcname
-        self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
-        self.extra_params = sampler_extra_params.get(funcname, [])
-        self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
-        self.sampler_noises = None
-        self.stop_at = None
-        self.eta = None
-        self.config = None  # set by the function calling the constructor
-        self.last_latent = None
-        self.s_min_uncond = None
-
-        # NOTE: These are also defined in the StableDiffusionProcessing class.
-        # They should have been here to begin with but we're going to
-        # leave that class __init__ signature alone.
-        self.s_churn = 0.0
-        self.s_tmin = 0.0
-        self.s_tmax = float('inf')
-        self.s_noise = 1.0
-
-        self.conditioning_key = sd_model.model.conditioning_key
-
-    def callback_state(self, d):
-        step = d['i']
-        latent = d["denoised"]
-        if opts.live_preview_content == "Combined":
-            sd_samplers_common.store_latent(latent)
-        self.last_latent = latent
-
-        if self.stop_at is not None and step > self.stop_at:
-            raise sd_samplers_common.InterruptedException
-
-        state.sampling_step = step
-        shared.total_tqdm.update()
-
-    def launch_sampling(self, steps, func):
-        state.sampling_steps = steps
-        state.sampling_step = 0
-
-        try:
-            return func()
-        except RecursionError:
-            print(
-                'Encountered RecursionError during sampling, returning last latent. '
-                'rho >5 with a polyexponential scheduler may cause this error. '
-                'You should try to use a smaller rho value instead.'
-            )
-            return self.last_latent
-        except sd_samplers_common.InterruptedException:
-            return self.last_latent
-
-    def number_of_needed_noises(self, p):
-        return p.steps
-
-    def initialize(self, p: StableDiffusionProcessing):
-        self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
-        self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
-        self.model_wrap_cfg.step = 0
-        self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
-        self.eta = p.eta if p.eta is not None else opts.eta_ancestral
-        self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
-
-        k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
-
-        extra_params_kwargs = {}
-        for param_name in self.extra_params:
-            if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
-                extra_params_kwargs[param_name] = getattr(p, param_name)
-
-        if 'eta' in inspect.signature(self.func).parameters:
-            if self.eta != 1.0:
-                p.extra_generation_params["Eta"] = self.eta
-
-            extra_params_kwargs['eta'] = self.eta
-
-        if len(self.extra_params) > 0:
-            s_churn = getattr(opts, 's_churn', p.s_churn)
-            s_tmin = getattr(opts, 's_tmin', p.s_tmin)
-            s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax  # 0 = inf
-            s_noise = getattr(opts, 's_noise', p.s_noise)
-
-            if s_churn != self.s_churn:
-                extra_params_kwargs['s_churn'] = s_churn
-                p.s_churn = s_churn
-                p.extra_generation_params['Sigma churn'] = s_churn
-            if s_tmin != self.s_tmin:
-                extra_params_kwargs['s_tmin'] = s_tmin
-                p.s_tmin = s_tmin
-                p.extra_generation_params['Sigma tmin'] = s_tmin
-            if s_tmax != self.s_tmax:
-                extra_params_kwargs['s_tmax'] = s_tmax
-                p.s_tmax = s_tmax
-                p.extra_generation_params['Sigma tmax'] = s_tmax
-            if s_noise != self.s_noise:
-                extra_params_kwargs['s_noise'] = s_noise
-                p.s_noise = s_noise
-                p.extra_generation_params['Sigma noise'] = s_noise
-
-        return extra_params_kwargs
-
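
get_sigmas() below mostly delegates to k-diffusion's schedule builders. For reference, the Karras et al. schedule that get_sigmas_karras produces interpolates sigma^(1/rho) linearly from sigma_max down to sigma_min and appends a final zero; a minimal reimplementation under that assumption, with rho=7 matching the default noted above:

    import torch

    def karras_sigmas(n, sigma_min, sigma_max, rho=7.0):
        # linear ramp in sigma**(1/rho) space, plus the trailing zero
        ramp = torch.linspace(0, 1, n)
        min_inv, max_inv = sigma_min ** (1 / rho), sigma_max ** (1 / rho)
        sigmas = (max_inv + ramp * (min_inv - max_inv)) ** rho
        return torch.cat([sigmas, sigmas.new_zeros(1)])

    sigmas = karras_sigmas(10, 0.1, 10.0)
    assert torch.isclose(sigmas[0], torch.tensor(10.0))
    assert sigmas[-1] == 0 and (sigmas[:-1] > sigmas[1:]).all()
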
-    def get_sigmas(self, p, steps):
-        discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
-        if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
-            discard_next_to_last_sigma = True
-            p.extra_generation_params["Discard penultimate sigma"] = True
-
-        steps += 1 if discard_next_to_last_sigma else 0
-
-        if p.sampler_noise_scheduler_override:
-            sigmas = p.sampler_noise_scheduler_override(steps)
-        elif opts.k_sched_type != "Automatic":
-            m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
-            sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max)
-            sigmas_kwargs = {
-                'sigma_min': sigma_min,
-                'sigma_max': sigma_max,
-            }
-
-            sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
-            p.extra_generation_params["Schedule type"] = opts.k_sched_type
-
-            if opts.sigma_min != m_sigma_min and opts.sigma_min != 0:
-                sigmas_kwargs['sigma_min'] = opts.sigma_min
-                p.extra_generation_params["Schedule min sigma"] = opts.sigma_min
-            if opts.sigma_max != m_sigma_max and opts.sigma_max != 0:
-                sigmas_kwargs['sigma_max'] = opts.sigma_max
-                p.extra_generation_params["Schedule max sigma"] = opts.sigma_max
-
-            default_rho = 1. if opts.k_sched_type == "polyexponential" else 7.
-
-            if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho:
-                sigmas_kwargs['rho'] = opts.rho
-                p.extra_generation_params["Schedule rho"] = opts.rho
-
-            sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device)
-        elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
-            sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
-
-            sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
-        elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential':
-            m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
-            sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device)
-        else:
-            sigmas = self.model_wrap.get_sigmas(steps)
-
-        if discard_next_to_last_sigma:
-            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
-
-        return sigmas
-
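
The discard_next_to_last_sigma handling above asks the scheduler for one extra step and then removes the penultimate sigma, which is what the DPM2 variants and the always_discard_next_to_last_sigma option rely on. Its tensor effect on a toy schedule:

    import torch

    sigmas = torch.tensor([4.0, 3.0, 2.0, 1.0, 0.0])
    trimmed = torch.cat([sigmas[:-2], sigmas[-1:]])  # drop the next-to-last entry
    assert torch.equal(trimmed, torch.tensor([4.0, 3.0, 2.0, 0.0]))
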
-    def create_noise_sampler(self, x, sigmas, p):
-        """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
-        if shared.opts.no_dpmpp_sde_batch_determinism:
-            return None
-
-        from k_diffusion.sampling import BrownianTreeNoiseSampler
-        sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
-        current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
-        return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)
-
-    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
-        steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
-
-        sigmas = self.get_sigmas(p, steps)
-
-        sigma_sched = sigmas[steps - t_enc - 1:]
-        xi = x + noise * sigma_sched[0]
-
-        extra_params_kwargs = self.initialize(p)
-        parameters = inspect.signature(self.func).parameters
-
-        if 'sigma_min' in parameters:
-            ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
-            extra_params_kwargs['sigma_min'] = sigma_sched[-2]
-        if 'sigma_max' in parameters:
-            extra_params_kwargs['sigma_max'] = sigma_sched[0]
-        if 'n' in parameters:
-            extra_params_kwargs['n'] = len(sigma_sched) - 1
-        if 'sigma_sched' in parameters:
-            extra_params_kwargs['sigma_sched'] = sigma_sched
-        if 'sigmas' in parameters:
-            extra_params_kwargs['sigmas'] = sigma_sched
-
-        if self.config.options.get('brownian_noise', False):
-            noise_sampler = self.create_noise_sampler(x, sigmas, p)
-            extra_params_kwargs['noise_sampler'] = noise_sampler
-
-        self.model_wrap_cfg.init_latent = x
-        self.last_latent = x
-        extra_args = {
-            'cond': conditioning,
-            'image_cond': image_conditioning,
-            'uncond': unconditional_conditioning,
-            'cond_scale': p.cfg_scale,
-            's_min_uncond': self.s_min_uncond
-        }
-
-        samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
-
-        if self.model_wrap_cfg.padded_cond_uncond:
-            p.extra_generation_params["Pad conds"] = True
-
-        return samples
-
-    def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
-        steps = steps or p.steps
-
-        sigmas = self.get_sigmas(p, steps)
-
-        x = x * sigmas[0]
-
-        extra_params_kwargs = self.initialize(p)
-        parameters = inspect.signature(self.func).parameters
-
-        if 'sigma_min' in parameters:
-            extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
-            extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
-        if 'n' in parameters:
-            extra_params_kwargs['n'] = steps
-        else:
-            extra_params_kwargs['sigmas'] = sigmas
-
-        if self.config.options.get('brownian_noise', False):
-            noise_sampler = self.create_noise_sampler(x, sigmas, p)
-            extra_params_kwargs['noise_sampler'] = noise_sampler
-
-        self.last_latent = x
-        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
-            'cond': conditioning,
-            'image_cond': image_conditioning,
-            'uncond': unconditional_conditioning,
-            'cond_scale': p.cfg_scale,
-            's_min_uncond': self.s_min_uncond
-        }, disable=False, callback=self.callback_state, **extra_params_kwargs))
-
-        if self.model_wrap_cfg.padded_cond_uncond:
-            p.extra_generation_params["Pad conds"] = True
-
-        return samples
-
diff --git a/temp b/temp
new file mode 100644
index 00000000..db71a549
--- /dev/null
+++ b/temp
@@ -0,0 +1,511 @@
+from collections import deque
+import torch
+import inspect
+import k_diffusion.sampling
+from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra
+
+from modules.processing import StableDiffusionProcessing
+from modules.shared import opts, state
+import modules.shared as shared
+from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
+from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
+from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
+
+samplers_k_diffusion = [
+    ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}),
+    ('Euler', 'sample_euler', ['k_euler'], {}),
+    ('LMS', 'sample_lms', ['k_lms'], {}),
+    ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}),
+    ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
+    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}),
+    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}),
+    ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
+    ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}),
+    ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}),
+    ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}),
+    ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}),
+    ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
+    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
+    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
+    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}),
+    ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
+    ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
+    ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
+    ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}),
"brownian_noise": True}), + ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}), +] + + +samplers_data_k_diffusion = [ + sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) + for label, funcname, aliases, options in samplers_k_diffusion + if callable(funcname) or hasattr(k_diffusion.sampling, funcname) +] + +sampler_extra_params = { + 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], +} + +k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} +k_diffusion_scheduler = { + 'Automatic': None, + 'karras': k_diffusion.sampling.get_sigmas_karras, + 'exponential': k_diffusion.sampling.get_sigmas_exponential, + 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential +} + + +def catenate_conds(conds): + if not isinstance(conds[0], dict): + return torch.cat(conds) + + return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} + + +def subscript_cond(cond, a, b): + if not isinstance(cond, dict): + return cond[a:b] + + return {key: vec[a:b] for key, vec in cond.items()} + + +def pad_cond(tensor, repeats, empty): + if not isinstance(tensor, dict): + return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) + + tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) + return tensor + + +class CFGDenoiser(torch.nn.Module): + """ + Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) + that can take a noisy picture and produce a noise-free picture using two guidances (prompts) + instead of one. Originally, the second prompt is just an empty string, but we use non-empty + negative prompt. + """ + + def __init__(self, model): + super().__init__() + self.inner_model = model + self.mask = None + self.nmask = None + self.init_latent = None + self.step = 0 + self.image_cfg_scale = None + self.padded_cond_uncond = False + + def combine_denoised(self, x_out, conds_list, uncond, cond_scale): + denoised_uncond = x_out[-uncond.shape[0]:] + denoised = torch.clone(denoised_uncond) + + for i, conds in enumerate(conds_list): + for cond_index, weight in conds: + denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) + + return denoised + + def combine_denoised_for_edit_model(self, x_out, cond_scale): + out_cond, out_img_cond, out_uncond = x_out.chunk(3) + denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) + + return denoised + + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): + if state.interrupted or state.skipped: + raise sd_samplers_common.InterruptedException + + # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, + # so is_edit_model is set to False to support AND composition. 
+        is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0
+
+        conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
+        uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
+
+        assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
+
+        batch_size = len(conds_list)
+        repeats = [len(conds_list[i]) for i in range(batch_size)]
+
+        if shared.sd_model.model.conditioning_key == "crossattn-adm":
+            image_uncond = torch.zeros_like(image_cond)
+            make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm}
+        else:
+            image_uncond = image_cond
+            if isinstance(uncond, dict):
+                make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]}
+            else:
+                make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]}
+
+        if not is_edit_model:
+            x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
+            sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
+            image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond])
+        else:
+            x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x])
+            sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma])
+            image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)])
+
+        denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond)
+        cfg_denoiser_callback(denoiser_params)
+        x_in = denoiser_params.x
+        image_cond_in = denoiser_params.image_cond
+        sigma_in = denoiser_params.sigma
+        tensor = denoiser_params.text_cond
+        uncond = denoiser_params.text_uncond
+        skip_uncond = False
+
+        # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
+        if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
+            skip_uncond = True
+            x_in = x_in[:-batch_size]
+            sigma_in = sigma_in[:-batch_size]
+
+        self.padded_cond_uncond = False
+        if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
+            empty = shared.sd_model.cond_stage_model_empty_prompt
+            num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
+
+            if num_repeats < 0:
+                tensor = pad_cond(tensor, -num_repeats, empty)
+                self.padded_cond_uncond = True
+            elif num_repeats > 0:
+                uncond = pad_cond(uncond, num_repeats, empty)
+                self.padded_cond_uncond = True
+
+        if tensor.shape[1] == uncond.shape[1] or skip_uncond:
+            if is_edit_model:
+                cond_in = catenate_conds([tensor, uncond, uncond])
+            elif skip_uncond:
+                cond_in = tensor
+            else:
+                cond_in = catenate_conds([tensor, uncond])
+
+            if shared.batch_cond_uncond:
+                x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
+            else:
+                x_out = torch.zeros_like(x_in)
+                for batch_offset in range(0, x_out.shape[0], batch_size):
+                    a = batch_offset
+                    b = a + batch_size
+                    x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
+        else:
+            x_out = torch.zeros_like(x_in)
+            batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
+            for batch_offset in range(0, tensor.shape[0], batch_size):
+                a = batch_offset
+                b = min(a + batch_size, tensor.shape[0])
+
+                if not is_edit_model:
+                    c_crossattn = subscript_cond(tensor, a, b)
+                else:
+                    c_crossattn = torch.cat([tensor[a:b]], uncond)
+
+                x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
+
+            if not skip_uncond:
+                x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
+
+        denoised_image_indexes = [x[0][0] for x in conds_list]
+        if skip_uncond:
+            fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
+            x_out = torch.cat([x_out, fake_uncond])  # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
+
+        denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
+        cfg_denoised_callback(denoised_params)
+
+        devices.test_for_nans(x_out, "unet")
+
+        if opts.live_preview_content == "Prompt":
+            sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes]))
+        elif opts.live_preview_content == "Negative prompt":
+            sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
+
+        if is_edit_model:
+            denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
+        elif skip_uncond:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
+        else:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+
+        if self.mask is not None:
+            denoised = self.init_latent * self.mask + self.nmask * denoised
+
+        after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
+        cfg_after_cfg_callback(after_cfg_callback_params)
+        denoised = after_cfg_callback_params.x
+
+        self.step += 1
+        return denoised
+
+
+class TorchHijack:
+    def __init__(self, sampler_noises):
+        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
+        # implementation.
+        self.sampler_noises = deque(sampler_noises)
+
+    def __getattr__(self, item):
+        if item == 'randn_like':
+            return self.randn_like
+
+        if hasattr(torch, item):
+            return getattr(torch, item)
+
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
+
+    def randn_like(self, x):
+        if self.sampler_noises:
+            noise = self.sampler_noises.popleft()
+            if noise.shape == x.shape:
+                return noise
+
+        return devices.randn_like(x)
+
+
+class KDiffusionSampler:
+    def __init__(self, funcname, sd_model):
+        denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
+
+        self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
+        self.funcname = funcname
+        self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
+        self.extra_params = sampler_extra_params.get(funcname, [])
+        self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
+        self.sampler_noises = None
+        self.stop_at = None
+        self.eta = None
+        self.config = None  # set by the function calling the constructor
+        self.last_latent = None
+        self.s_min_uncond = None
+
+        # NOTE: These are also defined in the StableDiffusionProcessing class.
+        # They should have been here to begin with but we're going to
+        # leave that class __init__ signature alone.
+        self.s_churn = 0.0
+        self.s_tmin = 0.0
+        self.s_tmax = float('inf')
+        self.s_noise = 1.0
+
+        self.conditioning_key = sd_model.model.conditioning_key
+
+    def callback_state(self, d):
+        step = d['i']
+        latent = d["denoised"]
+        if opts.live_preview_content == "Combined":
+            sd_samplers_common.store_latent(latent)
+        self.last_latent = latent
+
+        if self.stop_at is not None and step > self.stop_at:
+            raise sd_samplers_common.InterruptedException
+
+        state.sampling_step = step
+        shared.total_tqdm.update()
+
+    def launch_sampling(self, steps, func):
+        state.sampling_steps = steps
+        state.sampling_step = 0
+
+        try:
+            return func()
+        except RecursionError:
+            print(
+                'Encountered RecursionError during sampling, returning last latent. '
+                'rho >5 with a polyexponential scheduler may cause this error. '
+                'You should try to use a smaller rho value instead.'
+            )
+            return self.last_latent
+        except sd_samplers_common.InterruptedException:
+            return self.last_latent
+
+    def number_of_needed_noises(self, p):
+        return p.steps
+
+    def initialize(self, p: StableDiffusionProcessing):
+        self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
+        self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
+        self.model_wrap_cfg.step = 0
+        self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
+        self.eta = p.eta if p.eta is not None else opts.eta_ancestral
+        self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
+
+        k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
+
+        extra_params_kwargs = {}
+        for param_name in self.extra_params:
+            if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
+                extra_params_kwargs[param_name] = getattr(p, param_name)
+
+        if 'eta' in inspect.signature(self.func).parameters:
+            if self.eta != 1.0:
+                p.extra_generation_params["Eta"] = self.eta
+
+            extra_params_kwargs['eta'] = self.eta
+
+        if len(self.extra_params) > 0:
+            s_churn = getattr(opts, 's_churn', p.s_churn)
+            s_tmin = getattr(opts, 's_tmin', p.s_tmin)
+            s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax  # 0 = inf
+            s_noise = getattr(opts, 's_noise', p.s_noise)
+
+            if s_churn != self.s_churn:
+                extra_params_kwargs['s_churn'] = s_churn
+                p.s_churn = s_churn
+                p.extra_generation_params['Sigma churn'] = s_churn
+            if s_tmin != self.s_tmin:
+                extra_params_kwargs['s_tmin'] = s_tmin
+                p.s_tmin = s_tmin
+                p.extra_generation_params['Sigma tmin'] = s_tmin
+            if s_tmax != self.s_tmax:
+                extra_params_kwargs['s_tmax'] = s_tmax
+                p.s_tmax = s_tmax
+                p.extra_generation_params['Sigma tmax'] = s_tmax
+            if s_noise != self.s_noise:
+                extra_params_kwargs['s_noise'] = s_noise
+                p.s_noise = s_noise
+                p.extra_generation_params['Sigma noise'] = s_noise
+
+        return extra_params_kwargs
+
+    def get_sigmas(self, p, steps):
+        discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
+        if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
+            discard_next_to_last_sigma = True
+            p.extra_generation_params["Discard penultimate sigma"] = True
+
+        steps += 1 if discard_next_to_last_sigma else 0
+
+        if p.sampler_noise_scheduler_override:
+            sigmas = p.sampler_noise_scheduler_override(steps)
+        elif opts.k_sched_type != "Automatic":
+            m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
+            sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max)
+            sigmas_kwargs = {
+                'sigma_min': sigma_min,
+                'sigma_max': sigma_max,
+            }
+
+            sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
+            p.extra_generation_params["Schedule type"] = opts.k_sched_type
+
+            if opts.sigma_min != m_sigma_min and opts.sigma_min != 0:
+                sigmas_kwargs['sigma_min'] = opts.sigma_min
+                p.extra_generation_params["Schedule min sigma"] = opts.sigma_min
+            if opts.sigma_max != m_sigma_max and opts.sigma_max != 0:
+                sigmas_kwargs['sigma_max'] = opts.sigma_max
+                p.extra_generation_params["Schedule max sigma"] = opts.sigma_max
+
+            default_rho = 1. if opts.k_sched_type == "polyexponential" else 7.
+
+            if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho:
+                sigmas_kwargs['rho'] = opts.rho
+                p.extra_generation_params["Schedule rho"] = opts.rho
+
+            sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device)
+        elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
+            sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
+
+            sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
+        elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential':
+            m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
+            sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device)
+        else:
+            sigmas = self.model_wrap.get_sigmas(steps)
+
+        if discard_next_to_last_sigma:
+            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
+
+        return sigmas
+
+    def create_noise_sampler(self, x, sigmas, p):
+        """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
+        if shared.opts.no_dpmpp_sde_batch_determinism:
+            return None
+
+        from k_diffusion.sampling import BrownianTreeNoiseSampler
+        sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+        current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
+        return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)
+
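
create_noise_sampler above keys the BrownianTreeNoiseSampler to the seeds of the current batch slice, so DPM++ SDE results stay reproducible regardless of how the images are batched. The slice selection on its own, with hypothetical seed values:

    def current_batch_seeds(all_seeds, iteration, batch_size):
        # the same seeds are picked whether images are made in one batch or several
        return all_seeds[iteration * batch_size:(iteration + 1) * batch_size]

    all_seeds = [100, 101, 102, 103]
    assert current_batch_seeds(all_seeds, iteration=1, batch_size=2) == [102, 103]
    assert current_batch_seeds(all_seeds, iteration=3, batch_size=1) == [103]
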
+    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+        steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
+
+        sigmas = self.get_sigmas(p, steps)
+
+        sigma_sched = sigmas[steps - t_enc - 1:]
+        xi = x + noise * sigma_sched[0]
+
+        extra_params_kwargs = self.initialize(p)
+        parameters = inspect.signature(self.func).parameters
+
+        if 'sigma_min' in parameters:
+            ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
+            extra_params_kwargs['sigma_min'] = sigma_sched[-2]
+        if 'sigma_max' in parameters:
+            extra_params_kwargs['sigma_max'] = sigma_sched[0]
+        if 'n' in parameters:
+            extra_params_kwargs['n'] = len(sigma_sched) - 1
+        if 'sigma_sched' in parameters:
+            extra_params_kwargs['sigma_sched'] = sigma_sched
+        if 'sigmas' in parameters:
+            extra_params_kwargs['sigmas'] = sigma_sched
+
+        if self.config.options.get('brownian_noise', False):
+            noise_sampler = self.create_noise_sampler(x, sigmas, p)
+            extra_params_kwargs['noise_sampler'] = noise_sampler
+
+        self.model_wrap_cfg.init_latent = x
+        self.last_latent = x
+        extra_args = {
+            'cond': conditioning,
+            'image_cond': image_conditioning,
+            'uncond': unconditional_conditioning,
+            'cond_scale': p.cfg_scale,
+            's_min_uncond': self.s_min_uncond
+        }
+
+        samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+        if self.model_wrap_cfg.padded_cond_uncond:
+            p.extra_generation_params["Pad conds"] = True
+
+        return samples
+
+    def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+        steps = steps or p.steps
+
+        sigmas = self.get_sigmas(p, steps)
+
+        x = x * sigmas[0]
+
+        extra_params_kwargs = self.initialize(p)
+        parameters = inspect.signature(self.func).parameters
+
+        if 'sigma_min' in parameters:
+            extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
+            extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
+        if 'n' in parameters:
+            extra_params_kwargs['n'] = steps
+        else:
+            extra_params_kwargs['sigmas'] = sigmas
+
+        if self.config.options.get('brownian_noise', False):
+            noise_sampler = self.create_noise_sampler(x, sigmas, p)
+            extra_params_kwargs['noise_sampler'] = noise_sampler
+
+        self.last_latent = x
+        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
+            'cond': conditioning,
+            'image_cond': image_conditioning,
+            'uncond': unconditional_conditioning,
+            'cond_scale': p.cfg_scale,
+            's_min_uncond': self.s_min_uncond
+        }, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+        if self.model_wrap_cfg.padded_cond_uncond:
+            p.extra_generation_params["Pad conds"] = True
+
+        return samples
+
--
cgit v1.2.1

From c721884cf5b9692c32461ffdecfc9121ca0d47b4 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 8 Aug 2023 18:32:18 +0300
Subject: Split history: mv temp modules/sd_samplers_kdiffusion.py

---
 modules/sd_samplers_kdiffusion.py | 511 ++++++++++++++++++++++++++++++++++++++
 temp                              | 511 --------------------------------------
 2 files changed, 511 insertions(+), 511 deletions(-)
 create mode 100644 modules/sd_samplers_kdiffusion.py
 delete mode 100644 temp

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
new file mode 100644
index 00000000..db71a549
--- /dev/null
+++ b/modules/sd_samplers_kdiffusion.py
@@ -0,0 +1,511 @@
+from collections import deque
+import torch
+import inspect
+import k_diffusion.sampling
+from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra
+
+from modules.processing import StableDiffusionProcessing
+from modules.shared import opts, state
+import modules.shared as shared
+from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
+from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
+from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
+
+samplers_k_diffusion = [
+    ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}),
+    ('Euler', 'sample_euler', ['k_euler'], {}),
+    ('LMS', 'sample_lms', ['k_lms'], {}),
+    ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}),
+    ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
+    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}),
+    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}),
+    ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
+    ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}),
+    ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}),
+    ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}),
+    ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}),
+    ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
+    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
+    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
+    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}),
+    ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
+    ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
+    ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
+    ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}),
+    ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}),
+]
+
+
+samplers_data_k_diffusion = [
+    sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
+    for label, funcname, aliases, options in samplers_k_diffusion
+    if callable(funcname) or hasattr(k_diffusion.sampling, funcname)
+]
+
+sampler_extra_params = {
+    'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
+    'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
+    'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
+}
+
+k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion}
+k_diffusion_scheduler = {
+    'Automatic': None,
+    'karras': k_diffusion.sampling.get_sigmas_karras,
+    'exponential': k_diffusion.sampling.get_sigmas_exponential,
+    'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential
+}
+
+
+def catenate_conds(conds):
+    if not isinstance(conds[0], dict):
+        return torch.cat(conds)
+
+    return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()}
+
+
+def subscript_cond(cond, a, b):
+    if not isinstance(cond, dict):
+        return cond[a:b]
+
+    return {key: vec[a:b] for key, vec in cond.items()}
+
+
+def pad_cond(tensor, repeats, empty):
+    if not isinstance(tensor, dict):
+        return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1)
+
+    tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty)
+    return tensor
+
+
+class CFGDenoiser(torch.nn.Module):
+    """
+    Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet)
+    that can take a noisy picture and produce a noise-free picture using two guidances (prompts)
+    instead of one. Originally, the second prompt is just an empty string, but we use non-empty
+    negative prompt.
+ """ + + def __init__(self, model): + super().__init__() + self.inner_model = model + self.mask = None + self.nmask = None + self.init_latent = None + self.step = 0 + self.image_cfg_scale = None + self.padded_cond_uncond = False + + def combine_denoised(self, x_out, conds_list, uncond, cond_scale): + denoised_uncond = x_out[-uncond.shape[0]:] + denoised = torch.clone(denoised_uncond) + + for i, conds in enumerate(conds_list): + for cond_index, weight in conds: + denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) + + return denoised + + def combine_denoised_for_edit_model(self, x_out, cond_scale): + out_cond, out_img_cond, out_uncond = x_out.chunk(3) + denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) + + return denoised + + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): + if state.interrupted or state.skipped: + raise sd_samplers_common.InterruptedException + + # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, + # so is_edit_model is set to False to support AND composition. + is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 + + conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) + uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) + + assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + + batch_size = len(conds_list) + repeats = [len(conds_list[i]) for i in range(batch_size)] + + if shared.sd_model.model.conditioning_key == "crossattn-adm": + image_uncond = torch.zeros_like(image_cond) + make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} + else: + image_uncond = image_cond + if isinstance(uncond, dict): + make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} + else: + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} + + if not is_edit_model: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) + else: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) + + denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) + cfg_denoiser_callback(denoiser_params) + x_in = denoiser_params.x + image_cond_in = denoiser_params.image_cond + sigma_in = denoiser_params.sigma + tensor = denoiser_params.text_cond + uncond = denoiser_params.text_uncond + skip_uncond = False + + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: + skip_uncond = True 
+            x_in = x_in[:-batch_size]
+            sigma_in = sigma_in[:-batch_size]
+
+        self.padded_cond_uncond = False
+        if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
+            empty = shared.sd_model.cond_stage_model_empty_prompt
+            num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
+
+            if num_repeats < 0:
+                tensor = pad_cond(tensor, -num_repeats, empty)
+                self.padded_cond_uncond = True
+            elif num_repeats > 0:
+                uncond = pad_cond(uncond, num_repeats, empty)
+                self.padded_cond_uncond = True
+
+        if tensor.shape[1] == uncond.shape[1] or skip_uncond:
+            if is_edit_model:
+                cond_in = catenate_conds([tensor, uncond, uncond])
+            elif skip_uncond:
+                cond_in = tensor
+            else:
+                cond_in = catenate_conds([tensor, uncond])
+
+            if shared.batch_cond_uncond:
+                x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
+            else:
+                x_out = torch.zeros_like(x_in)
+                for batch_offset in range(0, x_out.shape[0], batch_size):
+                    a = batch_offset
+                    b = a + batch_size
+                    x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
+        else:
+            x_out = torch.zeros_like(x_in)
+            batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
+            for batch_offset in range(0, tensor.shape[0], batch_size):
+                a = batch_offset
+                b = min(a + batch_size, tensor.shape[0])
+
+                if not is_edit_model:
+                    c_crossattn = subscript_cond(tensor, a, b)
+                else:
+                    c_crossattn = torch.cat([tensor[a:b]], uncond)
+
+                x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
+
+            if not skip_uncond:
+                x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
+
+        denoised_image_indexes = [x[0][0] for x in conds_list]
+        if skip_uncond:
+            fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
+            x_out = torch.cat([x_out, fake_uncond])  # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
+
+        denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
+        cfg_denoised_callback(denoised_params)
+
+        devices.test_for_nans(x_out, "unet")
+
+        if opts.live_preview_content == "Prompt":
+            sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes]))
+        elif opts.live_preview_content == "Negative prompt":
+            sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
+
+        if is_edit_model:
+            denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
+        elif skip_uncond:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
+        else:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+
+        if self.mask is not None:
+            denoised = self.init_latent * self.mask + self.nmask * denoised
+
+        after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
+        cfg_after_cfg_callback(after_cfg_callback_params)
+        denoised = after_cfg_callback_params.x
+
+        self.step += 1
+        return denoised
+
+
+class TorchHijack:
+    def __init__(self, sampler_noises):
+        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
+        # implementation.
+        self.sampler_noises = deque(sampler_noises)
+
+    def __getattr__(self, item):
+        if item == 'randn_like':
+            return self.randn_like
+
+        if hasattr(torch, item):
+            return getattr(torch, item)
+
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
+
+    def randn_like(self, x):
+        if self.sampler_noises:
+            noise = self.sampler_noises.popleft()
+            if noise.shape == x.shape:
+                return noise
+
+        return devices.randn_like(x)
+
+
+class KDiffusionSampler:
+    def __init__(self, funcname, sd_model):
+        denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
+
+        self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
+        self.funcname = funcname
+        self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
+        self.extra_params = sampler_extra_params.get(funcname, [])
+        self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
+        self.sampler_noises = None
+        self.stop_at = None
+        self.eta = None
+        self.config = None  # set by the function calling the constructor
+        self.last_latent = None
+        self.s_min_uncond = None
+
+        # NOTE: These are also defined in the StableDiffusionProcessing class.
+        # They should have been here to begin with but we're going to
+        # leave that class __init__ signature alone.
+        self.s_churn = 0.0
+        self.s_tmin = 0.0
+        self.s_tmax = float('inf')
+        self.s_noise = 1.0
+
+        self.conditioning_key = sd_model.model.conditioning_key
+
+    def callback_state(self, d):
+        step = d['i']
+        latent = d["denoised"]
+        if opts.live_preview_content == "Combined":
+            sd_samplers_common.store_latent(latent)
+        self.last_latent = latent
+
+        if self.stop_at is not None and step > self.stop_at:
+            raise sd_samplers_common.InterruptedException
+
+        state.sampling_step = step
+        shared.total_tqdm.update()
+
+    def launch_sampling(self, steps, func):
+        state.sampling_steps = steps
+        state.sampling_step = 0
+
+        try:
+            return func()
+        except RecursionError:
+            print(
+                'Encountered RecursionError during sampling, returning last latent. '
+                'rho >5 with a polyexponential scheduler may cause this error. '
+                'You should try to use a smaller rho value instead.'
+            )
+            return self.last_latent
+        except sd_samplers_common.InterruptedException:
+            return self.last_latent
+
+    def number_of_needed_noises(self, p):
+        return p.steps
+
+    def initialize(self, p: StableDiffusionProcessing):
+        self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
+        self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
+        self.model_wrap_cfg.step = 0
+        self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
+        self.eta = p.eta if p.eta is not None else opts.eta_ancestral
+        self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
+
+        k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
+
+        extra_params_kwargs = {}
+        for param_name in self.extra_params:
+            if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
+                extra_params_kwargs[param_name] = getattr(p, param_name)
+
+        if 'eta' in inspect.signature(self.func).parameters:
+            if self.eta != 1.0:
+                p.extra_generation_params["Eta"] = self.eta
+
+            extra_params_kwargs['eta'] = self.eta
+
+        if len(self.extra_params) > 0:
+            s_churn = getattr(opts, 's_churn', p.s_churn)
+            s_tmin = getattr(opts, 's_tmin', p.s_tmin)
+            s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax  # 0 = inf
+            s_noise = getattr(opts, 's_noise', p.s_noise)
+
+            if s_churn != self.s_churn:
+                extra_params_kwargs['s_churn'] = s_churn
+                p.s_churn = s_churn
+                p.extra_generation_params['Sigma churn'] = s_churn
+            if s_tmin != self.s_tmin:
+                extra_params_kwargs['s_tmin'] = s_tmin
+                p.s_tmin = s_tmin
+                p.extra_generation_params['Sigma tmin'] = s_tmin
+            if s_tmax != self.s_tmax:
+                extra_params_kwargs['s_tmax'] = s_tmax
+                p.s_tmax = s_tmax
+                p.extra_generation_params['Sigma tmax'] = s_tmax
+            if s_noise != self.s_noise:
+                extra_params_kwargs['s_noise'] = s_noise
+                p.s_noise = s_noise
+                p.extra_generation_params['Sigma noise'] = s_noise
+
+        return extra_params_kwargs
+
+    def get_sigmas(self, p, steps):
+        discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
+        if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
+            discard_next_to_last_sigma = True
+            p.extra_generation_params["Discard penultimate sigma"] = True
+
+        steps += 1 if discard_next_to_last_sigma else 0
+
+        if p.sampler_noise_scheduler_override:
+            sigmas = p.sampler_noise_scheduler_override(steps)
+        elif opts.k_sched_type != "Automatic":
+            m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
+            sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max)
+            sigmas_kwargs = {
+                'sigma_min': sigma_min,
+                'sigma_max': sigma_max,
+            }
+
+            sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
+            p.extra_generation_params["Schedule type"] = opts.k_sched_type
+
+            if opts.sigma_min != m_sigma_min and opts.sigma_min != 0:
+                sigmas_kwargs['sigma_min'] = opts.sigma_min
+                p.extra_generation_params["Schedule min sigma"] = opts.sigma_min
+            if opts.sigma_max != m_sigma_max and opts.sigma_max != 0:
+                sigmas_kwargs['sigma_max'] = opts.sigma_max
+                p.extra_generation_params["Schedule max sigma"] = opts.sigma_max
+
+            default_rho = 1. if opts.k_sched_type == "polyexponential" else 7.
+ + if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: + sigmas_kwargs['rho'] = opts.rho + p.extra_generation_params["Schedule rho"] = opts.rho + + sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) + elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + + sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) + elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential': + m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device) + else: + sigmas = self.model_wrap.get_sigmas(steps) + + if discard_next_to_last_sigma: + sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) + + return sigmas + + def create_noise_sampler(self, x, sigmas, p): + """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" + if shared.opts.no_dpmpp_sde_batch_determinism: + return None + + from k_diffusion.sampling import BrownianTreeNoiseSampler + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] + return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) + + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) + + sigmas = self.get_sigmas(p, steps) + + sigma_sched = sigmas[steps - t_enc - 1:] + xi = x + noise * sigma_sched[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'sigma_min' in parameters: + ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last + extra_params_kwargs['sigma_min'] = sigma_sched[-2] + if 'sigma_max' in parameters: + extra_params_kwargs['sigma_max'] = sigma_sched[0] + if 'n' in parameters: + extra_params_kwargs['n'] = len(sigma_sched) - 1 + if 'sigma_sched' in parameters: + extra_params_kwargs['sigma_sched'] = sigma_sched + if 'sigmas' in parameters: + extra_params_kwargs['sigmas'] = sigma_sched + + if self.config.options.get('brownian_noise', False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.model_wrap_cfg.init_latent = x + self.last_latent = x + extra_args = { + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + } + + samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + + def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps = steps or p.steps + + sigmas = self.get_sigmas(p, steps) + + x = x * sigmas[0] + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 
'sigma_min' in parameters: + extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() + extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() + if 'n' in parameters: + extra_params_kwargs['n'] = steps + else: + extra_params_kwargs['sigmas'] = sigmas + + if self.config.options.get('brownian_noise', False): + noise_sampler = self.create_noise_sampler(x, sigmas, p) + extra_params_kwargs['noise_sampler'] = noise_sampler + + self.last_latent = x + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + }, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + diff --git a/temp b/temp deleted file mode 100644 index db71a549..00000000 --- a/temp +++ /dev/null @@ -1,511 +0,0 @@ -from collections import deque -import torch -import inspect -import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra - -from modules.processing import StableDiffusionProcessing -from modules.shared import opts, state -import modules.shared as shared -from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback -from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback -from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback - -samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), - ('Euler', 'sample_euler', ['k_euler'], {}), - ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', 
"brownian_noise": True}), - ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}), -] - - -samplers_data_k_diffusion = [ - sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) - for label, funcname, aliases, options in samplers_k_diffusion - if callable(funcname) or hasattr(k_diffusion.sampling, funcname) -] - -sampler_extra_params = { - 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], -} - -k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} -k_diffusion_scheduler = { - 'Automatic': None, - 'karras': k_diffusion.sampling.get_sigmas_karras, - 'exponential': k_diffusion.sampling.get_sigmas_exponential, - 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential -} - - -def catenate_conds(conds): - if not isinstance(conds[0], dict): - return torch.cat(conds) - - return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} - - -def subscript_cond(cond, a, b): - if not isinstance(cond, dict): - return cond[a:b] - - return {key: vec[a:b] for key, vec in cond.items()} - - -def pad_cond(tensor, repeats, empty): - if not isinstance(tensor, dict): - return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) - - tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) - return tensor - - -class CFGDenoiser(torch.nn.Module): - """ - Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) - that can take a noisy picture and produce a noise-free picture using two guidances (prompts) - instead of one. Originally, the second prompt is just an empty string, but we use non-empty - negative prompt. - """ - - def __init__(self, model): - super().__init__() - self.inner_model = model - self.mask = None - self.nmask = None - self.init_latent = None - self.step = 0 - self.image_cfg_scale = None - self.padded_cond_uncond = False - - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): - denoised_uncond = x_out[-uncond.shape[0]:] - denoised = torch.clone(denoised_uncond) - - for i, conds in enumerate(conds_list): - for cond_index, weight in conds: - denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) - - return denoised - - def combine_denoised_for_edit_model(self, x_out, cond_scale): - out_cond, out_img_cond, out_uncond = x_out.chunk(3) - denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) - - return denoised - - def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, - # so is_edit_model is set to False to support AND composition. 
- is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - - assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - - batch_size = len(conds_list) - repeats = [len(conds_list[i]) for i in range(batch_size)] - - if shared.sd_model.model.conditioning_key == "crossattn-adm": - image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} - else: - image_uncond = image_cond - if isinstance(uncond, dict): - make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} - else: - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} - - if not is_edit_model: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) - else: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) - - denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) - cfg_denoiser_callback(denoiser_params) - x_in = denoiser_params.x - image_cond_in = denoiser_params.image_cond - sigma_in = denoiser_params.sigma - tensor = denoiser_params.text_cond - uncond = denoiser_params.text_uncond - skip_uncond = False - - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: - skip_uncond = True - x_in = x_in[:-batch_size] - sigma_in = sigma_in[:-batch_size] - - self.padded_cond_uncond = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - empty = shared.sd_model.cond_stage_model_empty_prompt - num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] - - if num_repeats < 0: - tensor = pad_cond(tensor, -num_repeats, empty) - self.padded_cond_uncond = True - elif num_repeats > 0: - uncond = pad_cond(uncond, num_repeats, empty) - self.padded_cond_uncond = True - - if tensor.shape[1] == uncond.shape[1] or skip_uncond: - if is_edit_model: - cond_in = catenate_conds([tensor, uncond, uncond]) - elif skip_uncond: - cond_in = tensor - else: - cond_in = catenate_conds([tensor, uncond]) - - if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) - else: - x_out = torch.zeros_like(x_in) - for batch_offset in range(0, x_out.shape[0], batch_size): - a = batch_offset - b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) - else: - x_out = 
torch.zeros_like(x_in) - batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size - for batch_offset in range(0, tensor.shape[0], batch_size): - a = batch_offset - b = min(a + batch_size, tensor.shape[0]) - - if not is_edit_model: - c_crossattn = subscript_cond(tensor, a, b) - else: - c_crossattn = torch.cat([tensor[a:b]], uncond) - - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - - if not skip_uncond: - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) - - denoised_image_indexes = [x[0][0] for x in conds_list] - if skip_uncond: - fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) - x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be - - denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) - cfg_denoised_callback(denoised_params) - - devices.test_for_nans(x_out, "unet") - - if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) - elif opts.live_preview_content == "Negative prompt": - sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - - if is_edit_model: - denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) - elif skip_uncond: - denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) - else: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - - if self.mask is not None: - denoised = self.init_latent * self.mask + self.nmask * denoised - - after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) - cfg_after_cfg_callback(after_cfg_callback_params) - denoised = after_cfg_callback_params.x - - self.step += 1 - return denoised - - -class TorchHijack: - def __init__(self, sampler_noises): - # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. - self.sampler_noises = deque(sampler_noises) - - def __getattr__(self, item): - if item == 'randn_like': - return self.randn_like - - if hasattr(torch, item): - return getattr(torch, item) - - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") - - def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise - - return devices.randn_like(x) - - -class KDiffusionSampler: - def __init__(self, funcname, sd_model): - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.funcname = funcname - self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) - self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser(self.model_wrap) - self.sampler_noises = None - self.stop_at = None - self.eta = None - self.config = None # set by the function calling the constructor - self.last_latent = None - self.s_min_uncond = None - - # NOTE: These are also defined in the StableDiffusionProcessing class. - # They should have been here to begin with but we're going to - # leave that class __init__ signature alone. 
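The forward() pass above ends in combine_denoised, which starts from the unconditional prediction and nudges it toward each prompt-conditioned prediction by that prompt's parsed weight times the CFG scale; this is what makes weighted AND composition work. A self-contained sketch with illustrative shapes and weights:

import torch

def combine_denoised(x_out, conds_list, uncond_count, cond_scale):
    # x_out holds every prompt-conditioned prediction first, then the
    # unconditional ones; each (index, weight) pair nudges the result
    denoised_uncond = x_out[-uncond_count:]
    denoised = denoised_uncond.clone()
    for i, conds in enumerate(conds_list):
        for cond_index, weight in conds:
            denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
    return denoised

x_out = torch.randn(3, 4, 8, 8)         # two conds (an AND prompt) + one uncond
conds_list = [[(0, 0.7), (1, 0.3)]]     # weights as prompt parsing would yield
result = combine_denoised(x_out, conds_list, uncond_count=1, cond_scale=7.0)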
- self.s_churn = 0.0 - self.s_tmin = 0.0 - self.s_tmax = float('inf') - self.s_noise = 1.0 - - self.conditioning_key = sd_model.model.conditioning_key - - def callback_state(self, d): - step = d['i'] - latent = d["denoised"] - if opts.live_preview_content == "Combined": - sd_samplers_common.store_latent(latent) - self.last_latent = latent - - if self.stop_at is not None and step > self.stop_at: - raise sd_samplers_common.InterruptedException - - state.sampling_step = step - shared.total_tqdm.update() - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except RecursionError: - print( - 'Encountered RecursionError during sampling, returning last latent. ' - 'rho >5 with a polyexponential scheduler may cause this error. ' - 'You should try to use a smaller rho value instead.' - ) - return self.last_latent - except sd_samplers_common.InterruptedException: - return self.last_latent - - def number_of_needed_noises(self, p): - return p.steps - - def initialize(self, p: StableDiffusionProcessing): - self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None - self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None - self.model_wrap_cfg.step = 0 - self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.eta = p.eta if p.eta is not None else opts.eta_ancestral - self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) - - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) - - extra_params_kwargs = {} - for param_name in self.extra_params: - if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: - extra_params_kwargs[param_name] = getattr(p, param_name) - - if 'eta' in inspect.signature(self.func).parameters: - if self.eta != 1.0: - p.extra_generation_params["Eta"] = self.eta - - extra_params_kwargs['eta'] = self.eta - - if len(self.extra_params) > 0: - s_churn = getattr(opts, 's_churn', p.s_churn) - s_tmin = getattr(opts, 's_tmin', p.s_tmin) - s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf - s_noise = getattr(opts, 's_noise', p.s_noise) - - if s_churn != self.s_churn: - extra_params_kwargs['s_churn'] = s_churn - p.s_churn = s_churn - p.extra_generation_params['Sigma churn'] = s_churn - if s_tmin != self.s_tmin: - extra_params_kwargs['s_tmin'] = s_tmin - p.s_tmin = s_tmin - p.extra_generation_params['Sigma tmin'] = s_tmin - if s_tmax != self.s_tmax: - extra_params_kwargs['s_tmax'] = s_tmax - p.s_tmax = s_tmax - p.extra_generation_params['Sigma tmax'] = s_tmax - if s_noise != self.s_noise: - extra_params_kwargs['s_noise'] = s_noise - p.s_noise = s_noise - p.extra_generation_params['Sigma noise'] = s_noise - - return extra_params_kwargs - - def get_sigmas(self, p, steps): - discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) - if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: - discard_next_to_last_sigma = True - p.extra_generation_params["Discard penultimate sigma"] = True - - steps += 1 if discard_next_to_last_sigma else 0 - - if p.sampler_noise_scheduler_override: - sigmas = p.sampler_noise_scheduler_override(steps) - elif opts.k_sched_type != "Automatic": - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) - sigmas_kwargs = { - 
'sigma_min': sigma_min, - 'sigma_max': sigma_max, - } - - sigmas_func = k_diffusion_scheduler[opts.k_sched_type] - p.extra_generation_params["Schedule type"] = opts.k_sched_type - - if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: - sigmas_kwargs['sigma_min'] = opts.sigma_min - p.extra_generation_params["Schedule min sigma"] = opts.sigma_min - if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: - sigmas_kwargs['sigma_max'] = opts.sigma_max - p.extra_generation_params["Schedule max sigma"] = opts.sigma_max - - default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. - - if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: - sigmas_kwargs['rho'] = opts.rho - p.extra_generation_params["Schedule rho"] = opts.rho - - sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential': - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device) - else: - sigmas = self.model_wrap.get_sigmas(steps) - - if discard_next_to_last_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - - return sigmas - - def create_noise_sampler(self, x, sigmas, p): - """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" - if shared.opts.no_dpmpp_sde_batch_determinism: - return None - - from k_diffusion.sampling import BrownianTreeNoiseSampler - sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) - - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) - - sigmas = self.get_sigmas(p, steps) - - sigma_sched = sigmas[steps - t_enc - 1:] - xi = x + noise * sigma_sched[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last - extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in parameters: - extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in parameters: - extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in parameters: - extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in parameters: - extra_params_kwargs['sigmas'] = sigma_sched - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.model_wrap_cfg.init_latent = x - self.last_latent = x - extra_args = { - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - } 
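sample_img2img, as above, does not build a fresh schedule for the partial denoise; it slices the tail of the full schedule and noises the encoded image up to the first remaining sigma. A sketch with a stand-in linear schedule (real schedules come from get_sigmas; numbers are illustrative):

import torch

steps, t_enc = 20, 15                     # e.g. denoising_strength around 0.75
sigmas = torch.linspace(14.6, 0.0, steps + 1)   # stand-in full schedule

sigma_sched = sigmas[steps - t_enc - 1:]  # tail: t_enc + 2 values incl. final 0
x = torch.randn(1, 4, 64, 64)             # encoded init image (latent)
noise = torch.randn_like(x)
xi = x + noise * sigma_sched[0]           # noise the latent to the start sigma
# sampling then runs len(sigma_sched) - 1 == t_enc + 1 steps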
- - samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps = steps or p.steps - - sigmas = self.get_sigmas(p, steps) - - x = x * sigmas[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() - extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in parameters: - extra_params_kwargs['n'] = steps - else: - extra_params_kwargs['sigmas'] = sigmas - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.last_latent = x - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - }, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - -- cgit v1.2.1 From 2d8e4a654480ea080fec62834331a3c632ed0330 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 8 Aug 2023 18:35:31 +0300 Subject: split sd_samplers_kdiffusion into two --- modules/sd_samplers_cfg_denoiser.py | 295 +----------------------------------- modules/sd_samplers_kdiffusion.py | 191 +---------------------- 2 files changed, 3 insertions(+), 483 deletions(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index db71a549..33a49783 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -1,61 +1,13 @@ from collections import deque import torch -import inspect -import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra +from modules import prompt_parser, devices, sd_samplers_common -from modules.processing import StableDiffusionProcessing from modules.shared import opts, state import modules.shared as shared from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback -samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), - ('Euler', 'sample_euler', ['k_euler'], {}), - ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], 
{"brownian_noise": True}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), - ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}), -] - - -samplers_data_k_diffusion = [ - sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) - for label, funcname, aliases, options in samplers_k_diffusion - if callable(funcname) or hasattr(k_diffusion.sampling, funcname) -] - -sampler_extra_params = { - 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], -} - -k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} -k_diffusion_scheduler = { - 'Automatic': None, - 'karras': k_diffusion.sampling.get_sigmas_karras, - 'exponential': k_diffusion.sampling.get_sigmas_exponential, - 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential -} - def catenate_conds(conds): if not isinstance(conds[0], dict): @@ -264,248 +216,3 @@ class TorchHijack: return devices.randn_like(x) - -class KDiffusionSampler: - def __init__(self, funcname, sd_model): - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.funcname = funcname - self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) - self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser(self.model_wrap) - self.sampler_noises = None - self.stop_at = None - self.eta = None - self.config = None # set by the function calling the constructor - self.last_latent = None - self.s_min_uncond = None - - # NOTE: These are also defined in the StableDiffusionProcessing class. - # They should have been here to begin with but we're going to - # leave that class __init__ signature alone. 
- self.s_churn = 0.0 - self.s_tmin = 0.0 - self.s_tmax = float('inf') - self.s_noise = 1.0 - - self.conditioning_key = sd_model.model.conditioning_key - - def callback_state(self, d): - step = d['i'] - latent = d["denoised"] - if opts.live_preview_content == "Combined": - sd_samplers_common.store_latent(latent) - self.last_latent = latent - - if self.stop_at is not None and step > self.stop_at: - raise sd_samplers_common.InterruptedException - - state.sampling_step = step - shared.total_tqdm.update() - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except RecursionError: - print( - 'Encountered RecursionError during sampling, returning last latent. ' - 'rho >5 with a polyexponential scheduler may cause this error. ' - 'You should try to use a smaller rho value instead.' - ) - return self.last_latent - except sd_samplers_common.InterruptedException: - return self.last_latent - - def number_of_needed_noises(self, p): - return p.steps - - def initialize(self, p: StableDiffusionProcessing): - self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None - self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None - self.model_wrap_cfg.step = 0 - self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.eta = p.eta if p.eta is not None else opts.eta_ancestral - self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) - - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) - - extra_params_kwargs = {} - for param_name in self.extra_params: - if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: - extra_params_kwargs[param_name] = getattr(p, param_name) - - if 'eta' in inspect.signature(self.func).parameters: - if self.eta != 1.0: - p.extra_generation_params["Eta"] = self.eta - - extra_params_kwargs['eta'] = self.eta - - if len(self.extra_params) > 0: - s_churn = getattr(opts, 's_churn', p.s_churn) - s_tmin = getattr(opts, 's_tmin', p.s_tmin) - s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf - s_noise = getattr(opts, 's_noise', p.s_noise) - - if s_churn != self.s_churn: - extra_params_kwargs['s_churn'] = s_churn - p.s_churn = s_churn - p.extra_generation_params['Sigma churn'] = s_churn - if s_tmin != self.s_tmin: - extra_params_kwargs['s_tmin'] = s_tmin - p.s_tmin = s_tmin - p.extra_generation_params['Sigma tmin'] = s_tmin - if s_tmax != self.s_tmax: - extra_params_kwargs['s_tmax'] = s_tmax - p.s_tmax = s_tmax - p.extra_generation_params['Sigma tmax'] = s_tmax - if s_noise != self.s_noise: - extra_params_kwargs['s_noise'] = s_noise - p.s_noise = s_noise - p.extra_generation_params['Sigma noise'] = s_noise - - return extra_params_kwargs - - def get_sigmas(self, p, steps): - discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) - if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: - discard_next_to_last_sigma = True - p.extra_generation_params["Discard penultimate sigma"] = True - - steps += 1 if discard_next_to_last_sigma else 0 - - if p.sampler_noise_scheduler_override: - sigmas = p.sampler_noise_scheduler_override(steps) - elif opts.k_sched_type != "Automatic": - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) - sigmas_kwargs = { - 
'sigma_min': sigma_min, - 'sigma_max': sigma_max, - } - - sigmas_func = k_diffusion_scheduler[opts.k_sched_type] - p.extra_generation_params["Schedule type"] = opts.k_sched_type - - if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: - sigmas_kwargs['sigma_min'] = opts.sigma_min - p.extra_generation_params["Schedule min sigma"] = opts.sigma_min - if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: - sigmas_kwargs['sigma_max'] = opts.sigma_max - p.extra_generation_params["Schedule max sigma"] = opts.sigma_max - - default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. - - if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: - sigmas_kwargs['rho'] = opts.rho - p.extra_generation_params["Schedule rho"] = opts.rho - - sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential': - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device) - else: - sigmas = self.model_wrap.get_sigmas(steps) - - if discard_next_to_last_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - - return sigmas - - def create_noise_sampler(self, x, sigmas, p): - """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" - if shared.opts.no_dpmpp_sde_batch_determinism: - return None - - from k_diffusion.sampling import BrownianTreeNoiseSampler - sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) - - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) - - sigmas = self.get_sigmas(p, steps) - - sigma_sched = sigmas[steps - t_enc - 1:] - xi = x + noise * sigma_sched[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last - extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in parameters: - extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in parameters: - extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in parameters: - extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in parameters: - extra_params_kwargs['sigmas'] = sigma_sched - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.model_wrap_cfg.init_latent = x - self.last_latent = x - extra_args = { - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - } 
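create_noise_sampler above seeds one Brownian tree per image using the seeds of the current batch slice, which is what makes DPM++ SDE results reproducible across different batch sizes. A sketch assuming the k-diffusion package is installed; seeds and sigmas are illustrative:

import torch
from k_diffusion.sampling import BrownianTreeNoiseSampler

x = torch.randn(2, 4, 64, 64)                     # batch of two latents
sigmas = torch.tensor([14.6, 7.0, 3.0, 1.0, 0.0])
sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()

# one seed per image: the same image gets the same noise stream no matter
# how the batch around it is arranged
all_seeds, iteration, batch_size = [100, 101, 102, 103], 0, 2
seeds = all_seeds[iteration * batch_size:(iteration + 1) * batch_size]

sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seeds)
noise = sampler(sigmas[0], sigmas[1])             # noise for one SDE step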
- - samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps = steps or p.steps - - sigmas = self.get_sigmas(p, steps) - - x = x * sigmas[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() - extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in parameters: - extra_params_kwargs['n'] = steps - else: - extra_params_kwargs['sigmas'] = sigmas - - if self.config.options.get('brownian_noise', False): - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.last_latent = x - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - 's_min_uncond': self.s_min_uncond - }, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True - - return samples - diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index db71a549..9c9b46d1 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -2,14 +2,11 @@ from collections import deque import torch import inspect import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra +from modules import devices, sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser from modules.processing import StableDiffusionProcessing from modules.shared import opts, state import modules.shared as shared -from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback -from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback -from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback samplers_k_diffusion = [ ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), @@ -57,190 +54,6 @@ k_diffusion_scheduler = { } -def catenate_conds(conds): - if not isinstance(conds[0], dict): - return torch.cat(conds) - - return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} - - -def subscript_cond(cond, a, b): - if not isinstance(cond, dict): - return cond[a:b] - - return {key: vec[a:b] for key, vec in cond.items()} - - -def pad_cond(tensor, repeats, empty): - if not isinstance(tensor, dict): - return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1) - - tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) - return tensor - - -class CFGDenoiser(torch.nn.Module): - """ - Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) - that can take a noisy picture and produce a noise-free picture using two guidances (prompts) - instead of one. Originally, the second prompt is just an empty string, but we use non-empty - negative prompt. 
- """ - - def __init__(self, model): - super().__init__() - self.inner_model = model - self.mask = None - self.nmask = None - self.init_latent = None - self.step = 0 - self.image_cfg_scale = None - self.padded_cond_uncond = False - - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): - denoised_uncond = x_out[-uncond.shape[0]:] - denoised = torch.clone(denoised_uncond) - - for i, conds in enumerate(conds_list): - for cond_index, weight in conds: - denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) - - return denoised - - def combine_denoised_for_edit_model(self, x_out, cond_scale): - out_cond, out_img_cond, out_uncond = x_out.chunk(3) - denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) - - return denoised - - def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, - # so is_edit_model is set to False to support AND composition. - is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - - assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - - batch_size = len(conds_list) - repeats = [len(conds_list[i]) for i in range(batch_size)] - - if shared.sd_model.model.conditioning_key == "crossattn-adm": - image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm} - else: - image_uncond = image_cond - if isinstance(uncond, dict): - make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} - else: - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} - - if not is_edit_model: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) - else: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) - - denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) - cfg_denoiser_callback(denoiser_params) - x_in = denoiser_params.x - image_cond_in = denoiser_params.image_cond - sigma_in = denoiser_params.sigma - tensor = denoiser_params.text_cond - uncond = denoiser_params.text_uncond - skip_uncond = False - - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: - skip_uncond = True 
- x_in = x_in[:-batch_size] - sigma_in = sigma_in[:-batch_size] - - self.padded_cond_uncond = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - empty = shared.sd_model.cond_stage_model_empty_prompt - num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] - - if num_repeats < 0: - tensor = pad_cond(tensor, -num_repeats, empty) - self.padded_cond_uncond = True - elif num_repeats > 0: - uncond = pad_cond(uncond, num_repeats, empty) - self.padded_cond_uncond = True - - if tensor.shape[1] == uncond.shape[1] or skip_uncond: - if is_edit_model: - cond_in = catenate_conds([tensor, uncond, uncond]) - elif skip_uncond: - cond_in = tensor - else: - cond_in = catenate_conds([tensor, uncond]) - - if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in)) - else: - x_out = torch.zeros_like(x_in) - for batch_offset in range(0, x_out.shape[0], batch_size): - a = batch_offset - b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b])) - else: - x_out = torch.zeros_like(x_in) - batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size - for batch_offset in range(0, tensor.shape[0], batch_size): - a = batch_offset - b = min(a + batch_size, tensor.shape[0]) - - if not is_edit_model: - c_crossattn = subscript_cond(tensor, a, b) - else: - c_crossattn = torch.cat([tensor[a:b]], uncond) - - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - - if not skip_uncond: - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:])) - - denoised_image_indexes = [x[0][0] for x in conds_list] - if skip_uncond: - fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) - x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be - - denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) - cfg_denoised_callback(denoised_params) - - devices.test_for_nans(x_out, "unet") - - if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) - elif opts.live_preview_content == "Negative prompt": - sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - - if is_edit_model: - denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) - elif skip_uncond: - denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) - else: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - - if self.mask is not None: - denoised = self.init_latent * self.mask + self.nmask * denoised - - after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) - cfg_after_cfg_callback(after_cfg_callback_params) - denoised = after_cfg_callback_params.x - - self.step += 1 - return denoised - - class TorchHijack: def __init__(self, sampler_noises): # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based @@ -273,7 +86,7 @@ class KDiffusionSampler: self.funcname = funcname self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = 
CFGDenoiser(self.model_wrap) + self.model_wrap_cfg = sd_samplers_cfg_denoiser.CFGDenoiser(self.model_wrap) self.sampler_noises = None self.stop_at = None self.eta = None -- cgit v1.2.1 From 2a72d76d6f3d34b1ffccec7736b19e7d52033dad Mon Sep 17 00:00:00 2001 From: dhwz Date: Tue, 8 Aug 2023 19:08:37 +0200 Subject: fix typo --- modules/processing.py | 2 +- modules/shared.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 31745006..dc6e8ff1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -568,7 +568,7 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False): errors.print_error_explanation( "A tensor with all NaNs was produced in VAE.\n" "Web UI will now convert VAE into 32-bit float and retry.\n" - "To disable this behavior, disable the 'Automaticlly revert VAE to 32-bit floats' setting.\n" + "To disable this behavior, disable the 'Automatically revert VAE to 32-bit floats' setting.\n" "To always start with 32-bit VAE, use --no-half-vae commandline flag." ) diff --git a/modules/shared.py b/modules/shared.py index 97f1eab5..e34847ce 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -481,7 +481,7 @@ For img2img, VAE is used to process user's input image before the sampling, and "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), - "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), + "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), })) -- cgit v1.2.1 From 8285a149d8c488ae6c7a566eb85fb5e825145464 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 8 Aug 2023 19:20:11 +0300 Subject: add CFG denoiser implementation for DDIM, PLMS and UniPC (this is the commit when you can run both old and new implementations to compare them) --- modules/sd_samplers.py | 3 +- modules/sd_samplers_cfg_denoiser.py | 50 ++++------- modules/sd_samplers_common.py | 140 ++++++++++++++++++++++++++++++- modules/sd_samplers_kdiffusion.py | 152 ++++------------------------------ modules/sd_samplers_timesteps.py | 147 ++++++++++++++++++++++++++++++++ modules/sd_samplers_timesteps_impl.py | 135 ++++++++++++++++++++++++++++++ 6 files changed, 455 insertions(+), 172 deletions(-) create mode 100644 modules/sd_samplers_timesteps.py create mode 100644 
modules/sd_samplers_timesteps_impl.py diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index bea2684c..fe206894 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,4 +1,4 @@ -from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared +from modules import sd_samplers_compvis, sd_samplers_kdiffusion, sd_samplers_timesteps, shared # imports for functions that previously were here and are used by other modules from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401 @@ -6,6 +6,7 @@ from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # all_samplers = [ *sd_samplers_kdiffusion.samplers_data_k_diffusion, *sd_samplers_compvis.samplers_data_compvis, + *sd_samplers_timesteps.samplers_data_timesteps, ] all_samplers_map = {x.name: x for x in all_samplers} diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 33a49783..166a00c7 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -39,7 +39,7 @@ class CFGDenoiser(torch.nn.Module): negative prompt. """ - def __init__(self, model): + def __init__(self, model, sampler): super().__init__() self.inner_model = model self.mask = None @@ -48,6 +48,7 @@ class CFGDenoiser(torch.nn.Module): self.step = 0 self.image_cfg_scale = None self.padded_cond_uncond = False + self.sampler = sampler def combine_denoised(self, x_out, conds_list, uncond, cond_scale): denoised_uncond = x_out[-uncond.shape[0]:] @@ -65,6 +66,9 @@ class CFGDenoiser(torch.nn.Module): return denoised + def get_pred_x0(self, x_in, x_out, sigma): + return x_out + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException @@ -78,6 +82,9 @@ class CFGDenoiser(torch.nn.Module): assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + if self.mask is not None: + x = self.init_latent * self.mask + self.nmask * x + batch_size = len(conds_list) repeats = [len(conds_list[i]) for i in range(batch_size)] @@ -170,11 +177,6 @@ class CFGDenoiser(torch.nn.Module): devices.test_for_nans(x_out, "unet") - if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) - elif opts.live_preview_content == "Negative prompt": - sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - if is_edit_model: denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) elif skip_uncond: @@ -182,8 +184,16 @@ class CFGDenoiser(torch.nn.Module): else: denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - if self.mask is not None: - denoised = self.init_latent * self.mask + self.nmask * denoised + self.sampler.last_latent = self.get_pred_x0(torch.cat([x_in[i:i + 1] for i in denoised_image_indexes]), torch.cat([x_out[i:i + 1] for i in denoised_image_indexes]), sigma) + + if opts.live_preview_content == "Prompt": + preview = self.sampler.last_latent + elif opts.live_preview_content == "Negative prompt": + preview = self.get_pred_x0(x_in[-uncond.shape[0]:], x_out[-uncond.shape[0]:], sigma) + else: + preview = self.get_pred_x0(torch.cat([x_in[i:i+1] for i in denoised_image_indexes]), torch.cat([denoised[i:i+1] for i in denoised_image_indexes]), sigma) + + sd_samplers_common.store_latent(preview) after_cfg_callback_params = 
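One behavioral change in the cfg_denoiser hunk above: the inpainting blend now happens on the denoiser's input rather than its output, so every prediction is made from a latent already pinned to the kept region. A sketch of that blend; the mask convention here is illustrative (1 marks pixels kept from the source image):

import torch

init_latent = torch.randn(1, 4, 64, 64)   # latent of the original image
x = torch.randn(1, 4, 64, 64)             # current noisy latent
mask = torch.zeros(1, 1, 64, 64)
mask[..., 16:48, 16:48] = 1.0             # 1 = keep the original here
nmask = 1.0 - mask                        # 1 = region being repainted

x = init_latent * mask + nmask * x        # pin the kept area to the source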
AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) cfg_after_cfg_callback(after_cfg_callback_params) @@ -192,27 +202,3 @@ class CFGDenoiser(torch.nn.Module): self.step += 1 return denoised - -class TorchHijack: - def __init__(self, sampler_noises): - # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. - self.sampler_noises = deque(sampler_noises) - - def __getattr__(self, item): - if item == 'randn_like': - return self.randn_like - - if hasattr(torch, item): - return getattr(torch, item) - - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") - - def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise - - return devices.randn_like(x) - diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 39586b40..adda963b 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -1,9 +1,11 @@ -from collections import namedtuple +import inspect +from collections import namedtuple, deque import numpy as np import torch from PIL import Image from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared from modules.shared import opts, state +import k_diffusion.sampling SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options']) @@ -127,3 +129,139 @@ def replace_torchsde_browinan(): replace_torchsde_browinan() + + +class TorchHijack: + def __init__(self, sampler_noises): + # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based + # implementation. + self.sampler_noises = deque(sampler_noises) + + def __getattr__(self, item): + if item == 'randn_like': + return self.randn_like + + if hasattr(torch, item): + return getattr(torch, item) + + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") + + def randn_like(self, x): + if self.sampler_noises: + noise = self.sampler_noises.popleft() + if noise.shape == x.shape: + return noise + + return devices.randn_like(x) + + +class Sampler: + def __init__(self, funcname): + self.funcname = funcname + self.func = funcname + self.extra_params = [] + self.sampler_noises = None + self.stop_at = None + self.eta = None + self.config = None # set by the function calling the constructor + self.last_latent = None + self.s_min_uncond = None + self.s_churn = 0.0 + self.s_tmin = 0.0 + self.s_tmax = float('inf') + self.s_noise = 1.0 + + self.eta_option_field = 'eta_ancestral' + self.eta_infotext_field = 'Eta' + + self.conditioning_key = shared.sd_model.model.conditioning_key + + self.model_wrap = None + self.model_wrap_cfg = None + + def callback_state(self, d): + step = d['i'] + + if self.stop_at is not None and step > self.stop_at: + raise InterruptedException + + state.sampling_step = step + shared.total_tqdm.update() + + def launch_sampling(self, steps, func): + state.sampling_steps = steps + state.sampling_step = 0 + + try: + return func() + except RecursionError: + print( + 'Encountered RecursionError during sampling, returning last latent. ' + 'rho >5 with a polyexponential scheduler may cause this error. ' + 'You should try to use a smaller rho value instead.' 
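TorchHijack, now housed in sd_samplers_common above, is a stand-in for the torch module: attribute lookups fall through to real torch, except randn_like, which first drains a queue of pre-generated noises so subseed and variation noise reach the sampler. A compact sketch of the pattern (hypothetical class name, slightly simplified):

from collections import deque
import torch

class NoiseHijack:
    """Stand-in for `torch` that serves queued noises from randn_like."""

    def __init__(self, noises):
        # a deque gives O(1) pops from the front, preserving queue order
        self.noises = deque(noises)

    def __getattr__(self, name):
        # only reached for attributes not defined on this class, so
        # everything else falls through to the real torch module
        return getattr(torch, name)

    def randn_like(self, x):
        if self.noises and self.noises[0].shape == x.shape:
            return self.noises.popleft()
        return torch.randn_like(x)

hijack = NoiseHijack([torch.ones(2, 2)])
print(hijack.randn_like(torch.zeros(2, 2)))          # the queued tensor of ones
print(hijack.cat([torch.zeros(1), torch.ones(1)]))   # delegated to torch.cat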
+ ) + return self.last_latent + except InterruptedException: + return self.last_latent + + def number_of_needed_noises(self, p): + return p.steps + + def initialize(self, p) -> dict: + self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None + self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None + self.model_wrap_cfg.step = 0 + self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) + self.eta = p.eta if p.eta is not None else getattr(opts, self.eta_option_field, 0.0) + self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) + + k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) + + extra_params_kwargs = {} + for param_name in self.extra_params: + if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: + extra_params_kwargs[param_name] = getattr(p, param_name) + + if 'eta' in inspect.signature(self.func).parameters: + if self.eta != 1.0: + p.extra_generation_params[self.eta_infotext_field] = self.eta + + extra_params_kwargs['eta'] = self.eta + + if len(self.extra_params) > 0: + s_churn = getattr(opts, 's_churn', p.s_churn) + s_tmin = getattr(opts, 's_tmin', p.s_tmin) + s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf + s_noise = getattr(opts, 's_noise', p.s_noise) + + if s_churn != self.s_churn: + extra_params_kwargs['s_churn'] = s_churn + p.s_churn = s_churn + p.extra_generation_params['Sigma churn'] = s_churn + if s_tmin != self.s_tmin: + extra_params_kwargs['s_tmin'] = s_tmin + p.s_tmin = s_tmin + p.extra_generation_params['Sigma tmin'] = s_tmin + if s_tmax != self.s_tmax: + extra_params_kwargs['s_tmax'] = s_tmax + p.s_tmax = s_tmax + p.extra_generation_params['Sigma tmax'] = s_tmax + if s_noise != self.s_noise: + extra_params_kwargs['s_noise'] = s_noise + p.s_noise = s_noise + p.extra_generation_params['Sigma noise'] = s_noise + + return extra_params_kwargs + + def create_noise_sampler(self, x, sigmas, p): + """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" + if shared.opts.no_dpmpp_sde_batch_determinism: + return None + + from k_diffusion.sampling import BrownianTreeNoiseSampler + sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() + current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] + return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) + + + diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 9c9b46d1..3a2e01b7 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -4,8 +4,7 @@ import inspect import k_diffusion.sampling from modules import devices, sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser -from modules.processing import StableDiffusionProcessing -from modules.shared import opts, state +from modules.shared import opts import modules.shared as shared samplers_k_diffusion = [ @@ -54,133 +53,17 @@ k_diffusion_scheduler = { } -class TorchHijack: - def __init__(self, sampler_noises): - # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. 
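# ---- editor's note (annotation, not part of the patch) ---------------------
# TorchHijack now lives in sd_samplers_common (added above) and the duplicate
# below is deleted from sd_samplers_kdiffusion. The mechanism it supports: in
# Sampler.initialize(), k_diffusion.sampling.torch is rebound to a TorchHijack
# instance, so samplers that call torch.randn_like(x) inside that module drain
# the webui's pre-generated noise queue first and only fall back to fresh
# randomness when the queue is empty or a shape mismatches. A minimal sketch,
# assuming the TorchHijack shown above; the dummy tensor is illustrative only.
import torch
import k_diffusion.sampling
from modules.sd_samplers_common import TorchHijack

x = torch.zeros(1, 4, 64, 64)
queued = [torch.randn_like(x)]                  # noise prepared ahead of time
k_diffusion.sampling.torch = TorchHijack(queued)
n1 = k_diffusion.sampling.torch.randn_like(x)   # returns the queued tensor (shape matches)
n2 = k_diffusion.sampling.torch.randn_like(x)   # queue empty -> devices.randn_like(x)
# -----------------------------------------------------------------------------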
- self.sampler_noises = deque(sampler_noises) - - def __getattr__(self, item): - if item == 'randn_like': - return self.randn_like - - if hasattr(torch, item): - return getattr(torch, item) - - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") - - def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise +class KDiffusionSampler(sd_samplers_common.Sampler): + def __init__(self, funcname, sd_model): - return devices.randn_like(x) + super().__init__(funcname) + self.extra_params = sampler_extra_params.get(funcname, []) + self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) -class KDiffusionSampler: - def __init__(self, funcname, sd_model): denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.funcname = funcname - self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) - self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = sd_samplers_cfg_denoiser.CFGDenoiser(self.model_wrap) - self.sampler_noises = None - self.stop_at = None - self.eta = None - self.config = None # set by the function calling the constructor - self.last_latent = None - self.s_min_uncond = None - - # NOTE: These are also defined in the StableDiffusionProcessing class. - # They should have been here to begin with but we're going to - # leave that class __init__ signature alone. - self.s_churn = 0.0 - self.s_tmin = 0.0 - self.s_tmax = float('inf') - self.s_noise = 1.0 - - self.conditioning_key = sd_model.model.conditioning_key - - def callback_state(self, d): - step = d['i'] - latent = d["denoised"] - if opts.live_preview_content == "Combined": - sd_samplers_common.store_latent(latent) - self.last_latent = latent - - if self.stop_at is not None and step > self.stop_at: - raise sd_samplers_common.InterruptedException - - state.sampling_step = step - shared.total_tqdm.update() - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except RecursionError: - print( - 'Encountered RecursionError during sampling, returning last latent. ' - 'rho >5 with a polyexponential scheduler may cause this error. ' - 'You should try to use a smaller rho value instead.' 
- ) - return self.last_latent - except sd_samplers_common.InterruptedException: - return self.last_latent - - def number_of_needed_noises(self, p): - return p.steps - - def initialize(self, p: StableDiffusionProcessing): - self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None - self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None - self.model_wrap_cfg.step = 0 - self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.eta = p.eta if p.eta is not None else opts.eta_ancestral - self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) - - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) - - extra_params_kwargs = {} - for param_name in self.extra_params: - if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: - extra_params_kwargs[param_name] = getattr(p, param_name) - - if 'eta' in inspect.signature(self.func).parameters: - if self.eta != 1.0: - p.extra_generation_params["Eta"] = self.eta - - extra_params_kwargs['eta'] = self.eta - - if len(self.extra_params) > 0: - s_churn = getattr(opts, 's_churn', p.s_churn) - s_tmin = getattr(opts, 's_tmin', p.s_tmin) - s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf - s_noise = getattr(opts, 's_noise', p.s_noise) - - if s_churn != self.s_churn: - extra_params_kwargs['s_churn'] = s_churn - p.s_churn = s_churn - p.extra_generation_params['Sigma churn'] = s_churn - if s_tmin != self.s_tmin: - extra_params_kwargs['s_tmin'] = s_tmin - p.s_tmin = s_tmin - p.extra_generation_params['Sigma tmin'] = s_tmin - if s_tmax != self.s_tmax: - extra_params_kwargs['s_tmax'] = s_tmax - p.s_tmax = s_tmax - p.extra_generation_params['Sigma tmax'] = s_tmax - if s_noise != self.s_noise: - extra_params_kwargs['s_noise'] = s_noise - p.s_noise = s_noise - p.extra_generation_params['Sigma noise'] = s_noise - - return extra_params_kwargs + self.model_wrap_cfg = sd_samplers_cfg_denoiser.CFGDenoiser(self.model_wrap, self) def get_sigmas(self, p, steps): discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) @@ -232,22 +115,12 @@ class KDiffusionSampler: return sigmas - def create_noise_sampler(self, x, sigmas, p): - """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" - if shared.opts.no_dpmpp_sde_batch_determinism: - return None - - from k_diffusion.sampling import BrownianTreeNoiseSampler - sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) sigmas = self.get_sigmas(p, steps) - sigma_sched = sigmas[steps - t_enc - 1:] + xi = x + noise * sigma_sched[0] extra_params_kwargs = self.initialize(p) @@ -296,12 +169,14 @@ class KDiffusionSampler: extra_params_kwargs = self.initialize(p) parameters = inspect.signature(self.func).parameters + if 'n' in parameters: + extra_params_kwargs['n'] = steps + if 'sigma_min' in parameters: extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in parameters: - extra_params_kwargs['n'] = steps - else: + + if 'sigmas' in 
parameters: extra_params_kwargs['sigmas'] = sigmas if self.config.options.get('brownian_noise', False): @@ -322,3 +197,4 @@ class KDiffusionSampler: return samples + diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py new file mode 100644 index 00000000..8560d009 --- /dev/null +++ b/modules/sd_samplers_timesteps.py @@ -0,0 +1,147 @@ +import torch +import inspect +from modules import devices, sd_samplers_common, sd_samplers_timesteps_impl +from modules.sd_samplers_cfg_denoiser import CFGDenoiser + +from modules.shared import opts +import modules.shared as shared + +samplers_timesteps = [ + ('k_DDIM', sd_samplers_timesteps_impl.ddim, ['k_ddim'], {}), + ('k_PLMS', sd_samplers_timesteps_impl.plms, ['k_plms'], {}), + ('k_UniPC', sd_samplers_timesteps_impl.unipc, ['k_unipc'], {}), +] + + +samplers_data_timesteps = [ + sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: CompVisSampler(funcname, model), aliases, options) + for label, funcname, aliases, options in samplers_timesteps +] + + +class CompVisTimestepsDenoiser(torch.nn.Module): + def __init__(self, model, *args, **kwargs): + super().__init__(*args, **kwargs) + self.inner_model = model + + def forward(self, input, timesteps, **kwargs): + return self.inner_model.apply_model(input, timesteps, **kwargs) + + +class CompVisTimestepsVDenoiser(torch.nn.Module): + def __init__(self, model, *args, **kwargs): + super().__init__(*args, **kwargs) + self.inner_model = model + + def predict_eps_from_z_and_v(self, x_t, t, v): + return self.inner_model.sqrt_alphas_cumprod[t.to(torch.int), None, None, None] * v + self.inner_model.sqrt_one_minus_alphas_cumprod[t.to(torch.int), None, None, None] * x_t + + def forward(self, input, timesteps, **kwargs): + model_output = self.inner_model.apply_model(input, timesteps, **kwargs) + e_t = self.predict_eps_from_z_and_v(input, timesteps, model_output) + return e_t + + +class CFGDenoiserTimesteps(CFGDenoiser): + + def __init__(self, model, sampler): + super().__init__(model, sampler) + + self.alphas = model.inner_model.alphas_cumprod + + def get_pred_x0(self, x_in, x_out, sigma): + ts = int(sigma.item()) + + s_in = x_in.new_ones([x_in.shape[0]]) + a_t = self.alphas[ts].item() * s_in + sqrt_one_minus_at = (1 - a_t).sqrt() + + pred_x0 = (x_in - sqrt_one_minus_at * x_out) / a_t.sqrt() + + return pred_x0 + + +class CompVisSampler(sd_samplers_common.Sampler): + def __init__(self, funcname, sd_model): + super().__init__(funcname) + + self.eta_option_field = 'eta_ddim' + self.eta_infotext_field = 'Eta DDIM' + + denoiser = CompVisTimestepsVDenoiser if sd_model.parameterization == "v" else CompVisTimestepsDenoiser + self.model_wrap = denoiser(sd_model) + self.model_wrap_cfg = CFGDenoiserTimesteps(self.model_wrap, self) + + def get_timesteps(self, p, steps): + discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) + if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: + discard_next_to_last_sigma = True + p.extra_generation_params["Discard penultimate sigma"] = True + + steps += 1 if discard_next_to_last_sigma else 0 + + timesteps = torch.clip(torch.asarray(list(range(0, 1000, 1000 // steps)), device=devices.device) + 1, 0, 999) + + return timesteps + + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) + + timesteps = self.get_timesteps(p, steps) + 
timesteps_sched = timesteps[:t_enc] + + alphas_cumprod = shared.sd_model.alphas_cumprod + sqrt_alpha_cumprod = torch.sqrt(alphas_cumprod[timesteps[t_enc]]) + sqrt_one_minus_alpha_cumprod = torch.sqrt(1 - alphas_cumprod[timesteps[t_enc]]) + + xi = x * sqrt_alpha_cumprod + noise * sqrt_one_minus_alpha_cumprod + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'timesteps' in parameters: + extra_params_kwargs['timesteps'] = timesteps_sched + if 'is_img2img' in parameters: + extra_params_kwargs['is_img2img'] = True + + self.model_wrap_cfg.init_latent = x + self.last_latent = x + extra_args = { + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + } + + samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + + def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + steps = steps or p.steps + timesteps = self.get_timesteps(p, steps) + + extra_params_kwargs = self.initialize(p) + parameters = inspect.signature(self.func).parameters + + if 'timesteps' in parameters: + extra_params_kwargs['timesteps'] = timesteps + + self.last_latent = x + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond + }, disable=False, callback=self.callback_state, **extra_params_kwargs)) + + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + return samples + diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py new file mode 100644 index 00000000..48d7e649 --- /dev/null +++ b/modules/sd_samplers_timesteps_impl.py @@ -0,0 +1,135 @@ +import torch +import tqdm +import k_diffusion.sampling +import numpy as np + +from modules import shared +from modules.models.diffusion.uni_pc import uni_pc + + +@torch.no_grad() +def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0): + alphas_cumprod = model.inner_model.inner_model.alphas_cumprod + alphas = alphas_cumprod[timesteps] + alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64) + sqrt_one_minus_alphas = torch.sqrt(1 - alphas) + sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy())) + + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + for i in tqdm.trange(len(timesteps) - 1, disable=disable): + index = len(timesteps) - 1 - i + + e_t = model(x, timesteps[index].item() * s_in, **extra_args) + + a_t = alphas[index].item() * s_in + a_prev = alphas_prev[index].item() * s_in + sigma_t = sigmas[index].item() * s_in + sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_in + + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + dir_xt = (1. 
- a_prev - sigma_t ** 2).sqrt() * e_t + noise = sigma_t * k_diffusion.sampling.torch.randn_like(x) + x = a_prev.sqrt() * pred_x0 + dir_xt + noise + + if callback is not None: + callback({'x': x, 'i': i, 'sigma': 0, 'sigma_hat': 0, 'denoised': pred_x0}) + + return x + + +@torch.no_grad() +def plms(model, x, timesteps, extra_args=None, callback=None, disable=None): + alphas_cumprod = model.inner_model.inner_model.alphas_cumprod + alphas = alphas_cumprod[timesteps] + alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64) + sqrt_one_minus_alphas = torch.sqrt(1 - alphas) + + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + old_eps = [] + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = alphas[index].item() * s_in + a_prev = alphas_prev[index].item() * s_in + sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_in + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + + # direction pointing to x_t + dir_xt = (1. - a_prev).sqrt() * e_t + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + return x_prev, pred_x0 + + for i in tqdm.trange(len(timesteps) - 1, disable=disable): + index = len(timesteps) - 1 - i + ts = timesteps[index].item() * s_in + t_next = timesteps[max(index - 1, 0)].item() * s_in + + e_t = model(x, ts, **extra_args) + + if len(old_eps) == 0: + # Pseudo Improved Euler (2nd order) + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) + e_t_next = model(x_prev, t_next, **extra_args) + e_t_prime = (e_t + e_t_next) / 2 + elif len(old_eps) == 1: + # 2nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (3 * e_t - old_eps[-1]) / 2 + elif len(old_eps) == 2: + # 3nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 + else: + # 4nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 + + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) + + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + + x = x_prev + + if callback is not None: + callback({'x': x, 'i': i, 'sigma': 0, 'sigma_hat': 0, 'denoised': pred_x0}) + + return x + + +class UniPCCFG(uni_pc.UniPC): + def __init__(self, cfg_model, extra_args, callback, *args, **kwargs): + super().__init__(None, *args, **kwargs) + + def after_update(x, model_x): + callback({'x': x, 'i': self.index, 'sigma': 0, 'sigma_hat': 0, 'denoised': model_x}) + self.index += 1 + + self.cfg_model = cfg_model + self.extra_args = extra_args + self.callback = callback + self.index = 0 + self.after_update = after_update + + def get_model_input_time(self, t_continuous): + return (t_continuous - 1. / self.noise_schedule.total_N) * 1000. 
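# ---- editor's note (annotation, not part of the patch) ---------------------
# get_model_input_time() above maps UniPC's continuous time in (0, 1] onto the
# discrete 0..999 training timesteps. The ddim()/plms() loops above and
# CFGDenoiserTimesteps.get_pred_x0 all invert the eps-parameterisation the
# same way: with a_t = alphas_cumprod[t],
#     pred_x0 = (x_t - sqrt(1 - a_t) * e_t) / sqrt(a_t)
# A hedged, self-contained restatement of that one-liner; tensor shapes and
# dtypes are an assumption, with broadcasting as in the patch:
import torch

def pred_x0_from_eps(x_t: torch.Tensor, e_t: torch.Tensor, a_t: torch.Tensor) -> torch.Tensor:
    # recover the model's current estimate of the clean latent x0
    return (x_t - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()
# -----------------------------------------------------------------------------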
+ + def model(self, x, t): + t_input = self.get_model_input_time(t) + + res = self.cfg_model(x, t_input, **self.extra_args) + + return res + + +def unipc(model, x, timesteps, extra_args=None, callback=None, disable=None, is_img2img=False): + alphas_cumprod = model.inner_model.inner_model.alphas_cumprod + + ns = uni_pc.NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) + t_start = timesteps[-1] / 1000 + 1 / 1000 if is_img2img else None # this is likely off by a bit - if someone wants to fix it please by all means + unipc_sampler = UniPCCFG(model, extra_args, callback, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant) + x = unipc_sampler.sample(x, steps=len(timesteps), t_start=t_start, skip_type=shared.opts.uni_pc_skip_type, method="multistep", order=shared.opts.uni_pc_order, lower_order_final=shared.opts.uni_pc_lower_order_final) + + return x -- cgit v1.2.1 From a8a256f9b5b445206818bfc8a363ed5a1ba50c86 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 8 Aug 2023 21:07:18 +0300 Subject: REMOVE --- modules/processing.py | 3 - modules/sd_hijack.py | 4 +- modules/sd_hijack_inpainting.py | 95 --------------- modules/sd_samplers.py | 7 +- modules/sd_samplers_cfg_denoiser.py | 1 - modules/sd_samplers_compvis.py | 224 ------------------------------------ modules/sd_samplers_kdiffusion.py | 3 +- modules/sd_samplers_timesteps.py | 6 +- 8 files changed, 7 insertions(+), 336 deletions(-) delete mode 100644 modules/sd_hijack_inpainting.py delete mode 100644 modules/sd_samplers_compvis.py diff --git a/modules/processing.py b/modules/processing.py index 31745006..61ba5f11 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1112,9 +1112,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): img2img_sampler_name = self.hr_sampler_name or self.sampler_name - if self.sampler_name in ['PLMS', 'UniPC']: # PLMS/UniPC do not support img2img so we just silently switch to DDIM - img2img_sampler_name = 'DDIM' - self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model) if self.latent_scale_mode is not None: diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 9ad98199..46652fbd 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -5,7 +5,7 @@ from types import MethodType from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts -from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, sd_hijack_inpainting +from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr import ldm.modules.attention import ldm.modules.diffusionmodules.model @@ -34,8 +34,6 @@ ldm.modules.diffusionmodules.model.print = shared.ldm_print ldm.util.print = shared.ldm_print ldm.models.diffusion.ddpm.print = shared.ldm_print -sd_hijack_inpainting.do_inpainting_hijack() - optimizers = [] current_optimizer: sd_hijack_optimizations.SdOptimization = None diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py deleted file mode 100644 index 2d44b856..00000000 --- a/modules/sd_hijack_inpainting.py +++ /dev/null @@ -1,95 +0,0 @@ -import torch - -import ldm.models.diffusion.ddpm -import ldm.models.diffusion.ddim -import ldm.models.diffusion.plms - -from ldm.models.diffusion.ddim import noise_like -from ldm.models.diffusion.sampling_util import norm_thresholding - - -@torch.no_grad() -def p_sample_plms(self, 
x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, dynamic_threshold=None): - b, *_, device = *x.shape, x.device - - def get_model_output(x, t): - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - - if isinstance(c, dict): - assert isinstance(unconditional_conditioning, dict) - c_in = {} - for k in c: - if isinstance(c[k], list): - c_in[k] = [ - torch.cat([unconditional_conditioning[k][i], c[k][i]]) - for i in range(len(c[k])) - ] - else: - c_in[k] = torch.cat([unconditional_conditioning[k], c[k]]) - else: - c_in = torch.cat([unconditional_conditioning, c]) - - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - return e_t - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - - def get_x_prev_and_pred_x0(e_t, index): - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - if dynamic_threshold is not None: - pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - e_t = get_model_output(x, t) - if len(old_eps) == 0: - # Pseudo Improved Euler (2nd order) - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) - e_t_next = get_model_output(x_prev, t_next) - e_t_prime = (e_t + e_t_next) / 2 - elif len(old_eps) == 1: - # 2nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (3 * e_t - old_eps[-1]) / 2 - elif len(old_eps) == 2: - # 3nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 - elif len(old_eps) >= 3: - # 4nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 - - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) - - return x_prev, pred_x0, e_t - - -def do_inpainting_hijack(): - ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index fe206894..05dbe2b5 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,11 +1,10 @@ -from modules import sd_samplers_compvis, sd_samplers_kdiffusion, sd_samplers_timesteps, shared +from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, shared # imports for functions that previously were here and are used by other modules from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401 all_samplers = [ *sd_samplers_kdiffusion.samplers_data_k_diffusion, - *sd_samplers_compvis.samplers_data_compvis, *sd_samplers_timesteps.samplers_data_timesteps, ] all_samplers_map = {x.name: x for x in all_samplers} @@ -42,10 +41,8 @@ def set_samplers(): global samplers, samplers_for_img2img hidden = set(shared.opts.hide_samplers) - hidden_img2img = set(shared.opts.hide_samplers + ['PLMS', 'UniPC']) - samplers = [x for x in all_samplers if x.name not in hidden] - samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img] + samplers_for_img2img = [x for x in all_samplers if x.name not in hidden] samplers_map.clear() for sampler in all_samplers: diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 166a00c7..d826222c 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -1,4 +1,3 @@ -from collections import deque import torch from modules import prompt_parser, devices, sd_samplers_common diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py deleted file mode 100644 index 4a8396f9..00000000 --- a/modules/sd_samplers_compvis.py +++ /dev/null @@ -1,224 +0,0 @@ -import math -import ldm.models.diffusion.ddim -import ldm.models.diffusion.plms - -import numpy as np -import torch - -from modules.shared import state -from modules import sd_samplers_common, prompt_parser, shared -import modules.models.diffusion.uni_pc - - -samplers_data_compvis = [ - sd_samplers_common.SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {"default_eta_is_0": True, "uses_ensd": True, "no_sdxl": True}), - sd_samplers_common.SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {"no_sdxl": True}), - sd_samplers_common.SamplerData('UniPC', lambda model: 
VanillaStableDiffusionSampler(modules.models.diffusion.uni_pc.UniPCSampler, model), [], {"no_sdxl": True}), -] - - -class VanillaStableDiffusionSampler: - def __init__(self, constructor, sd_model): - self.sampler = constructor(sd_model) - self.is_ddim = hasattr(self.sampler, 'p_sample_ddim') - self.is_plms = hasattr(self.sampler, 'p_sample_plms') - self.is_unipc = isinstance(self.sampler, modules.models.diffusion.uni_pc.UniPCSampler) - self.orig_p_sample_ddim = None - if self.is_plms: - self.orig_p_sample_ddim = self.sampler.p_sample_plms - elif self.is_ddim: - self.orig_p_sample_ddim = self.sampler.p_sample_ddim - self.mask = None - self.nmask = None - self.init_latent = None - self.sampler_noises = None - self.step = 0 - self.stop_at = None - self.eta = None - self.config = None - self.last_latent = None - - self.conditioning_key = sd_model.model.conditioning_key - - def number_of_needed_noises(self, p): - return 0 - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except sd_samplers_common.InterruptedException: - return self.last_latent - - def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs): - x_dec, ts, cond, unconditional_conditioning = self.before_sample(x_dec, ts, cond, unconditional_conditioning) - - res = self.orig_p_sample_ddim(x_dec, cond, ts, *args, unconditional_conditioning=unconditional_conditioning, **kwargs) - - x_dec, ts, cond, unconditional_conditioning, res = self.after_sample(x_dec, ts, cond, unconditional_conditioning, res) - - return res - - def before_sample(self, x, ts, cond, unconditional_conditioning): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - if self.stop_at is not None and self.step > self.stop_at: - raise sd_samplers_common.InterruptedException - - # Have to unwrap the inpainting conditioning here to perform pre-processing - image_conditioning = None - uc_image_conditioning = None - if isinstance(cond, dict): - if self.conditioning_key == "crossattn-adm": - image_conditioning = cond["c_adm"] - uc_image_conditioning = unconditional_conditioning["c_adm"] - else: - image_conditioning = cond["c_concat"][0] - cond = cond["c_crossattn"][0] - unconditional_conditioning = unconditional_conditioning["c_crossattn"][0] - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step) - - assert all(len(conds) == 1 for conds in conds_list), 'composition via AND is not supported for DDIM/PLMS samplers' - cond = tensor - - # for DDIM, shapes must match, we can't just process cond and uncond independently; - # filling unconditional_conditioning with repeats of the last vector to match length is - # not 100% correct but should work well enough - if unconditional_conditioning.shape[1] < cond.shape[1]: - last_vector = unconditional_conditioning[:, -1:] - last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1]) - unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated]) - elif unconditional_conditioning.shape[1] > cond.shape[1]: - unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]] - - if self.mask is not None: - img_orig = self.sampler.model.q_sample(self.init_latent, ts) - x = img_orig * self.mask + self.nmask * x - - # Wrap the image conditioning back up since the DDIM code can accept 
the dict directly. - # Note that they need to be lists because it just concatenates them later. - if image_conditioning is not None: - if self.conditioning_key == "crossattn-adm": - cond = {"c_adm": image_conditioning, "c_crossattn": [cond]} - unconditional_conditioning = {"c_adm": uc_image_conditioning, "c_crossattn": [unconditional_conditioning]} - else: - cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]} - unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} - - return x, ts, cond, unconditional_conditioning - - def update_step(self, last_latent): - if self.mask is not None: - self.last_latent = self.init_latent * self.mask + self.nmask * last_latent - else: - self.last_latent = last_latent - - sd_samplers_common.store_latent(self.last_latent) - - self.step += 1 - state.sampling_step = self.step - shared.total_tqdm.update() - - def after_sample(self, x, ts, cond, uncond, res): - if not self.is_unipc: - self.update_step(res[1]) - - return x, ts, cond, uncond, res - - def unipc_after_update(self, x, model_x): - self.update_step(x) - - def initialize(self, p): - if self.is_ddim: - self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim - else: - self.eta = 0.0 - - if self.eta != 0.0: - p.extra_generation_params["Eta DDIM"] = self.eta - - if self.is_unipc: - keys = [ - ('UniPC variant', 'uni_pc_variant'), - ('UniPC skip type', 'uni_pc_skip_type'), - ('UniPC order', 'uni_pc_order'), - ('UniPC lower order final', 'uni_pc_lower_order_final'), - ] - - for name, key in keys: - v = getattr(shared.opts, key) - if v != shared.opts.get_default(key): - p.extra_generation_params[name] = v - - for fieldname in ['p_sample_ddim', 'p_sample_plms']: - if hasattr(self.sampler, fieldname): - setattr(self.sampler, fieldname, self.p_sample_ddim_hook) - if self.is_unipc: - self.sampler.set_hooks(lambda x, t, c, u: self.before_sample(x, t, c, u), lambda x, t, c, u, r: self.after_sample(x, t, c, u, r), lambda x, mx: self.unipc_after_update(x, mx)) - - self.mask = p.mask if hasattr(p, 'mask') else None - self.nmask = p.nmask if hasattr(p, 'nmask') else None - - - def adjust_steps_if_invalid(self, p, num_steps): - if ((self.config.name == 'DDIM') and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS') or (self.config.name == 'UniPC'): - if self.config.name == 'UniPC' and num_steps < shared.opts.uni_pc_order: - num_steps = shared.opts.uni_pc_order - valid_step = 999 / (1000 // num_steps) - if valid_step == math.floor(valid_step): - return int(valid_step) + 1 - - return num_steps - - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) - steps = self.adjust_steps_if_invalid(p, steps) - self.initialize(p) - - self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False) - x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise) - - self.init_latent = x - self.last_latent = x - self.step = 0 - - # Wrap the conditioning models with additional image conditioning for inpainting model - if image_conditioning is not None: - if self.conditioning_key == "crossattn-adm": - conditioning = {"c_adm": image_conditioning, "c_crossattn": [conditioning]} - unconditional_conditioning = {"c_adm": torch.zeros_like(image_conditioning), "c_crossattn": [unconditional_conditioning]} - else: - conditioning = 
{"c_concat": [image_conditioning], "c_crossattn": [conditioning]} - unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} - - samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning)) - - return samples - - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - self.initialize(p) - - self.init_latent = None - self.last_latent = x - self.step = 0 - - steps = self.adjust_steps_if_invalid(p, steps or p.steps) - - # Wrap the conditioning models with additional image conditioning for inpainting model - # dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape - if image_conditioning is not None: - if self.conditioning_key == "crossattn-adm": - conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_adm": image_conditioning} - unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_adm": torch.zeros_like(image_conditioning)} - else: - conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]} - unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]} - - samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) - - return samples_ddim diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 3a2e01b7..27a73486 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,8 +1,7 @@ -from collections import deque import torch import inspect import k_diffusion.sampling -from modules import devices, sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser +from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser from modules.shared import opts import modules.shared as shared diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py index 8560d009..d89d0efb 100644 --- a/modules/sd_samplers_timesteps.py +++ b/modules/sd_samplers_timesteps.py @@ -7,9 +7,9 @@ from modules.shared import opts import modules.shared as shared samplers_timesteps = [ - ('k_DDIM', sd_samplers_timesteps_impl.ddim, ['k_ddim'], {}), - ('k_PLMS', sd_samplers_timesteps_impl.plms, ['k_plms'], {}), - ('k_UniPC', sd_samplers_timesteps_impl.unipc, ['k_unipc'], {}), + ('DDIM', sd_samplers_timesteps_impl.ddim, ['ddim'], {}), + ('PLMS', sd_samplers_timesteps_impl.plms, ['plms'], {}), + ('UniPC', sd_samplers_timesteps_impl.unipc, ['unipc'], {}), ] -- cgit v1.2.1 From ae1bde1aa1a987cd233fccb2caaec3abf8012178 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 8 Aug 2023 21:10:12 +0300 Subject: put commonly used samplers on top, make DPM++ 2M Karras the default choice --- modules/sd_samplers_kdiffusion.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 27a73486..f47431af 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -7,6 +7,10 @@ from modules.shared 
import opts import modules.shared as shared samplers_k_diffusion = [ + ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), + ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), + ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), ('Euler', 'sample_euler', ['k_euler'], {}), ('LMS', 'sample_lms', ['k_lms'], {}), @@ -23,10 +27,6 @@ samplers_k_diffusion = [ ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}), ] -- cgit v1.2.1 From bc7906e6d62bb3ed158304c581e941862355f6de Mon Sep 17 00:00:00 2001 From: Danil Boldyrev Date: Tue, 8 Aug 2023 21:28:16 +0300 Subject: Ability to automatically expand a picture that does not fit in the screen --- .../canvas-zoom-and-pan/javascript/zoom.js | 29 +++++++++++++++++++++- .../canvas-zoom-and-pan/scripts/hotkey_config.py | 1 + 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 30199dcd..7369e897 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -201,7 +201,8 @@ onUiLoaded(async() => { canvas_hotkey_overlap: "KeyO", canvas_disabled_functions: [], canvas_show_tooltip: true, - canvas_blur_prompt: false + canvas_blur_prompt: false, + canvas_auto_expand: false }; const functionMap = { @@ -648,8 +649,34 @@ onUiLoaded(async() => { mouseY = e.offsetY; } + // Simulation of the function to put a long image into the screen. + // We define the size of the canvas, make a fullscreen to reveal the image, then reduce it to fit into the element. + // We hide the image and show it to the user when it is ready. 
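// Editor's note (annotation, not part of the patch): two subtleties in the
// function below are worth flagging. First, `canvas && a > 862 || b < 862`
// parses as `(canvas && a > 862) || (b < 862)` because && binds tighter than
// ||, and the inner `if (canvas)` checks are redundant under the outer
// `if (canvas && isMainTab)` guard. Second, the 862px threshold looks like
// the default canvas width, but the patch never names the constant, so treat
// that reading as an assumption.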
+ function autoExpand(e) { + const canvas = document.querySelector(`${elemId} canvas[key="interface"]`); + const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch; + if (canvas && isMainTab) { + if (canvas && parseInt(targetElement.style.width) > 862 || parseInt(canvas.width) < 862) { + return; + } + if (canvas) { + targetElement.style.visibility = "hidden"; + setTimeout(() => { + fitToScreen(); + resetZoom(); + targetElement.style.visibility = "visible"; + }, 10); + } + } + } + targetElement.addEventListener("mousemove", getMousePosition); + // Apply auto expand if enabled + if (hotkeysConfig.canvas_auto_expand) { + targetElement.addEventListener("mousemove", autoExpand); + } + // Handle events only inside the targetElement let isKeyDownHandlerAttached = false; diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py index 380176ce..9a51ff2a 100644 --- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py +++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py @@ -9,6 +9,7 @@ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"), "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"), "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"), + "canvas_auto_expand": shared.OptionInfo(False, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"), "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"), "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}), })) -- cgit v1.2.1 From 70c63c1208d33bf02e15c4e310bac83f12ee8625 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 8 Aug 2023 21:28:34 +0300 Subject: pass samplers from UI by name, make it possible to use a sampler from infotext even if it's hidden in the dropdown --- modules/img2img.py | 6 +++--- modules/sd_samplers.py | 13 +++++++++---- modules/txt2img.py | 8 ++++---- modules/ui.py | 29 ++++++++++++++--------------- 4 files changed, 30 insertions(+), 26 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index d8e1c534..e06ac1d6 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -6,7 +6,7 @@ import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError import gradio as gr -from modules import sd_samplers, images as imgutil +from modules import images as imgutil from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state @@ -116,7 +116,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal process_images(p) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, 
inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -172,7 +172,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w, seed_enable_extras=seed_enable_extras, - sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name, + sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, steps=steps, diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 05dbe2b5..45faae62 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -12,6 +12,7 @@ all_samplers_map = {x.name: x for x in all_samplers} samplers = [] samplers_for_img2img = [] samplers_map = {} +samplers_hidden = {} def find_sampler_config(name): @@ -38,11 +39,11 @@ def create_sampler(name, model): def set_samplers(): - global samplers, samplers_for_img2img + global samplers, samplers_for_img2img, samplers_hidden - hidden = set(shared.opts.hide_samplers) - samplers = [x for x in all_samplers if x.name not in hidden] - samplers_for_img2img = [x for x in all_samplers if x.name not in hidden] + samplers_hidden = set(shared.opts.hide_samplers) + samplers = all_samplers + samplers_for_img2img = all_samplers samplers_map.clear() for sampler in all_samplers: @@ -51,4 +52,8 @@ def set_samplers(): samplers_map[alias.lower()] = sampler.name +def visible_sampler_names(): + return [x.name for x in samplers if x.name not in samplers_hidden] + + set_samplers() diff --git a/modules/txt2img.py b/modules/txt2img.py index 935ed418..8fa389b5 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -1,7 +1,7 @@ from contextlib import closing import modules.scripts -from modules import sd_samplers, processing +from modules import processing from modules.generation_parameters_copypaste import 
create_override_settings_dict from modules.shared import opts, cmd_opts import modules.shared as shared @@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html import gradio as gr -def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): +def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) p = processing.StableDiffusionProcessingTxt2Img( @@ -25,7 +25,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w, seed_enable_extras=seed_enable_extras, - sampler_name=sd_samplers.samplers[sampler_index].name, + sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, steps=steps, @@ -42,7 +42,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y, hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name, - hr_sampler_name=sd_samplers.samplers_for_img2img[hr_sampler_index - 1].name if hr_sampler_index != 0 else None, + hr_sampler_name=hr_sampler_name, hr_prompt=hr_prompt, hr_negative_prompt=hr_negative_prompt, override_settings=override_settings, diff --git a/modules/ui.py b/modules/ui.py index 5150dae4..e3753e97 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -29,7 +29,6 @@ import modules.shared as shared import modules.images from modules import prompt_parser from modules.sd_hijack import model_hijack 
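# ---- editor's note (annotation, not part of the patch) ---------------------
# With this change the dropdown submits the sampler's name rather than its
# index into a filtered list, so a sampler restored from infotext resolves
# even while hidden from the UI. A minimal sketch of the lookup path, using
# only structures shown in sd_samplers.py above; the example name is an
# assumption:
from modules import sd_samplers

canonical = sd_samplers.samplers_map.get('dpm++ 2m karras')  # lowercase name/alias -> canonical name
config = sd_samplers.all_samplers_map.get(canonical)         # canonical name -> SamplerData
# sd_samplers.create_sampler(canonical, model) then builds it via config.constructor
# -----------------------------------------------------------------------------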
-from modules.sd_samplers import samplers, samplers_for_img2img from modules.generation_parameters_copypaste import image_from_url_text create_setting_component = ui_settings.create_setting_component @@ -360,14 +359,14 @@ def create_output_panel(tabname, outdir): def create_sampler_and_steps_selection(choices, tabname): if opts.samplers_in_dropdown: with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + sampler_name = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0]) steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) else: with FormGroup(elem_id=f"sampler_selection_{tabname}"): steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + sampler_name = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0]) - return steps, sampler_index + return steps, sampler_name def ordered_ui_categories(): @@ -414,7 +413,7 @@ def create_ui(): for category in ordered_ui_categories(): if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img") elif category == "dimensions": with FormRow(): @@ -460,7 +459,7 @@ def create_ui(): hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") - hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index") + hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: with gr.Column(scale=80): @@ -520,7 +519,7 @@ def create_ui(): toprow.negative_prompt, toprow.ui_styles.dropdown, steps, - sampler_index, + sampler_name, restore_faces, tiling, batch_count, @@ -538,7 +537,7 @@ def create_ui(): hr_resize_x, hr_resize_y, hr_checkpoint_name, - hr_sampler_index, + hr_sampler_name, hr_prompt, hr_negative_prompt, override_settings, @@ -583,7 +582,7 @@ def create_ui(): (toprow.prompt, "Prompt"), (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), - (sampler_index, "Sampler"), + (sampler_name, "Sampler"), (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (seed, "Seed"), @@ -605,7 +604,7 @@ def create_ui(): (hr_resize_x, "Hires resize-1"), (hr_resize_y, "Hires resize-2"), (hr_checkpoint_name, "Hires checkpoint"), - (hr_sampler_index, "Hires sampler"), + (hr_sampler_name, "Hires sampler"), (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same 
sampler") != "Use same sampler" or d.get("Hires checkpoint", "Use same checkpoint") != "Use same checkpoint" else gr.update()), (hr_prompt, "Hires prompt"), (hr_negative_prompt, "Hires negative prompt"), @@ -621,7 +620,7 @@ def create_ui(): toprow.prompt, toprow.negative_prompt, steps, - sampler_index, + sampler_name, cfg_scale, seed, width, @@ -744,7 +743,7 @@ def create_ui(): for category in ordered_ui_categories(): if category == "sampler": - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") elif category == "dimensions": with FormRow(): @@ -876,7 +875,7 @@ def create_ui(): init_img_inpaint, init_mask_inpaint, steps, - sampler_index, + sampler_name, mask_blur, mask_alpha, inpainting_fill, @@ -972,7 +971,7 @@ def create_ui(): (toprow.prompt, "Prompt"), (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), - (sampler_index, "Sampler"), + (sampler_name, "Sampler"), (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (image_cfg_scale, "Image CFG scale"), -- cgit v1.2.1 From f8ff8c0638997fd0aef217db1505598846f14782 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 8 Aug 2023 22:09:40 +0300 Subject: merge errors --- modules/sd_samplers_cfg_denoiser.py | 23 +++++++++++++++++++++-- modules/sd_samplers_common.py | 6 +++++- modules/sd_samplers_kdiffusion.py | 17 ++++++++++++----- modules/sd_samplers_timesteps.py | 27 +++++++++++++++++---------- 4 files changed, 55 insertions(+), 18 deletions(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index d826222c..a532e013 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -38,16 +38,24 @@ class CFGDenoiser(torch.nn.Module): negative prompt. """ - def __init__(self, model, sampler): + def __init__(self, sampler): super().__init__() - self.inner_model = model + self.model_wrap = None self.mask = None self.nmask = None self.init_latent = None + self.steps = None self.step = 0 self.image_cfg_scale = None self.padded_cond_uncond = False self.sampler = sampler + self.model_wrap = None + self.p = None + + @property + def inner_model(self): + raise NotImplementedError() + def combine_denoised(self, x_out, conds_list, uncond, cond_scale): denoised_uncond = x_out[-uncond.shape[0]:] @@ -68,10 +76,21 @@ class CFGDenoiser(torch.nn.Module): def get_pred_x0(self, x_in, x_out, sigma): return x_out + def update_inner_model(self): + self.model_wrap = None + + c, uc = self.p.get_conds() + self.sampler.sampler_extra_args['cond'] = c + self.sampler.sampler_extra_args['uncond'] = uc + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException + if sd_samplers_common.apply_refiner(self): + cond = self.sampler.sampler_extra_args['cond'] + uncond = self.sampler.sampler_extra_args['uncond'] + # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, # so is_edit_model is set to False to support AND composition. 
is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 15f27970..fa3614ff 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -202,8 +202,9 @@ class Sampler: self.conditioning_key = shared.sd_model.model.conditioning_key - self.model_wrap = None + self.p = None self.model_wrap_cfg = None + self.sampler_extra_args = None def callback_state(self, d): step = d['i'] @@ -215,6 +216,7 @@ class Sampler: shared.total_tqdm.update() def launch_sampling(self, steps, func): + self.model_wrap_cfg.steps = steps state.sampling_steps = steps state.sampling_step = 0 @@ -234,6 +236,8 @@ class Sampler: return p.steps def initialize(self, p) -> dict: + self.p = p + self.model_wrap_cfg.p = p self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None self.model_wrap_cfg.step = 0 diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 3ff4b634..95a43cef 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -52,17 +52,24 @@ k_diffusion_scheduler = { } +class CFGDenoiserKDiffusion(sd_samplers_cfg_denoiser.CFGDenoiser): + @property + def inner_model(self): + if self.model_wrap is None: + denoiser = k_diffusion.external.CompVisVDenoiser if shared.sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser + self.model_wrap = denoiser(shared.sd_model, quantize=shared.opts.enable_quantization) + + return self.model_wrap + + class KDiffusionSampler(sd_samplers_common.Sampler): def __init__(self, funcname, sd_model): - super().__init__(funcname) - self.extra_params = sampler_extra_params.get(funcname, []) self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.model_wrap_cfg = sd_samplers_cfg_denoiser.CFGDenoiser(self.model_wrap, self) + self.model_wrap_cfg = CFGDenoiserKDiffusion(self) + self.model_wrap = self.model_wrap_cfg.inner_model def get_sigmas(self, p, steps): discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py index d89d0efb..965e61c6 100644 --- a/modules/sd_samplers_timesteps.py +++ b/modules/sd_samplers_timesteps.py @@ -44,10 +44,10 @@ class CompVisTimestepsVDenoiser(torch.nn.Module): class CFGDenoiserTimesteps(CFGDenoiser): - def __init__(self, model, sampler): - super().__init__(model, sampler) + def __init__(self, sampler): + super().__init__(sampler) - self.alphas = model.inner_model.alphas_cumprod + self.alphas = shared.sd_model.alphas_cumprod def get_pred_x0(self, x_in, x_out, sigma): ts = int(sigma.item()) @@ -60,6 +60,14 @@ class CFGDenoiserTimesteps(CFGDenoiser): return pred_x0 + @property + def inner_model(self): + if self.model_wrap is None: + denoiser = CompVisTimestepsVDenoiser if shared.sd_model.parameterization == "v" else CompVisTimestepsDenoiser + self.model_wrap = denoiser(shared.sd_model) + + return self.model_wrap + class CompVisSampler(sd_samplers_common.Sampler): def __init__(self, funcname, sd_model): @@ -68,9 +76,7 @@ class 
CompVisSampler(sd_samplers_common.Sampler):
         self.eta_option_field = 'eta_ddim'
         self.eta_infotext_field = 'Eta DDIM'
 
-        denoiser = CompVisTimestepsVDenoiser if sd_model.parameterization == "v" else CompVisTimestepsDenoiser
-        self.model_wrap = denoiser(sd_model)
-        self.model_wrap_cfg = CFGDenoiserTimesteps(self.model_wrap, self)
+        self.model_wrap_cfg = CFGDenoiserTimesteps(self)
 
     def get_timesteps(self, p, steps):
         discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
@@ -106,7 +112,7 @@ class CompVisSampler(sd_samplers_common.Sampler):
         self.model_wrap_cfg.init_latent = x
         self.last_latent = x
 
-        extra_args = {
+        self.sampler_extra_args = {
             'cond': conditioning,
             'image_cond': image_conditioning,
             'uncond': unconditional_conditioning,
@@ -114,7 +120,7 @@ class CompVisSampler(sd_samplers_common.Sampler):
             's_min_uncond': self.s_min_uncond
         }
 
-        samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
+        samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
 
         if self.model_wrap_cfg.padded_cond_uncond:
             p.extra_generation_params["Pad conds"] = True
@@ -132,13 +138,14 @@ class CompVisSampler(sd_samplers_common.Sampler):
             extra_params_kwargs['timesteps'] = timesteps
 
         self.last_latent = x
-        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
+        self.sampler_extra_args = {
             'cond': conditioning,
             'image_cond': image_conditioning,
             'uncond': unconditional_conditioning,
             'cond_scale': p.cfg_scale,
             's_min_uncond': self.s_min_uncond
-        }, disable=False, callback=self.callback_state, **extra_params_kwargs))
+        }
+        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
 
         if self.model_wrap_cfg.padded_cond_uncond:
             p.extra_generation_params["Pad conds"] = True
--
cgit v1.2.1


From ec194b637476855ea5918a44a65e85fb587483ab Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 8 Aug 2023 22:14:02 +0300
Subject: fix webui not switching back to original model from refiner when batch count is greater than 1

---
 modules/processing.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/processing.py b/modules/processing.py
index b635cc74..cf62cdd3 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -766,6 +766,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             if state.interrupted:
                 break
 
+            sd_models.reload_model_weights()  # model can be changed for example by refiner
+
             p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
             p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
             p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
--
cgit v1.2.1
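The fix above relies on checkpoint reloading being effectively idempotent: reload_model_weights() only does real work when the refiner has left a different model active, so calling it at the top of every batch iteration is cheap in the common case. A minimal self-contained Python sketch of that invariant (the ModelSlot class and its names are illustrative stand-ins, not webui API):

    class ModelSlot:
        """Stand-in for a checkpoint manager whose reload() is a no-op
        when the selected model is already active."""

        def __init__(self, name):
            self.selected = name   # what the user picked
            self.active = name     # what is currently loaded

        def switch_to(self, name):
            # e.g. the refiner swaps checkpoints mid-generation
            self.active = name

        def reload(self):
            if self.active != self.selected:
                print(f"reloading {self.selected} (was {self.active})")
                self.active = self.selected

    slot = ModelSlot("base.safetensors")
    for _ in range(2):                           # batch count > 1
        slot.reload()                            # restore user's checkpoint first
        slot.switch_to("refiner.safetensors")    # mid-generation switch
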
From 1aefb5025929818b2a96cbb6148fcc2db7b947ec Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 8 Aug 2023 22:17:25 +0300
Subject: add None refiner option

---
 modules/sd_samplers_common.py | 3 +++
 modules/shared.py             | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index fa3614ff..b6ad6830 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -137,6 +137,9 @@ def apply_refiner(sampler):
     if completed_ratio <= shared.opts.sd_refiner_switch_at:
         return False
 
+    if shared.opts.sd_refiner_checkpoint == "None":
+        return False
+
     if shared.sd_model.sd_checkpoint_info.title == shared.opts.sd_refiner_checkpoint:
         return False
 
diff --git a/modules/shared.py b/modules/shared.py
index 2fd29904..9935d2a7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -462,7 +462,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
     "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
     "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
-    "sd_refiner_checkpoint": OptionInfo(None, "Refiner checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints).info("switch to another model in the middle of generation"),
+    "sd_refiner_checkpoint": OptionInfo("None", "Refiner checkpoint", gr.Dropdown, lambda: {"choices": ["None"] + list_checkpoint_tiles()}, refresh=refresh_checkpoints).info("switch to another model in the middle of generation"),
     "sd_refiner_switch_at": OptionInfo(1.0, "Refiner switch at", gr.Slider, {"minimum": 0.01, "maximum": 1.0, "step": 0.01}).info("fraction of sampling steps when the swtch to refiner model should happen; 1=never, 0.5=switch in the middle of generation"),
 }))
 
--
cgit v1.2.1


From 0e83c675257f473e024511845e7940802333fd5f Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 8 Aug 2023 22:27:32 +0300
Subject: by request: fix tiled vae extension

---
 modules/sd_samplers_kdiffusion.py | 5 +++--
 modules/sd_samplers_timesteps.py  | 4 ++++
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index f47431af..5613b8c1 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -1,7 +1,8 @@
 import torch
 import inspect
 import k_diffusion.sampling
-from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser
+from modules import sd_samplers_common, sd_samplers_extra
+from modules.sd_samplers_cfg_denoiser import CFGDenoiser
 
 from modules.shared import opts
 import modules.shared as shared
@@ -62,7 +63,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler):
         denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
         self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
 
-        self.model_wrap_cfg = sd_samplers_cfg_denoiser.CFGDenoiser(self.model_wrap, self)
+        self.model_wrap_cfg = CFGDenoiser(self.model_wrap, self)
 
     def get_sigmas(self, p, steps):
         discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py
index d89d0efb..f61799a8 100644
--- a/modules/sd_samplers_timesteps.py
+++ b/modules/sd_samplers_timesteps.py
@@ -1,5 +1,6 @@
 import torch
 import inspect
+import sys
 from modules import devices, sd_samplers_common, sd_samplers_timesteps_impl
 from modules.sd_samplers_cfg_denoiser import CFGDenoiser
 
@@ -145,3 +146,6 @@ class CompVisSampler(sd_samplers_common.Sampler):
 
         return samples
 
+
+sys.modules['modules.sd_samplers_compvis'] = sys.modules[__name__]
+VanillaStableDiffusionSampler = CompVisSampler  # temp. compatibility with older extensions
--
cgit v1.2.1
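The two compatibility lines at the end of this patch are the whole trick: registering the current module object in sys.modules under its retired name means an extension that still does `import modules.sd_samplers_compvis` (or imports VanillaStableDiffusionSampler from it) silently receives the new module. A small self-contained sketch of the same pattern, with module names made up for the demo:

    import sys
    import types

    # Stand-in for the module that survived a rename.
    new_mod = types.ModuleType("samplers_new")

    class CompVisSampler:
        pass

    new_mod.CompVisSampler = CompVisSampler
    sys.modules["samplers_new"] = new_mod

    # Alias the retired module name to the same module object, and keep
    # the old class name pointing at the new class.
    sys.modules["samplers_old"] = new_mod
    new_mod.VanillaSampler = CompVisSampler

    import samplers_old  # served straight from sys.modules, no file needed
    assert samplers_old.VanillaSampler is CompVisSampler

One caveat: the alias only helps imports executed after it is installed, which is why the patch performs the registration at module import time rather than lazily.
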
From a74c014425d412c0588618abe0a3e19e4f8f5902 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Wed, 9 Aug 2023 00:06:51 +0300
Subject: auto-expand enable by default

---
 extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
index 9a51ff2a..2d8d2d1c 100644
--- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
+++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
@@ -9,7 +9,7 @@ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas
     "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
     "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"),
     "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
-    "canvas_auto_expand": shared.OptionInfo(False, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
+    "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
     "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
     "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
 }))
--
cgit v1.2.1


From e12a1be1ca23377a3d5774fe8d52664a9c2c0fb9 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Wed, 9 Aug 2023 00:14:19 +0300
Subject: auto-expand enable by default for js

---
 extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 7369e897..eb49a01d 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -201,8 +201,8 @@ onUiLoaded(async() => {
         canvas_hotkey_overlap: "KeyO",
         canvas_disabled_functions: [],
         canvas_show_tooltip: true,
+        canvas_auto_expand: true,
         canvas_blur_prompt: false,
-        canvas_auto_expand: false
     };
 
     const functionMap = {
--
cgit v1.2.1


From bd4b4292ef6c2cb0a452b7105485ec06301b7531 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Tue, 8 Aug 2023 20:55:08 -0400
Subject: Fix hr use same sampler

---
 modules/txt2img.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/txt2img.py b/modules/txt2img.py
index 8fa389b5..edad8930 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -42,7 +42,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
         hr_resize_x=hr_resize_x,
         hr_resize_y=hr_resize_y,
         hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name,
-        hr_sampler_name=hr_sampler_name,
+        hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name,
         hr_prompt=hr_prompt,
         hr_negative_prompt=hr_negative_prompt,
         override_settings=override_settings,
--
cgit v1.2.1


From c1027806936f4460cd01dcdcc93f11ea30390382 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Wed, 9 Aug 2023 13:38:53 +0900
Subject: extra network metadata inherit old description

---
 modules/ui_extra_networks_user_metadata.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py
index 1cb9eb6f..ea7e1ab2 100644
--- a/modules/ui_extra_networks_user_metadata.py
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -40,6 +40,8 @@ class UserMetadataEditor:
             user_metadata = {}
             item['user_metadata'] = user_metadata
+        if len(user_metadata) == 0:
+            user_metadata = {'description': item.get('description', '')}
 
         return user_metadata
 
     def create_extra_default_items_in_left_column(self):
--
cgit v1.2.1


From d81d3fa8cde83ce1421889ed481a69c950c0c6f6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 9 Aug 2023 07:45:06 +0300
Subject: fix styles missing from the prompt in infotext when making a grid of a batch of multiple images

---
 modules/processing.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index d7266307..aa72b132 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -172,6 +172,8 @@ class StableDiffusionProcessing:
         self.iteration = 0
         self.is_hr_pass = False
         self.sampler = None
+        self.main_prompt = None
+        self.main_negative_prompt = None
 
         self.prompts = None
         self.negative_prompts = None
@@ -319,6 +321,9 @@ class StableDiffusionProcessing:
         self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
         self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]
 
+        self.main_prompt = self.all_prompts[0]
+        self.main_negative_prompt = self.all_negative_prompts[0]
+
     def cached_params(self, required_prompts, steps, extra_network_data):
         """Returns parameters that invalidate the cond cache if changed"""
 
@@ -653,8 +658,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
 
     generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
 
-    prompt_text = p.prompt if use_main_prompt else all_prompts[index]
-    negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""
+    prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]
+    negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else ""
 
     return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
 
--
cgit v1.2.1


From 0d5dc9a6e7f6362e423a06bf0e75dd5854025394 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 9 Aug 2023 08:43:31 +0300
Subject: rework RNG to use generators instead of generating noises beforehand

---
 modules/devices.py            |  81 +-------------------
 modules/processing.py         |  89 +++-------------------
 modules/rng.py                | 171 ++++++++++++++++++++++++++++++++++++++++++
 modules/sd_samplers_common.py |  24 +++---
 modules/shared.py             |   2 +-
 5 files changed, 196 
insertions(+), 171 deletions(-) create mode 100644 modules/rng.py diff --git a/modules/devices.py b/modules/devices.py index 00a00b18..ce59dc53 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -3,7 +3,7 @@ import contextlib from functools import lru_cache import torch -from modules import errors, rng_philox +from modules import errors if sys.platform == "darwin": from modules import mac_specific @@ -96,84 +96,6 @@ def cond_cast_float(input): nv_rng = None -def randn(seed, shape): - """Generate a tensor with random numbers from a normal distribution using seed. - - Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed.""" - - from modules.shared import opts - - manual_seed(seed) - - if opts.randn_source == "NV": - return torch.asarray(nv_rng.randn(shape), device=device) - - if opts.randn_source == "CPU" or device.type == 'mps': - return torch.randn(shape, device=cpu).to(device) - - return torch.randn(shape, device=device) - - -def randn_local(seed, shape): - """Generate a tensor with random numbers from a normal distribution using seed. - - Does not change the global random number generator. You can only generate the seed's first tensor using this function.""" - - from modules.shared import opts - - if opts.randn_source == "NV": - rng = rng_philox.Generator(seed) - return torch.asarray(rng.randn(shape), device=device) - - local_device = cpu if opts.randn_source == "CPU" or device.type == 'mps' else device - local_generator = torch.Generator(local_device).manual_seed(int(seed)) - return torch.randn(shape, device=local_device, generator=local_generator).to(device) - - -def randn_like(x): - """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator. - - Use either randn() or manual_seed() to initialize the generator.""" - - from modules.shared import opts - - if opts.randn_source == "NV": - return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype) - - if opts.randn_source == "CPU" or x.device.type == 'mps': - return torch.randn_like(x, device=cpu).to(x.device) - - return torch.randn_like(x) - - -def randn_without_seed(shape): - """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator. 
- - Use either randn() or manual_seed() to initialize the generator.""" - - from modules.shared import opts - - if opts.randn_source == "NV": - return torch.asarray(nv_rng.randn(shape), device=device) - - if opts.randn_source == "CPU" or device.type == 'mps': - return torch.randn(shape, device=cpu).to(device) - - return torch.randn(shape, device=device) - - -def manual_seed(seed): - """Set up a global random number generator using the specified seed.""" - from modules.shared import opts - - if opts.randn_source == "NV": - global nv_rng - nv_rng = rng_philox.Generator(seed) - return - - torch.manual_seed(seed) - - def autocast(disable=False): from modules import shared @@ -236,3 +158,4 @@ def first_time_calculation(): x = torch.zeros((1, 1, 3, 3)).to(device, dtype) conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype) conv2d(x) + diff --git a/modules/processing.py b/modules/processing.py index aa72b132..2df5e8c7 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -14,7 +14,7 @@ from skimage import exposure from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng from modules.sd_hijack import model_hijack from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes from modules.shared import opts, cmd_opts, state @@ -186,6 +186,7 @@ class StableDiffusionProcessing: self.cached_c = StableDiffusionProcessing.cached_c self.uc = None self.c = None + self.rng: rng.ImageRNG = None self.user = None @@ -475,82 +476,9 @@ class Processed: return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio -# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3 -def slerp(val, low, high): - low_norm = low/torch.norm(low, dim=1, keepdim=True) - high_norm = high/torch.norm(high, dim=1, keepdim=True) - dot = (low_norm*high_norm).sum(1) - - if dot.mean() > 0.9995: - return low * val + high * (1 - val) - - omega = torch.acos(dot) - so = torch.sin(omega) - res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high - return res - - def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None): - eta_noise_seed_delta = opts.eta_noise_seed_delta or 0 - xs = [] - - # if we have multiple seeds, this means we are working with batch size>1; this then - # enables the generation of additional tensors with noise that the sampler will use during its processing. - # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to - # produce the same images as with two batches [100], [101]. 
- if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0): - sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))] - else: - sampler_noises = None - - for i, seed in enumerate(seeds): - noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8) - - subnoise = None - if subseeds is not None and subseed_strength != 0: - subseed = 0 if i >= len(subseeds) else subseeds[i] - - subnoise = devices.randn(subseed, noise_shape) - - # randn results depend on device; gpu and cpu get different results for same seed; - # the way I see it, it's better to do this on CPU, so that everyone gets same result; - # but the original script had it like this, so I do not dare change it for now because - # it will break everyone's seeds. - noise = devices.randn(seed, noise_shape) - - if subnoise is not None: - noise = slerp(subseed_strength, noise, subnoise) - - if noise_shape != shape: - x = devices.randn(seed, shape) - dx = (shape[2] - noise_shape[2]) // 2 - dy = (shape[1] - noise_shape[1]) // 2 - w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx - h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy - tx = 0 if dx < 0 else dx - ty = 0 if dy < 0 else dy - dx = max(-dx, 0) - dy = max(-dy, 0) - - x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w] - noise = x - - if sampler_noises is not None: - cnt = p.sampler.number_of_needed_noises(p) - - if eta_noise_seed_delta > 0: - devices.manual_seed(seed + eta_noise_seed_delta) - - for j in range(cnt): - sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape))) - - xs.append(noise) - - if sampler_noises is not None: - p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises] - - x = torch.stack(xs).to(shared.device) - return x + g = rng.ImageRNG(shape, seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w) + return g.next() class DecodedSamples(list): @@ -769,6 +697,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] + p.rng = rng.ImageRNG((opt_C, p.height // opt_f, p.width // opt_f), p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w) + if p.scripts is not None: p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds) @@ -1072,7 +1002,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) + x = self.rng.next() samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) del x @@ -1160,7 +1090,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, 
self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
 
-            noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)
+            self.rng = rng.ImageRNG(samples.shape[1:], self.seeds, subseeds=self.subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w)
+            noise = self.rng.next()
 
             # GC now before running the next img2img to prevent running out of memory
             devices.torch_gc()
@@ -1418,7 +1349,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)
 
     def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
-        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+        x = self.rng.next()
 
         if self.initial_noise_multiplier != 1.0:
             self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
diff --git a/modules/rng.py b/modules/rng.py
new file mode 100644
index 00000000..2d7baea5
--- /dev/null
+++ b/modules/rng.py
@@ -0,0 +1,171 @@
+import torch
+
+from modules import devices, rng_philox, shared
+
+
+def randn(seed, shape, generator=None):
+    """Generate a tensor with random numbers from a normal distribution using seed.
+
+    Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed."""
+
+    manual_seed(seed)
+
+    if shared.opts.randn_source == "NV":
+        return torch.asarray((generator or nv_rng).randn(shape), device=devices.device)
+
+    if shared.opts.randn_source == "CPU" or devices.device.type == 'mps':
+        return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device)
+
+    return torch.randn(shape, device=devices.device, generator=generator)
+
+
+def randn_local(seed, shape):
+    """Generate a tensor with random numbers from a normal distribution using seed.
+
+    Does not change the global random number generator. You can only generate the seed's first tensor using this function."""
+
+    if shared.opts.randn_source == "NV":
+        rng = rng_philox.Generator(seed)
+        return torch.asarray(rng.randn(shape), device=devices.device)
+
+    local_device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device
+    local_generator = torch.Generator(local_device).manual_seed(int(seed))
+    return torch.randn(shape, device=local_device, generator=local_generator).to(devices.device)
+
+
+def randn_like(x):
+    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.
+
+    Use either randn() or manual_seed() to initialize the generator."""
+
+    if shared.opts.randn_source == "NV":
+        return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype)
+
+    if shared.opts.randn_source == "CPU" or x.device.type == 'mps':
+        return torch.randn_like(x, device=devices.cpu).to(x.device)
+
+    return torch.randn_like(x)
+
+
+def randn_without_seed(shape, generator=None):
+    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.
+ + Use either randn() or manual_seed() to initialize the generator.""" + + if shared.opts.randn_source == "NV": + return torch.asarray((generator or nv_rng).randn(shape), device=devices.device) + + if shared.opts.randn_source == "CPU" or devices.device.type == 'mps': + return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device) + + return torch.randn(shape, device=devices.device, generator=generator) + + +def manual_seed(seed): + """Set up a global random number generator using the specified seed.""" + from modules.shared import opts + + if opts.randn_source == "NV": + global nv_rng + nv_rng = rng_philox.Generator(seed) + return + + torch.manual_seed(seed) + + +def create_generator(seed): + if shared.opts.randn_source == "NV": + return rng_philox.Generator(seed) + + device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device + generator = torch.Generator(device).manual_seed(int(seed)) + return generator + + +# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3 +def slerp(val, low, high): + low_norm = low/torch.norm(low, dim=1, keepdim=True) + high_norm = high/torch.norm(high, dim=1, keepdim=True) + dot = (low_norm*high_norm).sum(1) + + if dot.mean() > 0.9995: + return low * val + high * (1 - val) + + omega = torch.acos(dot) + so = torch.sin(omega) + res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high + return res + + +class ImageRNG: + def __init__(self, shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0): + self.shape = shape + self.seeds = seeds + self.subseeds = subseeds + self.subseed_strength = subseed_strength + self.seed_resize_from_h = seed_resize_from_h + self.seed_resize_from_w = seed_resize_from_w + + self.generators = [create_generator(seed) for seed in seeds] + + self.is_first = True + + def first(self): + noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], self.seed_resize_from_h // 8, self.seed_resize_from_w // 8) + + xs = [] + + for i, (seed, generator) in enumerate(zip(self.seeds, self.generators)): + subnoise = None + if self.subseeds is not None and self.subseed_strength != 0: + subseed = 0 if i >= len(self.subseeds) else self.subseeds[i] + subnoise = randn(subseed, noise_shape) + + if noise_shape != self.shape: + noise = randn(seed, noise_shape) + else: + noise = randn(seed, self.shape, generator=generator) + + if subnoise is not None: + noise = slerp(self.subseed_strength, noise, subnoise) + + if noise_shape != self.shape: + x = randn(seed, self.shape, generator=generator) + dx = (self.shape[2] - noise_shape[2]) // 2 + dy = (self.shape[1] - noise_shape[1]) // 2 + w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx + h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy + tx = 0 if dx < 0 else dx + ty = 0 if dy < 0 else dy + dx = max(-dx, 0) + dy = max(-dy, 0) + + x[:, ty:ty + h, tx:tx + w] = noise[:, dy:dy + h, dx:dx + w] + noise = x + + xs.append(noise) + + eta_noise_seed_delta = shared.opts.eta_noise_seed_delta or 0 + if eta_noise_seed_delta: + self.generators = [create_generator(seed + eta_noise_seed_delta) for seed in self.seeds] + + return torch.stack(xs).to(shared.device) + + def next(self): + if self.is_first: + self.is_first = False + return self.first() + + xs = [] + for generator in self.generators: + x = randn_without_seed(self.shape, generator=generator) + xs.append(x) 
+
+        return torch.stack(xs).to(shared.device)
+
+
+devices.randn = randn
+devices.randn_local = randn_local
+devices.randn_like = randn_like
+devices.randn_without_seed = randn_without_seed
+devices.manual_seed = manual_seed
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index adda963b..97bc0804 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -1,5 +1,5 @@
 import inspect
-from collections import namedtuple, deque
+from collections import namedtuple
 import numpy as np
 import torch
 from PIL import Image
@@ -132,10 +132,15 @@ replace_torchsde_browinan()
 
 
 class TorchHijack:
-    def __init__(self, sampler_noises):
-        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
-        # implementation.
-        self.sampler_noises = deque(sampler_noises)
+    """This is here to replace torch.randn_like of k-diffusion.
+
+    k-diffusion has a random_sampler argument for most samplers, but not for all, so
+    this is needed to properly replace every use of torch.randn_like.
+
+    We need the replacement so that images generated in batches come out the same as images generated individually."""
+
+    def __init__(self, p):
+        self.rng = p.rng
 
     def __getattr__(self, item):
         if item == 'randn_like':
@@ -147,12 +152,7 @@ class TorchHijack:
             raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
 
     def randn_like(self, x):
-        if self.sampler_noises:
-            noise = self.sampler_noises.popleft()
-            if noise.shape == x.shape:
-                return noise
-
-        return devices.randn_like(x)
+        return self.rng.next()
 
 
 class Sampler:
@@ -215,7 +215,7 @@ class Sampler:
         self.eta = p.eta if p.eta is not None else getattr(opts, self.eta_option_field, 0.0)
         self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
 
-        k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
+        k_diffusion.sampling.torch = TorchHijack(p)
 
         extra_params_kwargs = {}
         for param_name in self.extra_params:
diff --git a/modules/shared.py b/modules/shared.py
index e34847ce..e9b980a4 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -16,7 +16,7 @@ import modules.interrogate
 import modules.memmon
 import modules.styles
 import modules.devices as devices
-from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args
+from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng  # noqa: F401
 from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir  # noqa: F401
 from ldm.models.diffusion.ddpm import LatentDiffusion
 from typing import Optional
--
cgit v1.2.1
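The heart of this rework: instead of pre-computing every noise tensor up front, each image in a batch gets its own torch.Generator, and randn_like is hijacked to pull the next tensor from the per-image stream. That is what makes a batch of seeds [100, 101] reproduce exactly the same tensors as two separate runs of [100] and [101]. A stripped-down sketch of the idea in plain PyTorch (CPU generators only; the helper names are illustrative, not the webui API):

    import torch

    def make_generators(seeds):
        # one independent RNG stream per image, like ImageRNG above
        return [torch.Generator("cpu").manual_seed(s) for s in seeds]

    def next_noise(generators, shape):
        # every call advances each image's own stream
        return torch.stack([torch.randn(shape, generator=g) for g in generators])

    batch = make_generators([100, 101])
    single = make_generators([101])

    shape = (4, 8, 8)
    a = next_noise(batch, shape)
    b = next_noise(single, shape)
    assert torch.equal(a[1], b[0])          # first draw matches

    # later draws (what the sampler consumes via randn_like) stay in sync too
    assert torch.equal(next_noise(batch, shape)[1], next_noise(single, shape)[0])
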
From a6f840b4dc09bb876060dc2487742fef6dd49feb Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 9 Aug 2023 08:47:52 +0300
Subject: Split history: mv modules/shared.py modules/shared_options.py

---
 modules/shared.py         | 976 ----------------------------------------------
 modules/shared_options.py | 976 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 976 insertions(+), 976 deletions(-)
 delete mode 100644 modules/shared.py
 create mode 100644 modules/shared_options.py

diff --git a/modules/shared.py b/modules/shared.py
deleted file mode 100644
index e9b980a4..00000000
--- a/modules/shared.py
+++ /dev/null
@@ -1,976 +0,0 @@
-import datetime
-import json
-import os
-import re
-import sys
-import threading
-import time
-import logging
-
-import gradio as gr
-import torch
-import tqdm
-
-import launch
-import modules.interrogate
-import modules.memmon
-import modules.styles
-import modules.devices as devices
-from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng  # noqa: F401
-from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir  # noqa: F401
-from ldm.models.diffusion.ddpm import LatentDiffusion
-from typing import Optional
-
-log = logging.getLogger(__name__)
-
-demo = None
-
-parser = cmd_args.parser
-
-script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
-script_loading.preload_extensions(extensions_builtin_dir, parser)
-
-if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
-    cmd_opts = parser.parse_args()
-else:
-    cmd_opts, _ = parser.parse_known_args()
-
-
-restricted_opts = {
-    "samples_filename_pattern",
-    "directories_filename_pattern",
-    "outdir_samples",
-    "outdir_txt2img_samples",
-    "outdir_img2img_samples",
-    "outdir_extras_samples",
-    "outdir_grids",
-    "outdir_txt2img_grids",
-    "outdir_save",
-    "outdir_init_images"
-}
-
-# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
-gradio_hf_hub_themes = [
-    "gradio/base",
-    "gradio/glass",
-    "gradio/monochrome",
-    "gradio/seafoam",
-    "gradio/soft",
-    "gradio/dracula_test",
-    "abidlabs/dracula_test",
-    "abidlabs/Lime",
-    "abidlabs/pakistan",
-    "Ama434/neutral-barlow",
-    "dawood/microsoft_windows",
-    "finlaymacklon/smooth_slate",
-    "Franklisi/darkmode",
-    "freddyaboulton/dracula_revamped",
-    "freddyaboulton/test-blue",
-    "gstaff/xkcd",
-    "Insuz/Mocha",
-    "Insuz/SimpleIndigo",
-    "JohnSmith9982/small_and_pretty",
-    "nota-ai/theme",
-    "nuttea/Softblue",
-    "ParityError/Anime",
-    "reilnuud/polite",
-    "remilia/Ghostly",
-    "rottenlittlecreature/Moon_Goblin",
-    "step-3-profit/Midnight-Deep",
-    "Taithrah/Minimal",
-    "ysharma/huggingface",
-    "ysharma/steampunk"
-]
-
-
-cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
-
-devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
-    (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
-
-devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
-devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
-
-device = devices.device
-weight_load_location = None if cmd_opts.lowram else "cpu"
-
-batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
-parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
-xformers_available = False
-config_filename = cmd_opts.ui_settings_file
-
-os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
-hypernetworks = {}
-loaded_hypernetworks = []
-
-
-def reload_hypernetworks():
-    from modules.hypernetworks import hypernetwork
-    global hypernetworks
-
-    hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
-
-
-class State:
-    skipped = False
-    interrupted = False
-    job = ""
-    job_no = 0
-    job_count = 0
-    processing_has_refined_job_count = False
-    job_timestamp = '0'
-    sampling_step = 0
-    sampling_steps = 0
-    current_latent = None
-
current_image = None - current_image_sampling_step = 0 - id_live_preview = 0 - textinfo = None - time_start = None - server_start = None - _server_command_signal = threading.Event() - _server_command: Optional[str] = None - - @property - def need_restart(self) -> bool: - # Compatibility getter for need_restart. - return self.server_command == "restart" - - @need_restart.setter - def need_restart(self, value: bool) -> None: - # Compatibility setter for need_restart. - if value: - self.server_command = "restart" - - @property - def server_command(self): - return self._server_command - - @server_command.setter - def server_command(self, value: Optional[str]) -> None: - """ - Set the server command to `value` and signal that it's been set. - """ - self._server_command = value - self._server_command_signal.set() - - def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: - """ - Wait for server command to get set; return and clear the value and signal. - """ - if self._server_command_signal.wait(timeout): - self._server_command_signal.clear() - req = self._server_command - self._server_command = None - return req - return None - - def request_restart(self) -> None: - self.interrupt() - self.server_command = "restart" - log.info("Received restart request") - - def skip(self): - self.skipped = True - log.info("Received skip request") - - def interrupt(self): - self.interrupted = True - log.info("Received interrupt request") - - def nextjob(self): - if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: - self.do_set_current_image() - - self.job_no += 1 - self.sampling_step = 0 - self.current_image_sampling_step = 0 - - def dict(self): - obj = { - "skipped": self.skipped, - "interrupted": self.interrupted, - "job": self.job, - "job_count": self.job_count, - "job_timestamp": self.job_timestamp, - "job_no": self.job_no, - "sampling_step": self.sampling_step, - "sampling_steps": self.sampling_steps, - } - - return obj - - def begin(self, job: str = "(unknown)"): - self.sampling_step = 0 - self.job_count = -1 - self.processing_has_refined_job_count = False - self.job_no = 0 - self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") - self.current_latent = None - self.current_image = None - self.current_image_sampling_step = 0 - self.id_live_preview = 0 - self.skipped = False - self.interrupted = False - self.textinfo = None - self.time_start = time.time() - self.job = job - devices.torch_gc() - log.info("Starting job %s", job) - - def end(self): - duration = time.time() - self.time_start - log.info("Ending job %s (%.2f seconds)", self.job, duration) - self.job = "" - self.job_count = 0 - - devices.torch_gc() - - def set_current_image(self): - """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" - if not parallel_processing_allowed: - return - - if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: - self.do_set_current_image() - - def do_set_current_image(self): - if self.current_latent is None: - return - - import modules.sd_samplers - - try: - if opts.show_progress_grid: - self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) - else: - self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) - - self.current_image_sampling_step = self.sampling_step - - except Exception: - # when switching models 
during genration, VAE would be on CPU, so creating an image will fail. - # we silently ignore this error - errors.record_exception() - - def assign_current_image(self, image): - self.current_image = image - self.id_live_preview += 1 - - -state = State() -state.server_start = time.time() - -styles_filename = cmd_opts.styles_file -prompt_styles = modules.styles.StyleDatabase(styles_filename) - -interrogator = modules.interrogate.InterrogateModels("interrogate") - -face_restorers = [] - - -class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): - self.default = default - self.label = label - self.component = component - self.component_args = component_args - self.onchange = onchange - self.section = section - self.refresh = refresh - self.do_not_save = False - - self.comment_before = comment_before - """HTML text that will be added after label in UI""" - - self.comment_after = comment_after - """HTML text that will be added before label in UI""" - - def link(self, label, url): - self.comment_before += f"[{label}]" - return self - - def js(self, label, js_func): - self.comment_before += f"[{label}]" - return self - - def info(self, info): - self.comment_after += f"({info})" - return self - - def html(self, html): - self.comment_after += html - return self - - def needs_restart(self): - self.comment_after += " (requires restart)" - return self - - def needs_reload_ui(self): - self.comment_after += " (requires Reload UI)" - return self - - -class OptionHTML(OptionInfo): - def __init__(self, text): - super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) - - self.do_not_save = True - - -def options_section(section_identifier, options_dict): - for v in options_dict.values(): - v.section = section_identifier - - return options_dict - - -def list_checkpoint_tiles(): - import modules.sd_models - return modules.sd_models.checkpoint_tiles() - - -def refresh_checkpoints(): - import modules.sd_models - return modules.sd_models.list_models() - - -def list_samplers(): - import modules.sd_samplers - return modules.sd_samplers.all_samplers - - -hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} -tab_names = [] - -options_templates = {} - -options_templates.update(options_section(('saving-images', "Saving images/grids"), { - "samples_save": OptionInfo(True, "Always save all generated images"), - "samples_format": OptionInfo('png', 'File format for images'), - "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), - - "grid_save": OptionInfo(True, "Always save all generated image grids"), - "grid_format": OptionInfo('png', 'File format for grids'), - "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), - "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"), - "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"), - "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", 
"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}), - "font": OptionInfo("", "Font for image grids that have text"), - "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}), - "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}), - "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}), - - "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), - "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), - "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), - "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), - "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), - "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"), - "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), - "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), - "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), - "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), - "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), - "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"), - - "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), - "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), - "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), - "save_init_img": OptionInfo(False, "Save init images when using img2img"), - - "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), - "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), - - "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."), -})) - -options_templates.update(options_section(('saving-paths', "Paths for saving"), { - "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs), - "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs), - "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs), - "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', 
component_args=hide_dirs), - "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs), - "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), - "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), - "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), - "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), -})) - -options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { - "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), - "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), - "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), - "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), -})) - -options_templates.update(options_section(('upscaling', "Upscaling"), { - "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), - "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), - "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), -})) - -options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), - "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), - "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), -})) - -options_templates.update(options_section(('system', "System"), { - "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), - "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), - "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), - "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), - "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), - "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), - "print_hypernet_extra": OptionInfo(False, "Print 
extra hypernetwork information to console."), - "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""), - "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"), - "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."), -})) - -options_templates.update(options_section(('training', "Training"), { - "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), - "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), - "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), - "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."), - "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), - "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), - "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), - "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), - "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"), - "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."), - "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."), - "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), -})) - -options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), - "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), - "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), - "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), - "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), - "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), - "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), - "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), - "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), - "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), -})) - -options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { - "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), - "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), - "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), - "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), -})) - -options_templates.update(options_section(('vae', "VAE"), { - "sd_vae_explanation": OptionHTML(""" -VAE is a neural network that transforms a standard RGB -image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling -(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. -For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
-"""), - "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), - "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), - "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), - "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), - "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), -})) - -options_templates.update(options_section(('img2img', "img2img"), { - "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), - "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), - "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), - "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), - "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), - "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), - "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), - "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), - "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), -})) - -options_templates.update(options_section(('optimizations', "Optimizations"), { - "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), - "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), - 
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), - "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), -})) - -options_templates.update(options_section(('compatibility', "Compatibility"), { - "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), - "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), - "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), - "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), - "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), - "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), -})) - -options_templates.update(options_section(('interrogate', "Interrogate"), { - "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), - "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), - "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), - "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), - "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), - "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), - "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), - "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), - "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), - "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), - "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), - "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), -})) - 
-options_templates.update(options_section(('extra_networks', "Extra Networks"), { - "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), - "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), - "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), - "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), - "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), - "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), - "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), - "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), - "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), - "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), - "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), -})) - -options_templates.update(options_section(('ui', "User interface"), { - "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of the themes from the gallery.").needs_reload_ui(), - "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), - "return_grid": OptionInfo(True, "Show grid in results for web"), - "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), - "send_seed": OptionInfo(True, "Send seed when sending prompt or image to another interface"), - "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), - "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), - "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), - "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), - "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), - "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), - "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), - "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when
editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of the page rather than in the settings tab").needs_reload_ui(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), - "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), - "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), - "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), -})) - - -options_templates.update(options_section(('infotext', "Infotext"), { - "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), - "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), - "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), - "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), - "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"), - "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI").html("""
-<ul>
-<li>Ignore: keep prompt and styles dropdown as it is.</li>
-<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
-<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
-<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
-</ul>
"""), - -})) - -options_templates.update(options_section(('ui', "Live previews"), { - "show_progressbar": OptionInfo(True, "Show progressbar"), - "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), - "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), - "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), - "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), - "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"), - "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), - "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), -})) - -options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), - "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), - "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), - "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), - 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), - 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), - 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), - 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), - 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), - 'eta_noise_seed_delta': OptionInfo(0, "Eta noise 
seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), - 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), - 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), - 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), - 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), - 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), -})) - -options_templates.update(options_section(('postprocessing', "Postprocessing"), { - 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), -})) - -options_templates.update(options_section((None, "Hidden options"), { - "disabled_extensions": OptionInfo([], "Disable these extensions"), - "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), - "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), - "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), -})) - - -options_templates.update() - - -class Options: - data = None - data_labels = options_templates - typemap = {int: float} - - def __init__(self): - self.data = {k: v.default for k, v in self.data_labels.items()} - - def __setattr__(self, key, value): - if self.data is not None: - if key in self.data or key in self.data_labels: - assert not cmd_opts.freeze_settings, "changing settings is disabled" - - info = opts.data_labels.get(key, None) - if info.do_not_save: - return - - comp_args = info.component_args if info else None - if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - if cmd_opts.hide_ui_dir_config and key in restricted_opts: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - self.data[key] = value - return - - return super(Options, self).__setattr__(key, value) - - def __getattr__(self, item): - if self.data is not None: - if item in self.data: - return self.data[item] - - if item in self.data_labels: - return self.data_labels[item].default - - return super(Options, self).__getattribute__(item) - - def set(self, key, value): - """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" - - oldval = self.data.get(key, None) - if oldval == value: - return False - - if self.data_labels[key].do_not_save: - return False - - try: - setattr(self, key, value) - except RuntimeError: - return False - - if self.data_labels[key].onchange is not None: - try: - 
self.data_labels[key].onchange() - except Exception as e: - errors.display(e, f"changing setting {key} to {value}") - setattr(self, key, oldval) - return False - - return True - - def get_default(self, key): - """returns the default value for the key""" - - data_label = self.data_labels.get(key) - if data_label is None: - return None - - return data_label.default - - def save(self, filename): - assert not cmd_opts.freeze_settings, "saving settings is disabled" - - with open(filename, "w", encoding="utf8") as file: - json.dump(self.data, file, indent=4) - - def same_type(self, x, y): - if x is None or y is None: - return True - - type_x = self.typemap.get(type(x), type(x)) - type_y = self.typemap.get(type(y), type(y)) - - return type_x == type_y - - def load(self, filename): - with open(filename, "r", encoding="utf8") as file: - self.data = json.load(file) - - # 1.6.0 VAE defaults - if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: - self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') - - # 1.1.1 quicksettings list migration - if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: - self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] - - # 1.4.0 ui_reorder - if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data: - self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')] - - bad_settings = 0 - for k, v in self.data.items(): - info = self.data_labels.get(k, None) - if info is not None and not self.same_type(info.default, v): - print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr) - bad_settings += 1 - - if bad_settings > 0: - print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) - - def onchange(self, key, func, call=True): - item = self.data_labels.get(key) - item.onchange = func - - if call: - func() - - def dumpjson(self): - d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} - d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} - d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} - return json.dumps(d) - - def add_option(self, key, info): - self.data_labels[key] = info - - def reorder(self): - """reorder settings so that all items related to a section always go together""" - - section_ids = {} - settings_items = self.data_labels.items() - for _, item in settings_items: - if item.section not in section_ids: - section_ids[item.section] = len(section_ids) - - self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) - - def cast_value(self, key, value): - """casts an arbitrary value to the same type as this setting's value with key - Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str) - """ - - if value is None: - return None - - default_value = self.data_labels[key].default - if default_value is None: - default_value = getattr(self, key, None) - if default_value is None: - return None - - expected_type = type(default_value) - if expected_type == bool and value == "False": - value = False - else: - value = expected_type(value) - -
return value - - -opts = Options() -if os.path.exists(config_filename): - opts.load(config_filename) - - -class Shared(sys.modules[__name__].__class__): - """ - this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than - at program startup. - """ - - sd_model_val = None - - @property - def sd_model(self): - import modules.sd_models - - return modules.sd_models.model_data.get_sd_model() - - @sd_model.setter - def sd_model(self, value): - import modules.sd_models - - modules.sd_models.model_data.set_sd_model(value) - - -sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead -sys.modules[__name__].__class__ = Shared - -settings_components = None -"""assigned from ui.py, a mapping of setting names to gradio components responsible for those settings""" - -latent_upscale_default_mode = "Latent" -latent_upscale_modes = { - "Latent": {"mode": "bilinear", "antialias": False}, - "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, - "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, - "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, - "Latent (nearest)": {"mode": "nearest", "antialias": False}, - "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, -} - -sd_upscalers = [] - -clip_model = None - -progress_print_out = sys.stdout - -gradio_theme = gr.themes.Base() - - -def reload_gradio_theme(theme_name=None): - global gradio_theme - if not theme_name: - theme_name = opts.gradio_theme - - default_theme_args = dict( - font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) - - if theme_name == "Default": - gradio_theme = gr.themes.Default(**default_theme_args) - else: - try: - theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') - theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') - if opts.gradio_themes_cache and os.path.exists(theme_cache_path): - gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) - else: - os.makedirs(theme_cache_dir, exist_ok=True) - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - gradio_theme.dump(theme_cache_path) - except Exception as e: - errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default(**default_theme_args) - - -class TotalTQDM: - def __init__(self): - self._tqdm = None - - def reset(self): - self._tqdm = tqdm.tqdm( - desc="Total progress", - total=state.job_count * state.sampling_steps, - position=1, - file=progress_print_out - ) - - def update(self): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.update() - - def updateTotal(self, new_total): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.total = new_total - - def clear(self): - if self._tqdm is not None: - self._tqdm.refresh() - self._tqdm.close() - self._tqdm = None - - -total_tqdm = TotalTQDM() - -mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) -mem_mon.start() - - -def natural_sort_key(s, regex=re.compile('([0-9]+)')): - return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] - - -def listfiles(dirname): - filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname),
key=natural_sort_key) if not x.startswith(".")] - return [file for file in filenames if os.path.isfile(file)] - - -def html_path(filename): - return os.path.join(script_path, "html", filename) - - -def html(filename): - path = html_path(filename) - - if os.path.exists(path): - with open(path, encoding="utf8") as file: - return file.read() - - return "" - - -def walk_files(path, allowed_extensions=None): - if not os.path.exists(path): - return - - if allowed_extensions is not None: - allowed_extensions = set(allowed_extensions) - - items = list(os.walk(path, followlinks=True)) - items = sorted(items, key=lambda x: natural_sort_key(x[0])) - - for root, _, files in items: - for filename in sorted(files, key=natural_sort_key): - if allowed_extensions is not None: - _, ext = os.path.splitext(filename) - if ext not in allowed_extensions: - continue - - if not opts.list_hidden_files and ("/." in root or "\\." in root): - continue - - yield os.path.join(root, filename) - - -def ldm_print(*args, **kwargs): - if opts.hide_ldm_prints: - return - - print(*args, **kwargs) diff --git a/modules/shared_options.py b/modules/shared_options.py new file mode 100644 index 00000000..e9b980a4 --- /dev/null +++ b/modules/shared_options.py @@ -0,0 +1,976 @@ +import datetime +import json +import os +import re +import sys +import threading +import time +import logging + +import gradio as gr +import torch +import tqdm + +import launch +import modules.interrogate +import modules.memmon +import modules.styles +import modules.devices as devices +from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 +from ldm.models.diffusion.ddpm import LatentDiffusion +from typing import Optional + +log = logging.getLogger(__name__) + +demo = None + +parser = cmd_args.parser + +script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) +script_loading.preload_extensions(extensions_builtin_dir, parser) + +if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: + cmd_opts = parser.parse_args() +else: + cmd_opts, _ = parser.parse_known_args() + + +restricted_opts = { + "samples_filename_pattern", + "directories_filename_pattern", + "outdir_samples", + "outdir_txt2img_samples", + "outdir_img2img_samples", + "outdir_extras_samples", + "outdir_grids", + "outdir_txt2img_grids", + "outdir_save", + "outdir_init_images" +} + +# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json +gradio_hf_hub_themes = [ + "gradio/base", + "gradio/glass", + "gradio/monochrome", + "gradio/seafoam", + "gradio/soft", + "gradio/dracula_test", + "abidlabs/dracula_test", + "abidlabs/Lime", + "abidlabs/pakistan", + "Ama434/neutral-barlow", + "dawood/microsoft_windows", + "finlaymacklon/smooth_slate", + "Franklisi/darkmode", + "freddyaboulton/dracula_revamped", + "freddyaboulton/test-blue", + "gstaff/xkcd", + "Insuz/Mocha", + "Insuz/SimpleIndigo", + "JohnSmith9982/small_and_pretty", + "nota-ai/theme", + "nuttea/Softblue", + "ParityError/Anime", + "reilnuud/polite", + "remilia/Ghostly", + "rottenlittlecreature/Moon_Goblin", + "step-3-profit/Midnight-Deep", + "Taithrah/Minimal", + "ysharma/huggingface", + "ysharma/steampunk" +] + + +cmd_opts.disable_extension_access = (cmd_opts.share or 
cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access + +devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ + (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) + +devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 +devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 + +device = devices.device +weight_load_location = None if cmd_opts.lowram else "cpu" + +batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) +parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram +xformers_available = False +config_filename = cmd_opts.ui_settings_file + +os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) +hypernetworks = {} +loaded_hypernetworks = [] + + +def reload_hypernetworks(): + from modules.hypernetworks import hypernetwork + global hypernetworks + + hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) + + +class State: + skipped = False + interrupted = False + job = "" + job_no = 0 + job_count = 0 + processing_has_refined_job_count = False + job_timestamp = '0' + sampling_step = 0 + sampling_steps = 0 + current_latent = None + current_image = None + current_image_sampling_step = 0 + id_live_preview = 0 + textinfo = None + time_start = None + server_start = None + _server_command_signal = threading.Event() + _server_command: Optional[str] = None + + @property + def need_restart(self) -> bool: + # Compatibility getter for need_restart. + return self.server_command == "restart" + + @need_restart.setter + def need_restart(self, value: bool) -> None: + # Compatibility setter for need_restart. + if value: + self.server_command = "restart" + + @property + def server_command(self): + return self._server_command + + @server_command.setter + def server_command(self, value: Optional[str]) -> None: + """ + Set the server command to `value` and signal that it's been set. + """ + self._server_command = value + self._server_command_signal.set() + + def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: + """ + Wait for server command to get set; return and clear the value and signal. 
+ """ + if self._server_command_signal.wait(timeout): + self._server_command_signal.clear() + req = self._server_command + self._server_command = None + return req + return None + + def request_restart(self) -> None: + self.interrupt() + self.server_command = "restart" + log.info("Received restart request") + + def skip(self): + self.skipped = True + log.info("Received skip request") + + def interrupt(self): + self.interrupted = True + log.info("Received interrupt request") + + def nextjob(self): + if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: + self.do_set_current_image() + + self.job_no += 1 + self.sampling_step = 0 + self.current_image_sampling_step = 0 + + def dict(self): + obj = { + "skipped": self.skipped, + "interrupted": self.interrupted, + "job": self.job, + "job_count": self.job_count, + "job_timestamp": self.job_timestamp, + "job_no": self.job_no, + "sampling_step": self.sampling_step, + "sampling_steps": self.sampling_steps, + } + + return obj + + def begin(self, job: str = "(unknown)"): + self.sampling_step = 0 + self.job_count = -1 + self.processing_has_refined_job_count = False + self.job_no = 0 + self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + self.current_latent = None + self.current_image = None + self.current_image_sampling_step = 0 + self.id_live_preview = 0 + self.skipped = False + self.interrupted = False + self.textinfo = None + self.time_start = time.time() + self.job = job + devices.torch_gc() + log.info("Starting job %s", job) + + def end(self): + duration = time.time() - self.time_start + log.info("Ending job %s (%.2f seconds)", self.job, duration) + self.job = "" + self.job_count = 0 + + devices.torch_gc() + + def set_current_image(self): + """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" + if not parallel_processing_allowed: + return + + if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: + self.do_set_current_image() + + def do_set_current_image(self): + if self.current_latent is None: + return + + import modules.sd_samplers + + try: + if opts.show_progress_grid: + self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) + else: + self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) + + self.current_image_sampling_step = self.sampling_step + + except Exception: + # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
+ # we silently ignore this error + errors.record_exception() + + def assign_current_image(self, image): + self.current_image = image + self.id_live_preview += 1 + + +state = State() +state.server_start = time.time() + +styles_filename = cmd_opts.styles_file +prompt_styles = modules.styles.StyleDatabase(styles_filename) + +interrogator = modules.interrogate.InterrogateModels("interrogate") + +face_restorers = [] + + +class OptionInfo: + def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): + self.default = default + self.label = label + self.component = component + self.component_args = component_args + self.onchange = onchange + self.section = section + self.refresh = refresh + self.do_not_save = False + + self.comment_before = comment_before + """HTML text that will be added after label in UI""" + + self.comment_after = comment_after + """HTML text that will be added before label in UI""" + + def link(self, label, url): + self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]" + return self + + def js(self, label, js_func): + self.comment_before += f"[<a onclick='{js_func}(); return false'>{label}</a>]" + return self + + def info(self, info): + self.comment_after += f"<span class='info'>({info})</span>" + return self + + def html(self, html): + self.comment_after += html + return self + + def needs_restart(self): + self.comment_after += " <span class='info'>(requires restart)</span>" + return self + + def needs_reload_ui(self): + self.comment_after += " <span class='info'>(requires Reload UI)</span>" + return self + + +class OptionHTML(OptionInfo): + def __init__(self, text): + super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) + + self.do_not_save = True + + +def options_section(section_identifier, options_dict): + for v in options_dict.values(): + v.section = section_identifier + + return options_dict + + +def list_checkpoint_tiles(): + import modules.sd_models + return modules.sd_models.checkpoint_tiles() + + +def refresh_checkpoints(): + import modules.sd_models + return modules.sd_models.list_models() + + +def list_samplers(): + import modules.sd_samplers + return modules.sd_samplers.all_samplers + + +hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} +tab_names = [] + +options_templates = {} + +options_templates.update(options_section(('saving-images', "Saving images/grids"), { + "samples_save": OptionInfo(True, "Always save all generated images"), + "samples_format": OptionInfo('png', 'File format for images'), + "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), + "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), + + "grid_save": OptionInfo(True, "Always save all generated image grids"), + "grid_format": OptionInfo('png', 'File format for grids'), + "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), + "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"), + "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"), + "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), + "n_rows": OptionInfo(-1, "Grid row count; use
-1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}), + "font": OptionInfo("", "Font for image grids that have text"), + "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}), + "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}), + "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}), + + "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), + "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), + "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), + "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), + "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), + "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"), + "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), + "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), + "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), + "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), + "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), + "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), + "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"), + + "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), + "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), + "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), + "save_init_img": OptionInfo(False, "Save init images when using img2img"), + + "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), + "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), + + "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that have been interrupted in mid-generation; even if not saved, they will still show up in webui output."), +})) + +options_templates.update(options_section(('saving-paths', "Paths for saving"), { + "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs), + "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs), + "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs), + "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs), + "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs), +
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), + "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), + "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), + "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), +})) + +options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { + "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), + "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), + "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), + "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), + "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), +})) + +options_templates.update(options_section(('upscaling', "Upscaling"), { + "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), + "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), + "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), + "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), +})) + +options_templates.update(options_section(('face-restoration', "Face restoration"), { + "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), + "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), + "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), +})) + +options_templates.update(options_section(('system', "System"), { + "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), + "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), + "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), + "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), + "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), + "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), + "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), + "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name 
starts with \".\""), + "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"), + "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."), +})) + +options_templates.update(options_section(('training', "Training"), { + "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), + "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), + "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), + "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."), + "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), + "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), + "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), + "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), + "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"), + "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."), + "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."), + "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), +})) + +options_templates.update(options_section(('sd', "Stable Diffusion"), { + "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), + "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), + "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), + "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), + "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), + "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), + "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), + "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), + "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), + "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), + "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), + "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), +})) + +options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { + "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), + "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), + "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), + "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), +})) + +options_templates.update(options_section(('vae', "VAE"), { + "sd_vae_explanation": OptionHTML(""" +VAE is a neural network that transforms a standard RGB +image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling +(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. +For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
+"""), + "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), + "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), + "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), + "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), + "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), + "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), +})) + +options_templates.update(options_section(('img2img', "img2img"), { + "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), + "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), + "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), + "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), + "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), + "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), + "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), + "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), + "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), + "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), +})) + +options_templates.update(options_section(('optimizations', "Optimizations"), { + "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), + "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), + 
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), + "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), + "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), + "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), + "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), +})) + +options_templates.update(options_section(('compatibility', "Compatibility"), { + "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), + "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), + "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), + "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), + "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), + "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), +})) + +options_templates.update(options_section(('interrogate', "Interrogate"), { + "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), + "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), + "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), + "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), + "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), + "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), + "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), + "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), + "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), + "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), + "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), + "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), +})) + 
+options_templates.update(options_section(('extra_networks', "Extra Networks"), { + "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), + "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), + "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), + "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), + "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), + "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), + "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), + "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), + "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), + "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), + "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), +})) + +options_templates.update(options_section(('ui', "User interface"), { + "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), + "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), + "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), + "return_grid": OptionInfo(True, "Show grid in results for web"), + "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), + "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), + "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), + "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), + "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), + "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), + "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), + "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), + "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), + "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), + "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when 
editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), + "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), + "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), + "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), + "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), + "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), + "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), + "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), + "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), + "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), + "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), +})) + + +options_templates.update(options_section(('infotext', "Infotext"), { + "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), + "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), + "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), + "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), + "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"), + "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI").html("""<ul style='margin-left: 1.5em'>
+<li>Ignore: keep prompt and styles dropdown as it is.</li>
+<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
+<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
+<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
"""), + +})) + +options_templates.update(options_section(('ui', "Live previews"), { + "show_progressbar": OptionInfo(True, "Show progressbar"), + "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), + "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), + "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), + "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), + "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"), + "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), + "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), +})) + +options_templates.update(options_section(('sampler-params', "Sampler parameters"), { + "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), + "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), + "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), + "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), + 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), + 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), + 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), + 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), + 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), + 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), + 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), + 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), + 'eta_noise_seed_delta': OptionInfo(0, "Eta noise 
seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), + 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), + 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), + 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), + 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), + 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), +})) + +options_templates.update(options_section(('postprocessing', "Postprocessing"), { + 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), + 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), + 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), +})) + +options_templates.update(options_section((None, "Hidden options"), { + "disabled_extensions": OptionInfo([], "Disable these extensions"), + "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), + "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), + "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), +})) + + +options_templates.update() + + +class Options: + data = None + data_labels = options_templates + typemap = {int: float} + + def __init__(self): + self.data = {k: v.default for k, v in self.data_labels.items()} + + def __setattr__(self, key, value): + if self.data is not None: + if key in self.data or key in self.data_labels: + assert not cmd_opts.freeze_settings, "changing settings is disabled" + + info = opts.data_labels.get(key, None) + if info.do_not_save: + return + + comp_args = info.component_args if info else None + if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: + raise RuntimeError(f"not possible to set {key} because it is restricted") + + if cmd_opts.hide_ui_dir_config and key in restricted_opts: + raise RuntimeError(f"not possible to set {key} because it is restricted") + + self.data[key] = value + return + + return super(Options, self).__setattr__(key, value) + + def __getattr__(self, item): + if self.data is not None: + if item in self.data: + return self.data[item] + + if item in self.data_labels: + return self.data_labels[item].default + + return super(Options, self).__getattribute__(item) + + def set(self, key, value): + """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" + + oldval = self.data.get(key, None) + if oldval == value: + return False + + if self.data_labels[key].do_not_save: + return False + + try: + setattr(self, key, value) + except RuntimeError: + return False + + if self.data_labels[key].onchange is not None: + try: + 
self.data_labels[key].onchange() + except Exception as e: + errors.display(e, f"changing setting {key} to {value}") + setattr(self, key, oldval) + return False + + return True + + def get_default(self, key): + """returns the default value for the key""" + + data_label = self.data_labels.get(key) + if data_label is None: + return None + + return data_label.default + + def save(self, filename): + assert not cmd_opts.freeze_settings, "saving settings is disabled" + + with open(filename, "w", encoding="utf8") as file: + json.dump(self.data, file, indent=4) + + def same_type(self, x, y): + if x is None or y is None: + return True + + type_x = self.typemap.get(type(x), type(x)) + type_y = self.typemap.get(type(y), type(y)) + + return type_x == type_y + + def load(self, filename): + with open(filename, "r", encoding="utf8") as file: + self.data = json.load(file) + + # 1.6.0 VAE defaults + if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: + self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') + + # 1.1.1 quicksettings list migration + if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: + self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] + + # 1.4.0 ui_reorder + if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data: + self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')] + + bad_settings = 0 + for k, v in self.data.items(): + info = self.data_labels.get(k, None) + if info is not None and not self.same_type(info.default, v): + print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr) + bad_settings += 1 + + if bad_settings > 0: + print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) + + def onchange(self, key, func, call=True): + item = self.data_labels.get(key) + item.onchange = func + + if call: + func() + + def dumpjson(self): + d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} + d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} + d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} + return json.dumps(d) + + def add_option(self, key, info): + self.data_labels[key] = info + + def reorder(self): + """reorder settings so that all items related to section always go together""" + + section_ids = {} + settings_items = self.data_labels.items() + for _, item in settings_items: + if item.section not in section_ids: + section_ids[item.section] = len(section_ids) + + self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) + + def cast_value(self, key, value): + """casts an arbitrary value to the same type as this setting's value with key + Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str) + """ + + if value is None: + return None + + default_value = self.data_labels[key].default + if default_value is None: + default_value = getattr(self, key, None) + if default_value is None: + return None + + expected_type = type(default_value) + if expected_type == bool and value == "False": + value = False + else: + value = expected_type(value) + + 
return value + + +opts = Options() +if os.path.exists(config_filename): + opts.load(config_filename) + + +class Shared(sys.modules[__name__].__class__): + """ + this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than + at program startup. + """ + + sd_model_val = None + + @property + def sd_model(self): + import modules.sd_models + + return modules.sd_models.model_data.get_sd_model() + + @sd_model.setter + def sd_model(self, value): + import modules.sd_models + + modules.sd_models.model_data.set_sd_model(value) + + +sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead +sys.modules[__name__].__class__ = Shared + +settings_components = None +"""assigned from ui.py, a mapping of setting names to gradio components responsible for those settings""" + +latent_upscale_default_mode = "Latent" +latent_upscale_modes = { + "Latent": {"mode": "bilinear", "antialias": False}, + "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, + "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, + "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, + "Latent (nearest)": {"mode": "nearest", "antialias": False}, + "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, +} + +sd_upscalers = [] + +clip_model = None + +progress_print_out = sys.stdout + +gradio_theme = gr.themes.Base() + + +def reload_gradio_theme(theme_name=None): + global gradio_theme + if not theme_name: + theme_name = opts.gradio_theme + + default_theme_args = dict( + font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], + font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], + ) + + if theme_name == "Default": + gradio_theme = gr.themes.Default(**default_theme_args) + else: + try: + theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') + theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') + if opts.gradio_themes_cache and os.path.exists(theme_cache_path): + gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) + else: + os.makedirs(theme_cache_dir, exist_ok=True) + gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + gradio_theme.dump(theme_cache_path) + except Exception as e: + errors.display(e, "changing gradio theme") + gradio_theme = gr.themes.Default(**default_theme_args) + + +class TotalTQDM: + def __init__(self): + self._tqdm = None + + def reset(self): + self._tqdm = tqdm.tqdm( + desc="Total progress", + total=state.job_count * state.sampling_steps, + position=1, + file=progress_print_out + ) + + def update(self): + if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.update() + + def updateTotal(self, new_total): + if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.total = new_total + + def clear(self): + if self._tqdm is not None: + self._tqdm.refresh() + self._tqdm.close() + self._tqdm = None + + +total_tqdm = TotalTQDM() + +mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) +mem_mon.start() + + +def natural_sort_key(s, regex=re.compile('([0-9]+)')): + return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] + + +def listfiles(dirname): + filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), 
key=natural_sort_key) if not x.startswith(".")] + return [file for file in filenames if os.path.isfile(file)] + + +def html_path(filename): + return os.path.join(script_path, "html", filename) + + +def html(filename): + path = html_path(filename) + + if os.path.exists(path): + with open(path, encoding="utf8") as file: + return file.read() + + return "" + + +def walk_files(path, allowed_extensions=None): + if not os.path.exists(path): + return + + if allowed_extensions is not None: + allowed_extensions = set(allowed_extensions) + + items = list(os.walk(path, followlinks=True)) + items = sorted(items, key=lambda x: natural_sort_key(x[0])) + + for root, _, files in items: + for filename in sorted(files, key=natural_sort_key): + if allowed_extensions is not None: + _, ext = os.path.splitext(filename) + if ext not in allowed_extensions: + continue + + if not opts.list_hidden_files and ("/." in root or "\\." in root): + continue + + yield os.path.join(root, filename) + + +def ldm_print(*args, **kwargs): + if opts.hide_ldm_prints: + return + + print(*args, **kwargs) -- cgit v1.2.1 From da0712ee7d6e9353e6d2d1828d6217bd122fbd51 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 08:47:53 +0300 Subject: Split history: mv modules/shared.py temp --- modules/shared.py | 976 ------------------------------------------------------ temp | 976 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 976 insertions(+), 976 deletions(-) delete mode 100644 modules/shared.py create mode 100644 temp diff --git a/modules/shared.py b/modules/shared.py deleted file mode 100644 index e9b980a4..00000000 --- a/modules/shared.py +++ /dev/null @@ -1,976 +0,0 @@ -import datetime -import json -import os -import re -import sys -import threading -import time -import logging - -import gradio as gr -import torch -import tqdm - -import launch -import modules.interrogate -import modules.memmon -import modules.styles -import modules.devices as devices -from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 -from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 -from ldm.models.diffusion.ddpm import LatentDiffusion -from typing import Optional - -log = logging.getLogger(__name__) - -demo = None - -parser = cmd_args.parser - -script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) -script_loading.preload_extensions(extensions_builtin_dir, parser) - -if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: - cmd_opts = parser.parse_args() -else: - cmd_opts, _ = parser.parse_known_args() - - -restricted_opts = { - "samples_filename_pattern", - "directories_filename_pattern", - "outdir_samples", - "outdir_txt2img_samples", - "outdir_img2img_samples", - "outdir_extras_samples", - "outdir_grids", - "outdir_txt2img_grids", - "outdir_save", - "outdir_init_images" -} - -# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json -gradio_hf_hub_themes = [ - "gradio/base", - "gradio/glass", - "gradio/monochrome", - "gradio/seafoam", - "gradio/soft", - "gradio/dracula_test", - "abidlabs/dracula_test", - "abidlabs/Lime", - "abidlabs/pakistan", - "Ama434/neutral-barlow", - "dawood/microsoft_windows", - "finlaymacklon/smooth_slate", - "Franklisi/darkmode", - 
"freddyaboulton/dracula_revamped", - "freddyaboulton/test-blue", - "gstaff/xkcd", - "Insuz/Mocha", - "Insuz/SimpleIndigo", - "JohnSmith9982/small_and_pretty", - "nota-ai/theme", - "nuttea/Softblue", - "ParityError/Anime", - "reilnuud/polite", - "remilia/Ghostly", - "rottenlittlecreature/Moon_Goblin", - "step-3-profit/Midnight-Deep", - "Taithrah/Minimal", - "ysharma/huggingface", - "ysharma/steampunk" -] - - -cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access - -devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ - (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) - -devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 -devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 - -device = devices.device -weight_load_location = None if cmd_opts.lowram else "cpu" - -batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) -parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram -xformers_available = False -config_filename = cmd_opts.ui_settings_file - -os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) -hypernetworks = {} -loaded_hypernetworks = [] - - -def reload_hypernetworks(): - from modules.hypernetworks import hypernetwork - global hypernetworks - - hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) - - -class State: - skipped = False - interrupted = False - job = "" - job_no = 0 - job_count = 0 - processing_has_refined_job_count = False - job_timestamp = '0' - sampling_step = 0 - sampling_steps = 0 - current_latent = None - current_image = None - current_image_sampling_step = 0 - id_live_preview = 0 - textinfo = None - time_start = None - server_start = None - _server_command_signal = threading.Event() - _server_command: Optional[str] = None - - @property - def need_restart(self) -> bool: - # Compatibility getter for need_restart. - return self.server_command == "restart" - - @need_restart.setter - def need_restart(self, value: bool) -> None: - # Compatibility setter for need_restart. - if value: - self.server_command = "restart" - - @property - def server_command(self): - return self._server_command - - @server_command.setter - def server_command(self, value: Optional[str]) -> None: - """ - Set the server command to `value` and signal that it's been set. - """ - self._server_command = value - self._server_command_signal.set() - - def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: - """ - Wait for server command to get set; return and clear the value and signal. 
- """ - if self._server_command_signal.wait(timeout): - self._server_command_signal.clear() - req = self._server_command - self._server_command = None - return req - return None - - def request_restart(self) -> None: - self.interrupt() - self.server_command = "restart" - log.info("Received restart request") - - def skip(self): - self.skipped = True - log.info("Received skip request") - - def interrupt(self): - self.interrupted = True - log.info("Received interrupt request") - - def nextjob(self): - if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: - self.do_set_current_image() - - self.job_no += 1 - self.sampling_step = 0 - self.current_image_sampling_step = 0 - - def dict(self): - obj = { - "skipped": self.skipped, - "interrupted": self.interrupted, - "job": self.job, - "job_count": self.job_count, - "job_timestamp": self.job_timestamp, - "job_no": self.job_no, - "sampling_step": self.sampling_step, - "sampling_steps": self.sampling_steps, - } - - return obj - - def begin(self, job: str = "(unknown)"): - self.sampling_step = 0 - self.job_count = -1 - self.processing_has_refined_job_count = False - self.job_no = 0 - self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") - self.current_latent = None - self.current_image = None - self.current_image_sampling_step = 0 - self.id_live_preview = 0 - self.skipped = False - self.interrupted = False - self.textinfo = None - self.time_start = time.time() - self.job = job - devices.torch_gc() - log.info("Starting job %s", job) - - def end(self): - duration = time.time() - self.time_start - log.info("Ending job %s (%.2f seconds)", self.job, duration) - self.job = "" - self.job_count = 0 - - devices.torch_gc() - - def set_current_image(self): - """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" - if not parallel_processing_allowed: - return - - if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: - self.do_set_current_image() - - def do_set_current_image(self): - if self.current_latent is None: - return - - import modules.sd_samplers - - try: - if opts.show_progress_grid: - self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) - else: - self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) - - self.current_image_sampling_step = self.sampling_step - - except Exception: - # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
- # we silently ignore this error - errors.record_exception() - - def assign_current_image(self, image): - self.current_image = image - self.id_live_preview += 1 - - -state = State() -state.server_start = time.time() - -styles_filename = cmd_opts.styles_file -prompt_styles = modules.styles.StyleDatabase(styles_filename) - -interrogator = modules.interrogate.InterrogateModels("interrogate") - -face_restorers = [] - - -class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): - self.default = default - self.label = label - self.component = component - self.component_args = component_args - self.onchange = onchange - self.section = section - self.refresh = refresh - self.do_not_save = False - - self.comment_before = comment_before - """HTML text that will be added after label in UI""" - - self.comment_after = comment_after - """HTML text that will be added before label in UI""" - - def link(self, label, url): - self.comment_before += f"[{label}]" - return self - - def js(self, label, js_func): - self.comment_before += f"[{label}]" - return self - - def info(self, info): - self.comment_after += f"({info})" - return self - - def html(self, html): - self.comment_after += html - return self - - def needs_restart(self): - self.comment_after += " (requires restart)" - return self - - def needs_reload_ui(self): - self.comment_after += " (requires Reload UI)" - return self - - -class OptionHTML(OptionInfo): - def __init__(self, text): - super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) - - self.do_not_save = True - - -def options_section(section_identifier, options_dict): - for v in options_dict.values(): - v.section = section_identifier - - return options_dict - - -def list_checkpoint_tiles(): - import modules.sd_models - return modules.sd_models.checkpoint_tiles() - - -def refresh_checkpoints(): - import modules.sd_models - return modules.sd_models.list_models() - - -def list_samplers(): - import modules.sd_samplers - return modules.sd_samplers.all_samplers - - -hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} -tab_names = [] - -options_templates = {} - -options_templates.update(options_section(('saving-images', "Saving images/grids"), { - "samples_save": OptionInfo(True, "Always save all generated images"), - "samples_format": OptionInfo('png', 'File format for images'), - "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), - - "grid_save": OptionInfo(True, "Always save all generated image grids"), - "grid_format": OptionInfo('png', 'File format for grids'), - "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), - "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"), - "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"), - "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "n_rows": OptionInfo(-1, "Grid row count; use 
-1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}), - "font": OptionInfo("", "Font for image grids that have text"), - "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}), - "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}), - "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}), - - "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), - "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), - "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), - "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), - "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), - "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"), - "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), - "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), - "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), - "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), - "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), - "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"), - - "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), - "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), - "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), - "save_init_img": OptionInfo(False, "Save init images when using img2img"), - - "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), - "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), - - "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that have been interrupted in mid-generation; even if not saved, they will still show up in webui output."), -})) - -options_templates.update(options_section(('saving-paths', "Paths for saving"), { - "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs), - "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs), - "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs), - "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs), - "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs), - 
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), - "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), - "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), - "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), -})) - -options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { - "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), - "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), - "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), - "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), -})) - -options_templates.update(options_section(('upscaling', "Upscaling"), { - "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), - "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), - "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), -})) - -options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), - "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), - "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), -})) - -options_templates.update(options_section(('system', "System"), { - "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), - "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), - "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), - "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), - "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), - "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), - "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), - "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name 
starts with \".\""), - "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"), - "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."), -})) - -options_templates.update(options_section(('training', "Training"), { - "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), - "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), - "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), - "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."), - "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), - "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), - "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), - "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), - "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"), - "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."), - "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."), - "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), -})) - -options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), - "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), - "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), - "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), - "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), - "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), - "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), - "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), - "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), - "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), -})) - -options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { - "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), - "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), - "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), - "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), -})) - -options_templates.update(options_section(('vae', "VAE"), { - "sd_vae_explanation": OptionHTML(""" -VAE is a neural network that transforms a standard RGB -image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling -(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. -For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
-"""), - "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), - "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), - "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), - "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), - "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), -})) - -options_templates.update(options_section(('img2img', "img2img"), { - "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), - "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), - "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), - "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), - "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), - "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), - "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), - "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), - "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), -})) - -options_templates.update(options_section(('optimizations', "Optimizations"), { - "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), - "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), - 
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), - "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), -})) - -options_templates.update(options_section(('compatibility', "Compatibility"), { - "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), - "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), - "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), - "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), - "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), - "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), -})) - -options_templates.update(options_section(('interrogate', "Interrogate"), { - "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), - "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), - "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), - "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), - "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), - "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), - "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), - "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), - "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), - "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), - "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), - "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), -})) - 
-options_templates.update(options_section(('extra_networks', "Extra Networks"), { - "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), - "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), - "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), - "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), - "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), - "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), - "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), - "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), - "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), - "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), - "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), -})) - -options_templates.update(options_section(('ui', "User interface"), { - "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), - "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), - "return_grid": OptionInfo(True, "Show grid in results for web"), - "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), - "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), - "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), - "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), - "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), - "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), - "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), - "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), - "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), - "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when 
editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), - "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), - "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), - "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), -})) - - -options_templates.update(options_section(('infotext', "Infotext"), { - "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), - "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), - "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), - "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), - "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"), - "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI").html("""<ul style='margin-left: 1.5em'>
-<li>Ignore: keep prompt and styles dropdown as it is.</li>
-<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
-<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
-<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
"""), - -})) - -options_templates.update(options_section(('ui', "Live previews"), { - "show_progressbar": OptionInfo(True, "Show progressbar"), - "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), - "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), - "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), - "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), - "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"), - "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), - "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), -})) - -options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), - "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), - "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), - "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), - 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), - 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), - 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), - 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), - 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), - 'eta_noise_seed_delta': OptionInfo(0, "Eta noise 
seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), - 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), - 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), - 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), - 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), - 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), -})) - -options_templates.update(options_section(('postprocessing', "Postprocessing"), { - 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), -})) - -options_templates.update(options_section((None, "Hidden options"), { - "disabled_extensions": OptionInfo([], "Disable these extensions"), - "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), - "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), - "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), -})) - - -options_templates.update() - - -class Options: - data = None - data_labels = options_templates - typemap = {int: float} - - def __init__(self): - self.data = {k: v.default for k, v in self.data_labels.items()} - - def __setattr__(self, key, value): - if self.data is not None: - if key in self.data or key in self.data_labels: - assert not cmd_opts.freeze_settings, "changing settings is disabled" - - info = opts.data_labels.get(key, None) - if info.do_not_save: - return - - comp_args = info.component_args if info else None - if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - if cmd_opts.hide_ui_dir_config and key in restricted_opts: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - self.data[key] = value - return - - return super(Options, self).__setattr__(key, value) - - def __getattr__(self, item): - if self.data is not None: - if item in self.data: - return self.data[item] - - if item in self.data_labels: - return self.data_labels[item].default - - return super(Options, self).__getattribute__(item) - - def set(self, key, value): - """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" - - oldval = self.data.get(key, None) - if oldval == value: - return False - - if self.data_labels[key].do_not_save: - return False - - try: - setattr(self, key, value) - except RuntimeError: - return False - - if self.data_labels[key].onchange is not None: - try: - 
self.data_labels[key].onchange() - except Exception as e: - errors.display(e, f"changing setting {key} to {value}") - setattr(self, key, oldval) - return False - - return True - - def get_default(self, key): - """returns the default value for the key""" - - data_label = self.data_labels.get(key) - if data_label is None: - return None - - return data_label.default - - def save(self, filename): - assert not cmd_opts.freeze_settings, "saving settings is disabled" - - with open(filename, "w", encoding="utf8") as file: - json.dump(self.data, file, indent=4) - - def same_type(self, x, y): - if x is None or y is None: - return True - - type_x = self.typemap.get(type(x), type(x)) - type_y = self.typemap.get(type(y), type(y)) - - return type_x == type_y - - def load(self, filename): - with open(filename, "r", encoding="utf8") as file: - self.data = json.load(file) - - # 1.6.0 VAE defaults - if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: - self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') - - # 1.1.1 quicksettings list migration - if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: - self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] - - # 1.4.0 ui_reorder - if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data: - self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')] - - bad_settings = 0 - for k, v in self.data.items(): - info = self.data_labels.get(k, None) - if info is not None and not self.same_type(info.default, v): - print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr) - bad_settings += 1 - - if bad_settings > 0: - print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) - - def onchange(self, key, func, call=True): - item = self.data_labels.get(key) - item.onchange = func - - if call: - func() - - def dumpjson(self): - d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} - d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} - d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} - return json.dumps(d) - - def add_option(self, key, info): - self.data_labels[key] = info - - def reorder(self): - """reorder settings so that all items related to section always go together""" - - section_ids = {} - settings_items = self.data_labels.items() - for _, item in settings_items: - if item.section not in section_ids: - section_ids[item.section] = len(section_ids) - - self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) - - def cast_value(self, key, value): - """casts an arbitrary to the same type as this setting's value with key - Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str) - """ - - if value is None: - return None - - default_value = self.data_labels[key].default - if default_value is None: - default_value = getattr(self, key, None) - if default_value is None: - return None - - expected_type = type(default_value) - if expected_type == bool and value == "False": - value = False - else: - value = expected_type(value) - - 
return value - - -opts = Options() -if os.path.exists(config_filename): - opts.load(config_filename) - - -class Shared(sys.modules[__name__].__class__): - """ - this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than - at program startup. - """ - - sd_model_val = None - - @property - def sd_model(self): - import modules.sd_models - - return modules.sd_models.model_data.get_sd_model() - - @sd_model.setter - def sd_model(self, value): - import modules.sd_models - - modules.sd_models.model_data.set_sd_model(value) - - -sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead -sys.modules[__name__].__class__ = Shared - -settings_components = None -"""assigned from ui.py, a mapping of setting names to the gradio components responsible for those settings""" - -latent_upscale_default_mode = "Latent" -latent_upscale_modes = { - "Latent": {"mode": "bilinear", "antialias": False}, - "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, - "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, - "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, - "Latent (nearest)": {"mode": "nearest", "antialias": False}, - "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, -} - -sd_upscalers = [] - -clip_model = None - -progress_print_out = sys.stdout - -gradio_theme = gr.themes.Base() - - -def reload_gradio_theme(theme_name=None): - global gradio_theme - if not theme_name: - theme_name = opts.gradio_theme - - default_theme_args = dict( - font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) - - if theme_name == "Default": - gradio_theme = gr.themes.Default(**default_theme_args) - else: - try: - theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') - theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') - if opts.gradio_themes_cache and os.path.exists(theme_cache_path): - gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) - else: - os.makedirs(theme_cache_dir, exist_ok=True) - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - gradio_theme.dump(theme_cache_path) - except Exception as e: - errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default(**default_theme_args) - - -class TotalTQDM: - def __init__(self): - self._tqdm = None - - def reset(self): - self._tqdm = tqdm.tqdm( - desc="Total progress", - total=state.job_count * state.sampling_steps, - position=1, - file=progress_print_out - ) - - def update(self): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.update() - - def updateTotal(self, new_total): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.total = new_total - - def clear(self): - if self._tqdm is not None: - self._tqdm.refresh() - self._tqdm.close() - self._tqdm = None - - -total_tqdm = TotalTQDM() - -mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) -mem_mon.start() - - -def natural_sort_key(s, regex=re.compile('([0-9]+)')): - return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] - - -def listfiles(dirname): - filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), 
key=natural_sort_key) if not x.startswith(".")] - return [file for file in filenames if os.path.isfile(file)] - - -def html_path(filename): - return os.path.join(script_path, "html", filename) - - -def html(filename): - path = html_path(filename) - - if os.path.exists(path): - with open(path, encoding="utf8") as file: - return file.read() - - return "" - - -def walk_files(path, allowed_extensions=None): - if not os.path.exists(path): - return - - if allowed_extensions is not None: - allowed_extensions = set(allowed_extensions) - - items = list(os.walk(path, followlinks=True)) - items = sorted(items, key=lambda x: natural_sort_key(x[0])) - - for root, _, files in items: - for filename in sorted(files, key=natural_sort_key): - if allowed_extensions is not None: - _, ext = os.path.splitext(filename) - if ext not in allowed_extensions: - continue - - if not opts.list_hidden_files and ("/." in root or "\\." in root): - continue - - yield os.path.join(root, filename) - - -def ldm_print(*args, **kwargs): - if opts.hide_ldm_prints: - return - - print(*args, **kwargs) diff --git a/temp b/temp new file mode 100644 index 00000000..e9b980a4 --- /dev/null +++ b/temp @@ -0,0 +1,976 @@ +import datetime +import json +import os +import re +import sys +import threading +import time +import logging + +import gradio as gr +import torch +import tqdm + +import launch +import modules.interrogate +import modules.memmon +import modules.styles +import modules.devices as devices +from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 +from ldm.models.diffusion.ddpm import LatentDiffusion +from typing import Optional + +log = logging.getLogger(__name__) + +demo = None + +parser = cmd_args.parser + +script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) +script_loading.preload_extensions(extensions_builtin_dir, parser) + +if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: + cmd_opts = parser.parse_args() +else: + cmd_opts, _ = parser.parse_known_args() + + +restricted_opts = { + "samples_filename_pattern", + "directories_filename_pattern", + "outdir_samples", + "outdir_txt2img_samples", + "outdir_img2img_samples", + "outdir_extras_samples", + "outdir_grids", + "outdir_txt2img_grids", + "outdir_save", + "outdir_init_images" +} + +# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json +gradio_hf_hub_themes = [ + "gradio/base", + "gradio/glass", + "gradio/monochrome", + "gradio/seafoam", + "gradio/soft", + "gradio/dracula_test", + "abidlabs/dracula_test", + "abidlabs/Lime", + "abidlabs/pakistan", + "Ama434/neutral-barlow", + "dawood/microsoft_windows", + "finlaymacklon/smooth_slate", + "Franklisi/darkmode", + "freddyaboulton/dracula_revamped", + "freddyaboulton/test-blue", + "gstaff/xkcd", + "Insuz/Mocha", + "Insuz/SimpleIndigo", + "JohnSmith9982/small_and_pretty", + "nota-ai/theme", + "nuttea/Softblue", + "ParityError/Anime", + "reilnuud/polite", + "remilia/Ghostly", + "rottenlittlecreature/Moon_Goblin", + "step-3-profit/Midnight-Deep", + "Taithrah/Minimal", + "ysharma/huggingface", + "ysharma/steampunk" +] + + +cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not 
cmd_opts.enable_insecure_extension_access + +devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ + (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) + +devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 +devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 + +device = devices.device +weight_load_location = None if cmd_opts.lowram else "cpu" + +batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) +parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram +xformers_available = False +config_filename = cmd_opts.ui_settings_file + +os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) +hypernetworks = {} +loaded_hypernetworks = [] + + +def reload_hypernetworks(): + from modules.hypernetworks import hypernetwork + global hypernetworks + + hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) + + +class State: + skipped = False + interrupted = False + job = "" + job_no = 0 + job_count = 0 + processing_has_refined_job_count = False + job_timestamp = '0' + sampling_step = 0 + sampling_steps = 0 + current_latent = None + current_image = None + current_image_sampling_step = 0 + id_live_preview = 0 + textinfo = None + time_start = None + server_start = None + _server_command_signal = threading.Event() + _server_command: Optional[str] = None + + @property + def need_restart(self) -> bool: + # Compatibility getter for need_restart. + return self.server_command == "restart" + + @need_restart.setter + def need_restart(self, value: bool) -> None: + # Compatibility setter for need_restart. + if value: + self.server_command = "restart" + + @property + def server_command(self): + return self._server_command + + @server_command.setter + def server_command(self, value: Optional[str]) -> None: + """ + Set the server command to `value` and signal that it's been set. + """ + self._server_command = value + self._server_command_signal.set() + + def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: + """ + Wait for server command to get set; return and clear the value and signal. 
+ """ + if self._server_command_signal.wait(timeout): + self._server_command_signal.clear() + req = self._server_command + self._server_command = None + return req + return None + + def request_restart(self) -> None: + self.interrupt() + self.server_command = "restart" + log.info("Received restart request") + + def skip(self): + self.skipped = True + log.info("Received skip request") + + def interrupt(self): + self.interrupted = True + log.info("Received interrupt request") + + def nextjob(self): + if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: + self.do_set_current_image() + + self.job_no += 1 + self.sampling_step = 0 + self.current_image_sampling_step = 0 + + def dict(self): + obj = { + "skipped": self.skipped, + "interrupted": self.interrupted, + "job": self.job, + "job_count": self.job_count, + "job_timestamp": self.job_timestamp, + "job_no": self.job_no, + "sampling_step": self.sampling_step, + "sampling_steps": self.sampling_steps, + } + + return obj + + def begin(self, job: str = "(unknown)"): + self.sampling_step = 0 + self.job_count = -1 + self.processing_has_refined_job_count = False + self.job_no = 0 + self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + self.current_latent = None + self.current_image = None + self.current_image_sampling_step = 0 + self.id_live_preview = 0 + self.skipped = False + self.interrupted = False + self.textinfo = None + self.time_start = time.time() + self.job = job + devices.torch_gc() + log.info("Starting job %s", job) + + def end(self): + duration = time.time() - self.time_start + log.info("Ending job %s (%.2f seconds)", self.job, duration) + self.job = "" + self.job_count = 0 + + devices.torch_gc() + + def set_current_image(self): + """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" + if not parallel_processing_allowed: + return + + if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: + self.do_set_current_image() + + def do_set_current_image(self): + if self.current_latent is None: + return + + import modules.sd_samplers + + try: + if opts.show_progress_grid: + self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) + else: + self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) + + self.current_image_sampling_step = self.sampling_step + + except Exception: + # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
+ # we silently ignore this error + errors.record_exception() + + def assign_current_image(self, image): + self.current_image = image + self.id_live_preview += 1 + + +state = State() +state.server_start = time.time() + +styles_filename = cmd_opts.styles_file +prompt_styles = modules.styles.StyleDatabase(styles_filename) + +interrogator = modules.interrogate.InterrogateModels("interrogate") + +face_restorers = [] + + +class OptionInfo: + def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): + self.default = default + self.label = label + self.component = component + self.component_args = component_args + self.onchange = onchange + self.section = section + self.refresh = refresh + self.do_not_save = False + + self.comment_before = comment_before + """HTML text that will be added after label in UI""" + + self.comment_after = comment_after + """HTML text that will be added before label in UI""" + + def link(self, label, url): + self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]" + return self + + def js(self, label, js_func): + self.comment_before += f"[<a onclick='{js_func}(); return false'>{label}</a>]" + return self + + def info(self, info): + self.comment_after += f"({info})" + return self + + def html(self, html): + self.comment_after += html + return self + + def needs_restart(self): + self.comment_after += " (requires restart)" + return self + + def needs_reload_ui(self): + self.comment_after += " (requires Reload UI)" + return self + + +class OptionHTML(OptionInfo): + def __init__(self, text): + super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) + + self.do_not_save = True + + +def options_section(section_identifier, options_dict): + for v in options_dict.values(): + v.section = section_identifier + + return options_dict + + +def list_checkpoint_tiles(): + import modules.sd_models + return modules.sd_models.checkpoint_tiles() + + +def refresh_checkpoints(): + import modules.sd_models + return modules.sd_models.list_models() + + +def list_samplers(): + import modules.sd_samplers + return modules.sd_samplers.all_samplers + + +hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} +tab_names = [] + +options_templates = {} + +options_templates.update(options_section(('saving-images', "Saving images/grids"), { + "samples_save": OptionInfo(True, "Always save all generated images"), + "samples_format": OptionInfo('png', 'File format for images'), + "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), + "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), + + "grid_save": OptionInfo(True, "Always save all generated image grids"), + "grid_format": OptionInfo('png', 'File format for grids'), + "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), + "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"), + "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"), + "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), + "n_rows": OptionInfo(-1, "Grid row count; use 
-1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}), + "font": OptionInfo("", "Font for image grids that have text"), + "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}), + "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}), + "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}), + + "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), + "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), + "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), + "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), + "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), + "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"), + "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), + "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), + "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), + "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), + "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), + "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), + "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"), + + "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), + "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), + "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), + "save_init_img": OptionInfo(False, "Save init images when using img2img"), + + "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), + "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), + + "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that have been interrupted in mid-generation; even if not saved, they will still show up in webui output."), +})) + +options_templates.update(options_section(('saving-paths', "Paths for saving"), { + "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs), + "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs), + "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs), + "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs), + "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs), + 
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), + "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), + "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), + "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), +})) + +options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { + "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), + "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), + "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), + "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), + "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), +})) + +options_templates.update(options_section(('upscaling', "Upscaling"), { + "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), + "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), + "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), + "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), +})) + +options_templates.update(options_section(('face-restoration', "Face restoration"), { + "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), + "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), + "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), +})) + +options_templates.update(options_section(('system', "System"), { + "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), + "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), + "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), + "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), + "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), + "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), + "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), + "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name 
starts with \".\""), + "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"), + "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."), +})) + +options_templates.update(options_section(('training', "Training"), { + "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), + "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), + "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), + "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."), + "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), + "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), + "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), + "training_write_csv_every": OptionInfo(500, "Save a csv containing the loss to log directory every N steps, 0 to disable"), + "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"), + "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."), + "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."), + "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), +})) + +options_templates.update(options_section(('sd', "Stable Diffusion"), { + "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), + "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), + "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), + "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), + "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), + "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), + "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), + "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), + "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), + "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), + "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), + "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), +})) + +options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { + "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), + "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), + "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), + "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), +})) + +options_templates.update(options_section(('vae', "VAE"), { + "sd_vae_explanation": OptionHTML(""" +VAE is a neural network that transforms a standard RGB +image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling +(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. +For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
+"""), + "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), + "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), + "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), + "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), + "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), + "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), +})) + +options_templates.update(options_section(('img2img', "img2img"), { + "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), + "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), + "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), + "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), + "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), + "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), + "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), + "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), + "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), + "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), +})) + +options_templates.update(options_section(('optimizations', "Optimizations"), { + "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), + "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), + 
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), + "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), + "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), + "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), + "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), +})) + +options_templates.update(options_section(('compatibility', "Compatibility"), { + "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), + "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), + "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), + "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), + "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), + "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), +})) + +options_templates.update(options_section(('interrogate', "Interrogate"), { + "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), + "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), + "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), + "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), + "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), + "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), + "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), + "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), + "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), + "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), + "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), + "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), +})) + 
+options_templates.update(options_section(('extra_networks', "Extra Networks"), { + "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), + "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), + "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), + "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), + "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), + "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), + "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), + "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), + "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), + "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), + "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), +})) + +options_templates.update(options_section(('ui', "User interface"), { + "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), + "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), + "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), + "return_grid": OptionInfo(True, "Show grid in results for web"), + "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), + "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), + "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), + "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), + "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), + "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), + "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), + "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), + "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), + "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), + "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when 
editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), + "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), + "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), + "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), + "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), + "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), + "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), + "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), + "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), + "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), + "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), +})) + + +options_templates.update(options_section(('infotext', "Infotext"), { + "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), + "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), + "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), + "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), + "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"), + "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI").html("""<ul style='margin-left: 1.5em'>
+<li>Ignore: keep prompt and styles dropdown as it is.</li>
+<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
+<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
+<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
"""), + +})) + +options_templates.update(options_section(('ui', "Live previews"), { + "show_progressbar": OptionInfo(True, "Show progressbar"), + "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), + "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), + "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), + "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), + "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"), + "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), + "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), +})) + +options_templates.update(options_section(('sampler-params', "Sampler parameters"), { + "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), + "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), + "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), + "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), + 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), + 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), + 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), + 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), + 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), + 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), + 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), + 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), + 'eta_noise_seed_delta': OptionInfo(0, "Eta noise 
seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), + 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), + 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), + 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), + 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), + 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), +})) + +options_templates.update(options_section(('postprocessing', "Postprocessing"), { + 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), + 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), + 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), +})) + +options_templates.update(options_section((None, "Hidden options"), { + "disabled_extensions": OptionInfo([], "Disable these extensions"), + "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), + "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), + "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), +})) + + +options_templates.update() + + +class Options: + data = None + data_labels = options_templates + typemap = {int: float} + + def __init__(self): + self.data = {k: v.default for k, v in self.data_labels.items()} + + def __setattr__(self, key, value): + if self.data is not None: + if key in self.data or key in self.data_labels: + assert not cmd_opts.freeze_settings, "changing settings is disabled" + + info = opts.data_labels.get(key, None) + if info.do_not_save: + return + + comp_args = info.component_args if info else None + if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: + raise RuntimeError(f"not possible to set {key} because it is restricted") + + if cmd_opts.hide_ui_dir_config and key in restricted_opts: + raise RuntimeError(f"not possible to set {key} because it is restricted") + + self.data[key] = value + return + + return super(Options, self).__setattr__(key, value) + + def __getattr__(self, item): + if self.data is not None: + if item in self.data: + return self.data[item] + + if item in self.data_labels: + return self.data_labels[item].default + + return super(Options, self).__getattribute__(item) + + def set(self, key, value): + """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" + + oldval = self.data.get(key, None) + if oldval == value: + return False + + if self.data_labels[key].do_not_save: + return False + + try: + setattr(self, key, value) + except RuntimeError: + return False + + if self.data_labels[key].onchange is not None: + try: + 
                self.data_labels[key].onchange()
+            except Exception as e:
+                errors.display(e, f"changing setting {key} to {value}")
+                setattr(self, key, oldval)
+                return False
+
+        return True
+
+    def get_default(self, key):
+        """returns the default value for the key"""
+
+        data_label = self.data_labels.get(key)
+        if data_label is None:
+            return None
+
+        return data_label.default
+
+    def save(self, filename):
+        assert not cmd_opts.freeze_settings, "saving settings is disabled"
+
+        with open(filename, "w", encoding="utf8") as file:
+            json.dump(self.data, file, indent=4)
+
+    def same_type(self, x, y):
+        if x is None or y is None:
+            return True
+
+        type_x = self.typemap.get(type(x), type(x))
+        type_y = self.typemap.get(type(y), type(y))
+
+        return type_x == type_y
+
+    def load(self, filename):
+        with open(filename, "r", encoding="utf8") as file:
+            self.data = json.load(file)
+
+        # 1.6.0 VAE defaults
+        if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None:
+            self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default')
+
+        # 1.1.1 quicksettings list migration
+        if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
+            self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
+
+        # 1.4.0 ui_reorder
+        if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
+            self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
+
+        bad_settings = 0
+        for k, v in self.data.items():
+            info = self.data_labels.get(k, None)
+            if info is not None and not self.same_type(info.default, v):
+                print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
+                bad_settings += 1
+
+        if bad_settings > 0:
+            print(f"The program is likely not to work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
+
+    def onchange(self, key, func, call=True):
+        item = self.data_labels.get(key)
+        item.onchange = func
+
+        if call:
+            func()
+
+    def dumpjson(self):
+        d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
+        d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
+        d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
+        return json.dumps(d)
+
+    def add_option(self, key, info):
+        self.data_labels[key] = info
+
+    def reorder(self):
+        """reorder settings so that all items related to a section always go together"""
+
+        section_ids = {}
+        settings_items = self.data_labels.items()
+        for _, item in settings_items:
+            if item.section not in section_ids:
+                section_ids[item.section] = len(section_ids)
+
+        self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
+
+    def cast_value(self, key, value):
+        """casts an arbitrary value to the same type as this setting's value with key
+        Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
+        """
+
+        if value is None:
+            return None
+
+        default_value = self.data_labels[key].default
+        if default_value is None:
+            default_value = getattr(self, key, None)
+        if default_value is None:
+            return None
+
+        expected_type = type(default_value)
+        if expected_type == bool and value == "False":
+            value = False
+        else:
+            value = expected_type(value)
+
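# note: the explicit check for the string "False" above is needed because
# bool() of any non-empty string, including "False", evaluates to True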
+        return value
+
+
+opts = Options()
+if os.path.exists(config_filename):
+    opts.load(config_filename)
+
+
+class Shared(sys.modules[__name__].__class__):
+    """
+    this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than
+    at program startup.
+    """
+
+    sd_model_val = None
+
+    @property
+    def sd_model(self):
+        import modules.sd_models
+
+        return modules.sd_models.model_data.get_sd_model()
+
+    @sd_model.setter
+    def sd_model(self, value):
+        import modules.sd_models
+
+        modules.sd_models.model_data.set_sd_model(value)
+
+
+sd_model: LatentDiffusion = None  # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead
+sys.modules[__name__].__class__ = Shared
+
+settings_components = None
+"""assigned from ui.py, a mapping of setting names to gradio components responsible for those settings"""
+
+latent_upscale_default_mode = "Latent"
+latent_upscale_modes = {
+    "Latent": {"mode": "bilinear", "antialias": False},
+    "Latent (antialiased)": {"mode": "bilinear", "antialias": True},
+    "Latent (bicubic)": {"mode": "bicubic", "antialias": False},
+    "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True},
+    "Latent (nearest)": {"mode": "nearest", "antialias": False},
+    "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False},
+}
+
+sd_upscalers = []
+
+clip_model = None
+
+progress_print_out = sys.stdout
+
+gradio_theme = gr.themes.Base()
+
+
+def reload_gradio_theme(theme_name=None):
+    global gradio_theme
+    if not theme_name:
+        theme_name = opts.gradio_theme
+
+    default_theme_args = dict(
+        font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'],
+        font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
+    )
+
+    if theme_name == "Default":
+        gradio_theme = gr.themes.Default(**default_theme_args)
+    else:
+        try:
+            theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes')
+            theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json')
+            if opts.gradio_themes_cache and os.path.exists(theme_cache_path):
+                gradio_theme = gr.themes.ThemeClass.load(theme_cache_path)
+            else:
+                os.makedirs(theme_cache_dir, exist_ok=True)
+                gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
+                gradio_theme.dump(theme_cache_path)
+        except Exception as e:
+            errors.display(e, "changing gradio theme")
+            gradio_theme = gr.themes.Default(**default_theme_args)
+
+
+class TotalTQDM:
+    def __init__(self):
+        self._tqdm = None
+
+    def reset(self):
+        self._tqdm = tqdm.tqdm(
+            desc="Total progress",
+            total=state.job_count * state.sampling_steps,
+            position=1,
+            file=progress_print_out
+        )
+
+    def update(self):
+        if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
+            return
+        if self._tqdm is None:
+            self.reset()
+        self._tqdm.update()
+
+    def updateTotal(self, new_total):
+        if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
+            return
+        if self._tqdm is None:
+            self.reset()
+        self._tqdm.total = new_total
+
+    def clear(self):
+        if self._tqdm is not None:
+            self._tqdm.refresh()
+            self._tqdm.close()
+            self._tqdm = None
+
+
+total_tqdm = TotalTQDM()
+
+mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
+mem_mon.start()
+
+
+def natural_sort_key(s, regex=re.compile('([0-9]+)')):
+    return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]
+
+
+def listfiles(dirname):
+    filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname),
key=natural_sort_key) if not x.startswith(".")] + return [file for file in filenames if os.path.isfile(file)] + + +def html_path(filename): + return os.path.join(script_path, "html", filename) + + +def html(filename): + path = html_path(filename) + + if os.path.exists(path): + with open(path, encoding="utf8") as file: + return file.read() + + return "" + + +def walk_files(path, allowed_extensions=None): + if not os.path.exists(path): + return + + if allowed_extensions is not None: + allowed_extensions = set(allowed_extensions) + + items = list(os.walk(path, followlinks=True)) + items = sorted(items, key=lambda x: natural_sort_key(x[0])) + + for root, _, files in items: + for filename in sorted(files, key=natural_sort_key): + if allowed_extensions is not None: + _, ext = os.path.splitext(filename) + if ext not in allowed_extensions: + continue + + if not opts.list_hidden_files and ("/." in root or "\\." in root): + continue + + yield os.path.join(root, filename) + + +def ldm_print(*args, **kwargs): + if opts.hide_ldm_prints: + return + + print(*args, **kwargs) -- cgit v1.2.1 From 7d81ecbea6b558addd356d49c56891d04bc91fd4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 08:47:53 +0300 Subject: Split history: mv temp modules/shared.py --- modules/shared.py | 976 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ temp | 976 ------------------------------------------------------ 2 files changed, 976 insertions(+), 976 deletions(-) create mode 100644 modules/shared.py delete mode 100644 temp diff --git a/modules/shared.py b/modules/shared.py new file mode 100644 index 00000000..e9b980a4 --- /dev/null +++ b/modules/shared.py @@ -0,0 +1,976 @@ +import datetime +import json +import os +import re +import sys +import threading +import time +import logging + +import gradio as gr +import torch +import tqdm + +import launch +import modules.interrogate +import modules.memmon +import modules.styles +import modules.devices as devices +from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 +from ldm.models.diffusion.ddpm import LatentDiffusion +from typing import Optional + +log = logging.getLogger(__name__) + +demo = None + +parser = cmd_args.parser + +script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) +script_loading.preload_extensions(extensions_builtin_dir, parser) + +if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: + cmd_opts = parser.parse_args() +else: + cmd_opts, _ = parser.parse_known_args() + + +restricted_opts = { + "samples_filename_pattern", + "directories_filename_pattern", + "outdir_samples", + "outdir_txt2img_samples", + "outdir_img2img_samples", + "outdir_extras_samples", + "outdir_grids", + "outdir_txt2img_grids", + "outdir_save", + "outdir_init_images" +} + +# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json +gradio_hf_hub_themes = [ + "gradio/base", + "gradio/glass", + "gradio/monochrome", + "gradio/seafoam", + "gradio/soft", + "gradio/dracula_test", + "abidlabs/dracula_test", + "abidlabs/Lime", + "abidlabs/pakistan", + "Ama434/neutral-barlow", + "dawood/microsoft_windows", + "finlaymacklon/smooth_slate", + "Franklisi/darkmode", + 
"freddyaboulton/dracula_revamped", + "freddyaboulton/test-blue", + "gstaff/xkcd", + "Insuz/Mocha", + "Insuz/SimpleIndigo", + "JohnSmith9982/small_and_pretty", + "nota-ai/theme", + "nuttea/Softblue", + "ParityError/Anime", + "reilnuud/polite", + "remilia/Ghostly", + "rottenlittlecreature/Moon_Goblin", + "step-3-profit/Midnight-Deep", + "Taithrah/Minimal", + "ysharma/huggingface", + "ysharma/steampunk" +] + + +cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access + +devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ + (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) + +devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 +devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 + +device = devices.device +weight_load_location = None if cmd_opts.lowram else "cpu" + +batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) +parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram +xformers_available = False +config_filename = cmd_opts.ui_settings_file + +os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) +hypernetworks = {} +loaded_hypernetworks = [] + + +def reload_hypernetworks(): + from modules.hypernetworks import hypernetwork + global hypernetworks + + hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) + + +class State: + skipped = False + interrupted = False + job = "" + job_no = 0 + job_count = 0 + processing_has_refined_job_count = False + job_timestamp = '0' + sampling_step = 0 + sampling_steps = 0 + current_latent = None + current_image = None + current_image_sampling_step = 0 + id_live_preview = 0 + textinfo = None + time_start = None + server_start = None + _server_command_signal = threading.Event() + _server_command: Optional[str] = None + + @property + def need_restart(self) -> bool: + # Compatibility getter for need_restart. + return self.server_command == "restart" + + @need_restart.setter + def need_restart(self, value: bool) -> None: + # Compatibility setter for need_restart. + if value: + self.server_command = "restart" + + @property + def server_command(self): + return self._server_command + + @server_command.setter + def server_command(self, value: Optional[str]) -> None: + """ + Set the server command to `value` and signal that it's been set. + """ + self._server_command = value + self._server_command_signal.set() + + def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: + """ + Wait for server command to get set; return and clear the value and signal. 
+ """ + if self._server_command_signal.wait(timeout): + self._server_command_signal.clear() + req = self._server_command + self._server_command = None + return req + return None + + def request_restart(self) -> None: + self.interrupt() + self.server_command = "restart" + log.info("Received restart request") + + def skip(self): + self.skipped = True + log.info("Received skip request") + + def interrupt(self): + self.interrupted = True + log.info("Received interrupt request") + + def nextjob(self): + if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: + self.do_set_current_image() + + self.job_no += 1 + self.sampling_step = 0 + self.current_image_sampling_step = 0 + + def dict(self): + obj = { + "skipped": self.skipped, + "interrupted": self.interrupted, + "job": self.job, + "job_count": self.job_count, + "job_timestamp": self.job_timestamp, + "job_no": self.job_no, + "sampling_step": self.sampling_step, + "sampling_steps": self.sampling_steps, + } + + return obj + + def begin(self, job: str = "(unknown)"): + self.sampling_step = 0 + self.job_count = -1 + self.processing_has_refined_job_count = False + self.job_no = 0 + self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + self.current_latent = None + self.current_image = None + self.current_image_sampling_step = 0 + self.id_live_preview = 0 + self.skipped = False + self.interrupted = False + self.textinfo = None + self.time_start = time.time() + self.job = job + devices.torch_gc() + log.info("Starting job %s", job) + + def end(self): + duration = time.time() - self.time_start + log.info("Ending job %s (%.2f seconds)", self.job, duration) + self.job = "" + self.job_count = 0 + + devices.torch_gc() + + def set_current_image(self): + """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" + if not parallel_processing_allowed: + return + + if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: + self.do_set_current_image() + + def do_set_current_image(self): + if self.current_latent is None: + return + + import modules.sd_samplers + + try: + if opts.show_progress_grid: + self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) + else: + self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) + + self.current_image_sampling_step = self.sampling_step + + except Exception: + # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
+ # we silently ignore this error + errors.record_exception() + + def assign_current_image(self, image): + self.current_image = image + self.id_live_preview += 1 + + +state = State() +state.server_start = time.time() + +styles_filename = cmd_opts.styles_file +prompt_styles = modules.styles.StyleDatabase(styles_filename) + +interrogator = modules.interrogate.InterrogateModels("interrogate") + +face_restorers = [] + + +class OptionInfo: + def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): + self.default = default + self.label = label + self.component = component + self.component_args = component_args + self.onchange = onchange + self.section = section + self.refresh = refresh + self.do_not_save = False + + self.comment_before = comment_before + """HTML text that will be added after label in UI""" + + self.comment_after = comment_after + """HTML text that will be added before label in UI""" + + def link(self, label, url): + self.comment_before += f"[{label}]" + return self + + def js(self, label, js_func): + self.comment_before += f"[{label}]" + return self + + def info(self, info): + self.comment_after += f"({info})" + return self + + def html(self, html): + self.comment_after += html + return self + + def needs_restart(self): + self.comment_after += " (requires restart)" + return self + + def needs_reload_ui(self): + self.comment_after += " (requires Reload UI)" + return self + + +class OptionHTML(OptionInfo): + def __init__(self, text): + super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) + + self.do_not_save = True + + +def options_section(section_identifier, options_dict): + for v in options_dict.values(): + v.section = section_identifier + + return options_dict + + +def list_checkpoint_tiles(): + import modules.sd_models + return modules.sd_models.checkpoint_tiles() + + +def refresh_checkpoints(): + import modules.sd_models + return modules.sd_models.list_models() + + +def list_samplers(): + import modules.sd_samplers + return modules.sd_samplers.all_samplers + + +hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} +tab_names = [] + +options_templates = {} + +options_templates.update(options_section(('saving-images', "Saving images/grids"), { + "samples_save": OptionInfo(True, "Always save all generated images"), + "samples_format": OptionInfo('png', 'File format for images'), + "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), + "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), + + "grid_save": OptionInfo(True, "Always save all generated image grids"), + "grid_format": OptionInfo('png', 'File format for grids'), + "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), + "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"), + "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"), + "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), + "n_rows": OptionInfo(-1, "Grid row count; use 
-1 for autodetect and 0 for it to be the same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
+    "font": OptionInfo("", "Font for image grids that have text"),
+    "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}),
+    "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}),
+    "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}),
+
+    "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
+    "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
+    "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
+    "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
+    "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
+    "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
+    "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
+    "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
+    "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
+    "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"),
+    "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
+    "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
+    "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"),
+
+    "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
+    "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
+    "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
+    "save_init_img": OptionInfo(False, "Save init images when using img2img"),
+
+    "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
+    "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"),
+
+    "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that have been interrupted mid-generation; even if not saved, they will still show up in webui output."),
+}))
+
+options_templates.update(options_section(('saving-paths', "Paths for saving"), {
+    "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
+    "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
+    "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
+    "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
+    "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
+
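# component_args=hide_dirs (defined earlier as {"visible": not cmd_opts.hide_ui_dir_config})
# hides these directory fields from the UI when webui is launched with --hide-ui-dir-config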
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), + "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), + "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), + "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), +})) + +options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { + "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), + "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), + "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), + "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), + "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), +})) + +options_templates.update(options_section(('upscaling', "Upscaling"), { + "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), + "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), + "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), + "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), +})) + +options_templates.update(options_section(('face-restoration', "Face restoration"), { + "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), + "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), + "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), +})) + +options_templates.update(options_section(('system', "System"), { + "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), + "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), + "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), + "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), + "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), + "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), + "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), + "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name 
starts with \".\""),
+    "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"),
+    "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."),
+}))
+
+options_templates.update(options_section(('training', "Training"), {
+    "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
+    "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
+    "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
+    "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
+    "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
+    "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
+    "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
+    "training_write_csv_every": OptionInfo(500, "Save a csv containing the loss to log directory every N steps, 0 to disable"),
+    "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
+    "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
+    "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
+    "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
+}))
+
+options_templates.update(options_section(('sd', "Stable Diffusion"), {
+    "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
+    "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
+    "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
+    "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"),
+    "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
+    "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results.
This may change existing seeds").needs_reload_ui(), + "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), + "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), + "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), + "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), + "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), + "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), +})) + +options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { + "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), + "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), + "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), + "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), +})) + +options_templates.update(options_section(('vae', "VAE"), { + "sd_vae_explanation": OptionHTML(""" +VAE is a neural network that transforms a standard RGB +image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling +(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. +For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
+"""), + "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), + "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), + "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), + "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), + "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), + "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), +})) + +options_templates.update(options_section(('img2img', "img2img"), { + "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), + "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), + "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), + "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), + "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), + "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), + "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), + "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), + "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), + "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), +})) + +options_templates.update(options_section(('optimizations', "Optimizations"), { + "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), + "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), + 
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), + "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), + "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), + "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), + "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), +})) + +options_templates.update(options_section(('compatibility', "Compatibility"), { + "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), + "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), + "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), + "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), + "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), + "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), +})) + +options_templates.update(options_section(('interrogate', "Interrogate"), { + "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), + "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), + "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), + "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), + "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), + "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), + "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), + "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), + "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), + "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), + "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), + "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), +})) + 
+options_templates.update(options_section(('extra_networks', "Extra Networks"), { + "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), + "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), + "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), + "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), + "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), + "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), + "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), + "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), + "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), + "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), + "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), +})) + +options_templates.update(options_section(('ui', "User interface"), { + "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), + "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), + "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), + "return_grid": OptionInfo(True, "Show grid in results for web"), + "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), + "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), + "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), + "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), + "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), + "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), + "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), + "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), + "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), + "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), + "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when 
editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+    "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+    "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
+    "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"),
+    "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(),
+    "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(),
+    "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(),
+    "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(),
+    "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(),
+    "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(),
+    "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
+}))
+
+
+options_templates.update(options_section(('infotext', "Infotext"), {
+    "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
+    "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
+    "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"),
+    "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"),
+    "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
+    "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI").html("""<ul>
+<li>Ignore: keep prompt and styles dropdown as it is.</li>
+<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
+<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
+<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
+</ul>"""),
"""), + +})) + +options_templates.update(options_section(('ui', "Live previews"), { + "show_progressbar": OptionInfo(True, "Show progressbar"), + "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), + "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), + "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), + "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), + "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"), + "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), + "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), +})) + +options_templates.update(options_section(('sampler-params', "Sampler parameters"), { + "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), + "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), + "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), + "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), + 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), + 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), + 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), + 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), + 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), + 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), + 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), + 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), + 'eta_noise_seed_delta': OptionInfo(0, "Eta noise 
seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), + 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), + 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), + 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), + 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), + 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), +})) + +options_templates.update(options_section(('postprocessing', "Postprocessing"), { + 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), + 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), + 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), +})) + +options_templates.update(options_section((None, "Hidden options"), { + "disabled_extensions": OptionInfo([], "Disable these extensions"), + "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), + "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), + "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), +})) + + +options_templates.update() + + +class Options: + data = None + data_labels = options_templates + typemap = {int: float} + + def __init__(self): + self.data = {k: v.default for k, v in self.data_labels.items()} + + def __setattr__(self, key, value): + if self.data is not None: + if key in self.data or key in self.data_labels: + assert not cmd_opts.freeze_settings, "changing settings is disabled" + + info = opts.data_labels.get(key, None) + if info.do_not_save: + return + + comp_args = info.component_args if info else None + if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: + raise RuntimeError(f"not possible to set {key} because it is restricted") + + if cmd_opts.hide_ui_dir_config and key in restricted_opts: + raise RuntimeError(f"not possible to set {key} because it is restricted") + + self.data[key] = value + return + + return super(Options, self).__setattr__(key, value) + + def __getattr__(self, item): + if self.data is not None: + if item in self.data: + return self.data[item] + + if item in self.data_labels: + return self.data_labels[item].default + + return super(Options, self).__getattribute__(item) + + def set(self, key, value): + """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" + + oldval = self.data.get(key, None) + if oldval == value: + return False + + if self.data_labels[key].do_not_save: + return False + + try: + setattr(self, key, value) + except RuntimeError: + return False + + if self.data_labels[key].onchange is not None: + try: + 
                self.data_labels[key].onchange()
+            except Exception as e:
+                errors.display(e, f"changing setting {key} to {value}")
+                setattr(self, key, oldval)
+                return False
+
+        return True
+
+    def get_default(self, key):
+        """returns the default value for the key"""
+
+        data_label = self.data_labels.get(key)
+        if data_label is None:
+            return None
+
+        return data_label.default
+
+    def save(self, filename):
+        assert not cmd_opts.freeze_settings, "saving settings is disabled"
+
+        with open(filename, "w", encoding="utf8") as file:
+            json.dump(self.data, file, indent=4)
+
+    def same_type(self, x, y):
+        if x is None or y is None:
+            return True
+
+        type_x = self.typemap.get(type(x), type(x))
+        type_y = self.typemap.get(type(y), type(y))
+
+        return type_x == type_y
+
+    def load(self, filename):
+        with open(filename, "r", encoding="utf8") as file:
+            self.data = json.load(file)
+
+        # 1.6.0 VAE defaults
+        if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None:
+            self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default')
+
+        # 1.1.1 quicksettings list migration
+        if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
+            self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
+
+        # 1.4.0 ui_reorder
+        if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
+            self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
+
+        bad_settings = 0
+        for k, v in self.data.items():
+            info = self.data_labels.get(k, None)
+            if info is not None and not self.same_type(info.default, v):
+                print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
+                bad_settings += 1
+
+        if bad_settings > 0:
+            print(f"The program is likely not to work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
+
+    def onchange(self, key, func, call=True):
+        item = self.data_labels.get(key)
+        item.onchange = func
+
+        if call:
+            func()
+
+    def dumpjson(self):
+        d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
+        d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
+        d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
+        return json.dumps(d)
+
+    def add_option(self, key, info):
+        self.data_labels[key] = info
+
+    def reorder(self):
+        """reorder settings so that all items related to a section always go together"""
+
+        section_ids = {}
+        settings_items = self.data_labels.items()
+        for _, item in settings_items:
+            if item.section not in section_ids:
+                section_ids[item.section] = len(section_ids)
+
+        self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
+
+    def cast_value(self, key, value):
+        """casts an arbitrary value to the same type as this setting's value with key
+        Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
+        """
+
+        if value is None:
+            return None
+
+        default_value = self.data_labels[key].default
+        if default_value is None:
+            default_value = getattr(self, key, None)
+        if default_value is None:
+            return None
+
+        expected_type = type(default_value)
+        if expected_type == bool and value == "False":
+            value = False
+        else:
+            value = expected_type(value)
+
return value + + +opts = Options() +if os.path.exists(config_filename): + opts.load(config_filename) + + +class Shared(sys.modules[__name__].__class__): + """ + this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than + at program startup. + """ + + sd_model_val = None + + @property + def sd_model(self): + import modules.sd_models + + return modules.sd_models.model_data.get_sd_model() + + @sd_model.setter + def sd_model(self, value): + import modules.sd_models + + modules.sd_models.model_data.set_sd_model(value) + + +sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead +sys.modules[__name__].__class__ = Shared + +settings_components = None +"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" + +latent_upscale_default_mode = "Latent" +latent_upscale_modes = { + "Latent": {"mode": "bilinear", "antialias": False}, + "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, + "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, + "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, + "Latent (nearest)": {"mode": "nearest", "antialias": False}, + "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, +} + +sd_upscalers = [] + +clip_model = None + +progress_print_out = sys.stdout + +gradio_theme = gr.themes.Base() + + +def reload_gradio_theme(theme_name=None): + global gradio_theme + if not theme_name: + theme_name = opts.gradio_theme + + default_theme_args = dict( + font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], + font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], + ) + + if theme_name == "Default": + gradio_theme = gr.themes.Default(**default_theme_args) + else: + try: + theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') + theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') + if opts.gradio_themes_cache and os.path.exists(theme_cache_path): + gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) + else: + os.makedirs(theme_cache_dir, exist_ok=True) + gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + gradio_theme.dump(theme_cache_path) + except Exception as e: + errors.display(e, "changing gradio theme") + gradio_theme = gr.themes.Default(**default_theme_args) + + +class TotalTQDM: + def __init__(self): + self._tqdm = None + + def reset(self): + self._tqdm = tqdm.tqdm( + desc="Total progress", + total=state.job_count * state.sampling_steps, + position=1, + file=progress_print_out + ) + + def update(self): + if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.update() + + def updateTotal(self, new_total): + if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.total = new_total + + def clear(self): + if self._tqdm is not None: + self._tqdm.refresh() + self._tqdm.close() + self._tqdm = None + + +total_tqdm = TotalTQDM() + +mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) +mem_mon.start() + + +def natural_sort_key(s, regex=re.compile('([0-9]+)')): + return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] + + +def listfiles(dirname): + filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), 
key=natural_sort_key) if not x.startswith(".")] + return [file for file in filenames if os.path.isfile(file)] + + +def html_path(filename): + return os.path.join(script_path, "html", filename) + + +def html(filename): + path = html_path(filename) + + if os.path.exists(path): + with open(path, encoding="utf8") as file: + return file.read() + + return "" + + +def walk_files(path, allowed_extensions=None): + if not os.path.exists(path): + return + + if allowed_extensions is not None: + allowed_extensions = set(allowed_extensions) + + items = list(os.walk(path, followlinks=True)) + items = sorted(items, key=lambda x: natural_sort_key(x[0])) + + for root, _, files in items: + for filename in sorted(files, key=natural_sort_key): + if allowed_extensions is not None: + _, ext = os.path.splitext(filename) + if ext not in allowed_extensions: + continue + + if not opts.list_hidden_files and ("/." in root or "\\." in root): + continue + + yield os.path.join(root, filename) + + +def ldm_print(*args, **kwargs): + if opts.hide_ldm_prints: + return + + print(*args, **kwargs) diff --git a/temp b/temp deleted file mode 100644 index e9b980a4..00000000 --- a/temp +++ /dev/null @@ -1,976 +0,0 @@ -import datetime -import json -import os -import re -import sys -import threading -import time -import logging - -import gradio as gr -import torch -import tqdm - -import launch -import modules.interrogate -import modules.memmon -import modules.styles -import modules.devices as devices -from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 -from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 -from ldm.models.diffusion.ddpm import LatentDiffusion -from typing import Optional - -log = logging.getLogger(__name__) - -demo = None - -parser = cmd_args.parser - -script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) -script_loading.preload_extensions(extensions_builtin_dir, parser) - -if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: - cmd_opts = parser.parse_args() -else: - cmd_opts, _ = parser.parse_known_args() - - -restricted_opts = { - "samples_filename_pattern", - "directories_filename_pattern", - "outdir_samples", - "outdir_txt2img_samples", - "outdir_img2img_samples", - "outdir_extras_samples", - "outdir_grids", - "outdir_txt2img_grids", - "outdir_save", - "outdir_init_images" -} - -# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json -gradio_hf_hub_themes = [ - "gradio/base", - "gradio/glass", - "gradio/monochrome", - "gradio/seafoam", - "gradio/soft", - "gradio/dracula_test", - "abidlabs/dracula_test", - "abidlabs/Lime", - "abidlabs/pakistan", - "Ama434/neutral-barlow", - "dawood/microsoft_windows", - "finlaymacklon/smooth_slate", - "Franklisi/darkmode", - "freddyaboulton/dracula_revamped", - "freddyaboulton/test-blue", - "gstaff/xkcd", - "Insuz/Mocha", - "Insuz/SimpleIndigo", - "JohnSmith9982/small_and_pretty", - "nota-ai/theme", - "nuttea/Softblue", - "ParityError/Anime", - "reilnuud/polite", - "remilia/Ghostly", - "rottenlittlecreature/Moon_Goblin", - "step-3-profit/Midnight-Deep", - "Taithrah/Minimal", - "ysharma/huggingface", - "ysharma/steampunk" -] - - -cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not 
cmd_opts.enable_insecure_extension_access - -devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ - (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) - -devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 -devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 - -device = devices.device -weight_load_location = None if cmd_opts.lowram else "cpu" - -batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) -parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram -xformers_available = False -config_filename = cmd_opts.ui_settings_file - -os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) -hypernetworks = {} -loaded_hypernetworks = [] - - -def reload_hypernetworks(): - from modules.hypernetworks import hypernetwork - global hypernetworks - - hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) - - -class State: - skipped = False - interrupted = False - job = "" - job_no = 0 - job_count = 0 - processing_has_refined_job_count = False - job_timestamp = '0' - sampling_step = 0 - sampling_steps = 0 - current_latent = None - current_image = None - current_image_sampling_step = 0 - id_live_preview = 0 - textinfo = None - time_start = None - server_start = None - _server_command_signal = threading.Event() - _server_command: Optional[str] = None - - @property - def need_restart(self) -> bool: - # Compatibility getter for need_restart. - return self.server_command == "restart" - - @need_restart.setter - def need_restart(self, value: bool) -> None: - # Compatibility setter for need_restart. - if value: - self.server_command = "restart" - - @property - def server_command(self): - return self._server_command - - @server_command.setter - def server_command(self, value: Optional[str]) -> None: - """ - Set the server command to `value` and signal that it's been set. - """ - self._server_command = value - self._server_command_signal.set() - - def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: - """ - Wait for server command to get set; return and clear the value and signal. 
- """ - if self._server_command_signal.wait(timeout): - self._server_command_signal.clear() - req = self._server_command - self._server_command = None - return req - return None - - def request_restart(self) -> None: - self.interrupt() - self.server_command = "restart" - log.info("Received restart request") - - def skip(self): - self.skipped = True - log.info("Received skip request") - - def interrupt(self): - self.interrupted = True - log.info("Received interrupt request") - - def nextjob(self): - if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: - self.do_set_current_image() - - self.job_no += 1 - self.sampling_step = 0 - self.current_image_sampling_step = 0 - - def dict(self): - obj = { - "skipped": self.skipped, - "interrupted": self.interrupted, - "job": self.job, - "job_count": self.job_count, - "job_timestamp": self.job_timestamp, - "job_no": self.job_no, - "sampling_step": self.sampling_step, - "sampling_steps": self.sampling_steps, - } - - return obj - - def begin(self, job: str = "(unknown)"): - self.sampling_step = 0 - self.job_count = -1 - self.processing_has_refined_job_count = False - self.job_no = 0 - self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") - self.current_latent = None - self.current_image = None - self.current_image_sampling_step = 0 - self.id_live_preview = 0 - self.skipped = False - self.interrupted = False - self.textinfo = None - self.time_start = time.time() - self.job = job - devices.torch_gc() - log.info("Starting job %s", job) - - def end(self): - duration = time.time() - self.time_start - log.info("Ending job %s (%.2f seconds)", self.job, duration) - self.job = "" - self.job_count = 0 - - devices.torch_gc() - - def set_current_image(self): - """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" - if not parallel_processing_allowed: - return - - if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: - self.do_set_current_image() - - def do_set_current_image(self): - if self.current_latent is None: - return - - import modules.sd_samplers - - try: - if opts.show_progress_grid: - self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) - else: - self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) - - self.current_image_sampling_step = self.sampling_step - - except Exception: - # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
- # we silently ignore this error - errors.record_exception() - - def assign_current_image(self, image): - self.current_image = image - self.id_live_preview += 1 - - -state = State() -state.server_start = time.time() - -styles_filename = cmd_opts.styles_file -prompt_styles = modules.styles.StyleDatabase(styles_filename) - -interrogator = modules.interrogate.InterrogateModels("interrogate") - -face_restorers = [] - - -class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): - self.default = default - self.label = label - self.component = component - self.component_args = component_args - self.onchange = onchange - self.section = section - self.refresh = refresh - self.do_not_save = False - - self.comment_before = comment_before - """HTML text that will be added after label in UI""" - - self.comment_after = comment_after - """HTML text that will be added before label in UI""" - - def link(self, label, url): - self.comment_before += f"[{label}]" - return self - - def js(self, label, js_func): - self.comment_before += f"[{label}]" - return self - - def info(self, info): - self.comment_after += f"({info})" - return self - - def html(self, html): - self.comment_after += html - return self - - def needs_restart(self): - self.comment_after += " (requires restart)" - return self - - def needs_reload_ui(self): - self.comment_after += " (requires Reload UI)" - return self - - -class OptionHTML(OptionInfo): - def __init__(self, text): - super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) - - self.do_not_save = True - - -def options_section(section_identifier, options_dict): - for v in options_dict.values(): - v.section = section_identifier - - return options_dict - - -def list_checkpoint_tiles(): - import modules.sd_models - return modules.sd_models.checkpoint_tiles() - - -def refresh_checkpoints(): - import modules.sd_models - return modules.sd_models.list_models() - - -def list_samplers(): - import modules.sd_samplers - return modules.sd_samplers.all_samplers - - -hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} -tab_names = [] - -options_templates = {} - -options_templates.update(options_section(('saving-images', "Saving images/grids"), { - "samples_save": OptionInfo(True, "Always save all generated images"), - "samples_format": OptionInfo('png', 'File format for images'), - "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), - - "grid_save": OptionInfo(True, "Always save all generated image grids"), - "grid_format": OptionInfo('png', 'File format for grids'), - "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), - "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"), - "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"), - "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "n_rows": OptionInfo(-1, "Grid row count; use 
-1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}), - "font": OptionInfo("", "Font for image grids that have text"), - "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}), - "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}), - "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}), - - "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), - "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), - "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), - "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), - "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), - "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"), - "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), - "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), - "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), - "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), - "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), - "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"), - - "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), - "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), - "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), - "save_init_img": OptionInfo(False, "Save init images when using img2img"), - - "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), - "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), - - "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that have been interrupted in mid-generation; even if not saved, they will still show up in webui output."), -})) - -options_templates.update(options_section(('saving-paths', "Paths for saving"), { - "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs), - "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs), - "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs), - "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs), - "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs), -
"outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), - "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), - "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), - "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), -})) - -options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { - "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), - "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), - "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), - "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), -})) - -options_templates.update(options_section(('upscaling', "Upscaling"), { - "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), - "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), - "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), -})) - -options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), - "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), - "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), -})) - -options_templates.update(options_section(('system', "System"), { - "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), - "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), - "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), - "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), - "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), - "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), - "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), - "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name 
starts with \".\""), - "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"), - "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."), -})) - -options_templates.update(options_section(('training', "Training"), { - "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), - "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), - "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), - "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."), - "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), - "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), - "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), - "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), - "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"), - "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."), - "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."), - "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), -})) - -options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), - "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), - "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), - "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), - "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), - "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), - "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), - "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), - "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), - "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), -})) - -options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { - "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), - "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), - "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), - "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), -})) - -options_templates.update(options_section(('vae', "VAE"), { - "sd_vae_explanation": OptionHTML(""" -VAE is a neural network that transforms a standard RGB -image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling -(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. -For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
-"""), - "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), - "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), - "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), - "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), - "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), -})) - -options_templates.update(options_section(('img2img', "img2img"), { - "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), - "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), - "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), - "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), - "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), - "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), - "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), - "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), - "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), -})) - -options_templates.update(options_section(('optimizations', "Optimizations"), { - "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), - "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), - 
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), - "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), -})) - -options_templates.update(options_section(('compatibility', "Compatibility"), { - "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), - "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), - "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), - "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), - "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), - "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), -})) - -options_templates.update(options_section(('interrogate', "Interrogate"), { - "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), - "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), - "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), - "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), - "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), - "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), - "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), - "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), - "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), - "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), - "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), - "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), -})) - 
-options_templates.update(options_section(('extra_networks', "Extra Networks"), { - "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), - "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), - "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), - "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), - "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), - "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), - "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), - "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), - "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), - "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), - "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), -})) - -options_templates.update(options_section(('ui', "User interface"), { - "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), - "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), - "return_grid": OptionInfo(True, "Show grid in results for web"), - "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), - "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), - "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), - "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), - "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), - "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), - "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), - "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), - "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), - "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when 
editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), - "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), - "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), - "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), -})) - - -options_templates.update(options_section(('infotext', "Infotext"), { - "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), - "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), - "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), - "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), - "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"), - "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""
-<ul>
-<li>Ignore: keep prompt and styles dropdown as it is.</li>
-<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
-<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
-<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
-</ul>
"""), - -})) - -options_templates.update(options_section(('ui', "Live previews"), { - "show_progressbar": OptionInfo(True, "Show progressbar"), - "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), - "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), - "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), - "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), - "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"), - "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), - "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), -})) - -options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), - "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), - "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), - "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), - 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), - 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), - 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), - 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), - 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), - 'eta_noise_seed_delta': OptionInfo(0, "Eta noise 
seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), - 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), - 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), - 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), - 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), - 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), -})) - -options_templates.update(options_section(('postprocessing', "Postprocessing"), { - 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), -})) - -options_templates.update(options_section((None, "Hidden options"), { - "disabled_extensions": OptionInfo([], "Disable these extensions"), - "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), - "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), - "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), -})) - - -options_templates.update() - - -class Options: - data = None - data_labels = options_templates - typemap = {int: float} - - def __init__(self): - self.data = {k: v.default for k, v in self.data_labels.items()} - - def __setattr__(self, key, value): - if self.data is not None: - if key in self.data or key in self.data_labels: - assert not cmd_opts.freeze_settings, "changing settings is disabled" - - info = opts.data_labels.get(key, None) - if info.do_not_save: - return - - comp_args = info.component_args if info else None - if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - if cmd_opts.hide_ui_dir_config and key in restricted_opts: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - self.data[key] = value - return - - return super(Options, self).__setattr__(key, value) - - def __getattr__(self, item): - if self.data is not None: - if item in self.data: - return self.data[item] - - if item in self.data_labels: - return self.data_labels[item].default - - return super(Options, self).__getattribute__(item) - - def set(self, key, value): - """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" - - oldval = self.data.get(key, None) - if oldval == value: - return False - - if self.data_labels[key].do_not_save: - return False - - try: - setattr(self, key, value) - except RuntimeError: - return False - - if self.data_labels[key].onchange is not None: - try: - 
self.data_labels[key].onchange() - except Exception as e: - errors.display(e, f"changing setting {key} to {value}") - setattr(self, key, oldval) - return False - - return True - - def get_default(self, key): - """returns the default value for the key""" - - data_label = self.data_labels.get(key) - if data_label is None: - return None - - return data_label.default - - def save(self, filename): - assert not cmd_opts.freeze_settings, "saving settings is disabled" - - with open(filename, "w", encoding="utf8") as file: - json.dump(self.data, file, indent=4) - - def same_type(self, x, y): - if x is None or y is None: - return True - - type_x = self.typemap.get(type(x), type(x)) - type_y = self.typemap.get(type(y), type(y)) - - return type_x == type_y - - def load(self, filename): - with open(filename, "r", encoding="utf8") as file: - self.data = json.load(file) - - # 1.6.0 VAE defaults - if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: - self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') - - # 1.1.1 quicksettings list migration - if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: - self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] - - # 1.4.0 ui_reorder - if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data: - self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')] - - bad_settings = 0 - for k, v in self.data.items(): - info = self.data_labels.get(k, None) - if info is not None and not self.same_type(info.default, v): - print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr) - bad_settings += 1 - - if bad_settings > 0: - print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) - - def onchange(self, key, func, call=True): - item = self.data_labels.get(key) - item.onchange = func - - if call: - func() - - def dumpjson(self): - d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} - d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} - d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} - return json.dumps(d) - - def add_option(self, key, info): - self.data_labels[key] = info - - def reorder(self): - """reorder settings so that all items related to section always go together""" - - section_ids = {} - settings_items = self.data_labels.items() - for _, item in settings_items: - if item.section not in section_ids: - section_ids[item.section] = len(section_ids) - - self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) - - def cast_value(self, key, value): - """casts an arbitrary to the same type as this setting's value with key - Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str) - """ - - if value is None: - return None - - default_value = self.data_labels[key].default - if default_value is None: - default_value = getattr(self, key, None) - if default_value is None: - return None - - expected_type = type(default_value) - if expected_type == bool and value == "False": - value = False - else: - value = expected_type(value) - - 
return value - - -opts = Options() -if os.path.exists(config_filename): - opts.load(config_filename) - - -class Shared(sys.modules[__name__].__class__): - """ - this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than - at program startup. - """ - - sd_model_val = None - - @property - def sd_model(self): - import modules.sd_models - - return modules.sd_models.model_data.get_sd_model() - - @sd_model.setter - def sd_model(self, value): - import modules.sd_models - - modules.sd_models.model_data.set_sd_model(value) - - -sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead -sys.modules[__name__].__class__ = Shared - -settings_components = None -"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" - -latent_upscale_default_mode = "Latent" -latent_upscale_modes = { - "Latent": {"mode": "bilinear", "antialias": False}, - "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, - "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, - "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, - "Latent (nearest)": {"mode": "nearest", "antialias": False}, - "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, -} - -sd_upscalers = [] - -clip_model = None - -progress_print_out = sys.stdout - -gradio_theme = gr.themes.Base() - - -def reload_gradio_theme(theme_name=None): - global gradio_theme - if not theme_name: - theme_name = opts.gradio_theme - - default_theme_args = dict( - font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) - - if theme_name == "Default": - gradio_theme = gr.themes.Default(**default_theme_args) - else: - try: - theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') - theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') - if opts.gradio_themes_cache and os.path.exists(theme_cache_path): - gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) - else: - os.makedirs(theme_cache_dir, exist_ok=True) - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - gradio_theme.dump(theme_cache_path) - except Exception as e: - errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default(**default_theme_args) - - -class TotalTQDM: - def __init__(self): - self._tqdm = None - - def reset(self): - self._tqdm = tqdm.tqdm( - desc="Total progress", - total=state.job_count * state.sampling_steps, - position=1, - file=progress_print_out - ) - - def update(self): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.update() - - def updateTotal(self, new_total): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.total = new_total - - def clear(self): - if self._tqdm is not None: - self._tqdm.refresh() - self._tqdm.close() - self._tqdm = None - - -total_tqdm = TotalTQDM() - -mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) -mem_mon.start() - - -def natural_sort_key(s, regex=re.compile('([0-9]+)')): - return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] - - -def listfiles(dirname): - filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), 
key=natural_sort_key) if not x.startswith(".")] - return [file for file in filenames if os.path.isfile(file)] - - -def html_path(filename): - return os.path.join(script_path, "html", filename) - - -def html(filename): - path = html_path(filename) - - if os.path.exists(path): - with open(path, encoding="utf8") as file: - return file.read() - - return "" - - -def walk_files(path, allowed_extensions=None): - if not os.path.exists(path): - return - - if allowed_extensions is not None: - allowed_extensions = set(allowed_extensions) - - items = list(os.walk(path, followlinks=True)) - items = sorted(items, key=lambda x: natural_sort_key(x[0])) - - for root, _, files in items: - for filename in sorted(files, key=natural_sort_key): - if allowed_extensions is not None: - _, ext = os.path.splitext(filename) - if ext not in allowed_extensions: - continue - - if not opts.list_hidden_files and ("/." in root or "\\." in root): - continue - - yield os.path.join(root, filename) - - -def ldm_print(*args, **kwargs): - if opts.hide_ldm_prints: - return - - print(*args, **kwargs) -- cgit v1.2.1 From 386245a26427a64f364f66f6fecd03b3bccfd7f3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 10:25:35 +0300 Subject: split shared.py into multiple files; should resolve all circular reference import errors related to shared.py --- modules/devices.py | 10 +- modules/extensions.py | 4 +- modules/generation_parameters_copypaste.py | 3 +- modules/images.py | 28 +- modules/localization.py | 3 +- modules/mac_specific.py | 4 +- modules/options.py | 236 +++++++ modules/rng.py | 3 +- modules/sd_models.py | 9 +- modules/sd_models_config.py | 3 +- modules/sd_vae.py | 5 +- modules/shared.py | 961 ++--------------------------- modules/shared_cmd_options.py | 18 + modules/shared_gradio_themes.py | 66 ++ modules/shared_init.py | 51 ++ modules/shared_items.py | 49 ++ modules/shared_options.py | 692 +-------------------- modules/shared_state.py | 159 +++++ modules/shared_total_tqdm.py | 37 ++ modules/sysinfo.py | 7 +- modules/ui.py | 6 +- modules/ui_common.py | 4 +- modules/util.py | 58 ++ webui.py | 11 +- 24 files changed, 762 insertions(+), 1665 deletions(-) create mode 100644 modules/options.py create mode 100644 modules/shared_cmd_options.py create mode 100644 modules/shared_gradio_themes.py create mode 100644 modules/shared_init.py create mode 100644 modules/shared_state.py create mode 100644 modules/shared_total_tqdm.py create mode 100644 modules/util.py diff --git a/modules/devices.py b/modules/devices.py index ce59dc53..c01f0602 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -3,7 +3,7 @@ import contextlib from functools import lru_cache import torch -from modules import errors +from modules import errors, shared if sys.platform == "darwin": from modules import mac_specific @@ -17,8 +17,6 @@ def has_mps() -> bool: def get_cuda_device_string(): - from modules import shared - if shared.cmd_opts.device_id is not None: return f"cuda:{shared.cmd_opts.device_id}" @@ -40,8 +38,6 @@ def get_optimal_device(): def get_device_for(task): - from modules import shared - if task in shared.cmd_opts.use_cpu: return cpu @@ -97,8 +93,6 @@ nv_rng = None def autocast(disable=False): - from modules import shared - if disable: return contextlib.nullcontext() @@ -117,8 +111,6 @@ class NansException(Exception): def test_for_nans(x, where): - from modules import shared - if shared.cmd_opts.disable_nan_check: return diff --git a/modules/extensions.py b/modules/extensions.py index e4633af4..bf9a1878 
100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,7 +1,7 @@ import os import threading -from modules import shared, errors, cache +from modules import shared, errors, cache, scripts from modules.gitpython_hack import Repo from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 @@ -90,8 +90,6 @@ class Extension: self.have_info_from_repo = True def list_files(self, subdir, extension): - from modules import scripts - dirpath = os.path.join(self.path, subdir) if not os.path.isdir(dirpath): return [] diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 5758e6f3..d932c67d 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -6,7 +6,7 @@ import re import gradio as gr from modules.paths import data_path -from modules import shared, ui_tempdir, script_callbacks +from modules import shared, ui_tempdir, script_callbacks, processing from PIL import Image re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' @@ -198,7 +198,6 @@ def restore_old_hires_fix_params(res): height = int(res.get("Size-2", 512)) if firstpass_width == 0 or firstpass_height == 0: - from modules import processing firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height) res['Size-1'] = firstpass_width diff --git a/modules/images.py b/modules/images.py index ba3c43a4..019c1d60 100644 --- a/modules/images.py +++ b/modules/images.py @@ -21,8 +21,6 @@ from modules import sd_samplers, shared, script_callbacks, errors from modules.paths_internal import roboto_ttf_file from modules.shared import opts -import modules.sd_vae as sd_vae - LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) @@ -342,16 +340,6 @@ def sanitize_filename_part(text, replace_spaces=True): class FilenameGenerator: - def get_vae_filename(self): #get the name of the VAE file. - if sd_vae.loaded_vae_file is None: - return "NoneType" - file_name = os.path.basename(sd_vae.loaded_vae_file) - split_file_name = file_name.split('.') - if len(split_file_name) > 1 and split_file_name[0] == '': - return split_file_name[1] # if the first character of the filename is "." then [1] is obtained. - else: - return split_file_name[0] - replacements = { 'seed': lambda self: self.seed if self.seed is not None else '', 'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0], @@ -391,6 +379,22 @@ class FilenameGenerator: self.image = image self.zip = zip + def get_vae_filename(self): + """Get the name of the VAE file.""" + + import modules.sd_vae as sd_vae + + if sd_vae.loaded_vae_file is None: + return "NoneType" + + file_name = os.path.basename(sd_vae.loaded_vae_file) + split_file_name = file_name.split('.') + if len(split_file_name) > 1 and split_file_name[0] == '': + return split_file_name[1] # if the first character of the filename is "." then [1] is obtained. 
+            else:
+                return split_file_name[0]
+
+
     def hasprompt(self, *args):
         lower = self.prompt.lower()
         if self.p is None or self.prompt is None:
diff --git a/modules/localization.py b/modules/localization.py
index e8f585da..c1320288 100644
--- a/modules/localization.py
+++ b/modules/localization.py
@@ -1,7 +1,7 @@
 import json
 import os

-from modules import errors
+from modules import errors, scripts

 localizations = {}

@@ -16,7 +16,6 @@ def list_localizations(dirname):

         localizations[fn] = os.path.join(dirname, file)

-    from modules import scripts
     for file in scripts.list_scripts("localizations", ".json"):
         fn, ext = os.path.splitext(file.filename)
         localizations[fn] = file.path
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 9ceb43ba..bce527cc 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -4,6 +4,7 @@ import torch
 import platform
 from modules.sd_hijack_utils import CondFunc
 from packaging import version
+from modules import shared

 log = logging.getLogger(__name__)

@@ -30,8 +31,7 @@ has_mps = check_for_mps()

 def torch_mps_gc() -> None:
     try:
-        from modules.shared import state
-        if state.current_latent is not None:
+        if shared.state.current_latent is not None:
             log.debug("`current_latent` is set, skipping MPS garbage collection")
             return
         from torch.mps import empty_cache
diff --git a/modules/options.py b/modules/options.py
new file mode 100644
index 00000000..59cb75ec
--- /dev/null
+++ b/modules/options.py
@@ -0,0 +1,236 @@
+import json
+import sys
+
+import gradio as gr
+
+from modules import errors
+from modules.shared_cmd_options import cmd_opts
+
+
+class OptionInfo:
+    def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''):
+        self.default = default
+        self.label = label
+        self.component = component
+        self.component_args = component_args
+        self.onchange = onchange
+        self.section = section
+        self.refresh = refresh
+        self.do_not_save = False
+
+        self.comment_before = comment_before
+        """HTML text that will be added after label in UI"""
+
+        self.comment_after = comment_after
+        """HTML text that will be added before label in UI"""
+
+    def link(self, label, url):
+        self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]"
+        return self
+
+    def js(self, label, js_func):
+        self.comment_before += f"[<a onclick='{js_func}(); return false'>{label}</a>]"
+        return self
+
+    def info(self, info):
+        self.comment_after += f"<span class='info'>({info})</span>"
+        return self
+
+    def html(self, html):
+        self.comment_after += html
+        return self
+
+    def needs_restart(self):
+        self.comment_after += " <span class='info'>(requires restart)</span>"
+        return self
+
+    def needs_reload_ui(self):
+        self.comment_after += " <span class='info'>(requires Reload UI)</span>"
+        return self
+
+
+class OptionHTML(OptionInfo):
+    def __init__(self, text):
+        super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs))
+
+        self.do_not_save = True
+
+
+def options_section(section_identifier, options_dict):
+    for v in options_dict.values():
+        v.section = section_identifier
+
+    return options_dict
+
+
+options_builtin_fields = {"data_labels", "data", "restricted_opts", "typemap"}
+
+
+class Options:
+    typemap = {int: float}
+
+    def __init__(self, data_labels, restricted_opts):
+        self.data_labels = data_labels
+        self.data = {k: v.default for k, v in self.data_labels.items()}
+        self.restricted_opts = restricted_opts
+
+    def __setattr__(self, key, value):
+        if key in options_builtin_fields:
+            return super(Options, self).__setattr__(key, value)
+
+        if self.data is not None:
+            if key in self.data or key in self.data_labels:
+                assert not cmd_opts.freeze_settings, "changing settings is disabled"
+
+                info = self.data_labels.get(key, None)
+                if info.do_not_save:
+                    return
+
+                comp_args = info.component_args if info else None
+                if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
+                    raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+                if cmd_opts.hide_ui_dir_config and key in self.restricted_opts:
+                    raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+                self.data[key] = value
+                return
+
+        return super(Options, self).__setattr__(key, value)
+
+    def __getattr__(self, item):
+        if item in options_builtin_fields:
+            return super(Options, self).__getattribute__(item)
+
+        if self.data is not None:
+            if item in self.data:
+                return self.data[item]
+
+        if item in self.data_labels:
+            return self.data_labels[item].default
+
+        return super(Options, self).__getattribute__(item)
+
+    def set(self, key, value):
+        """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
+
+        oldval = self.data.get(key, None)
+        if oldval == value:
+            return False
+
+        if self.data_labels[key].do_not_save:
+            return False
+
+        try:
+            setattr(self, key, value)
+        except RuntimeError:
+            return False
+
+        if self.data_labels[key].onchange is not None:
+            try:
+                self.data_labels[key].onchange()
+            except Exception as e:
+                errors.display(e, f"changing setting {key} to {value}")
+                setattr(self, key, oldval)
+                return False
+
+        return True
+
+    def get_default(self, key):
+        """returns the default value for the key"""
+
+        data_label = self.data_labels.get(key)
+        if data_label is None:
+            return None
+
+        return data_label.default
+
+    def save(self, filename):
+        assert not cmd_opts.freeze_settings, "saving settings is disabled"
+
+        with open(filename, "w", encoding="utf8") as file:
+            json.dump(self.data, file, indent=4)
+
+    def same_type(self, x, y):
+        if x is None or y is None:
+            return True
+
+        type_x = self.typemap.get(type(x), type(x))
+        type_y = self.typemap.get(type(y), type(y))
+
+        return type_x == type_y
+
+    def load(self, filename):
+        with open(filename, "r", encoding="utf8") as file:
+            self.data = json.load(file)
+
+        # 1.6.0 VAE defaults
+        if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None:
+            self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default')
+
+        # 1.1.1 quicksettings list migration
+        if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
+            self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
+
+        # 1.4.0 ui_reorder
+        if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
+            self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
+
+        bad_settings = 0
+        for k, v in self.data.items():
+            info = self.data_labels.get(k, None)
+            if info is not None and not self.same_type(info.default, v):
+                print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
+                bad_settings += 1
+
+        if bad_settings > 0:
+            print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
+
+    def onchange(self, key, func, call=True):
+        item = self.data_labels.get(key)
+        item.onchange = func
+
+        if call:
+            func()
+
+    def dumpjson(self):
+        d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
+        d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
+        d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
+        return json.dumps(d)
+
+    def add_option(self, key, info):
+        self.data_labels[key] = info
+
+    def reorder(self):
+        """reorder settings so that all items related to section always go together"""
+
+        section_ids = {}
+        settings_items = self.data_labels.items()
+        for _, item in settings_items:
+            if item.section not in section_ids:
+                section_ids[item.section] = len(section_ids)
+
+        self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
+
+    def cast_value(self, key, value):
+        """casts an arbitrary value to the same type as this setting's value with key
+        Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
+        """
+
+        if value is None:
+            return None
+
+        default_value = self.data_labels[key].default
+        if default_value is None:
+            default_value = getattr(self, key, None)
+        if default_value is None:
+            return None
+
+        expected_type = type(default_value)
+        if expected_type == bool and value == "False":
+            value = False
+        else:
+            value = expected_type(value)
+
+        return value
diff --git a/modules/rng.py b/modules/rng.py
index 2d7baea5..f927a318 100644
--- a/modules/rng.py
+++ b/modules/rng.py
@@ -63,9 +63,8 @@ def randn_without_seed(shape, generator=None):

 def manual_seed(seed):
     """Set up a global random number generator using the specified seed."""
-    from modules.shared import opts

-    if opts.randn_source == "NV":
+    if shared.opts.randn_source == "NV":
         global nv_rng
         nv_rng = rng_philox.Generator(seed)
         return
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 53c1df54..88a09899 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -14,7 +14,7 @@ import ldm.modules.midas as midas

 from ldm.util import instantiate_from_config

-from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack
 from modules.timer import Timer

 import tomesd
@@ -473,7 +473,6 @@ model_data = SdModelData()


 def get_empty_cond(sd_model):
-    from modules import extra_networks, processing
     p = processing.StableDiffusionProcessingTxt2Img()
     extra_networks.activate(p, {})

@@ -486,8 +485,6 @@ def get_empty_cond(sd_model):


 def send_model_to_cpu(m):
-    from modules import lowvram
-
     if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
         lowvram.send_everything_to_cpu()
     else:
@@ -497,8 +494,6 @@ def send_model_to_cpu(m):


 def send_model_to_device(m):
-    from modules import lowvram
-
     if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
         lowvram.setup_for_low_vram(m, shared.cmd_opts.medvram)
     else:
@@ -642,7 +637,6 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):


 def reload_model_weights(sd_model=None, info=None):
-    from modules import devices, sd_hijack
     checkpoint_info = info or select_checkpoint()

     timer = Timer()
@@ -705,7 +699,6 @@ def reload_model_weights(sd_model=None, info=None):


 def unload_model_weights(sd_model=None, info=None):
-    from modules import devices, sd_hijack
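These sd_models.py hunks are representative of the whole commit: imports that previously had to hide inside function bodies to dodge the cycle through shared.py are hoisted back to module level. A standalone toy, with hypothetical module names rather than the webui's, of why the function-level form works even while a cycle exists:

# Toy reproduction of the cycle this commit untangles; module names are
# hypothetical. shared_toy imports images_toy while it is itself still
# loading, so images_toy may only import shared_toy lazily, from inside
# a function that runs after both modules have finished initializing.
import pathlib
import sys
import tempfile

pkg = pathlib.Path(tempfile.mkdtemp())

(pkg / "shared_toy.py").write_text(
    "import images_toy\n"                 # module-level import starts the cycle
    "opts = {'samples_format': 'png'}\n"
)

(pkg / "images_toy.py").write_text(
    # a module-level "import shared_toy" here would bind a half-initialized
    # module; any top-level use of shared_toy.opts would raise AttributeError
    "def image_format():\n"
    "    import shared_toy\n"             # deferred import: safe at call time
    "    return shared_toy.opts['samples_format']\n"
)

sys.path.insert(0, str(pkg))
import shared_toy

print(shared_toy.images_toy.image_format())  # -> png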
timer = Timer() if model_data.sd_model: diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 8266fa39..08dd03f1 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -2,7 +2,7 @@ import os import torch -from modules import shared, paths, sd_disable_initialization +from modules import shared, paths, sd_disable_initialization, devices sd_configs_path = shared.sd_configs_path sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion") @@ -29,7 +29,6 @@ def is_using_v_parameterization_for_sd2(state_dict): """ import ldm.modules.diffusionmodules.openaimodel - from modules import devices device = devices.cpu diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 38bcb840..5ac1ac31 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -2,7 +2,8 @@ import os import collections from dataclasses import dataclass -from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks +from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack + import glob from copy import deepcopy @@ -231,8 +232,6 @@ unspecified = object() def reload_vae_weights(sd_model=None, vae_file=unspecified): - from modules import lowvram, devices, sd_hijack - if not sd_model: sd_model = shared.sd_model diff --git a/modules/shared.py b/modules/shared.py index e9b980a4..8ba72f49 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,843 +1,51 @@ -import datetime -import json -import os -import re import sys -import threading -import time -import logging import gradio as gr -import torch -import tqdm -import launch -import modules.interrogate -import modules.memmon -import modules.styles -import modules.devices as devices -from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 +from modules import shared_cmd_options, shared_gradio_themes, options, shared_items from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from ldm.models.diffusion.ddpm import LatentDiffusion -from typing import Optional +from modules import util -log = logging.getLogger(__name__) - -demo = None - -parser = cmd_args.parser - -script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) -script_loading.preload_extensions(extensions_builtin_dir, parser) - -if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: - cmd_opts = parser.parse_args() -else: - cmd_opts, _ = parser.parse_known_args() - - -restricted_opts = { - "samples_filename_pattern", - "directories_filename_pattern", - "outdir_samples", - "outdir_txt2img_samples", - "outdir_img2img_samples", - "outdir_extras_samples", - "outdir_grids", - "outdir_txt2img_grids", - "outdir_save", - "outdir_init_images" -} - -# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json -gradio_hf_hub_themes = [ - "gradio/base", - "gradio/glass", - "gradio/monochrome", - "gradio/seafoam", - "gradio/soft", - "gradio/dracula_test", - "abidlabs/dracula_test", - "abidlabs/Lime", - "abidlabs/pakistan", - "Ama434/neutral-barlow", - "dawood/microsoft_windows", - "finlaymacklon/smooth_slate", - "Franklisi/darkmode", - "freddyaboulton/dracula_revamped", - "freddyaboulton/test-blue", - "gstaff/xkcd", - "Insuz/Mocha", - "Insuz/SimpleIndigo", - 
"JohnSmith9982/small_and_pretty", - "nota-ai/theme", - "nuttea/Softblue", - "ParityError/Anime", - "reilnuud/polite", - "remilia/Ghostly", - "rottenlittlecreature/Moon_Goblin", - "step-3-profit/Midnight-Deep", - "Taithrah/Minimal", - "ysharma/huggingface", - "ysharma/steampunk" -] - - -cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access - -devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ - (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) - -devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 -devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 - -device = devices.device -weight_load_location = None if cmd_opts.lowram else "cpu" +cmd_opts = shared_cmd_options.cmd_opts +parser = shared_cmd_options.parser batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram -xformers_available = False -config_filename = cmd_opts.ui_settings_file - -os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) -hypernetworks = {} -loaded_hypernetworks = [] - - -def reload_hypernetworks(): - from modules.hypernetworks import hypernetwork - global hypernetworks - - hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) - - -class State: - skipped = False - interrupted = False - job = "" - job_no = 0 - job_count = 0 - processing_has_refined_job_count = False - job_timestamp = '0' - sampling_step = 0 - sampling_steps = 0 - current_latent = None - current_image = None - current_image_sampling_step = 0 - id_live_preview = 0 - textinfo = None - time_start = None - server_start = None - _server_command_signal = threading.Event() - _server_command: Optional[str] = None - - @property - def need_restart(self) -> bool: - # Compatibility getter for need_restart. - return self.server_command == "restart" - - @need_restart.setter - def need_restart(self, value: bool) -> None: - # Compatibility setter for need_restart. - if value: - self.server_command = "restart" - - @property - def server_command(self): - return self._server_command - - @server_command.setter - def server_command(self, value: Optional[str]) -> None: - """ - Set the server command to `value` and signal that it's been set. - """ - self._server_command = value - self._server_command_signal.set() - - def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: - """ - Wait for server command to get set; return and clear the value and signal. 
- """ - if self._server_command_signal.wait(timeout): - self._server_command_signal.clear() - req = self._server_command - self._server_command = None - return req - return None - - def request_restart(self) -> None: - self.interrupt() - self.server_command = "restart" - log.info("Received restart request") - - def skip(self): - self.skipped = True - log.info("Received skip request") - - def interrupt(self): - self.interrupted = True - log.info("Received interrupt request") - - def nextjob(self): - if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: - self.do_set_current_image() - - self.job_no += 1 - self.sampling_step = 0 - self.current_image_sampling_step = 0 - - def dict(self): - obj = { - "skipped": self.skipped, - "interrupted": self.interrupted, - "job": self.job, - "job_count": self.job_count, - "job_timestamp": self.job_timestamp, - "job_no": self.job_no, - "sampling_step": self.sampling_step, - "sampling_steps": self.sampling_steps, - } - - return obj - - def begin(self, job: str = "(unknown)"): - self.sampling_step = 0 - self.job_count = -1 - self.processing_has_refined_job_count = False - self.job_no = 0 - self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") - self.current_latent = None - self.current_image = None - self.current_image_sampling_step = 0 - self.id_live_preview = 0 - self.skipped = False - self.interrupted = False - self.textinfo = None - self.time_start = time.time() - self.job = job - devices.torch_gc() - log.info("Starting job %s", job) - - def end(self): - duration = time.time() - self.time_start - log.info("Ending job %s (%.2f seconds)", self.job, duration) - self.job = "" - self.job_count = 0 - - devices.torch_gc() - - def set_current_image(self): - """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" - if not parallel_processing_allowed: - return - - if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: - self.do_set_current_image() - - def do_set_current_image(self): - if self.current_latent is None: - return - - import modules.sd_samplers - - try: - if opts.show_progress_grid: - self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) - else: - self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) - - self.current_image_sampling_step = self.sampling_step - - except Exception: - # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
- # we silently ignore this error - errors.record_exception() - - def assign_current_image(self, image): - self.current_image = image - self.id_live_preview += 1 - - -state = State() -state.server_start = time.time() - styles_filename = cmd_opts.styles_file -prompt_styles = modules.styles.StyleDatabase(styles_filename) - -interrogator = modules.interrogate.InterrogateModels("interrogate") - -face_restorers = [] - - -class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): - self.default = default - self.label = label - self.component = component - self.component_args = component_args - self.onchange = onchange - self.section = section - self.refresh = refresh - self.do_not_save = False - - self.comment_before = comment_before - """HTML text that will be added after label in UI""" - - self.comment_after = comment_after - """HTML text that will be added before label in UI""" - - def link(self, label, url): - self.comment_before += f"[{label}]" - return self - - def js(self, label, js_func): - self.comment_before += f"[{label}]" - return self - - def info(self, info): - self.comment_after += f"({info})" - return self - - def html(self, html): - self.comment_after += html - return self - - def needs_restart(self): - self.comment_after += " (requires restart)" - return self - - def needs_reload_ui(self): - self.comment_after += " (requires Reload UI)" - return self - - -class OptionHTML(OptionInfo): - def __init__(self, text): - super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) - - self.do_not_save = True - - -def options_section(section_identifier, options_dict): - for v in options_dict.values(): - v.section = section_identifier - - return options_dict - - -def list_checkpoint_tiles(): - import modules.sd_models - return modules.sd_models.checkpoint_tiles() - - -def refresh_checkpoints(): - import modules.sd_models - return modules.sd_models.list_models() - - -def list_samplers(): - import modules.sd_samplers - return modules.sd_samplers.all_samplers - - +config_filename = cmd_opts.ui_settings_file hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} -tab_names = [] - -options_templates = {} - -options_templates.update(options_section(('saving-images', "Saving images/grids"), { - "samples_save": OptionInfo(True, "Always save all generated images"), - "samples_format": OptionInfo('png', 'File format for images'), - "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), - - "grid_save": OptionInfo(True, "Always save all generated image grids"), - "grid_format": OptionInfo('png', 'File format for grids'), - "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), - "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"), - "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"), - "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - 
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}), - "font": OptionInfo("", "Font for image grids that have text"), - "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}), - "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}), - "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}), - - "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), - "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), - "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), - "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), - "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), - "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"), - "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), - "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), - "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), - "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), - "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), - "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"), - - "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), - "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), - "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), - "save_init_img": OptionInfo(False, "Save init images when using img2img"), - - "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), - "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), - - "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."), -})) - -options_templates.update(options_section(('saving-paths', "Paths for saving"), { - "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs), - "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs), - "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs), - "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs), - "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two 
directories below", component_args=hide_dirs), - "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), - "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), - "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), - "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), -})) - -options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { - "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), - "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), - "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), - "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), -})) - -options_templates.update(options_section(('upscaling', "Upscaling"), { - "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), - "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), - "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), -})) - -options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), - "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), - "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), -})) - -options_templates.update(options_section(('system', "System"), { - "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}), - "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), - "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), - "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), - "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), - "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), - "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), - "list_hidden_files": OptionInfo(True, "Load models/files in hidden 
directories").info("directory is hidden if its name starts with \".\""), - "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"), - "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."), -})) - -options_templates.update(options_section(('training', "Training"), { - "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), - "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), - "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), - "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."), - "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), - "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), - "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), - "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), - "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"), - "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."), - "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."), - "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), -})) - -options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), - "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), - "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), - "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), - "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), - "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), - "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), - "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), - "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), - "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), -})) - -options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { - "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), - "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), - "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), - "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), -})) -options_templates.update(options_section(('vae', "VAE"), { - "sd_vae_explanation": OptionHTML(""" -VAE is a neural network that transforms a standard RGB -image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling -(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. -For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
-"""), - "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), - "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), - "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), - "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), - "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), -})) - -options_templates.update(options_section(('img2img', "img2img"), { - "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), - "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), - "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), - "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), - "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), - "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), - "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), - "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), - "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), -})) - -options_templates.update(options_section(('optimizations', "Optimizations"), { - "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), - "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), - 
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), - "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), -})) - -options_templates.update(options_section(('compatibility', "Compatibility"), { - "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), - "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), - "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), - "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), - "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), - "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), -})) - -options_templates.update(options_section(('interrogate', "Interrogate"), { - "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), - "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), - "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), - "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), - "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), - "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), - "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), - "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), - "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), - "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), - "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), - "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), -})) - 
-options_templates.update(options_section(('extra_networks', "Extra Networks"), { - "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), - "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), - "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), - "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), - "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), - "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), - "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), - "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), - "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), - "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), - "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), -})) - -options_templates.update(options_section(('ui', "User interface"), { - "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), - "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), - "return_grid": OptionInfo(True, "Show grid in results for web"), - "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), - "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), - "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), - "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), - "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), - "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), - "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), - "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), - "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), - "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when 
editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), - "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), - "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), - "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), -})) - - -options_templates.update(options_section(('infotext', "Infotext"), { - "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), - "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), - "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), - "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), - "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"), - "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""
<ul style='margin-left: 1.5em'>
-<li>Ignore: keep prompt and styles dropdown as it is.</li>
-<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
-<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
-<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
-</ul>
"""), - -})) - -options_templates.update(options_section(('ui', "Live previews"), { - "show_progressbar": OptionInfo(True, "Show progressbar"), - "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), - "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), - "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), - "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), - "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"), - "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), - "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), -})) - -options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), - "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), - "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), - "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), - 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), - 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), - 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), - 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), - 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), - 'eta_noise_seed_delta': OptionInfo(0, "Eta noise 
seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), - 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), - 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), - 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), - 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), - 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), -})) - -options_templates.update(options_section(('postprocessing', "Postprocessing"), { - 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}), - 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), -})) - -options_templates.update(options_section((None, "Hidden options"), { - "disabled_extensions": OptionInfo([], "Disable these extensions"), - "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), - "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), - "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), -})) - - -options_templates.update() - - -class Options: - data = None - data_labels = options_templates - typemap = {int: float} - - def __init__(self): - self.data = {k: v.default for k, v in self.data_labels.items()} - - def __setattr__(self, key, value): - if self.data is not None: - if key in self.data or key in self.data_labels: - assert not cmd_opts.freeze_settings, "changing settings is disabled" - - info = opts.data_labels.get(key, None) - if info.do_not_save: - return - - comp_args = info.component_args if info else None - if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - if cmd_opts.hide_ui_dir_config and key in restricted_opts: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - self.data[key] = value - return - - return super(Options, self).__setattr__(key, value) - - def __getattr__(self, item): - if self.data is not None: - if item in self.data: - return self.data[item] - - if item in self.data_labels: - return self.data_labels[item].default - - return super(Options, self).__getattribute__(item) - - def set(self, key, value): - """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" - - oldval = self.data.get(key, None) - if oldval == value: - return False - - if self.data_labels[key].do_not_save: - return False - - try: - setattr(self, key, value) - except RuntimeError: - return False - - if self.data_labels[key].onchange is not None: - try: - 
self.data_labels[key].onchange() - except Exception as e: - errors.display(e, f"changing setting {key} to {value}") - setattr(self, key, oldval) - return False - - return True - - def get_default(self, key): - """returns the default value for the key""" - - data_label = self.data_labels.get(key) - if data_label is None: - return None - - return data_label.default - - def save(self, filename): - assert not cmd_opts.freeze_settings, "saving settings is disabled" - - with open(filename, "w", encoding="utf8") as file: - json.dump(self.data, file, indent=4) - - def same_type(self, x, y): - if x is None or y is None: - return True - - type_x = self.typemap.get(type(x), type(x)) - type_y = self.typemap.get(type(y), type(y)) - - return type_x == type_y - - def load(self, filename): - with open(filename, "r", encoding="utf8") as file: - self.data = json.load(file) - - # 1.6.0 VAE defaults - if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: - self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') - - # 1.1.1 quicksettings list migration - if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: - self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] - - # 1.4.0 ui_reorder - if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data: - self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')] - - bad_settings = 0 - for k, v in self.data.items(): - info = self.data_labels.get(k, None) - if info is not None and not self.same_type(info.default, v): - print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr) - bad_settings += 1 - - if bad_settings > 0: - print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) - - def onchange(self, key, func, call=True): - item = self.data_labels.get(key) - item.onchange = func - - if call: - func() - - def dumpjson(self): - d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} - d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} - d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} - return json.dumps(d) - - def add_option(self, key, info): - self.data_labels[key] = info - - def reorder(self): - """reorder settings so that all items related to section always go together""" - - section_ids = {} - settings_items = self.data_labels.items() - for _, item in settings_items: - if item.section not in section_ids: - section_ids[item.section] = len(section_ids) - - self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) - - def cast_value(self, key, value): - """casts an arbitrary value to the same type as this setting's value with key - Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str) - """ - - if value is None: - return None - - default_value = self.data_labels[key].default - if default_value is None: - default_value = getattr(self, key, None) - if default_value is None: - return None - - expected_type = type(default_value) - if expected_type == bool and value == "False": - value = False - else: - value = expected_type(value) - - 
return value +demo = None +device = None -opts = Options() -if os.path.exists(config_filename): - opts.load(config_filename) +weight_load_location = None +xformers_available = False -class Shared(sys.modules[__name__].__class__): - """ - this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than - at program startup. - """ +hypernetworks = {} - sd_model_val = None +loaded_hypernetworks = [] - @property - def sd_model(self): - import modules.sd_models +state = None - return modules.sd_models.model_data.get_sd_model() +prompt_styles = None - @sd_model.setter - def sd_model(self, value): - import modules.sd_models +interrogator = None - modules.sd_models.model_data.set_sd_model(value) +face_restorers = [] +options_templates = None +opts = None -sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead -sys.modules[__name__].__class__ = Shared +sd_model: LatentDiffusion = None settings_components = None """assigned from ui.py, a mapping of setting names to gradio components responsible for those settings""" +tab_names = [] + latent_upscale_default_mode = "Latent" latent_upscale_modes = { "Latent": {"mode": "bilinear", "antialias": False}, @@ -856,121 +64,24 @@ progress_print_out = sys.stdout gradio_theme = gr.themes.Base() +total_tqdm = None -def reload_gradio_theme(theme_name=None): - global gradio_theme - if not theme_name: - theme_name = opts.gradio_theme - - default_theme_args = dict( - font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) - - if theme_name == "Default": - gradio_theme = gr.themes.Default(**default_theme_args) - else: - try: - theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') - theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') - if opts.gradio_themes_cache and os.path.exists(theme_cache_path): - gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) - else: - os.makedirs(theme_cache_dir, exist_ok=True) - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - gradio_theme.dump(theme_cache_path) - except Exception as e: - errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default(**default_theme_args) - - -class TotalTQDM: - def __init__(self): - self._tqdm = None - - def reset(self): - self._tqdm = tqdm.tqdm( - desc="Total progress", - total=state.job_count * state.sampling_steps, - position=1, - file=progress_print_out - ) - - def update(self): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.update() - - def updateTotal(self, new_total): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.total = new_total - - def clear(self): - if self._tqdm is not None: - self._tqdm.refresh() - self._tqdm.close() - self._tqdm = None - - -total_tqdm = TotalTQDM() - -mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) -mem_mon.start() - - -def natural_sort_key(s, regex=re.compile('([0-9]+)')): - return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] - - -def listfiles(dirname): - filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")] - return [file for file in filenames if
os.path.isfile(file)] - - -def html_path(filename): - return os.path.join(script_path, "html", filename) - - -def html(filename): - path = html_path(filename) - - if os.path.exists(path): - with open(path, encoding="utf8") as file: - return file.read() - - return "" - - -def walk_files(path, allowed_extensions=None): - if not os.path.exists(path): - return - - if allowed_extensions is not None: - allowed_extensions = set(allowed_extensions) - - items = list(os.walk(path, followlinks=True)) - items = sorted(items, key=lambda x: natural_sort_key(x[0])) - - for root, _, files in items: - for filename in sorted(files, key=natural_sort_key): - if allowed_extensions is not None: - _, ext = os.path.splitext(filename) - if ext not in allowed_extensions: - continue - - if not opts.list_hidden_files and ("/." in root or "\\." in root): - continue +mem_mon = None - yield os.path.join(root, filename) +options_section = options.options_section +OptionInfo = options.OptionInfo +OptionHTML = options.OptionHTML +natural_sort_key = util.natural_sort_key +listfiles = util.listfiles +html_path = util.html_path +html = util.html +walk_files = util.walk_files +ldm_print = util.ldm_print -def ldm_print(*args, **kwargs): - if opts.hide_ldm_prints: - return +reload_gradio_theme = shared_gradio_themes.reload_gradio_theme - print(*args, **kwargs) +list_checkpoint_tiles = shared_items.list_checkpoint_tiles +refresh_checkpoints = shared_items.refresh_checkpoints +list_samplers = shared_items.list_samplers +reload_hypernetworks = shared_items.reload_hypernetworks diff --git a/modules/shared_cmd_options.py b/modules/shared_cmd_options.py new file mode 100644 index 00000000..af24938b --- /dev/null +++ b/modules/shared_cmd_options.py @@ -0,0 +1,18 @@ +import os + +import launch +from modules import cmd_args, script_loading +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 + +parser = cmd_args.parser + +script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) +script_loading.preload_extensions(extensions_builtin_dir, parser) + +if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: + cmd_opts = parser.parse_args() +else: + cmd_opts, _ = parser.parse_known_args() + + +cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access diff --git a/modules/shared_gradio_themes.py b/modules/shared_gradio_themes.py new file mode 100644 index 00000000..ad1f2212 --- /dev/null +++ b/modules/shared_gradio_themes.py @@ -0,0 +1,66 @@ +import os + +import gradio as gr + +from modules import errors, shared +from modules.paths_internal import script_path + + +# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json +gradio_hf_hub_themes = [ + "gradio/base", + "gradio/glass", + "gradio/monochrome", + "gradio/seafoam", + "gradio/soft", + "gradio/dracula_test", + "abidlabs/dracula_test", + "abidlabs/Lime", + "abidlabs/pakistan", + "Ama434/neutral-barlow", + "dawood/microsoft_windows", + "finlaymacklon/smooth_slate", + "Franklisi/darkmode", + "freddyaboulton/dracula_revamped", + "freddyaboulton/test-blue", + "gstaff/xkcd", + "Insuz/Mocha", + "Insuz/SimpleIndigo", + "JohnSmith9982/small_and_pretty", + "nota-ai/theme", + "nuttea/Softblue", + "ParityError/Anime", + "reilnuud/polite", + 
"remilia/Ghostly", + "rottenlittlecreature/Moon_Goblin", + "step-3-profit/Midnight-Deep", + "Taithrah/Minimal", + "ysharma/huggingface", + "ysharma/steampunk" +] + + +def reload_gradio_theme(theme_name=None): + if not theme_name: + theme_name = shared.opts.gradio_theme + + default_theme_args = dict( + font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], + font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], + ) + + if theme_name == "Default": + shared.gradio_theme = gr.themes.Default(**default_theme_args) + else: + try: + theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') + theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') + if shared.opts.gradio_themes_cache and os.path.exists(theme_cache_path): + shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) + else: + os.makedirs(theme_cache_dir, exist_ok=True) + gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + gradio_theme.dump(theme_cache_path) + except Exception as e: + errors.display(e, "changing gradio theme") + shared.gradio_theme = gr.themes.Default(**default_theme_args) diff --git a/modules/shared_init.py b/modules/shared_init.py new file mode 100644 index 00000000..e7fc18d2 --- /dev/null +++ b/modules/shared_init.py @@ -0,0 +1,51 @@ +import os + +import torch + +from modules import shared +from modules.shared import cmd_opts + +import sys +sys.setrecursionlimit(1000) + + +def initialize(): + """Initializes fields inside the shared module in a controlled manner. + + Should be called early because some other modules you can import mingt need these fields to be already set. + """ + + os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) + + from modules import options, shared_options + shared.options_templates = shared_options.options_templates + shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts) + if os.path.exists(shared.config_filename): + shared.opts.load(shared.config_filename) + + from modules import devices + devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ + (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) + + devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 + devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 + + shared.device = devices.device + shared.weight_load_location = None if cmd_opts.lowram else "cpu" + + from modules import shared_state + shared.state = shared_state.State() + + from modules import styles + shared.prompt_styles = styles.StyleDatabase(shared.styles_filename) + + from modules import interrogate + shared.interrogator = interrogate.InterrogateModels("interrogate") + + from modules import shared_total_tqdm + shared.total_tqdm = shared_total_tqdm.TotalTQDM() + + from modules import memmon, devices + shared.mem_mon = memmon.MemUsageMonitor("MemMon", devices.device, shared.opts) + shared.mem_mon.start() + diff --git a/modules/shared_items.py b/modules/shared_items.py index 89792e88..e4ec40a8 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -1,3 +1,6 @@ +import sys + +from modules.shared_cmd_options import cmd_opts def realesrgan_models_names(): @@ -41,6 +44,28 @@ def refresh_unet_list(): modules.sd_unet.list_unets() +def list_checkpoint_tiles(): + import modules.sd_models + return 
modules.sd_models.checkpoint_tiles() + + +def refresh_checkpoints(): + import modules.sd_models + return modules.sd_models.list_models() + + +def list_samplers(): + import modules.sd_samplers + return modules.sd_samplers.all_samplers + + +def reload_hypernetworks(): + from modules.hypernetworks import hypernetwork + from modules import shared + + shared.hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) + + ui_reorder_categories_builtin_items = [ "inpaint", "sampler", @@ -67,3 +92,27 @@ def ui_reorder_categories(): yield from sections yield "scripts" + + +class Shared(sys.modules[__name__].__class__): + """ + this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than + at program startup. + """ + + sd_model_val = None + + @property + def sd_model(self): + import modules.sd_models + + return modules.sd_models.model_data.get_sd_model() + + @sd_model.setter + def sd_model(self, value): + import modules.sd_models + + modules.sd_models.model_data.set_sd_model(value) + + +sys.modules['modules.shared'].__class__ = Shared diff --git a/modules/shared_options.py b/modules/shared_options.py index e9b980a4..7468bc81 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -1,40 +1,12 @@ -import datetime -import json -import os -import re -import sys -import threading -import time -import logging - import gradio as gr -import torch -import tqdm - -import launch -import modules.interrogate -import modules.memmon -import modules.styles -import modules.devices as devices -from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args, rng # noqa: F401 -from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 -from ldm.models.diffusion.ddpm import LatentDiffusion -from typing import Optional - -log = logging.getLogger(__name__) - -demo = None - -parser = cmd_args.parser -script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) -script_loading.preload_extensions(extensions_builtin_dir, parser) - -if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: - cmd_opts = parser.parse_args() -else: - cmd_opts, _ = parser.parse_known_args() +from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 +from modules.shared_cmd_options import cmd_opts +from modules.options import options_section, OptionInfo, OptionHTML +options_templates = {} +hide_dirs = shared.hide_dirs restricted_opts = { "samples_filename_pattern", @@ -49,302 +21,6 @@ restricted_opts = { "outdir_init_images" } -# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json -gradio_hf_hub_themes = [ - "gradio/base", - "gradio/glass", - "gradio/monochrome", - "gradio/seafoam", - "gradio/soft", - "gradio/dracula_test", - "abidlabs/dracula_test", - "abidlabs/Lime", - "abidlabs/pakistan", - "Ama434/neutral-barlow", - "dawood/microsoft_windows", - "finlaymacklon/smooth_slate", - "Franklisi/darkmode", - "freddyaboulton/dracula_revamped", - "freddyaboulton/test-blue", - "gstaff/xkcd", - "Insuz/Mocha", - "Insuz/SimpleIndigo", - 
"JohnSmith9982/small_and_pretty", - "nota-ai/theme", - "nuttea/Softblue", - "ParityError/Anime", - "reilnuud/polite", - "remilia/Ghostly", - "rottenlittlecreature/Moon_Goblin", - "step-3-profit/Midnight-Deep", - "Taithrah/Minimal", - "ysharma/huggingface", - "ysharma/steampunk" -] - - -cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access - -devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ - (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) - -devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 -devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 - -device = devices.device -weight_load_location = None if cmd_opts.lowram else "cpu" - -batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) -parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram -xformers_available = False -config_filename = cmd_opts.ui_settings_file - -os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) -hypernetworks = {} -loaded_hypernetworks = [] - - -def reload_hypernetworks(): - from modules.hypernetworks import hypernetwork - global hypernetworks - - hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) - - -class State: - skipped = False - interrupted = False - job = "" - job_no = 0 - job_count = 0 - processing_has_refined_job_count = False - job_timestamp = '0' - sampling_step = 0 - sampling_steps = 0 - current_latent = None - current_image = None - current_image_sampling_step = 0 - id_live_preview = 0 - textinfo = None - time_start = None - server_start = None - _server_command_signal = threading.Event() - _server_command: Optional[str] = None - - @property - def need_restart(self) -> bool: - # Compatibility getter for need_restart. - return self.server_command == "restart" - - @need_restart.setter - def need_restart(self, value: bool) -> None: - # Compatibility setter for need_restart. - if value: - self.server_command = "restart" - - @property - def server_command(self): - return self._server_command - - @server_command.setter - def server_command(self, value: Optional[str]) -> None: - """ - Set the server command to `value` and signal that it's been set. - """ - self._server_command = value - self._server_command_signal.set() - - def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: - """ - Wait for server command to get set; return and clear the value and signal. 
- """ - if self._server_command_signal.wait(timeout): - self._server_command_signal.clear() - req = self._server_command - self._server_command = None - return req - return None - - def request_restart(self) -> None: - self.interrupt() - self.server_command = "restart" - log.info("Received restart request") - - def skip(self): - self.skipped = True - log.info("Received skip request") - - def interrupt(self): - self.interrupted = True - log.info("Received interrupt request") - - def nextjob(self): - if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: - self.do_set_current_image() - - self.job_no += 1 - self.sampling_step = 0 - self.current_image_sampling_step = 0 - - def dict(self): - obj = { - "skipped": self.skipped, - "interrupted": self.interrupted, - "job": self.job, - "job_count": self.job_count, - "job_timestamp": self.job_timestamp, - "job_no": self.job_no, - "sampling_step": self.sampling_step, - "sampling_steps": self.sampling_steps, - } - - return obj - - def begin(self, job: str = "(unknown)"): - self.sampling_step = 0 - self.job_count = -1 - self.processing_has_refined_job_count = False - self.job_no = 0 - self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") - self.current_latent = None - self.current_image = None - self.current_image_sampling_step = 0 - self.id_live_preview = 0 - self.skipped = False - self.interrupted = False - self.textinfo = None - self.time_start = time.time() - self.job = job - devices.torch_gc() - log.info("Starting job %s", job) - - def end(self): - duration = time.time() - self.time_start - log.info("Ending job %s (%.2f seconds)", self.job, duration) - self.job = "" - self.job_count = 0 - - devices.torch_gc() - - def set_current_image(self): - """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" - if not parallel_processing_allowed: - return - - if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: - self.do_set_current_image() - - def do_set_current_image(self): - if self.current_latent is None: - return - - import modules.sd_samplers - - try: - if opts.show_progress_grid: - self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) - else: - self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) - - self.current_image_sampling_step = self.sampling_step - - except Exception: - # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
- # we silently ignore this error - errors.record_exception() - - def assign_current_image(self, image): - self.current_image = image - self.id_live_preview += 1 - - -state = State() -state.server_start = time.time() - -styles_filename = cmd_opts.styles_file -prompt_styles = modules.styles.StyleDatabase(styles_filename) - -interrogator = modules.interrogate.InterrogateModels("interrogate") - -face_restorers = [] - - -class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): - self.default = default - self.label = label - self.component = component - self.component_args = component_args - self.onchange = onchange - self.section = section - self.refresh = refresh - self.do_not_save = False - - self.comment_before = comment_before - """HTML text that will be added after label in UI""" - - self.comment_after = comment_after - """HTML text that will be added before label in UI""" - - def link(self, label, url): - self.comment_before += f"[{label}]" - return self - - def js(self, label, js_func): - self.comment_before += f"[{label}]" - return self - - def info(self, info): - self.comment_after += f"({info})" - return self - - def html(self, html): - self.comment_after += html - return self - - def needs_restart(self): - self.comment_after += " (requires restart)" - return self - - def needs_reload_ui(self): - self.comment_after += " (requires Reload UI)" - return self - - -class OptionHTML(OptionInfo): - def __init__(self, text): - super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) - - self.do_not_save = True - - -def options_section(section_identifier, options_dict): - for v in options_dict.values(): - v.section = section_identifier - - return options_dict - - -def list_checkpoint_tiles(): - import modules.sd_models - return modules.sd_models.checkpoint_tiles() - - -def refresh_checkpoints(): - import modules.sd_models - return modules.sd_models.list_models() - - -def list_samplers(): - import modules.sd_samplers - return modules.sd_samplers.all_samplers - - -hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} -tab_names = [] - -options_templates = {} - options_templates.update(options_section(('saving-images', "Saving images/grids"), { "samples_save": OptionInfo(True, "Always save all generated images"), "samples_format": OptionInfo('png', 'File format for images'), @@ -412,11 +88,11 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), + "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}), })) options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo("CodeFormer", "Face 
restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), + "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), })) @@ -450,7 +126,7 @@ options_templates.update(options_section(('training', "Training"), { })) options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), + "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints), "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), @@ -526,7 +202,7 @@ options_templates.update(options_section(('interrogate', "Interrogate"), { "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), - "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), + "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": interrogate.category_types()}, refresh=interrogate.category_types), "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), @@ -546,12 +222,12 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks), })) 
options_templates.update(options_section(('ui', "User interface"), { "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of the themes from the gallery.").needs_reload_ui(), + "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of the themes from the gallery.").needs_reload_ui(), "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), "return_grid": OptionInfo(True, "Show grid in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), @@ -568,9 +244,9 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), + "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), + "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), + "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(), "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), @@ -605,7 +281,7 @@ options_templates.update(options_section(('ui', "Live previews"), { })) options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x
in shared_items.list_samplers()]}).needs_reload_ui(), "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unpredictable results"), "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), @@ -638,339 +314,3 @@ options_templates.update(options_section((None, "Hidden options"), { "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), })) - -options_templates.update() - - -class Options: - data = None - data_labels = options_templates - typemap = {int: float} - - def __init__(self): - self.data = {k: v.default for k, v in self.data_labels.items()} - - def __setattr__(self, key, value): - if self.data is not None: - if key in self.data or key in self.data_labels: - assert not cmd_opts.freeze_settings, "changing settings is disabled" - - info = opts.data_labels.get(key, None) - if info.do_not_save: - return - - comp_args = info.component_args if info else None - if isinstance(comp_args, dict) and comp_args.get('visible', True) is False: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - if cmd_opts.hide_ui_dir_config and key in restricted_opts: - raise RuntimeError(f"not possible to set {key} because it is restricted") - - self.data[key] = value - return - - return super(Options, self).__setattr__(key, value) - - def __getattr__(self, item): - if self.data is not None: - if item in self.data: - return self.data[item] - - if item in self.data_labels: - return self.data_labels[item].default - - return super(Options, self).__getattribute__(item) - - def set(self, key, value): - """sets an option and calls its onchange callback, returning True if the option changed and False otherwise""" - - oldval = self.data.get(key, None) - if oldval == value: - return False - - if self.data_labels[key].do_not_save: - return False - - try: - setattr(self, key, value) - except RuntimeError: - return False - - if self.data_labels[key].onchange is not None: - try: - self.data_labels[key].onchange() - except Exception as e: - errors.display(e, f"changing setting {key} to {value}") - setattr(self, key, oldval) - return False - - return True - - def get_default(self, key): - """returns the default value for the key""" - - data_label = self.data_labels.get(key) - if data_label is None: - return None - - return data_label.default - - def save(self, filename): - assert not cmd_opts.freeze_settings, "saving settings is disabled" - - with open(filename, "w", encoding="utf8") as file: - json.dump(self.data, file, indent=4) - - def same_type(self, x, y): - if x is None or y is None: - return True - - type_x = self.typemap.get(type(x), type(x)) - type_y = self.typemap.get(type(y), type(y)) - - return type_x == type_y - - def load(self, filename): - with open(filename, "r", encoding="utf8") as file: - self.data = json.load(file) - - # 1.6.0 VAE defaults - if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None: - self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default') - - # 1.1.1 quicksettings list migration - if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: -
self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] - - # 1.4.0 ui_reorder - if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data: - self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')] - - bad_settings = 0 - for k, v in self.data.items(): - info = self.data_labels.get(k, None) - if info is not None and not self.same_type(info.default, v): - print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr) - bad_settings += 1 - - if bad_settings > 0: - print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) - - def onchange(self, key, func, call=True): - item = self.data_labels.get(key) - item.onchange = func - - if call: - func() - - def dumpjson(self): - d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} - d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} - d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} - return json.dumps(d) - - def add_option(self, key, info): - self.data_labels[key] = info - - def reorder(self): - """reorder settings so that all items related to section always go together""" - - section_ids = {} - settings_items = self.data_labels.items() - for _, item in settings_items: - if item.section not in section_ids: - section_ids[item.section] = len(section_ids) - - self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) - - def cast_value(self, key, value): - """casts an arbitrary value to the same type as this setting's value with key - Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str) - """ - - if value is None: - return None - - default_value = self.data_labels[key].default - if default_value is None: - default_value = getattr(self, key, None) - if default_value is None: - return None - - expected_type = type(default_value) - if expected_type == bool and value == "False": - value = False - else: - value = expected_type(value) - - return value - - -opts = Options() -if os.path.exists(config_filename): - opts.load(config_filename) - - -class Shared(sys.modules[__name__].__class__): - """ - this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than - at program startup.
- """ - - sd_model_val = None - - @property - def sd_model(self): - import modules.sd_models - - return modules.sd_models.model_data.get_sd_model() - - @sd_model.setter - def sd_model(self, value): - import modules.sd_models - - modules.sd_models.model_data.set_sd_model(value) - - -sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead -sys.modules[__name__].__class__ = Shared - -settings_components = None -"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" - -latent_upscale_default_mode = "Latent" -latent_upscale_modes = { - "Latent": {"mode": "bilinear", "antialias": False}, - "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, - "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, - "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, - "Latent (nearest)": {"mode": "nearest", "antialias": False}, - "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, -} - -sd_upscalers = [] - -clip_model = None - -progress_print_out = sys.stdout - -gradio_theme = gr.themes.Base() - - -def reload_gradio_theme(theme_name=None): - global gradio_theme - if not theme_name: - theme_name = opts.gradio_theme - - default_theme_args = dict( - font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) - - if theme_name == "Default": - gradio_theme = gr.themes.Default(**default_theme_args) - else: - try: - theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') - theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') - if opts.gradio_themes_cache and os.path.exists(theme_cache_path): - gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) - else: - os.makedirs(theme_cache_dir, exist_ok=True) - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - gradio_theme.dump(theme_cache_path) - except Exception as e: - errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default(**default_theme_args) - - -class TotalTQDM: - def __init__(self): - self._tqdm = None - - def reset(self): - self._tqdm = tqdm.tqdm( - desc="Total progress", - total=state.job_count * state.sampling_steps, - position=1, - file=progress_print_out - ) - - def update(self): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.update() - - def updateTotal(self, new_total): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.total = new_total - - def clear(self): - if self._tqdm is not None: - self._tqdm.refresh() - self._tqdm.close() - self._tqdm = None - - -total_tqdm = TotalTQDM() - -mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) -mem_mon.start() - - -def natural_sort_key(s, regex=re.compile('([0-9]+)')): - return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] - - -def listfiles(dirname): - filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")] - return [file for file in filenames if os.path.isfile(file)] - - -def html_path(filename): - return os.path.join(script_path, "html", filename) - - -def html(filename): - path = html_path(filename) - - if os.path.exists(path): - with open(path, encoding="utf8") as file: - 
return file.read() - - return "" - - -def walk_files(path, allowed_extensions=None): - if not os.path.exists(path): - return - - if allowed_extensions is not None: - allowed_extensions = set(allowed_extensions) - - items = list(os.walk(path, followlinks=True)) - items = sorted(items, key=lambda x: natural_sort_key(x[0])) - - for root, _, files in items: - for filename in sorted(files, key=natural_sort_key): - if allowed_extensions is not None: - _, ext = os.path.splitext(filename) - if ext not in allowed_extensions: - continue - - if not opts.list_hidden_files and ("/." in root or "\\." in root): - continue - - yield os.path.join(root, filename) - - -def ldm_print(*args, **kwargs): - if opts.hide_ldm_prints: - return - - print(*args, **kwargs) diff --git a/modules/shared_state.py b/modules/shared_state.py new file mode 100644 index 00000000..3dc9c788 --- /dev/null +++ b/modules/shared_state.py @@ -0,0 +1,159 @@ +import datetime +import logging +import threading +import time + +from modules import errors, shared, devices +from typing import Optional + +log = logging.getLogger(__name__) + + +class State: + skipped = False + interrupted = False + job = "" + job_no = 0 + job_count = 0 + processing_has_refined_job_count = False + job_timestamp = '0' + sampling_step = 0 + sampling_steps = 0 + current_latent = None + current_image = None + current_image_sampling_step = 0 + id_live_preview = 0 + textinfo = None + time_start = None + server_start = None + _server_command_signal = threading.Event() + _server_command: Optional[str] = None + + def __init__(self): + self.server_start = time.time() + + @property + def need_restart(self) -> bool: + # Compatibility getter for need_restart. + return self.server_command == "restart" + + @need_restart.setter + def need_restart(self, value: bool) -> None: + # Compatibility setter for need_restart. + if value: + self.server_command = "restart" + + @property + def server_command(self): + return self._server_command + + @server_command.setter + def server_command(self, value: Optional[str]) -> None: + """ + Set the server command to `value` and signal that it's been set. + """ + self._server_command = value + self._server_command_signal.set() + + def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: + """ + Wait for server command to get set; return and clear the value and signal. 
+ """ + if self._server_command_signal.wait(timeout): + self._server_command_signal.clear() + req = self._server_command + self._server_command = None + return req + return None + + def request_restart(self) -> None: + self.interrupt() + self.server_command = "restart" + log.info("Received restart request") + + def skip(self): + self.skipped = True + log.info("Received skip request") + + def interrupt(self): + self.interrupted = True + log.info("Received interrupt request") + + def nextjob(self): + if shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps == -1: + self.do_set_current_image() + + self.job_no += 1 + self.sampling_step = 0 + self.current_image_sampling_step = 0 + + def dict(self): + obj = { + "skipped": self.skipped, + "interrupted": self.interrupted, + "job": self.job, + "job_count": self.job_count, + "job_timestamp": self.job_timestamp, + "job_no": self.job_no, + "sampling_step": self.sampling_step, + "sampling_steps": self.sampling_steps, + } + + return obj + + def begin(self, job: str = "(unknown)"): + self.sampling_step = 0 + self.job_count = -1 + self.processing_has_refined_job_count = False + self.job_no = 0 + self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + self.current_latent = None + self.current_image = None + self.current_image_sampling_step = 0 + self.id_live_preview = 0 + self.skipped = False + self.interrupted = False + self.textinfo = None + self.time_start = time.time() + self.job = job + devices.torch_gc() + log.info("Starting job %s", job) + + def end(self): + duration = time.time() - self.time_start + log.info("Ending job %s (%.2f seconds)", self.job, duration) + self.job = "" + self.job_count = 0 + + devices.torch_gc() + + def set_current_image(self): + """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" + if not shared.parallel_processing_allowed: + return + + if self.sampling_step - self.current_image_sampling_step >= shared.opts.show_progress_every_n_steps and shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps != -1: + self.do_set_current_image() + + def do_set_current_image(self): + if self.current_latent is None: + return + + import modules.sd_samplers + + try: + if shared.opts.show_progress_grid: + self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) + else: + self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) + + self.current_image_sampling_step = self.sampling_step + + except Exception: + # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
+ # we silently ignore this error + errors.record_exception() + + def assign_current_image(self, image): + self.current_image = image + self.id_live_preview += 1 diff --git a/modules/shared_total_tqdm.py b/modules/shared_total_tqdm.py new file mode 100644 index 00000000..cf82e104 --- /dev/null +++ b/modules/shared_total_tqdm.py @@ -0,0 +1,37 @@ +import tqdm + +from modules import shared + + +class TotalTQDM: + def __init__(self): + self._tqdm = None + + def reset(self): + self._tqdm = tqdm.tqdm( + desc="Total progress", + total=shared.state.job_count * shared.state.sampling_steps, + position=1, + file=shared.progress_print_out + ) + + def update(self): + if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.update() + + def updateTotal(self, new_total): + if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars: + return + if self._tqdm is None: + self.reset() + self._tqdm.total = new_total + + def clear(self): + if self._tqdm is not None: + self._tqdm.refresh() + self._tqdm.close() + self._tqdm = None + diff --git a/modules/sysinfo.py b/modules/sysinfo.py index cf24c6dd..7d906e1f 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -10,7 +10,7 @@ import psutil import re import launch -from modules import paths_internal, timer +from modules import paths_internal, timer, shared, extensions, errors checksum_token = "DontStealMyGamePlz__WINNERS_DONT_USE_DRUGS__DONT_COPY_THAT_FLOPPY" environment_whitelist = { @@ -115,8 +115,6 @@ def format_exception(e, tb): def get_exceptions(): try: - from modules import errors - return list(reversed(errors.exception_records)) except Exception as e: return str(e) @@ -142,8 +140,6 @@ def get_torch_sysinfo(): def get_extensions(*, enabled): try: - from modules import extensions - def to_json(x: extensions.Extension): return { "name": x.name, @@ -160,7 +156,6 @@ def get_extensions(*, enabled): def get_config(): try: - from modules import shared return shared.opts.data except Exception as e: return str(e) diff --git a/modules/ui.py b/modules/ui.py index e3753e97..30b80417 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, devices, ui_extra_networks from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -91,8 +91,6 @@ def send_gradio_gallery_to_image(x): def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): - from modules import processing, devices - if not enable: return "" @@ -630,7 +628,6 @@ def create_ui(): toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) 
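
The new shared_state.State above coordinates server commands across threads with a threading.Event: the server_command setter stores the value and sets the event, and wait_for_server_command blocks until signaled, then clears both the event and the stored command. A stripped-down, runnable illustration of that handshake (MiniState is a hypothetical stand-in, not the project's class):

import threading
import time


class MiniState:
    def __init__(self):
        self._signal = threading.Event()
        self._command = None

    def set_command(self, value):
        self._command = value
        self._signal.set()  # wake any thread blocked in wait_for_command

    def wait_for_command(self, timeout=None):
        if self._signal.wait(timeout):
            self._signal.clear()
            command, self._command = self._command, None
            return command
        return None  # timed out; the caller can keep polling


state = MiniState()
threading.Thread(target=lambda: (time.sleep(0.1), state.set_command("restart"))).start()
print(state.wait_for_command(timeout=1))  # prints: restart
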
toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) - from modules import ui_extra_networks extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img') ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery) @@ -995,7 +992,6 @@ def create_ui(): paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None, )) - from modules import ui_extra_networks extra_networks_ui_img2img = ui_extra_networks.create_ui(img2img_interface, [img2img_generation_tab], 'img2img') ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery) diff --git a/modules/ui_common.py b/modules/ui_common.py index 303af9cd..99d19ff0 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -11,7 +11,7 @@ from modules import call_queue, shared from modules.generation_parameters_copypaste import image_from_url_text import modules.images from modules.ui_components import ToolButton - +import modules.generation_parameters_copypaste as parameters_copypaste folder_symbol = '\U0001f4c2' # 📂 refresh_symbol = '\U0001f504' # 🔄 @@ -105,8 +105,6 @@ def save_files(js_data, images, do_make_zip, index): def create_output_panel(tabname, outdir): - from modules import shared - import modules.generation_parameters_copypaste as parameters_copypaste def open_folder(f): if not os.path.exists(f): diff --git a/modules/util.py b/modules/util.py new file mode 100644 index 00000000..60afc067 --- /dev/null +++ b/modules/util.py @@ -0,0 +1,58 @@ +import os +import re + +from modules import shared +from modules.paths_internal import script_path + + +def natural_sort_key(s, regex=re.compile('([0-9]+)')): + return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] + + +def listfiles(dirname): + filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")] + return [file for file in filenames if os.path.isfile(file)] + + +def html_path(filename): + return os.path.join(script_path, "html", filename) + + +def html(filename): + path = html_path(filename) + + if os.path.exists(path): + with open(path, encoding="utf8") as file: + return file.read() + + return "" + + +def walk_files(path, allowed_extensions=None): + if not os.path.exists(path): + return + + if allowed_extensions is not None: + allowed_extensions = set(allowed_extensions) + + items = list(os.walk(path, followlinks=True)) + items = sorted(items, key=lambda x: natural_sort_key(x[0])) + + for root, _, files in items: + for filename in sorted(files, key=natural_sort_key): + if allowed_extensions is not None: + _, ext = os.path.splitext(filename) + if ext not in allowed_extensions: + continue + + if not shared.opts.list_hidden_files and ("/." in root or "\\." 
in root): + continue + + yield os.path.join(root, filename) + + +def ldm_print(*args, **kwargs): + if shared.opts.hide_ldm_prints: + return + + print(*args, **kwargs) diff --git a/webui.py b/webui.py index 6d36f880..0f1ace97 100644 --- a/webui.py +++ b/webui.py @@ -43,12 +43,15 @@ startup_timer.record("import torch") import gradio # noqa: F401 startup_timer.record("import gradio") -from modules import paths, timer, import_hook, errors, devices # noqa: F401 +from modules import paths, timer, import_hook, errors # noqa: F401 startup_timer.record("setup paths") import ldm.modules.encoders.modules # noqa: F401 startup_timer.record("import ldm") +from modules import shared_init, shared, shared_items +shared_init.initialize() +startup_timer.record("initialize shared") from modules import extra_networks from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401 @@ -58,8 +61,6 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) -from modules import shared - if not shared.cmd_opts.skip_version_check: errors.check_versions() @@ -82,7 +83,7 @@ import modules.textual_inversion.textual_inversion import modules.progress import modules.ui -from modules import modelloader +from modules import modelloader, devices from modules.shared import cmd_opts import modules.hypernetworks.hypernetwork @@ -297,7 +298,7 @@ def initialize_rest(*, reload_script_modules=False): Thread(target=load_model).start() - shared.reload_hypernetworks() + shared_items.reload_hypernetworks() startup_timer.record("reload hypernetworks") ui_extra_networks.initialize() -- cgit v1.2.1 From 0cac6ab6153dc376b55c9b7f61c01024515ae73a Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 9 Aug 2023 20:32:36 +0900 Subject: extra network metadata inherit old description --- modules/ui_extra_networks_user_metadata.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py index ea7e1ab2..a5423fd8 100644 --- a/modules/ui_extra_networks_user_metadata.py +++ b/modules/ui_extra_networks_user_metadata.py @@ -36,12 +36,10 @@ class UserMetadataEditor: item = self.page.items.get(name, {}) user_metadata = item.get('user_metadata', None) - if user_metadata is None: - user_metadata = {} + if not user_metadata: + user_metadata = {'description': item.get('description', '')} item['user_metadata'] = user_metadata - if len(user_metadata) == 0: - user_metadata = {'description': item.get('description', '')} return user_metadata def create_extra_default_items_in_left_column(self): -- cgit v1.2.1 From aa10faa591f1ca0bd93ae3d53a0a4c15a3fbaf82 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 14:47:44 +0300 Subject: fix checkpoint name jumping around in the list of checkpoints for no good reason --- modules/sd_models.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index b490fa99..7a866a07 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -68,7 +68,9 @@ class CheckpointInfo: self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]' self.short_title = self.name_for_extra if self.shorthash is None else f'{self.name_for_extra} [{self.shorthash}]' - self.ids = [self.hash, self.model_name, self.title, name, 
self.name_for_extra, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else []) + self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]'] + if self.shorthash: + self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]'] def register(self): checkpoints_list[self.title] = self @@ -80,10 +82,14 @@ class CheckpointInfo: if self.sha256 is None: return - self.shorthash = self.sha256[0:10] + shorthash = self.sha256[0:10] + if self.shorthash == self.sha256[0:10]: + return self.shorthash + + self.shorthash = shorthash if self.shorthash not in self.ids: - self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] + self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]'] checkpoints_list.pop(self.title, None) self.title = f'{self.name} [{self.shorthash}]' -- cgit v1.2.1 From 7ba8f11688bee1a04b48d8108627fd25ada69721 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 15:06:03 +0300 Subject: fix missing restricted_opts from shared --- modules/shared.py | 1 + modules/shared_init.py | 1 + 2 files changed, 2 insertions(+) diff --git a/modules/shared.py b/modules/shared.py index 8ba72f49..d9d01484 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -38,6 +38,7 @@ face_restorers = [] options_templates = None opts = None +restricted_opts = None sd_model: LatentDiffusion = None diff --git a/modules/shared_init.py b/modules/shared_init.py index e7fc18d2..b88d1d8e 100644 --- a/modules/shared_init.py +++ b/modules/shared_init.py @@ -20,6 +20,7 @@ def initialize(): from modules import options, shared_options shared.options_templates = shared_options.options_templates shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts) + shared.restricted_opts = shared_options.restricted_opts if os.path.exists(shared.config_filename): shared.opts.load(shared.config_filename) -- cgit v1.2.1 From eed963e97261ee03bffe59e3d343dcf53d82dbfd Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 16:54:49 +0300 Subject: Lora cache in memory --- extensions-builtin/Lora/networks.py | 22 +++++++++++++++++++--- extensions-builtin/Lora/scripts/lora_script.py | 3 +++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 17cbe1bb..bc722e90 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -195,6 +195,15 @@ def load_network(name, network_on_disk): return net +def purge_networks_from_memory(): + while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0: + name = next(iter(networks_in_memory)) + networks_in_memory.pop(name, None) + + devices.torch_gc() + + + def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None): already_loaded = {} @@ -212,15 +221,19 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No failed_to_load_networks = [] - for i, name in enumerate(names): + for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)): net = already_loaded.get(name, None) - network_on_disk = networks_on_disk[i] - if network_on_disk is not None: + if net is None: + net = networks_in_memory.get(name) + if net is None or 
os.path.getmtime(network_on_disk.filename) > net.mtime: try: net = load_network(name, network_on_disk) + + networks_in_memory.pop(name, None) + networks_in_memory[name] = net except Exception as e: errors.display(e, f"loading network {network_on_disk.filename}") continue @@ -242,6 +255,8 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No if failed_to_load_networks: sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks)) + purge_networks_from_memory() + def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]): weights_backup = getattr(self, "network_weights_backup", None) @@ -462,6 +477,7 @@ def infotext_pasted(infotext, params): available_networks = {} available_network_aliases = {} loaded_networks = [] +networks_in_memory = {} available_network_hash_lookup = {} forbidden_network_aliases = {} diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index cd28afc9..6ab8b6e7 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -65,6 +65,7 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"), "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"), "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}), + "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}), })) @@ -121,3 +122,5 @@ def infotext_pasted(infotext, d): script_callbacks.on_infotext_pasted(infotext_pasted) + +shared.opts.onchange("lora_in_memory_limit", networks.purge_networks_from_memory) -- cgit v1.2.1 From a2a97e57f069d24c9616d1867a6df7f532f92a04 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 17:08:36 +0300 Subject: simplify --- modules/launch_utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index c0847e14..98ac5bbd 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -7,6 +7,7 @@ import importlib.util import platform import json from functools import lru_cache +import tqdm from modules import cmd_args, errors from modules.paths_internal import script_path, extensions_dir @@ -248,11 +249,10 @@ def run_extensions_installers(settings_file): return with startup_timer.subcategory("run extensions installers"): - from tqdm.auto import tqdm - pbar_extensions = tqdm(list_extensions(settings_file), - bar_format="{desc}: |{bar}|{percentage:3.0f}% [{n_fmt}/{total_fmt} {elapsed}<{remaining}]") - for dirname_extension in pbar_extensions: - pbar_extensions.set_description("Installing %s" % dirname_extension) + progress_bar = tqdm.tqdm(list_extensions(settings_file)) + for dirname_extension in progress_bar: + progress_bar.set_description(f"Installing {dirname_extension}") + path = os.path.join(extensions_dir, dirname_extension) if os.path.isdir(path): -- cgit v1.2.1 From 95821f0132f5437ef30b0dbcac7c51e55818c18f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 18:11:13 +0300 Subject: split 
webui.py's initialization and utility functions into separate files --- modules/gradio_extensons.py | 4 +- modules/initialize.py | 168 ++++++++++++++++++++ modules/initialize_util.py | 195 +++++++++++++++++++++++ modules/shared_init.py | 3 - modules/ui_extra_networks.py | 3 +- modules/ui_tempdir.py | 5 +- webui.py | 368 ++++--------------------------------------- 7 files changed, 405 insertions(+), 341 deletions(-) create mode 100644 modules/initialize.py create mode 100644 modules/initialize_util.py diff --git a/modules/gradio_extensons.py b/modules/gradio_extensons.py index 5af7fd8e..77c34c8b 100644 --- a/modules/gradio_extensons.py +++ b/modules/gradio_extensons.py @@ -1,6 +1,6 @@ import gradio as gr -from modules import scripts +from modules import scripts, ui_tempdir def add_classes_to_gradio_component(comp): """ @@ -58,3 +58,5 @@ original_BlockContext_init = gr.blocks.BlockContext.__init__ gr.components.IOComponent.__init__ = IOComponent_init gr.blocks.Block.get_config = Block_get_config gr.blocks.BlockContext.__init__ = BlockContext_init + +ui_tempdir.install_ui_tempdir_override() diff --git a/modules/initialize.py b/modules/initialize.py new file mode 100644 index 00000000..f24f7637 --- /dev/null +++ b/modules/initialize.py @@ -0,0 +1,168 @@ +import importlib +import logging +import sys +import warnings +from threading import Thread + +from modules.timer import startup_timer + + +def imports(): + logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh... + logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) + + import torch # noqa: F401 + startup_timer.record("import torch") + import pytorch_lightning # noqa: F401 + startup_timer.record("import torch") + warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning") + warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") + + import gradio # noqa: F401 + startup_timer.record("import gradio") + + from modules import paths, timer, import_hook, errors # noqa: F401 + startup_timer.record("setup paths") + + import ldm.modules.encoders.modules # noqa: F401 + startup_timer.record("import ldm") + + import sgm.modules.encoders.modules # noqa: F401 + startup_timer.record("import sgm") + + from modules import shared_init + shared_init.initialize() + startup_timer.record("initialize shared") + + from modules import processing, gradio_extensons, ui # noqa: F401 + startup_timer.record("other imports") + + +def check_versions(): + from modules.shared_cmd_options import cmd_opts + + if not cmd_opts.skip_version_check: + from modules import errors + errors.check_versions() + + +def initialize(): + from modules import initialize_util + initialize_util.fix_torch_version() + initialize_util.fix_asyncio_event_loop_policy() + initialize_util.validate_tls_options() + initialize_util.configure_sigint_handler() + initialize_util.configure_opts_onchange() + + from modules import modelloader + modelloader.cleanup_models() + + from modules import sd_models + sd_models.setup_model() + startup_timer.record("setup SD model") + + from modules.shared_cmd_options import cmd_opts + + from modules import codeformer_model + warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision.transforms.functional_tensor") + codeformer_model.setup_model(cmd_opts.codeformer_models_path) + startup_timer.record("setup codeformer") + + from modules import gfpgan_model + 
gfpgan_model.setup_model(cmd_opts.gfpgan_models_path)
+    startup_timer.record("setup gfpgan")
+
+    initialize_rest(reload_script_modules=False)
+
+
+def initialize_rest(*, reload_script_modules=False):
+    """
+    Called both from initialize() and when reloading the webui.
+    """
+    from modules.shared_cmd_options import cmd_opts
+
+    from modules import sd_samplers
+    sd_samplers.set_samplers()
+    startup_timer.record("set samplers")
+
+    from modules import extensions
+    extensions.list_extensions()
+    startup_timer.record("list extensions")
+
+    from modules import initialize_util
+    initialize_util.restore_config_state_file()
+    startup_timer.record("restore config state file")
+
+    from modules import shared, upscaler, scripts
+    if cmd_opts.ui_debug_mode:
+        shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
+        scripts.load_scripts()
+        return
+
+    from modules import sd_models
+    sd_models.list_models()
+    startup_timer.record("list SD models")
+
+    from modules import localization
+    localization.list_localizations(cmd_opts.localizations_dir)
+    startup_timer.record("list localizations")
+
+    with startup_timer.subcategory("load scripts"):
+        scripts.load_scripts()
+
+    if reload_script_modules:
+        for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
+            importlib.reload(module)
+        startup_timer.record("reload script modules")
+
+    from modules import modelloader
+    modelloader.load_upscalers()
+    startup_timer.record("load upscalers")
+
+    from modules import sd_vae
+    sd_vae.refresh_vae_list()
+    startup_timer.record("refresh VAE")
+
+    from modules import textual_inversion
+    textual_inversion.textual_inversion.list_textual_inversion_templates()
+    startup_timer.record("refresh textual inversion templates")
+
+    from modules import script_callbacks, sd_hijack_optimizations, sd_hijack
+    script_callbacks.on_list_optimizers(sd_hijack_optimizations.list_optimizers)
+    sd_hijack.list_optimizers()
+    startup_timer.record("scripts list_optimizers")
+
+    from modules import sd_unet
+    sd_unet.list_unets()
+    startup_timer.record("scripts list_unets")
+
+    def load_model():
+        """
+        Accesses shared.sd_model property to load model.
+        After it's available, if it has been loaded before this access by some extension,
+        its optimization may be None because the list of optimizers has not yet been filled
+        by that time, so we apply optimization again.
+        """
+
+        shared.sd_model  # noqa: B018
+
+        if sd_hijack.current_optimizer is None:
+            sd_hijack.apply_optimizations()
+
+        from modules import devices
+        devices.first_time_calculation()
+
+    Thread(target=load_model).start()
+
+    from modules import shared_items
+    shared_items.reload_hypernetworks()
+    startup_timer.record("reload hypernetworks")
+
+    from modules import ui_extra_networks
+    ui_extra_networks.initialize()
+    ui_extra_networks.register_default_pages()
+
+    from modules import extra_networks
+    extra_networks.initialize()
+    extra_networks.register_default_extra_networks()
+    startup_timer.record("initialize extra networks")
diff --git a/modules/initialize_util.py b/modules/initialize_util.py
new file mode 100644
index 00000000..e59bd3c4
--- /dev/null
+++ b/modules/initialize_util.py
@@ -0,0 +1,195 @@
+import json
+import logging
+import os
+import signal
+import sys
+import re
+
+from modules.timer import startup_timer
+
+def setup_logging():
+    # We can't use cmd_opts for this because it will not have been initialized at this point.
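+    # (Annotation added in editing, not part of the original patch: the level is read from the
+    # environment instead, e.g. SD_WEBUI_LOG_LEVEL=DEBUG; any standard logging level name is
+    # accepted, and unrecognized names fall back to INFO via the getattr() lookup below.)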
+ log_level = os.environ.get("SD_WEBUI_LOG_LEVEL") + if log_level: + log_level = getattr(logging, log_level.upper(), None) or logging.INFO + logging.basicConfig( + level=log_level, + format='%(asctime)s %(levelname)s [%(name)s] %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + ) + + +def gradio_server_name(): + from modules.shared_cmd_options import cmd_opts + + if cmd_opts.server_name: + return cmd_opts.server_name + else: + return "0.0.0.0" if cmd_opts.listen else None + + +def fix_torch_version(): + import torch + + # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors + if ".dev" in torch.__version__ or "+git" in torch.__version__: + torch.__long_version__ = torch.__version__ + torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) + + +def fix_asyncio_event_loop_policy(): + """ + The default `asyncio` event loop policy only automatically creates + event loops in the main threads. Other threads must create event + loops explicitly or `asyncio.get_event_loop` (and therefore + `.IOLoop.current`) will fail. Installing this policy allows event + loops to be created automatically on any thread, matching the + behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2). + """ + + import asyncio + + if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"): + # "Any thread" and "selector" should be orthogonal, but there's not a clean + # interface for composing policies so pick the right base. + _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore + else: + _BasePolicy = asyncio.DefaultEventLoopPolicy + + class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore + """Event loop policy that allows loop creation on any thread. + Usage:: + + asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + """ + + def get_event_loop(self) -> asyncio.AbstractEventLoop: + try: + return super().get_event_loop() + except (RuntimeError, AssertionError): + # This was an AssertionError in python 3.4.2 (which ships with debian jessie) + # and changed to a RuntimeError in 3.4.3. + # "There is no current event loop in thread %r" + loop = self.new_event_loop() + self.set_event_loop(loop) + return loop + + asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + + +def restore_config_state_file(): + from modules import shared, config_states + + config_state_file = shared.opts.restore_config_state_file + if config_state_file == "": + return + + shared.opts.restore_config_state_file = "" + shared.opts.save(shared.config_filename) + + if os.path.isfile(config_state_file): + print(f"*** About to restore extension state from file: {config_state_file}") + with open(config_state_file, "r", encoding="utf-8") as f: + config_state = json.load(f) + config_states.restore_extension_config(config_state) + startup_timer.record("restore extension config") + elif config_state_file: + print(f"!!! 
Config state backup not found: {config_state_file}") + + +def validate_tls_options(): + from modules.shared_cmd_options import cmd_opts + + if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile): + return + + try: + if not os.path.exists(cmd_opts.tls_keyfile): + print("Invalid path to TLS keyfile given") + if not os.path.exists(cmd_opts.tls_certfile): + print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'") + except TypeError: + cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None + print("TLS setup invalid, running webui without TLS") + else: + print("Running with TLS") + startup_timer.record("TLS") + + +def get_gradio_auth_creds(): + """ + Convert the gradio_auth and gradio_auth_path commandline arguments into + an iterable of (username, password) tuples. + """ + from modules.shared_cmd_options import cmd_opts + + def process_credential_line(s): + s = s.strip() + if not s: + return None + return tuple(s.split(':', 1)) + + if cmd_opts.gradio_auth: + for cred in cmd_opts.gradio_auth.split(','): + cred = process_credential_line(cred) + if cred: + yield cred + + if cmd_opts.gradio_auth_path: + with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file: + for line in file.readlines(): + for cred in line.strip().split(','): + cred = process_credential_line(cred) + if cred: + yield cred + + +def configure_sigint_handler(): + # make the program just exit at ctrl+c without waiting for anything + def sigint_handler(sig, frame): + print(f'Interrupted with signal {sig} in {frame}') + os._exit(0) + + if not os.environ.get("COVERAGE_RUN"): + # Don't install the immediate-quit handler when running under coverage, + # as then the coverage report won't be generated. + signal.signal(signal.SIGINT, sigint_handler) + + +def configure_opts_onchange(): + from modules import shared, sd_models, sd_vae, ui_tempdir, sd_hijack + from modules.call_queue import wrap_queued_call + + shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False) + shared.opts.onchange("sd_vae", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False) + shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False) + shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed) + shared.opts.onchange("gradio_theme", shared.reload_gradio_theme) + shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False) + startup_timer.record("opts onchange") + + +def setup_middleware(app): + from starlette.middleware.gzip import GZipMiddleware + + app.middleware_stack = None # reset current middleware to allow modifying user provided list + app.add_middleware(GZipMiddleware, minimum_size=1000) + configure_cors_middleware(app) + app.build_middleware_stack() # rebuild middleware stack on-the-fly + + +def configure_cors_middleware(app): + from starlette.middleware.cors import CORSMiddleware + from modules.shared_cmd_options import cmd_opts + + cors_options = { + "allow_methods": ["*"], + "allow_headers": ["*"], + "allow_credentials": True, + } + if cmd_opts.cors_allow_origins: + cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',') + if cmd_opts.cors_allow_origins_regex: + cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex + app.add_middleware(CORSMiddleware, **cors_options) + diff --git a/modules/shared_init.py b/modules/shared_init.py index b88d1d8e..d3fb687e 100644 --- 
a/modules/shared_init.py +++ b/modules/shared_init.py @@ -5,9 +5,6 @@ import torch from modules import shared from modules.shared import cmd_opts -import sys -sys.setrecursionlimit(1000) - def initialize(): """Initializes fields inside the shared module in a controlled manner. diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index e0b932b9..16d76a45 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -4,7 +4,6 @@ from pathlib import Path from modules import shared, ui_extra_networks_user_metadata, errors, extra_networks from modules.images import read_info_from_image, save_image_with_geninfo -from modules.ui import up_down_symbol import gradio as gr import json import html @@ -348,6 +347,8 @@ def pages_in_preferred_order(pages): def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): + from modules.ui import up_down_symbol + ui = ExtraNetworksUi() ui.pages = [] ui.pages_contents = [] diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index fb75137e..506017e5 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -57,8 +57,9 @@ def save_pil_to_file(self, pil_image, dir=None, format="png"): return file_obj.name -# override save to file function so that it also writes PNG info -gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file +def install_ui_tempdir_override(): + """override save to file function so that it also writes PNG info""" + gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file def on_tmpdir_changed(): diff --git a/webui.py b/webui.py index 0f1ace97..738b3bef 100644 --- a/webui.py +++ b/webui.py @@ -1,349 +1,43 @@ from __future__ import annotations import os -import sys import time -import importlib -import signal -import re -import warnings -import json -from threading import Thread -from typing import Iterable - -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware -from fastapi.middleware.gzip import GZipMiddleware - -import logging - -# We can't use cmd_opts for this because it will not have been initialized at this point. -log_level = os.environ.get("SD_WEBUI_LOG_LEVEL") -if log_level: - log_level = getattr(logging, log_level.upper(), None) or logging.INFO - logging.basicConfig( - level=log_level, - format='%(asctime)s %(levelname)s [%(name)s] %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - ) - -logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh... 
-logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) from modules import timer +from modules import initialize_util +from modules import initialize + startup_timer = timer.startup_timer startup_timer.record("launcher") -import torch -import pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them -warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning") -warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") -startup_timer.record("import torch") - -import gradio # noqa: F401 -startup_timer.record("import gradio") - -from modules import paths, timer, import_hook, errors # noqa: F401 -startup_timer.record("setup paths") - -import ldm.modules.encoders.modules # noqa: F401 -startup_timer.record("import ldm") - -from modules import shared_init, shared, shared_items -shared_init.initialize() -startup_timer.record("initialize shared") - -from modules import extra_networks -from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401 - -# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors -if ".dev" in torch.__version__ or "+git" in torch.__version__: - torch.__long_version__ = torch.__version__ - torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) - -if not shared.cmd_opts.skip_version_check: - errors.check_versions() - -import modules.codeformer_model as codeformer -import modules.gfpgan_model as gfpgan -from modules import sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states -import modules.face_restoration -import modules.img2img - -import modules.lowvram -import modules.scripts -import modules.sd_hijack -import modules.sd_hijack_optimizations -import modules.sd_models -import modules.sd_vae -import modules.sd_unet -import modules.txt2img -import modules.script_callbacks -import modules.textual_inversion.textual_inversion -import modules.progress - -import modules.ui -from modules import modelloader, devices -from modules.shared import cmd_opts -import modules.hypernetworks.hypernetwork - -startup_timer.record("other imports") - - -if cmd_opts.server_name: - server_name = cmd_opts.server_name -else: - server_name = "0.0.0.0" if cmd_opts.listen else None - - -def fix_asyncio_event_loop_policy(): - """ - The default `asyncio` event loop policy only automatically creates - event loops in the main threads. Other threads must create event - loops explicitly or `asyncio.get_event_loop` (and therefore - `.IOLoop.current`) will fail. Installing this policy allows event - loops to be created automatically on any thread, matching the - behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2). - """ - - import asyncio - - if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"): - # "Any thread" and "selector" should be orthogonal, but there's not a clean - # interface for composing policies so pick the right base. - _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore - else: - _BasePolicy = asyncio.DefaultEventLoopPolicy - - class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore - """Event loop policy that allows loop creation on any thread. 
- Usage:: - - asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) - """ - - def get_event_loop(self) -> asyncio.AbstractEventLoop: - try: - return super().get_event_loop() - except (RuntimeError, AssertionError): - # This was an AssertionError in python 3.4.2 (which ships with debian jessie) - # and changed to a RuntimeError in 3.4.3. - # "There is no current event loop in thread %r" - loop = self.new_event_loop() - self.set_event_loop(loop) - return loop - - asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) - - -def restore_config_state_file(): - config_state_file = shared.opts.restore_config_state_file - if config_state_file == "": - return - - shared.opts.restore_config_state_file = "" - shared.opts.save(shared.config_filename) - - if os.path.isfile(config_state_file): - print(f"*** About to restore extension state from file: {config_state_file}") - with open(config_state_file, "r", encoding="utf-8") as f: - config_state = json.load(f) - config_states.restore_extension_config(config_state) - startup_timer.record("restore extension config") - elif config_state_file: - print(f"!!! Config state backup not found: {config_state_file}") - - -def validate_tls_options(): - if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile): - return - - try: - if not os.path.exists(cmd_opts.tls_keyfile): - print("Invalid path to TLS keyfile given") - if not os.path.exists(cmd_opts.tls_certfile): - print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'") - except TypeError: - cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None - print("TLS setup invalid, running webui without TLS") - else: - print("Running with TLS") - startup_timer.record("TLS") - - -def get_gradio_auth_creds() -> Iterable[tuple[str, ...]]: - """ - Convert the gradio_auth and gradio_auth_path commandline arguments into - an iterable of (username, password) tuples. - """ - def process_credential_line(s) -> tuple[str, ...] | None: - s = s.strip() - if not s: - return None - return tuple(s.split(':', 1)) - - if cmd_opts.gradio_auth: - for cred in cmd_opts.gradio_auth.split(','): - cred = process_credential_line(cred) - if cred: - yield cred - - if cmd_opts.gradio_auth_path: - with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file: - for line in file.readlines(): - for cred in line.strip().split(','): - cred = process_credential_line(cred) - if cred: - yield cred - - -def configure_sigint_handler(): - # make the program just exit at ctrl+c without waiting for anything - def sigint_handler(sig, frame): - print(f'Interrupted with signal {sig} in {frame}') - os._exit(0) - - if not os.environ.get("COVERAGE_RUN"): - # Don't install the immediate-quit handler when running under coverage, - # as then the coverage report won't be generated. 
- signal.signal(signal.SIGINT, sigint_handler) - - -def configure_opts_onchange(): - shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()), call=False) - shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) - shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) - shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed) - shared.opts.onchange("gradio_theme", shared.reload_gradio_theme) - shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: modules.sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False) - startup_timer.record("opts onchange") - - -def initialize(): - fix_asyncio_event_loop_policy() - validate_tls_options() - configure_sigint_handler() - modelloader.cleanup_models() - configure_opts_onchange() - - modules.sd_models.setup_model() - startup_timer.record("setup SD model") - - codeformer.setup_model(cmd_opts.codeformer_models_path) - startup_timer.record("setup codeformer") - - gfpgan.setup_model(cmd_opts.gfpgan_models_path) - startup_timer.record("setup gfpgan") - - initialize_rest(reload_script_modules=False) - - -def initialize_rest(*, reload_script_modules=False): - """ - Called both from initialize() and when reloading the webui. - """ - sd_samplers.set_samplers() - extensions.list_extensions() - startup_timer.record("list extensions") - - restore_config_state_file() - - if cmd_opts.ui_debug_mode: - shared.sd_upscalers = upscaler.UpscalerLanczos().scalers - modules.scripts.load_scripts() - return - - modules.sd_models.list_models() - startup_timer.record("list SD models") +initialize_util.setup_logging() - localization.list_localizations(cmd_opts.localizations_dir) +initialize.imports() - with startup_timer.subcategory("load scripts"): - modules.scripts.load_scripts() - - if reload_script_modules: - for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]: - importlib.reload(module) - startup_timer.record("reload script modules") - - modelloader.load_upscalers() - startup_timer.record("load upscalers") - - modules.sd_vae.refresh_vae_list() - startup_timer.record("refresh VAE") - modules.textual_inversion.textual_inversion.list_textual_inversion_templates() - startup_timer.record("refresh textual inversion templates") - - modules.script_callbacks.on_list_optimizers(modules.sd_hijack_optimizations.list_optimizers) - modules.sd_hijack.list_optimizers() - startup_timer.record("scripts list_optimizers") - - modules.sd_unet.list_unets() - startup_timer.record("scripts list_unets") - - def load_model(): - """ - Accesses shared.sd_model property to load model. - After it's available, if it has been loaded before this access by some extension, - its optimization may be None because the list of optimizaers has neet been filled - by that time, so we apply optimization again. 
- """ - - shared.sd_model # noqa: B018 - - if modules.sd_hijack.current_optimizer is None: - modules.sd_hijack.apply_optimizations() - - devices.first_time_calculation() - - Thread(target=load_model).start() - - shared_items.reload_hypernetworks() - startup_timer.record("reload hypernetworks") - - ui_extra_networks.initialize() - ui_extra_networks.register_default_pages() - - extra_networks.initialize() - extra_networks.register_default_extra_networks() - startup_timer.record("initialize extra networks") - - -def setup_middleware(app): - app.middleware_stack = None # reset current middleware to allow modifying user provided list - app.add_middleware(GZipMiddleware, minimum_size=1000) - configure_cors_middleware(app) - app.build_middleware_stack() # rebuild middleware stack on-the-fly - - -def configure_cors_middleware(app): - cors_options = { - "allow_methods": ["*"], - "allow_headers": ["*"], - "allow_credentials": True, - } - if cmd_opts.cors_allow_origins: - cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',') - if cmd_opts.cors_allow_origins_regex: - cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex - app.add_middleware(CORSMiddleware, **cors_options) +initialize.check_versions() def create_api(app): from modules.api.api import Api + from modules.call_queue import queue_lock + api = Api(app, queue_lock) return api def api_only(): - initialize() + from fastapi import FastAPI + from modules.shared_cmd_options import cmd_opts + + initialize.initialize() app = FastAPI() - setup_middleware(app) + initialize_util.setup_middleware(app) api = create_api(app) - modules.script_callbacks.before_ui_callback() - modules.script_callbacks.app_started_callback(None, app) + from modules import script_callbacks + script_callbacks.before_ui_callback() + script_callbacks.app_started_callback(None, app) print(f"Startup time: {startup_timer.summary()}.") api.launch( @@ -354,24 +48,28 @@ def api_only(): def webui(): + from modules.shared_cmd_options import cmd_opts + launch_api = cmd_opts.api - initialize() + initialize.initialize() + + from modules import shared, ui_tempdir, script_callbacks, ui, progress, ui_extra_networks while 1: if shared.opts.clean_temp_dir_at_start: ui_tempdir.cleanup_tmpdr() startup_timer.record("cleanup temp dir") - modules.script_callbacks.before_ui_callback() + script_callbacks.before_ui_callback() startup_timer.record("scripts before_ui_callback") - shared.demo = modules.ui.create_ui() + shared.demo = ui.create_ui() startup_timer.record("create ui") if not cmd_opts.no_gradio_queue: shared.demo.queue(64) - gradio_auth_creds = list(get_gradio_auth_creds()) or None + gradio_auth_creds = list(initialize_util.get_gradio_auth_creds()) or None auto_launch_browser = False if os.getenv('SD_WEBUI_RESTARTING') != '1': @@ -382,7 +80,7 @@ def webui(): app, local_url, share_url = shared.demo.launch( share=cmd_opts.share, - server_name=server_name, + server_name=initialize_util.gradio_server_name(), server_port=cmd_opts.port, ssl_keyfile=cmd_opts.tls_keyfile, ssl_certfile=cmd_opts.tls_certfile, @@ -407,10 +105,10 @@ def webui(): # running its code. We disable this here. Suggested by RyotaK. 
app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware'] - setup_middleware(app) + initialize_util.setup_middleware(app) - modules.progress.setup_progress_api(app) - modules.ui.setup_ui_api(app) + progress.setup_progress_api(app) + ui.setup_ui_api(app) if launch_api: create_api(app) @@ -420,7 +118,7 @@ def webui(): startup_timer.record("add APIs") with startup_timer.subcategory("app_started_callback"): - modules.script_callbacks.app_started_callback(shared.demo, app) + script_callbacks.app_started_callback(shared.demo, app) timer.startup_record = startup_timer.dump() print(f"Startup time: {startup_timer.summary()}.") @@ -450,14 +148,16 @@ def webui(): shared.demo.close() time.sleep(0.5) startup_timer.reset() - modules.script_callbacks.app_reload_callback() + script_callbacks.app_reload_callback() startup_timer.record("app reload callback") - modules.script_callbacks.script_unloaded_callback() + script_callbacks.script_unloaded_callback() startup_timer.record("scripts unloaded callback") - initialize_rest(reload_script_modules=True) + initialize.initialize_rest(reload_script_modules=True) if __name__ == "__main__": + from modules.shared_cmd_options import cmd_opts + if cmd_opts.nowebui: api_only() else: -- cgit v1.2.1 From 4a64d340010a907a6f5a0f53da023c46f794a590 Mon Sep 17 00:00:00 2001 From: Danil Boldyrev Date: Wed, 9 Aug 2023 18:40:45 +0300 Subject: fix auto-expand --- extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index eb49a01d..e7616b98 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -42,6 +42,11 @@ onUiLoaded(async() => { } } + // Detect whether the element has a horizontal scroll bar + function hasHorizontalScrollbar(element) { + return element.scrollWidth > element.clientWidth; + } + // Function for defining the "Ctrl", "Shift" and "Alt" keys function isModifierKey(event, key) { switch (key) { @@ -650,16 +655,14 @@ onUiLoaded(async() => { } // Simulation of the function to put a long image into the screen. - // We define the size of the canvas, make a fullscreen to reveal the image, then reduce it to fit into the element. + // We detect if an image has a scroll bar or not, make a fullscreen to reveal the image, then reduce it to fit into the element. // We hide the image and show it to the user when it is ready. 
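+    // (Annotation added in editing, not part of the original patch: hasHorizontalScrollbar()
+    // compares scrollWidth against clientWidth, so the auto-expand below keys off actual
+    // overflow instead of the old hard-coded 862px width checks removed in this hunk.)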
function autoExpand(e) {
             const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
             const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch;
+            if (canvas && isMainTab) {
-            if (canvas && parseInt(targetElement.style.width) > 862 || parseInt(canvas.width) < 862) {
-                return;
-            }
-            if (canvas) {
+                if (hasHorizontalScrollbar(targetElement)) {
                 targetElement.style.visibility = "hidden";
                 setTimeout(() => {
                     fitToScreen();
-- cgit v1.2.1

From 8b7b99f8d582edf6422e52f41b82f08cdb80568d Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Wed, 9 Aug 2023 12:18:03 -0400
Subject: fix: Only import tqdm when needed

---
 modules/launch_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 98ac5bbd..7143f144 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -7,7 +7,6 @@ import importlib.util
 import platform
 import json
 from functools import lru_cache
-import tqdm

 from modules import cmd_args, errors
 from modules.paths_internal import script_path, extensions_dir
@@ -249,6 +248,7 @@ def run_extensions_installers(settings_file):
         return

     with startup_timer.subcategory("run extensions installers"):
+        import tqdm
         progress_bar = tqdm.tqdm(list_extensions(settings_file))
         for dirname_extension in progress_bar:
             progress_bar.set_description(f"Installing {dirname_extension}")
-- cgit v1.2.1

From d1ba46b6e15d2f2917e7d392955c8c6f988527ba Mon Sep 17 00:00:00 2001
From: Robert Barron
Date: Wed, 9 Aug 2023 07:46:30 -0700
Subject: allow first pass and hires pass to use a single prompt to do different prompt editing; in the hires pass, the relative time range is [1..2] and the absolute time range is [steps+1..steps+hires_steps], e.g. with 30 steps and 20 hires steps, '20' is 2/3rds through the first pass and '40' is halfway through the hires pass

---
 modules/processing.py | 20 ++++++++++++--------
 modules/prompt_parser.py | 41 +++++++++++++++++++++++++++++++++++++++++++----------
 modules/shared.py | 1 +
 3 files changed, 44 insertions(+), 18 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 31745006..0750b299 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -319,12 +319,14 @@ class StableDiffusionProcessing:
         self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
         self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]

-    def cached_params(self, required_prompts, steps, extra_network_data):
+    def cached_params(self, required_prompts, steps, hires_steps, extra_network_data, use_old_scheduling):
         """Returns parameters that invalidate the cond cache if changed"""

         return (
             required_prompts,
             steps,
+            hires_steps,
+            use_old_scheduling,
             opts.CLIP_stop_at_last_layers,
             shared.sd_model.sd_checkpoint_info,
             extra_network_data,
@@ -334,7 +336,7 @@ class StableDiffusionProcessing:
             self.height,
         )

-    def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data):
+    def get_conds_with_caching(self, function, required_prompts, steps, hires_steps, caches, extra_network_data):
         """
         Returns the result of calling function(shared.sd_model, required_prompts, steps)
         using a cache to store the result if the same arguments have been used before.
@@ -347,7 +349,7 @@ class StableDiffusionProcessing:

         caches is a list with items described above.
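         (Annotation added in editing, not part of the original patch: the cache key built by
         cached_params() now also includes hires_steps and the use_old_scheduling option, so
         conds computed for one pass are not silently reused for the other.)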
""" - cached_params = self.cached_params(required_prompts, steps, extra_network_data) + cached_params = self.cached_params(required_prompts, steps, hires_steps, extra_network_data, shared.opts.use_old_scheduling) for cache in caches: if cache[0] is not None and cached_params == cache[0]: @@ -356,7 +358,7 @@ class StableDiffusionProcessing: cache = caches[0] with devices.autocast(): - cache[1] = function(shared.sd_model, required_prompts, steps) + cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling) cache[0] = cached_params return cache[1] @@ -367,8 +369,9 @@ class StableDiffusionProcessing: sampler_config = sd_samplers.find_sampler_config(self.sampler_name) self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1 - self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data) - self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, self.steps * self.step_multiplier, [self.cached_c], self.extra_network_data) + self.firstpass_steps = self.steps * self.step_multiplier + self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, self.firstpass_steps, None, [self.cached_uc], self.extra_network_data) + self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, self.firstpass_steps, None, [self.cached_c], self.extra_network_data) def parse_extra_network_prompts(self): self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts) @@ -1225,8 +1228,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): hr_prompts = prompt_parser.SdConditioning(self.hr_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y) hr_negative_prompts = prompt_parser.SdConditioning(self.hr_negative_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y, is_negative_prompt=True) - self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data) - self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data) + hires_steps = (self.hr_second_pass_steps or self.steps) * self.step_multiplier + self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.firstpass_steps, hires_steps, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data) + self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.firstpass_steps, hires_steps, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data) def setup_conds(self): super().setup_conds() diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 32d214e3..e8c41f38 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -26,7 +26,7 @@ plain: /([^\\\[\]():|]|\\.)+/ %import common.SIGNED_NUMBER -> NUMBER """) -def get_learned_conditioning_prompt_schedules(prompts, steps): +def get_learned_conditioning_prompt_schedules(prompts, base_steps, hires_steps=None, use_old_scheduling=False): """ >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10)[0] >>> g("test") @@ -57,18 +57,39 @@ def 
get_learned_conditioning_prompt_schedules(prompts, steps):
     [[1, 'female'], [2, 'male'], [3, 'female'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'female'], [8, 'male'], [9, 'female'], [10, 'male']]
     >>> g("[fe|||]male")
     [[1, 'female'], [2, 'male'], [3, 'male'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'male'], [8, 'male'], [9, 'female'], [10, 'male']]
+    >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10, 10)[0]
+    >>> g("a [b:.5] c")
+    [[10, 'a b c']]
+    >>> g("a [b:1.5] c")
+    [[5, 'a c'], [10, 'a b c']]
     """

+    if hires_steps is None or use_old_scheduling:
+        int_offset = 0
+        flt_offset = 0
+        steps = base_steps
+    else:
+        int_offset = base_steps
+        flt_offset = 1.0
+        steps = hires_steps
+
     def collect_steps(steps, tree):
         res = [steps]

         class CollectSteps(lark.Visitor):
             def scheduled(self, tree):
-                tree.children[-2] = float(tree.children[-2])
-                if tree.children[-2] < 1:
-                    tree.children[-2] *= steps
-                tree.children[-2] = min(steps, int(tree.children[-2]))
-                res.append(tree.children[-2])
+                s = tree.children[-2]
+                v = float(s)
+                if use_old_scheduling:
+                    v = v*steps if v<1 else v
+                else:
+                    if "." in s:
+                        v = (v - flt_offset) * steps
+                    else:
+                        v = (v - int_offset)
+                tree.children[-2] = min(steps, int(v))
+                if tree.children[-2] >= 1:
+                    res.append(tree.children[-2])

             def alternate(self, tree):
                 res.extend(range(1, steps+1))
@@ -134,7 +155,7 @@ class SdConditioning(list):



-def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps):
+def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps, hires_steps=None, use_old_scheduling=False):
     """converts a list of prompts into a list of prompt schedules - each schedule is a list of ScheduledPromptConditioning, specifying the condition (cond),
     and the sampling step at which this condition is to be replaced by the next one.
@@ -154,7 +175,7 @@ def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps):
     """
     res = []

-    prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps)
+    prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps, hires_steps, use_old_scheduling)
     cache = {}

     for prompt, prompt_schedule in zip(prompts, prompt_schedules):
@@ -229,7 +250,7 @@ class MulticondLearnedConditioning:
         self.batch: List[List[ComposableScheduledPromptConditioning]] = batch


-def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning:
+def get_multicond_learned_conditioning(model, prompts, steps, hires_steps=None, use_old_scheduling=False) -> MulticondLearnedConditioning:
     """same as get_learned_conditioning, but returns a list of ScheduledPromptConditioning along with the weight objects for each prompt.
     For each prompt, the list is obtained by splitting the prompt using the AND separator.
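(Editor's annotation, not part of the patch: a worked example of the new two-pass scheduling,
extrapolated from the doctests added above. With base_steps=10 and hires_steps=10, a fractional
marker is interpreted relative to the hires pass, so 1.5 means halfway through it, while an
integer marker above base_steps is an absolute step count that continues into the hires pass:

    >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10, 10)[0]
    >>> g("a [b:1.5] c")   # fractional: (1.5 - 1.0) * 10 = step 5 of the hires pass
    [[5, 'a c'], [10, 'a b c']]
    >>> g("a [b:12] c")    # integer: 12 - 10 = step 2 of the hires pass
    [[2, 'a c'], [10, 'a b c']]

The first case is taken verbatim from the patch's doctests; the second applies the same
arithmetic, v - int_offset, and is our own extrapolation rather than a doctest from the commit.)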
@@ -238,7 +259,7 @@ def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearne
     res_indexes, prompt_flat_list, prompt_indexes = get_multicond_prompt_list(prompts)

-    learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps)
+    learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps, hires_steps, use_old_scheduling)

     res = []
     for indexes in res_indexes:
diff --git a/modules/shared.py b/modules/shared.py
index 078e8135..a605b08b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -516,6 +516,7 @@ options_templates.update(options_section(('compatibility', "Compatibility"), {
     "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
     "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."),
     "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."),
+    "use_old_scheduling": OptionInfo(False, "Use the old prompt editing timeline, where first pass and hires both used the same timeline, and < 1 meant relative and >= 1 meant absolute"),
 }))

 options_templates.update(options_section(('interrogate', "Interrogate"), {
-- cgit v1.2.1

From edfae9e78af23bdd6161c55c7ec88533de8925f8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 9 Aug 2023 20:49:33 +0300
Subject: add --loglevel commandline argument for logging remove the progressbar for extension installation in favor of logging output

---
 modules/cmd_args.py | 1 +
 modules/initialize_util.py | 12 ------------
 modules/launch_utils.py | 9 +++++----
 modules/logging_config.py | 16 ++++++++++++++++
 webui.py | 2 --
 5 files changed, 22 insertions(+), 18 deletions(-)
 create mode 100644 modules/logging_config.py

diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index 64f21e01..b0a11538 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -16,6 +16,7 @@ parser.add_argument("--test-server", action='store_true', help="launch.py argume
 parser.add_argument("--log-startup", action='store_true', help="launch.py argument: print a detailed log of what's happening at startup")
 parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
 parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
+parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None)
 parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
 parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
 parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
diff --git a/modules/initialize_util.py b/modules/initialize_util.py
index e59bd3c4..d8370576 100644
--- a/modules/initialize_util.py
+++ b/modules/initialize_util.py
@@ -1,5 +1,4 @@
 import json
-import logging
 import os
 import signal
 import sys
@@ -7,17 +6,6 @@ import re

 from modules.timer import startup_timer

-def setup_logging():
-    # We can't use cmd_opts for this because it will not have been initialized at this point.
- log_level = os.environ.get("SD_WEBUI_LOG_LEVEL") - if log_level: - log_level = getattr(logging, log_level.upper(), None) or logging.INFO - logging.basicConfig( - level=log_level, - format='%(asctime)s %(levelname)s [%(name)s] %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - ) - def gradio_server_name(): from modules.shared_cmd_options import cmd_opts diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 7143f144..90c00dd2 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -1,4 +1,5 @@ # this scripts installs necessary requirements and launches main program in webui.py +import logging import re import subprocess import os @@ -11,8 +12,10 @@ from functools import lru_cache from modules import cmd_args, errors from modules.paths_internal import script_path, extensions_dir from modules.timer import startup_timer +from modules import logging_config args, _ = cmd_args.parser.parse_known_args() +logging_config.setup_logging(args.loglevel) python = sys.executable git = os.environ.get('GIT', "git") @@ -248,10 +251,8 @@ def run_extensions_installers(settings_file): return with startup_timer.subcategory("run extensions installers"): - import tqdm - progress_bar = tqdm.tqdm(list_extensions(settings_file)) - for dirname_extension in progress_bar: - progress_bar.set_description(f"Installing {dirname_extension}") + for dirname_extension in list_extensions(settings_file): + logging.debug(f"Installing {dirname_extension}") path = os.path.join(extensions_dir, dirname_extension) diff --git a/modules/logging_config.py b/modules/logging_config.py new file mode 100644 index 00000000..7db23d4b --- /dev/null +++ b/modules/logging_config.py @@ -0,0 +1,16 @@ +import os +import logging + + +def setup_logging(loglevel): + if loglevel is None: + loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL") + + if loglevel: + log_level = getattr(logging, loglevel.upper(), None) or logging.INFO + logging.basicConfig( + level=log_level, + format='%(asctime)s %(levelname)s [%(name)s] %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + ) + diff --git a/webui.py b/webui.py index 738b3bef..5c827dae 100644 --- a/webui.py +++ b/webui.py @@ -10,8 +10,6 @@ from modules import initialize startup_timer = timer.startup_timer startup_timer.record("launcher") -initialize_util.setup_logging() - initialize.imports() initialize.check_versions() -- cgit v1.2.1 From 66c32e40e8c059d958292d665a0eeb7377b18988 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 9 Aug 2023 21:19:33 +0300 Subject: fix gradio themes not applying --- modules/shared_gradio_themes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/shared_gradio_themes.py b/modules/shared_gradio_themes.py index ad1f2212..485e89d5 100644 --- a/modules/shared_gradio_themes.py +++ b/modules/shared_gradio_themes.py @@ -59,8 +59,8 @@ def reload_gradio_theme(theme_name=None): shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) else: os.makedirs(theme_cache_dir, exist_ok=True) - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - gradio_theme.dump(theme_cache_path) + shared.gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + shared.gradio_theme.dump(theme_cache_path) except Exception as e: errors.display(e, "changing gradio theme") shared.gradio_theme = gr.themes.Default(**default_theme_args) -- cgit v1.2.1 From 259805947e4e567abcf6a4be20fe520d456c251f Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Wed, 9 Aug 2023 14:24:16 -0400 Subject: Add 
slerp import for extension backwards compat --- modules/processing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/processing.py b/modules/processing.py index 2df5e8c7..6961b7b1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -15,6 +15,7 @@ from typing import Any, Dict, List import modules.sd_hijack from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng +from modules.rng import slerp # noqa: F401 from modules.sd_hijack import model_hijack from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes from modules.shared import opts, cmd_opts, state -- cgit v1.2.1 From 2ceb4f81e2f291ba651dd24c7eb158ea3b446b42 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Wed, 9 Aug 2023 14:40:18 -0400 Subject: Use better symbol for extra networks sort --- modules/ui_extra_networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 16d76a45..063bd7b8 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -347,7 +347,7 @@ def pages_in_preferred_order(pages): def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): - from modules.ui import up_down_symbol + from modules.ui import switch_values_symbol ui = ExtraNetworksUi() ui.pages = [] @@ -374,7 +374,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True) dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") - button_sortorder = ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False) + button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False) button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False) checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False) -- cgit v1.2.1 From ff1bfd01ba33ee03b3eb99b1669bd29a3a6658ca Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Wed, 9 Aug 2023 14:41:25 -0400 Subject: Remove up down symbol --- modules/ui.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 30b80417..4e1daa8d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -78,7 +78,6 @@ extra_networks_symbol = '\U0001F3B4' # 🎴 switch_values_symbol = '\U000021C5' # ⇅ restore_progress_symbol = '\U0001F300' # 🌀 detect_image_size_symbol = '\U0001F4D0' # 📐 -up_down_symbol = '\u2195\ufe0f' # ↕️ plaintext_to_html = ui_common.plaintext_to_html -- cgit v1.2.1 From 2c5106ed06044ba4b67b1c856756e33a1ca5eeea Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 07:57:52 +0300 Subject: additional work on gradio styles; make the accordion change affect all accordions, not just inside scripts div --- style.css | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git 
From 2ceb4f81e2f291ba651dd24c7eb158ea3b446b42 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Wed, 9 Aug 2023 14:40:18 -0400
Subject: Use better symbol for extra networks sort

---
 modules/ui_extra_networks.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 16d76a45..063bd7b8 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -347,7 +347,7 @@ def pages_in_preferred_order(pages):
 
 
 def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
-    from modules.ui import up_down_symbol
+    from modules.ui import switch_values_symbol
 
     ui = ExtraNetworksUi()
     ui.pages = []
@@ -374,7 +374,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
 
         edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True)
         dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order")
-        button_sortorder = ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False)
+        button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False)
         button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False)
         checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False)
-- cgit v1.2.1

From ff1bfd01ba33ee03b3eb99b1669bd29a3a6658ca Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Wed, 9 Aug 2023 14:41:25 -0400
Subject: Remove up down symbol

---
 modules/ui.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/modules/ui.py b/modules/ui.py
index 30b80417..4e1daa8d 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -78,7 +78,6 @@ extra_networks_symbol = '\U0001F3B4'  # 🎴
 switch_values_symbol = '\U000021C5' # ⇅
 restore_progress_symbol = '\U0001F300' # 🌀
 detect_image_size_symbol = '\U0001F4D0'  # 📐
-up_down_symbol = '\u2195\ufe0f' # ↕️
 
 plaintext_to_html = ui_common.plaintext_to_html
-- cgit v1.2.1

From 2c5106ed06044ba4b67b1c856756e33a1ca5eeea Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 10 Aug 2023 07:57:52 +0300
Subject: additional work on gradio styles; make the accordion change affect all accordions, not just inside scripts div

---
 style.css | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/style.css b/style.css
index cda5f6a0..dfd5ca36 100644
--- a/style.css
+++ b/style.css
@@ -43,13 +43,15 @@ div.form{
 .block.gradio-radio,
 .block.gradio-checkboxgroup,
 .block.gradio-number,
-.block.gradio-colorpicker,
-div.gradio-group
-{
+.block.gradio-colorpicker {
    border-width: 0 !important;
    box-shadow: none !important;
 }
 
+div.gradio-group, div.styler{
+    border-width: 0 !important;
+    background: none;
+}
 .gap.compact{
    padding: 0;
    gap: 0.2em 0;
@@ -135,12 +137,8 @@ a{
     cursor: pointer;
 }
 
-div.styler{
-    border: none;
-    background: var(--background-fill-primary);
-}
-
-.block.gradio-textbox{
+/* gradio 3.39 puts a lot of overflow: hidden all over the place for an unknown reason. */
+.block.gradio-textbox, div.gradio-group, div.gradio-group div, div.gradio-dropdown{
     overflow: visible !important;
 }
 
@@ -194,7 +192,7 @@ button.custom-button{
     text-align: center;
 }
 
-.gradio-group[id$="_script_container"] > div .gradio-group > div > div:is(.gradio-accordion, .form) {
+div.gradio-accordion {
     border: 1px solid var(--block-border-color) !important;
     border-radius: 8px !important;
     margin: 2px 0;
-- cgit v1.2.1

From 9199b6b7ebe96cdf09571ba874a103e8ed8c90ef Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 10 Aug 2023 11:20:46 +0300
Subject: add a custom UI element that combines accordion and checkbox; rework hires fix UI to use accordion; prevent bogus progress output in console when calculating hires fix dimensions

---
 javascript/inputAccordion.js | 37 +++++++++++++++++++++
 modules/processing.py        | 77 +++++++++++++++++++++++---------------------
 modules/ui.py                | 23 ++++---------
 modules/ui_components.py     | 31 ++++++++++++++++++
 style.css                    | 15 +++++----
 5 files changed, 124 insertions(+), 59 deletions(-)
 create mode 100644 javascript/inputAccordion.js

diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js
new file mode 100644
index 00000000..a5eef229
--- /dev/null
+++ b/javascript/inputAccordion.js
@@ -0,0 +1,37 @@
+var observerAccordionOpen = new MutationObserver(function(mutations) {
+    mutations.forEach(function(mutationRecord) {
+        var elem = mutationRecord.target;
+        var open = elem.classList.contains('open');
+
+        var accordion = elem.parentNode;
+        accordion.classList.toggle('input-accordion-open', open);
+
+        var checkbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input");
+        checkbox.checked = open;
+        updateInput(checkbox);
+
+        extra = gradioApp().querySelector('#' + accordion.id + "-extra");
+        if(extra){
+            extra.style.display = open ? 
"" : "none"; + } + }); +}); + +function inputAccordionChecked(id, checked){ + var label = gradioApp().querySelector('#' + id + " .label-wrap"); + if(label.classList.contains('open') != checked){ + label.click(); + } +} + +onUiLoaded(function() { + for (var accordion of gradioApp().querySelectorAll('.input-accordion')) { + var labelWrap = accordion.querySelector('.label-wrap'); + observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); + + var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); + if(extra){ + labelWrap.insertBefore(extra, labelWrap.lastElementChild) + } + } +}); diff --git a/modules/processing.py b/modules/processing.py index 6961b7b1..7819644c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -924,6 +924,45 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_c = None self.hr_uc = None + def calculate_target_resolution(self): + if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): + self.hr_resize_x = self.width + self.hr_resize_y = self.height + self.hr_upscale_to_x = self.width + self.hr_upscale_to_y = self.height + + self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height) + self.applied_old_hires_behavior_to = (self.width, self.height) + + if self.hr_resize_x == 0 and self.hr_resize_y == 0: + self.extra_generation_params["Hires upscale"] = self.hr_scale + self.hr_upscale_to_x = int(self.width * self.hr_scale) + self.hr_upscale_to_y = int(self.height * self.hr_scale) + else: + self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}" + + if self.hr_resize_y == 0: + self.hr_upscale_to_x = self.hr_resize_x + self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width + elif self.hr_resize_x == 0: + self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height + self.hr_upscale_to_y = self.hr_resize_y + else: + target_w = self.hr_resize_x + target_h = self.hr_resize_y + src_ratio = self.width / self.height + dst_ratio = self.hr_resize_x / self.hr_resize_y + + if src_ratio < dst_ratio: + self.hr_upscale_to_x = self.hr_resize_x + self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width + else: + self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height + self.hr_upscale_to_y = self.hr_resize_y + + self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f + self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f + def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: if self.hr_checkpoint_name: @@ -948,43 +987,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers): raise Exception(f"could not find upscaler named {self.hr_upscaler}") - if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): - self.hr_resize_x = self.width - self.hr_resize_y = self.height - self.hr_upscale_to_x = self.width - self.hr_upscale_to_y = self.height - - self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height) - self.applied_old_hires_behavior_to = (self.width, self.height) - - if self.hr_resize_x == 0 and self.hr_resize_y == 0: - self.extra_generation_params["Hires upscale"] = self.hr_scale - self.hr_upscale_to_x = int(self.width * self.hr_scale) - self.hr_upscale_to_y = int(self.height * self.hr_scale) - else: - self.extra_generation_params["Hires resize"] = 
f"{self.hr_resize_x}x{self.hr_resize_y}" - - if self.hr_resize_y == 0: - self.hr_upscale_to_x = self.hr_resize_x - self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width - elif self.hr_resize_x == 0: - self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height - self.hr_upscale_to_y = self.hr_resize_y - else: - target_w = self.hr_resize_x - target_h = self.hr_resize_y - src_ratio = self.width / self.height - dst_ratio = self.hr_resize_x / self.hr_resize_y - - if src_ratio < dst_ratio: - self.hr_upscale_to_x = self.hr_resize_x - self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width - else: - self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height - self.hr_upscale_to_y = self.hr_resize_y - - self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f - self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f + self.calculate_target_resolution() if not state.processing_has_refined_job_count: if state.job_count == -1: diff --git a/modules/ui.py b/modules/ui.py index 4e1daa8d..cbad3afe 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -14,7 +14,7 @@ from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_grad from modules import gradio_extensons # noqa: F401 from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, devices, ui_extra_networks -from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion from modules.paths import script_path from modules.ui_common import create_refresh_button from modules.ui_gradio_extensions import reload_javascript @@ -94,11 +94,9 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz return "" p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) + p.calculate_target_resolution() - with devices.autocast(): - p.init([""], [0], [0]) - - return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" + return f"from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" def resize_from_to_html(width, height, scale_by): @@ -436,11 +434,12 @@ def create_ui(): with FormRow(elem_classes="checkboxes-row", variant="compact"): restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") - hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) elif category == "hires_fix": - with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: + with InputAccordion(False, label="Hires. 
fix") as enable_hr: + with enable_hr.extra(): + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") @@ -568,13 +567,6 @@ def create_ui(): show_progress=False, ) - enable_hr.change( - fn=lambda x: gr_show(x), - inputs=[enable_hr], - outputs=[hr_options], - show_progress = False, - ) - txt2img_paste_fields = [ (toprow.prompt, "Prompt"), (toprow.negative_prompt, "Negative prompt"), @@ -594,7 +586,6 @@ def create_ui(): (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d)), - (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d))), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), (hr_second_pass_steps, "Hires steps"), diff --git a/modules/ui_components.py b/modules/ui_components.py index 8f8a7088..598ce738 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -72,3 +72,34 @@ class DropdownEditable(FormComponent, gr.Dropdown): def get_block_name(self): return "dropdown" + +class InputAccordion(gr.Checkbox): + global_index = 0 + + def __init__(self, value, **kwargs): + self.accordion_id = kwargs.get('elem_id') + if self.accordion_id is None: + self.accordion_id = f"input-accordion-{self.global_index}" + self.global_index += 1 + + kwargs['elem_id'] = self.accordion_id + "-checkbox" + kwargs['visible'] = False + super().__init__(value, **kwargs) + + self.change(fn=None, _js='function(checked){ inputAccordionChecked("' + self.accordion_id + '", checked); }', inputs=[self]) + + self.accordion = gr.Accordion(kwargs.get('label', 'Accordion'), open=value, elem_id=self.accordion_id, elem_classes=['input-accordion']) + + def extra(self): + return gr.Column(elem_id=self.accordion_id + '-extra', elem_classes='input-accordion-extra', min_width=0) + + def __enter__(self): + self.accordion.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.accordion.__exit__(exc_type, exc_val, exc_tb) + + def get_block_name(self): + return "checkbox" + diff --git a/style.css b/style.css index dfd5ca36..5163e53c 100644 --- a/style.css +++ b/style.css @@ -329,12 +329,6 @@ div.gradio-accordion { border-radius: 0 0.5rem 0.5rem 0; } -#txtimg_hr_finalres{ - min-height: 0 !important; - padding: .625rem .75rem; - margin-left: -0.75em -} - #img2img_scale_resolution_preview.block{ display: flex; align-items: end; @@ -1016,3 +1010,12 @@ div.block.gradio-box.popup-dialog, .popup-dialog { div.block.gradio-box.popup-dialog > div:last-child, .popup-dialog > div:last-child{ margin-top: 1em; } + +div.block.input-accordion{ + margin-bottom: 0.4em; +} + +.input-accordion-extra{ + flex: 0 0 auto !important; + margin: 0 0.5em 0 auto; +} -- cgit v1.2.1 From 0a0a9d4fe92b97bdaf827532bc6a132e8ca41987 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 10 Aug 2023 
18:05:17 +0900 Subject: extra networks metadata indent --- modules/ui_extra_networks_user_metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py index a5423fd8..cda471e4 100644 --- a/modules/ui_extra_networks_user_metadata.py +++ b/modules/ui_extra_networks_user_metadata.py @@ -125,7 +125,7 @@ class UserMetadataEditor: basename, ext = os.path.splitext(filename) with open(basename + '.json', "w", encoding="utf8") as file: - json.dump(metadata, file) + json.dump(metadata, file, indent=4) def save_user_metadata(self, name, desc, notes): user_metadata = self.get_user_metadata(name) -- cgit v1.2.1 From 33446acf47a8c3e0c0964782189562df3c4bcf4f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 12:41:41 +0300 Subject: face restoration and tiling moved to settings - use "Options in main UI" setting if you want them back --- modules/generation_parameters_copypaste.py | 2 ++ modules/img2img.py | 4 +--- modules/processing.py | 11 +++++++++-- modules/shared_options.py | 2 ++ modules/txt2img.py | 4 +--- modules/ui.py | 12 ++---------- 6 files changed, 17 insertions(+), 18 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index d932c67d..bdff3266 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -343,6 +343,8 @@ infotext_to_setting_name_mapping = [ ('Pad conds', 'pad_cond_uncond'), ('VAE Encoder', 'sd_vae_encode_method'), ('VAE Decoder', 'sd_vae_decode_method'), + ('Tiling', 'tiling'), + ('Face restoration', 'face_restoration'), ] diff --git a/modules/img2img.py b/modules/img2img.py index e06ac1d6..c7bbbac8 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -116,7 +116,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal process_images(p) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, 
resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -179,8 +179,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s cfg_scale=cfg_scale, width=width, height=height, - restore_faces=restore_faces, - tiling=tiling, init_images=[image], mask=mask, mask_blur=mask_blur, diff --git a/modules/processing.py b/modules/processing.py index 7819644c..68a8f1c6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -111,7 +111,7 @@ class StableDiffusionProcessing: cached_uc = [None, None] cached_c = [None, None] - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -564,7 +564,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "CFG scale": p.cfg_scale, "Image CFG scale": getattr(p, 'image_cfg_scale', None), "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index], - "Face restoration": (opts.face_restoration_model if p.restore_faces else None), + "Face restoration": opts.face_restoration_model if p.restore_faces else 
None, "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra), @@ -580,6 +580,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" and opts.randn_source != "NV" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, + "Tiling": "True" if p.tiling else None, **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, "User": p.user if opts.add_user_name_to_info else None, @@ -645,6 +646,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: seed = get_fixed_seed(p.seed) subseed = get_fixed_seed(p.subseed) + if p.restore_faces is None: + p.restore_faces = opts.face_restoration + + if p.tiling is None: + p.tiling = opts.tiling + modules.sd_hijack.model_hijack.apply_circular(p.tiling) modules.sd_hijack.model_hijack.clear_comments() diff --git a/modules/shared_options.py b/modules/shared_options.py index 7468bc81..f72859d9 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -92,6 +92,7 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { })) options_templates.update(options_section(('face-restoration', "Face restoration"), { + "face_restoration": OptionInfo(False, "Restore faces").info("will use a third-party model on generation result to reconstruct faces"), "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), @@ -138,6 +139,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), + "tiling": OptionInfo(False, "Tiling").info("produce a tileable picture"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { diff --git a/modules/txt2img.py b/modules/txt2img.py index edad8930..5ea96bba 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html import gradio as gr -def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, 
denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): +def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) p = processing.StableDiffusionProcessingTxt2Img( @@ -32,8 +32,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step cfg_scale=cfg_scale, width=width, height=height, - restore_faces=restore_faces, - tiling=tiling, enable_hr=enable_hr, denoising_strength=denoising_strength if enable_hr else None, hr_scale=hr_scale, diff --git a/modules/ui.py b/modules/ui.py index cbad3afe..09a826fd 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -432,8 +432,7 @@ def create_ui(): elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + pass elif category == "hires_fix": with InputAccordion(False, label="Hires. 
fix") as enable_hr: @@ -516,8 +515,6 @@ def create_ui(): toprow.ui_styles.dropdown, steps, sampler_name, - restore_faces, - tiling, batch_count, batch_size, cfg_scale, @@ -572,7 +569,6 @@ def create_ui(): (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_name, "Sampler"), - (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (seed, "Seed"), (width, "Size-1"), @@ -792,8 +788,7 @@ def create_ui(): elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + pass elif category == "batch": if not opts.dimensions_and_batch_together: @@ -866,8 +861,6 @@ def create_ui(): mask_blur, mask_alpha, inpainting_fill, - restore_faces, - tiling, batch_count, batch_size, cfg_scale, @@ -959,7 +952,6 @@ def create_ui(): (toprow.negative_prompt, "Negative prompt"), (steps, "Steps"), (sampler_name, "Sampler"), - (restore_faces, "Face restoration"), (cfg_scale, "CFG scale"), (image_cfg_scale, "Image CFG scale"), (seed, "Seed"), -- cgit v1.2.1 From 6c23061a7d03791babeec569d28a691758e4322d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 12:50:03 +0300 Subject: avoid importing gradio in tests because it spams warnings --- test/conftest.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/test/conftest.py b/test/conftest.py index 0723f62a..31a5d9ea 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,17 +1,25 @@ import os import pytest -from PIL import Image -from gradio.processing_utils import encode_pil_to_base64 +import base64 + test_files_path = os.path.dirname(__file__) + "/test_files" +def file_to_base64(filename): + with open(filename, "rb") as file: + data = file.read() + + base64_str = str(base64.b64encode(data), "utf-8") + return "data:image/png;base64," + base64_str + + @pytest.fixture(scope="session") # session so we don't read this over and over def img2img_basic_image_base64() -> str: - return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "img2img_basic.png"))) + return file_to_base64(os.path.join(test_files_path, "img2img_basic.png")) @pytest.fixture(scope="session") # session so we don't read this over and over def mask_basic_image_base64() -> str: - return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "mask_basic.png"))) + return file_to_base64(os.path.join(test_files_path, "mask_basic.png")) -- cgit v1.2.1 From faca86620d53b122eac00fd0a4ab54b3a2e3108e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 12:58:00 +0300 Subject: linter fixes --- javascript/inputAccordion.js | 12 ++++++------ modules/ui.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js index a5eef229..f2839852 100644 --- a/javascript/inputAccordion.js +++ b/javascript/inputAccordion.js @@ -10,16 +10,16 @@ var observerAccordionOpen = new MutationObserver(function(mutations) { checkbox.checked = open; updateInput(checkbox); - extra = gradioApp().querySelector('#' + accordion.id + "-extra"); - if(extra){ + var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); + if (extra) { extra.style.display = open ? 
"" : "none"; } }); }); -function inputAccordionChecked(id, checked){ +function inputAccordionChecked(id, checked) { var label = gradioApp().querySelector('#' + id + " .label-wrap"); - if(label.classList.contains('open') != checked){ + if (label.classList.contains('open') != checked) { label.click(); } } @@ -30,8 +30,8 @@ onUiLoaded(function() { observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); - if(extra){ - labelWrap.insertBefore(extra, labelWrap.lastElementChild) + if (extra) { + labelWrap.insertBefore(extra, labelWrap.lastElementChild); } } }); diff --git a/modules/ui.py b/modules/ui.py index 09a826fd..b87e95a6 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, devices, ui_extra_networks +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion from modules.paths import script_path from modules.ui_common import create_refresh_button -- cgit v1.2.1 From 08838105923255e66ef0d58115fc9ce671d202ae Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 13:02:50 +0300 Subject: comment for InputAccordion --- modules/ui_components.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/modules/ui_components.py b/modules/ui_components.py index 598ce738..9df9ddf4 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -74,6 +74,11 @@ class DropdownEditable(FormComponent, gr.Dropdown): class InputAccordion(gr.Checkbox): + """A gr.Accordion that can be used as an input - returns True if open, False if closed. + + Actaully just a hidden checkbox, but creates an accordion that follows and is followed by the state of the checkbox. + """ + global_index = 0 def __init__(self, value, **kwargs): @@ -91,6 +96,19 @@ class InputAccordion(gr.Checkbox): self.accordion = gr.Accordion(kwargs.get('label', 'Accordion'), open=value, elem_id=self.accordion_id, elem_classes=['input-accordion']) def extra(self): + """Allows you to put something into the label of the accordion. + + Use it like this: + + ``` + with InputAccordion(False, label="Accordion") as acc: + with acc.extra(): + FormHTML(value="hello", min_width=0) + + ... 
From 386202895f875981accd16ad3ff45d0df24409e7 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 10 Aug 2023 06:17:45 -0400
Subject: Add env var for cache file

---
 modules/cache.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/modules/cache.py b/modules/cache.py
index 71fe6302..a7cd3aeb 100644
--- a/modules/cache.py
+++ b/modules/cache.py
@@ -1,11 +1,12 @@
 import json
+import os
 import os.path
 import threading
 import time
 
 from modules.paths import data_path, script_path
 
-cache_filename = os.path.join(data_path, "cache.json")
+cache_filename = os.environ.get('SD_WEBUI_CACHE_FILE', os.path.join(data_path, "cache.json"))
 
 cache_data = None
 cache_lock = threading.Lock()
-- cgit v1.2.1

From ed01d2ee3b22f7d01f218e3b7a3571dc7a2c54c0 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Thu, 10 Aug 2023 13:45:25 +0300
Subject: another fix, a different approach

---
 .../canvas-zoom-and-pan/javascript/zoom.js | 23 ++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index e7616b98..30a74834 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -12,6 +12,7 @@ onUiLoaded(async() => {
         "Sketch": elementIDs.sketch
     };
 
+    // Helper functions
     // Get active tab
     function getActiveTab(elements, all = false) {
@@ -657,17 +658,20 @@ onUiLoaded(async() => {
         // Simulation of the function to put a long image into the screen.
         // We detect if an image has a scroll bar or not, make a fullscreen to reveal the image, then reduce it to fit into the element.
         // We hide the image and show it to the user when it is ready. 
- function autoExpand(e) { + + targetElement.isExpanded = false; + function autoExpand() { const canvas = document.querySelector(`${elemId} canvas[key="interface"]`); const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch; if (canvas && isMainTab) { - if (hasHorizontalScrollbar(targetElement)) { + if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) { targetElement.style.visibility = "hidden"; setTimeout(() => { fitToScreen(); resetZoom(); targetElement.style.visibility = "visible"; + targetElement.isExpanded = true; }, 10); } } @@ -675,9 +679,24 @@ onUiLoaded(async() => { targetElement.addEventListener("mousemove", getMousePosition); + //observers + // Creating an observer with a callback function to handle DOM changes + const observer = new MutationObserver((mutationsList, observer) => { + for (let mutation of mutationsList) { + // If the style attribute of the canvas has changed, by observation it happens only when the picture changes + if (mutation.type === 'attributes' && mutation.attributeName === 'style' && + mutation.target.tagName.toLowerCase() === 'canvas') { + targetElement.isExpanded = false; + setTimeout(resetZoom, 10); + } + } + }); + // Apply auto expand if enabled if (hotkeysConfig.canvas_auto_expand) { targetElement.addEventListener("mousemove", autoExpand); + // Set up an observer to track attribute changes + observer.observe(targetElement, {attributes: true, childList: true, subtree: true}); } // Handle events only inside the targetElement -- cgit v1.2.1 From 4d93f48f090e19a81d1608837e8e6449045601a8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 15:32:54 +0300 Subject: fix for multiple input accordions --- modules/ui_components.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_components.py b/modules/ui_components.py index 9df9ddf4..bfe2fbd9 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -84,8 +84,8 @@ class InputAccordion(gr.Checkbox): def __init__(self, value, **kwargs): self.accordion_id = kwargs.get('elem_id') if self.accordion_id is None: - self.accordion_id = f"input-accordion-{self.global_index}" - self.global_index += 1 + self.accordion_id = f"input-accordion-{InputAccordion.global_index}" + InputAccordion.global_index += 1 kwargs['elem_id'] = self.accordion_id + "-checkbox" kwargs['visible'] = False -- cgit v1.2.1 From 237b704172480561a7616f3b695ffc2bd215957f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 10 Aug 2023 21:42:26 +0900 Subject: use new style constructor --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index b87e95a6..587d2bd8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -628,7 +628,7 @@ def create_ui(): extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs") extra_tabs.__enter__() - with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow().style(equal_height=False): + with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow(equal_height=False): with gr.Column(variant='compact', elem_id="img2img_settings"): copy_image_buttons = [] copy_image_destinations = {} -- cgit v1.2.1 From 1b3093fe3aedb20aa8d505ceeea7900ac592e6fe Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 15:58:53 +0300 Subject: fix --use-textbox-seed --- 
 modules/processing.py | 12 ++++++++++--
 modules/ui.py         |  6 +++++-
 modules/ui_loadsave.py |  5 ++++-
 3 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 68a8f1c6..f06c374a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -521,7 +521,15 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
 
 
 def get_fixed_seed(seed):
-    if seed is None or seed == '' or seed == -1:
+    if seed == '' or seed is None:
+        seed = -1
+    elif isinstance(seed, str):
+        try:
+            seed = int(seed)
+        except Exception:
+            seed = -1
+
+    if seed == -1:
         return int(random.randrange(4294967294))
 
     return seed
@@ -728,7 +736,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             # strength, which is saved as "Model Strength: 1.0" in the infotext
             if n == 0:
                 with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
-                    processed = Processed(p, [], p.seed, "")
+                    processed = Processed(p, [])
                     file.write(processed.infotext(p, 0))
 
             p.setup_conds()
diff --git a/modules/ui.py b/modules/ui.py
index b87e95a6..e7433cbd 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -144,7 +144,11 @@ def interrogate_deepbooru(image):
 
 def create_seed_inputs(target_interface):
     with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"):
-        seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed")
+        if cmd_opts.use_textbox_seed:
+            seed = gr.Textbox(label='Seed', value="", elem_id=f"{target_interface}_seed")
+        else:
+            seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed")
+
         random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed')
         reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed')
 
diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py
index 0052a5cc..99d763e1 100644
--- a/modules/ui_loadsave.py
+++ b/modules/ui_loadsave.py
@@ -8,7 +8,7 @@ from modules.ui_components import ToolButton
 
 
 class UiLoadsave:
-    """allows saving and restorig default values for gradio components"""
+    """allows saving and restoring default values for gradio components"""
 
     def __init__(self, filename):
         self.filename = filename
@@ -48,6 +48,9 @@ class UiLoadsave:
             elif condition and not condition(saved_value):
                 pass
             else:
+                if isinstance(x, gr.Textbox) and field == 'value':  # due to an undesirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies
+                    saved_value = str(saved_value)
+
                 setattr(obj, field, saved_value)
                 if init_field is not None:
                     init_field(saved_value)
-- cgit v1.2.1
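The reworked `get_fixed_seed` accepts strings from the textbox seed widget and falls back to a random seed for anything unparsable. A self-contained behavioral sketch of the same normalization, with the function body copied from the diff above:

```python
import random

def get_fixed_seed(seed):
    # '' and None mean "no seed given"; strings are coerced to int if possible
    if seed == '' or seed is None:
        seed = -1
    elif isinstance(seed, str):
        try:
            seed = int(seed)
        except Exception:
            seed = -1

    # -1 (explicit or as a fallback) picks a random seed
    if seed == -1:
        return int(random.randrange(4294967294))

    return seed

assert get_fixed_seed(1234) == 1234
assert get_fixed_seed("1234") == 1234          # textbox input now works
assert 0 <= get_fixed_seed("") < 4294967294    # empty -> random
assert 0 <= get_fixed_seed("7.5") < 4294967294 # unparsable -> random
```
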
From 4f6582cb668d4f4beea64f1c7baae721c85cfc69 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 10 Aug 2023 16:10:42 +0300
Subject: add precision=0 to gr.Number seed

---
 modules/ui.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/ui.py b/modules/ui.py
index e7433cbd..c08f412d 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -147,7 +147,7 @@ def create_seed_inputs(target_interface):
         if cmd_opts.use_textbox_seed:
             seed = gr.Textbox(label='Seed', value="", elem_id=f"{target_interface}_seed")
         else:
-            seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed")
+            seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed", precision=0)
 
         random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed')
         reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed')
@@ -159,7 +159,7 @@ def create_seed_inputs(target_interface):
     with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1:
         seed_extras.append(seed_extra_row_1)
-        subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed")
+        subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed", precision=0)
         random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed")
         reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed")
         subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength")
-- cgit v1.2.1

From b13806c15065bd9a91edef2b8516c461ce585361 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 10 Aug 2023 16:15:34 +0300
Subject: fix a bug preventing normal operation if a string is added to a gr.Number component via ui-config.json

---
 modules/ui_loadsave.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py
index 99d763e1..ef6b0154 100644
--- a/modules/ui_loadsave.py
+++ b/modules/ui_loadsave.py
@@ -50,6 +50,8 @@ class UiLoadsave:
             else:
                 if isinstance(x, gr.Textbox) and field == 'value':  # due to an undesirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies
                     saved_value = str(saved_value)
+                elif isinstance(x, gr.Number) and field == 'value':
+                    saved_value = float(saved_value)
 
                 setattr(obj, field, saved_value)
                 if init_field is not None:
-- cgit v1.2.1

From 045f7408926aec4bf7e8fcb09333a3f45783d239 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Thu, 10 Aug 2023 16:17:52 +0300
Subject: Height fix

---
 extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 30a74834..72c8ba87 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -378,6 +378,11 @@ onUiLoaded(async() => {
         toggleOverlap("off");
         fullScreenMode = false;
 
+        const closeBtn = targetElement.querySelector("button[aria-label='Remove Image']");
+        if (closeBtn) {
+            closeBtn.addEventListener("click", resetZoom);
+        }
+
         if (
             canvas &&
             parseFloat(canvas.style.width) > 865 &&
-- cgit v1.2.1

From 9d78d317ae492db59ebf8b31fda9a049f6c9bd14 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 10 Aug 2023 16:22:10 +0300
Subject: add VAE to infotext

---
 modules/generation_parameters_copypaste.py |  1 +
 modules/processing.py                      |  2 ++
 modules/sd_vae.py                          | 16 +++++++++++++++-
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index bdff3266..6ace29cf 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -341,6 +341,7 @@ infotext_to_setting_name_mapping = [
     ('RNG', 'randn_source'),
     ('NGMS', 's_min_uncond'),
     ('Pad conds', 'pad_cond_uncond'),
+    ('VAE', 'sd_vae'),
     ('VAE Encoder', 'sd_vae_encode_method'),
     ('VAE Decoder', 'sd_vae_decode_method'),
     ('Tiling', 'tiling'),
diff --git a/modules/processing.py b/modules/processing.py
index f06c374a..44d47e8c 100644
--- a/modules/processing.py
+++ 
b/modules/processing.py @@ -576,6 +576,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra), + "VAE hash": sd_vae.get_loaded_vae_hash() if opts.add_model_hash_to_info else None, + "VAE": sd_vae.get_loaded_vae_name() if opts.add_model_name_to_info else None, "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])), "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength), "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 5ac1ac31..1db01992 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -2,7 +2,7 @@ import os import collections from dataclasses import dataclass -from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack +from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack, hashes import glob from copy import deepcopy @@ -20,6 +20,20 @@ checkpoint_info = None checkpoints_loaded = collections.OrderedDict() +def get_loaded_vae_name(): + if loaded_vae_file is None: + return None + + return os.path.basename(loaded_vae_file) + + +def get_loaded_vae_hash(): + if loaded_vae_file is None: + return None + + return hashes.sha256(loaded_vae_file, 'vae')[0:10] + + def get_base_vae(model): if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model: return base_vae -- cgit v1.2.1 From 070b034cd5b49eb5056a18b43f88aa223fec9e0b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 16:42:26 +0300 Subject: put infotext label for setting into OptionInfo definition rather than in a separate list --- modules/generation_parameters_copypaste.py | 39 ++++++--------------- modules/options.py | 4 ++- modules/shared_options.py | 56 +++++++++++++++--------------- 3 files changed, 42 insertions(+), 57 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 6ace29cf..386517ac 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -316,37 +316,18 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model infotext_to_setting_name_mapping = [ - ('Clip skip', 'CLIP_stop_at_last_layers', ), + +] +"""Mapping of infotext labels to setting names. Only left for backwards compatibility - use OptionInfo(..., infotext='...') instead. 
+Example content:
+
+infotext_to_setting_name_mapping = [
     ('Conditional mask weight', 'inpainting_mask_weight'),
     ('Model hash', 'sd_model_checkpoint'),
     ('ENSD', 'eta_noise_seed_delta'),
     ('Schedule type', 'k_sched_type'),
-    ('Schedule max sigma', 'sigma_max'),
-    ('Schedule min sigma', 'sigma_min'),
-    ('Schedule rho', 'rho'),
-    ('Noise multiplier', 'initial_noise_multiplier'),
-    ('Eta', 'eta_ancestral'),
-    ('Eta DDIM', 'eta_ddim'),
-    ('Sigma churn', 's_churn'),
-    ('Sigma tmin', 's_tmin'),
-    ('Sigma tmax', 's_tmax'),
-    ('Sigma noise', 's_noise'),
-    ('Discard penultimate sigma', 'always_discard_next_to_last_sigma'),
-    ('UniPC variant', 'uni_pc_variant'),
-    ('UniPC skip type', 'uni_pc_skip_type'),
-    ('UniPC order', 'uni_pc_order'),
-    ('UniPC lower order final', 'uni_pc_lower_order_final'),
-    ('Token merging ratio', 'token_merging_ratio'),
-    ('Token merging ratio hr', 'token_merging_ratio_hr'),
-    ('RNG', 'randn_source'),
-    ('NGMS', 's_min_uncond'),
-    ('Pad conds', 'pad_cond_uncond'),
-    ('VAE', 'sd_vae'),
-    ('VAE Encoder', 'sd_vae_encode_method'),
-    ('VAE Decoder', 'sd_vae_decode_method'),
-    ('Tiling', 'tiling'),
-    ('Face restoration', 'face_restoration'),
 ]
+"""
 
 
 def create_override_settings_dict(text_pairs):
@@ -367,7 +348,8 @@ def create_override_settings_dict(text_pairs):
 
             params[k] = v.strip()
 
-    for param_name, setting_name in infotext_to_setting_name_mapping:
+    mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
+    for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
         value = params.get(param_name, None)
 
         if value is None:
@@ -421,7 +403,8 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
         def paste_settings(params):
             vals = {}
 
-            for param_name, setting_name in infotext_to_setting_name_mapping:
+            mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
+            for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
                 if param_name in already_handled_fields:
                     continue
 
diff --git a/modules/options.py b/modules/options.py
index 59cb75ec..db1fb157 100644
--- a/modules/options.py
+++ b/modules/options.py
@@ -8,7 +8,7 @@ from modules.shared_cmd_options import cmd_opts
 
 
 class OptionInfo:
-    def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''):
+    def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None):
         self.default = default
         self.label = label
         self.component = component
@@ -24,6 +24,8 @@ class OptionInfo:
         self.comment_after = comment_after
         """HTML text that will be added before label in UI"""
 
+        self.infotext = infotext
+
     def link(self, label, url):
         self.comment_before += f"[{label}]"
         return self
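With the infotext label attached to each OptionInfo, both consumers above can derive the label-to-setting pairs with the same comprehension instead of a hand-maintained list. A reduced, self-contained sketch of that lookup; the OptionInfo class and settings table here are stand-ins, and real infotext parsing is more involved than simple "Label: value" splitting:

```python
# Reduced sketch of infotext -> setting-name resolution.
class OptionInfo:
    def __init__(self, default, label, infotext=None):
        self.default, self.label, self.infotext = default, label, infotext

data_labels = {
    "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", infotext="Clip skip"),
    "sd_vae": OptionInfo("Automatic", "SD VAE", infotext="VAE"),
    "tiling": OptionInfo(False, "Tiling", infotext="Tiling"),
}

# same comprehension as in create_override_settings_dict above
mapping = [(info.infotext, k) for k, info in data_labels.items() if info.infotext]

def override_settings(pairs):
    params = dict(p.split(": ", 1) for p in pairs)
    return {setting: params[label] for label, setting in mapping if label in params}

print(override_settings(["Clip skip: 2", "VAE: anything.safetensors"]))
# -> {'CLIP_stop_at_last_layers': '2', 'sd_vae': 'anything.safetensors'}
```
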
"face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), @@ -127,7 +127,7 @@ options_templates.update(options_section(('training', "Training"), { })) options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints), + "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints, infotext='Model hash'), "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), @@ -136,10 +136,10 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), - "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), + "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), - "tiling": OptionInfo(False, "Tiling").info("produce a tileable picture"), + "tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { @@ -157,16 +157,16 @@ image into latent space representation and back. Latent space representation is For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
"""), "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), - "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), + "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list, infotext='VAE').info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"), "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), - "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), + "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Encoder').info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), + "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Decoder').info("method to decode latent to image"), })) options_templates.update(options_section(('img2img', "img2img"), { - "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), + "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Conditional mask weight'), + "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}, infotext='Noise multiplier'), "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), @@ -181,10 +181,10 @@ options_templates.update(options_section(('img2img', "img2img"), { options_templates.update(options_section(('optimizations', "Optimizations"), { "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", 
"https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), - "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), + "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), + "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"), + "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), })) @@ -284,23 +284,23 @@ options_templates.update(options_section(('ui', "Live previews"), { options_templates.update(options_section(('sampler-params', "Sampler parameters"), { "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(), - "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), - "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), + "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unperdictable results"), + "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), - 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, 
{"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), - 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), - 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), - 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), - 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), - 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), - 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), - 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), - 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), - 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), - 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), + 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}, infotext='Sigma churn').info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), + 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}, infotext='Sigma tmin').info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), + 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}, infotext='Sigma tmax').info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), + 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}, infotext='Sigma noise').info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), + 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}, infotext='Schedule type').info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), + 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number, infotext='Schedule max sigma').info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), + 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number, infotext='Schedule min sigma').info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), + 'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default 
(7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), + 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}, infotext='ENSD').info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), + 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma", infotext='Discard penultimate sigma').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), + 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}, infotext='UniPC variant'), + 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}, infotext='UniPC skip type'), + 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"), + 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'), })) options_templates.update(options_section(('postprocessing', "Postprocessing"), { -- cgit v1.2.1 From 4412398c4b40f0dd3a1692f65141374770fbc3da Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 10 Aug 2023 22:44:33 +0900 Subject: catch float ValueError default -1 --- modules/ui_loadsave.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py index ef6b0154..a96c71b2 100644 --- a/modules/ui_loadsave.py +++ b/modules/ui_loadsave.py @@ -51,7 +51,10 @@ class UiLoadsave: if isinstance(x, gr.Textbox) and field == 'value': # due to an undersirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies saved_value = str(saved_value) elif isinstance(x, gr.Number) and field == 'value': - saved_value = float(saved_value) + try: + saved_value = float(saved_value) + except ValueError: + saved_value = -1 setattr(obj, field, saved_value) if init_field is not None: -- cgit v1.2.1 From ac8a5d18d3ede6bcb8fa5a3da1c7c28e064cd65d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 17:04:59 +0300 Subject: resolve merge issues --- modules/sd_models.py | 7 +++++-- modules/sd_samplers_kdiffusion.py | 3 +-- modules/shared_options.py | 2 ++ 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index f6cb2f34..a178adca 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -640,8 +640,11 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer): timer.record("send model to device") model_data.set_sd_model(already_loaded) - shared.opts.data["sd_model_checkpoint"] = already_loaded.sd_checkpoint_info.title - shared.opts.data["sd_checkpoint_hash"] = already_loaded.sd_checkpoint_info.sha256 + + if not SkipWritingToConfig.skip: + shared.opts.data["sd_model_checkpoint"] = already_loaded.sd_checkpoint_info.title + shared.opts.data["sd_checkpoint_hash"] = already_loaded.sd_checkpoint_info.sha256 + print(f"Using already loaded model {already_loaded.sd_checkpoint_info.title}: done in {timer.summary()}") return model_data.sd_model elif shared.opts.sd_checkpoints_limit > 1 and len(model_data.loaded_sd_models) < shared.opts.sd_checkpoints_limit: diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index e1854980..95a43cef 100644 --- 
a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,8 +1,7 @@ import torch import inspect import k_diffusion.sampling -from modules import sd_samplers_common, sd_samplers_extra -from modules.sd_samplers_cfg_denoiser import CFGDenoiser +from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser from modules.shared import opts import modules.shared as shared diff --git a/modules/shared_options.py b/modules/shared_options.py index 9ae51f18..1e5b64ea 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -140,6 +140,8 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), "tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"), + "sd_refiner_checkpoint": OptionInfo("None", "Refiner checkpoint", gr.Dropdown, lambda: {"choices": ["None"] + shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints, infotext="Refiner").info("switch to another model in the middle of generation"), + "sd_refiner_switch_at": OptionInfo(1.0, "Refiner switch at", gr.Slider, {"minimum": 0.01, "maximum": 1.0, "step": 0.01}, infotext='Refiner switch at').info("fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation"), })) options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { -- cgit v1.2.1 From 5a705c246880c26ec4fc940dae0da5ecdd2bff50 Mon Sep 17 00:00:00 2001 From: Jabasukuriputo Wang Date: Thu, 10 Aug 2023 09:18:10 -0500 Subject: rm dir on failed clone, disable autofix for fetch --- modules/launch_utils.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 90c00dd2..953f968b 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -3,6 +3,7 @@ import logging import re import subprocess import os +import shutil import sys import importlib.util import platform @@ -152,10 +153,8 @@ def run_git(dir, name, command, desc=None, errdesc=None, custom_env=None, live: try: return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live) except RuntimeError: - pass - - if not autofix: - return None + if not autofix: + raise print(f"{errdesc}, attempting autofix...") git_fix_workspace(dir, name) @@ -174,13 +173,17 @@ def git_clone(url, dir, name, commithash=None): if current_hash == commithash: return - run_git('fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") + run_git('fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}", autofix=False) run_git('checkout', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) return - run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True) + try: + run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True) + except RuntimeError: + shutil.rmtree(dir, ignore_errors=True) + raise if commithash is not None: run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout
{name}'s hash: {commithash}") -- cgit v1.2.1 From f4979422dd937f8a14a9eb143b837c958e8557b4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 17:18:33 +0300 Subject: return the line lost during the merge --- modules/sd_samplers_kdiffusion.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 95a43cef..60534b38 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -2,6 +2,7 @@ import torch import inspect import k_diffusion.sampling from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser +from modules.sd_samplers_cfg_denoiser import CFGDenoiser from modules.shared import opts import modules.shared as shared -- cgit v1.2.1 From 4549f2a9cc0ed9045661192e74d146fa21f300d6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 17:21:01 +0300 Subject: lint --- modules/sd_samplers_kdiffusion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 60534b38..d10fe12e 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -2,7 +2,7 @@ import torch import inspect import k_diffusion.sampling from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser -from modules.sd_samplers_cfg_denoiser import CFGDenoiser +from modules.sd_samplers_cfg_denoiser import CFGDenoiser # noqa: F401 from modules.shared import opts import modules.shared as shared -- cgit v1.2.1 From e0906096c57c25c7ecaa583f20af84b1699b8736 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 10 Aug 2023 17:22:08 +0300 Subject: remove unnecessary GFPGAN_PACKAGE (we install GFPGAN from the requirements file) --- modules/launch_utils.py | 6 ------ modules/sysinfo.py | 1 - 2 files changed, 7 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 90c00dd2..bc5f38cc 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -305,7 +305,6 @@ def prepare_environment(): requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20') - gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip") clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip") @@ -352,11 +351,6 @@ def prepare_environment(): ) startup_timer.record("torch GPU test") - - if not is_installed("gfpgan"): - run_pip(f"install {gfpgan_package}", "gfpgan") - startup_timer.record("install gfpgan") - if not is_installed("clip"): run_pip(f"install {clip_package}", "clip") startup_timer.record("install clip") diff --git a/modules/sysinfo.py b/modules/sysinfo.py index 7d906e1f..058e66ce 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -23,7 +23,6 @@ environment_whitelist = { "TORCH_COMMAND", "REQS_FILE", "XFORMERS_PACKAGE", - "GFPGAN_PACKAGE", "CLIP_PACKAGE", "OPENCLIP_PACKAGE", "STABLE_DIFFUSION_REPO", -- cgit v1.2.1 From a75d756a6fc3a9d66d3c1601d5b8aafcbcd57bde Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 10 Aug 2023 23:43:55 +0900 
Subject: use default value if value error --- modules/ui_loadsave.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py index a96c71b2..9a40cf4f 100644 --- a/modules/ui_loadsave.py +++ b/modules/ui_loadsave.py @@ -48,13 +48,13 @@ class UiLoadsave: elif condition and not condition(saved_value): pass else: - if isinstance(x, gr.Textbox) and field == 'value': # due to an undersirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies + if isinstance(x, gr.Textbox) and field == 'value': # due to an undesirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies saved_value = str(saved_value) elif isinstance(x, gr.Number) and field == 'value': try: saved_value = float(saved_value) except ValueError: - saved_value = -1 + return setattr(obj, field, saved_value) if init_field is not None: -- cgit v1.2.1 From 54f926b11d29910df9f813e2e0ea6d35c6f4a50d Mon Sep 17 00:00:00 2001 From: Robert Barron Date: Thu, 10 Aug 2023 07:48:04 -0700 Subject: fix bad merge --- modules/shared.py | 958 +++--------------------------------------------------- 1 file changed, 37 insertions(+), 921 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index a605b08b..d9d01484 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,839 +1,52 @@ -import datetime -import json -import os -import re import sys -import threading -import time -import logging import gradio as gr -import torch -import tqdm -import launch -import modules.interrogate -import modules.memmon -import modules.styles -import modules.devices as devices -from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args +from modules import shared_cmd_options, shared_gradio_themes, options, shared_items from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from ldm.models.diffusion.ddpm import LatentDiffusion -from typing import Optional +from modules import util -log = logging.getLogger(__name__) - -demo = None - -parser = cmd_args.parser - -script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file)) -script_loading.preload_extensions(extensions_builtin_dir, parser) - -if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None: - cmd_opts = parser.parse_args() -else: - cmd_opts, _ = parser.parse_known_args() - - -restricted_opts = { - "samples_filename_pattern", - "directories_filename_pattern", - "outdir_samples", - "outdir_txt2img_samples", - "outdir_img2img_samples", - "outdir_extras_samples", - "outdir_grids", - "outdir_txt2img_grids", - "outdir_save", - "outdir_init_images" -} - -# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json -gradio_hf_hub_themes = [ - "gradio/base", - "gradio/glass", - "gradio/monochrome", - "gradio/seafoam", - "gradio/soft", - "gradio/dracula_test", - "abidlabs/dracula_test", - "abidlabs/Lime", - "abidlabs/pakistan", - "Ama434/neutral-barlow", - "dawood/microsoft_windows", - "finlaymacklon/smooth_slate", - "Franklisi/darkmode", - "freddyaboulton/dracula_revamped", - "freddyaboulton/test-blue", - "gstaff/xkcd", - "Insuz/Mocha", - "Insuz/SimpleIndigo", - "JohnSmith9982/small_and_pretty", - "nota-ai/theme", - "nuttea/Softblue", - "ParityError/Anime", - "reilnuud/polite", - "remilia/Ghostly", - 
"rottenlittlecreature/Moon_Goblin", - "step-3-profit/Midnight-Deep", - "Taithrah/Minimal", - "ysharma/huggingface", - "ysharma/steampunk" -] - - -cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access - -devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ - (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']) - -devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16 -devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 - -device = devices.device -weight_load_location = None if cmd_opts.lowram else "cpu" +cmd_opts = shared_cmd_options.cmd_opts +parser = shared_cmd_options.parser batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram -xformers_available = False -config_filename = cmd_opts.ui_settings_file - -os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) -hypernetworks = {} -loaded_hypernetworks = [] - - -def reload_hypernetworks(): - from modules.hypernetworks import hypernetwork - global hypernetworks - - hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) - - -class State: - skipped = False - interrupted = False - job = "" - job_no = 0 - job_count = 0 - processing_has_refined_job_count = False - job_timestamp = '0' - sampling_step = 0 - sampling_steps = 0 - current_latent = None - current_image = None - current_image_sampling_step = 0 - id_live_preview = 0 - textinfo = None - time_start = None - server_start = None - _server_command_signal = threading.Event() - _server_command: Optional[str] = None - - @property - def need_restart(self) -> bool: - # Compatibility getter for need_restart. - return self.server_command == "restart" - - @need_restart.setter - def need_restart(self, value: bool) -> None: - # Compatibility setter for need_restart. - if value: - self.server_command = "restart" - - @property - def server_command(self): - return self._server_command - - @server_command.setter - def server_command(self, value: Optional[str]) -> None: - """ - Set the server command to `value` and signal that it's been set. - """ - self._server_command = value - self._server_command_signal.set() - - def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]: - """ - Wait for server command to get set; return and clear the value and signal. 
- """ - if self._server_command_signal.wait(timeout): - self._server_command_signal.clear() - req = self._server_command - self._server_command = None - return req - return None - - def request_restart(self) -> None: - self.interrupt() - self.server_command = "restart" - log.info("Received restart request") - - def skip(self): - self.skipped = True - log.info("Received skip request") - - def interrupt(self): - self.interrupted = True - log.info("Received interrupt request") - - def nextjob(self): - if opts.live_previews_enable and opts.show_progress_every_n_steps == -1: - self.do_set_current_image() - - self.job_no += 1 - self.sampling_step = 0 - self.current_image_sampling_step = 0 - - def dict(self): - obj = { - "skipped": self.skipped, - "interrupted": self.interrupted, - "job": self.job, - "job_count": self.job_count, - "job_timestamp": self.job_timestamp, - "job_no": self.job_no, - "sampling_step": self.sampling_step, - "sampling_steps": self.sampling_steps, - } - - return obj - - def begin(self, job: str = "(unknown)"): - self.sampling_step = 0 - self.job_count = -1 - self.processing_has_refined_job_count = False - self.job_no = 0 - self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") - self.current_latent = None - self.current_image = None - self.current_image_sampling_step = 0 - self.id_live_preview = 0 - self.skipped = False - self.interrupted = False - self.textinfo = None - self.time_start = time.time() - self.job = job - devices.torch_gc() - log.info("Starting job %s", job) - - def end(self): - duration = time.time() - self.time_start - log.info("Ending job %s (%.2f seconds)", self.job, duration) - self.job = "" - self.job_count = 0 - - devices.torch_gc() - - def set_current_image(self): - """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" - if not parallel_processing_allowed: - return - - if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1: - self.do_set_current_image() - - def do_set_current_image(self): - if self.current_latent is None: - return - - import modules.sd_samplers - - try: - if opts.show_progress_grid: - self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent)) - else: - self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent)) - - self.current_image_sampling_step = self.sampling_step - - except Exception: - # when switching models during genration, VAE would be on CPU, so creating an image will fail. 
- # we silently ignore this error - errors.record_exception() - - def assign_current_image(self, image): - self.current_image = image - self.id_live_preview += 1 - - -state = State() -state.server_start = time.time() - styles_filename = cmd_opts.styles_file -prompt_styles = modules.styles.StyleDatabase(styles_filename) - -interrogator = modules.interrogate.InterrogateModels("interrogate") - -face_restorers = [] - - -class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): - self.default = default - self.label = label - self.component = component - self.component_args = component_args - self.onchange = onchange - self.section = section - self.refresh = refresh - self.do_not_save = False - - self.comment_before = comment_before - """HTML text that will be added after label in UI""" - - self.comment_after = comment_after - """HTML text that will be added before label in UI""" - - def link(self, label, url): - self.comment_before += f"[{label}]" - return self - - def js(self, label, js_func): - self.comment_before += f"[{label}]" - return self - - def info(self, info): - self.comment_after += f"({info})" - return self - - def html(self, html): - self.comment_after += html - return self - - def needs_restart(self): - self.comment_after += " (requires restart)" - return self - - def needs_reload_ui(self): - self.comment_after += " (requires Reload UI)" - return self - - -class OptionHTML(OptionInfo): - def __init__(self, text): - super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs)) - - self.do_not_save = True - - -def options_section(section_identifier, options_dict): - for v in options_dict.values(): - v.section = section_identifier - - return options_dict - - -def list_checkpoint_tiles(): - import modules.sd_models - return modules.sd_models.checkpoint_tiles() - - -def refresh_checkpoints(): - import modules.sd_models - return modules.sd_models.list_models() - - -def list_samplers(): - import modules.sd_samplers - return modules.sd_samplers.all_samplers - - +config_filename = cmd_opts.ui_settings_file hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} -tab_names = [] - -options_templates = {} - -options_templates.update(options_section(('saving-images', "Saving images/grids"), { - "samples_save": OptionInfo(True, "Always save all generated images"), - "samples_format": OptionInfo('png', 'File format for images'), - "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), - - "grid_save": OptionInfo(True, "Always save all generated image grids"), - "grid_format": OptionInfo('png', 'File format for grids'), - "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"), - "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"), - "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"), - "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - 
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}), - "font": OptionInfo("", "Font for image grids that have text"), - "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}), - "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}), - "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}), - - "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), - "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), - "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), - "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), - "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), - "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"), - "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), - "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), - "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), - "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), - "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), - "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), - "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"), - - "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), - "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), - "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), - "save_init_img": OptionInfo(False, "Save init images when using img2img"), - - "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), - "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), - - "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."), -})) - -options_templates.update(options_section(('saving-paths', "Paths for saving"), { - "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs), - "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs), - "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs), - "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs), - "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two 
directories below", component_args=hide_dirs), - "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), - "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), - "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), - "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), -})) - -options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { - "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), - "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), - "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), - "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), - "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), -})) - -options_templates.update(options_section(('upscaling', "Upscaling"), { - "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), - "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), - "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), -})) - -options_templates.update(options_section(('face-restoration', "Face restoration"), { - "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), - "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), - "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), -})) - -options_templates.update(options_section(('system', "System"), { - "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(), - "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(), - "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), - "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), - "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), - "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), - "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""), - "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading 
.safetensors files.").info("fixes very slow loading speed in some cases"), - "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."), -})) - -options_templates.update(options_section(('training', "Training"), { - "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), - "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), - "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), - "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."), - "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), - "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), - "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), - "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), - "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"), - "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."), - "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."), - "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), -})) - -options_templates.update(options_section(('sd', "Stable Diffusion"), { - "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), - "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}), - "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"), - "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), - "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), - "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), - "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), - "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), - "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), - "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"), -})) -options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), { - "sdxl_crop_top": OptionInfo(0, "crop top coordinate"), - "sdxl_crop_left": OptionInfo(0, "crop left coordinate"), - "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"), - "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"), -})) - -options_templates.update(options_section(('vae', "VAE"), { - "sd_vae_explanation": OptionHTML(""" -VAE is a neural network that transforms a standard RGB -image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling -(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished. -For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling. 
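The role that explanation gives the VAE can be made concrete with a short sketch. This is only an illustration with assumed names (vae.encode, vae.decode, sampler.sample), not the webui's actual API; it just marks where the encode and decode steps run in each mode:

def txt2img(sampler, vae, noise, prompt):
    latent = sampler.sample(noise, prompt)   # denoising runs entirely in latent space
    return vae.decode(latent)                # VAE is needed only to produce the final RGB image

def img2img(sampler, vae, init_image, prompt):
    latent = vae.encode(init_image)          # VAE first turns the user's RGB image into a latent
    latent = sampler.sample(latent, prompt)  # sampling then works on that latent
    return vae.decode(latent)                # and VAE converts the result back to RGB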
-"""), - "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), - "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), - "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), - "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"), - "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"), - "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"), -})) - -options_templates.update(options_section(('img2img', "img2img"), { - "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), - "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), - "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}), - "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(), - "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(), - "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(), - "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(), - "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), - "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), -})) - -options_templates.update(options_section(('optimizations', "Optimizations"), { - "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), - "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), - "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 
0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), - "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), - "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"), -})) - -options_templates.update(options_section(('compatibility', "Compatibility"), { - "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), - "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), - "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), - "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), - "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), - "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), - "use_old_scheduling": OptionInfo(False, "Use old prompt where first pass and hires both used the same timeline, and < 1 meant relative and >= 1 meant absolute"), -})) - -options_templates.update(options_section(('interrogate', "Interrogate"), { - "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), - "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), - "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), - "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), - "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), - "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), - "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), - "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), - "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), - "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), - "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), - "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), 
-})) - -options_templates.update(options_section(('extra_networks', "Extra Networks"), { - "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), - "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), - "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), - "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), - "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), - "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), - "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), - "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), - "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), - "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), - "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), -})) - -options_templates.update(options_section(('ui', "User interface"), { - "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(), - "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(), - "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"), - "return_grid": OptionInfo(True, "Show grid in results for web"), - "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), - "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), - "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), - "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), - "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"), - "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), - "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), - "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(), - "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(), - "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision 
when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(), - "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_reload_ui(), - "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(), - "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(), - "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(), - "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), -})) - - -options_templates.update(options_section(('infotext', "Infotext"), { - "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), - "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), - "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"), - "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), - "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"), - "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""
-<ul style='margin-left: 1.5em'>
-<li>Ignore: keep prompt and styles dropdown as it is.</li>
-<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
-<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
-<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
-</ul>
"""), - -})) - -options_templates.update(options_section(('ui', "Live previews"), { - "show_progressbar": OptionInfo(True, "Show progressbar"), - "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), - "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), - "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), - "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), - "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"), - "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), - "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), -})) - -options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_reload_ui(), - "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), - "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), - "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), - 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), - 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise schedule"), - 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a more steep noise schedule (decreases faster)"), - 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), - 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), - 
-    'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}),
-    'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}),
-    'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"),
-    'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"),
-}))
-
-options_templates.update(options_section(('postprocessing', "Postprocessing"), {
-    'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
-    'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
-    'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
-}))
-
-options_templates.update(options_section((None, "Hidden options"), {
-    "disabled_extensions": OptionInfo([], "Disable these extensions"),
-    "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}),
-    "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"),
-    "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
-}))
-
-
-options_templates.update()
-
-
-class Options:
-    data = None
-    data_labels = options_templates
-    typemap = {int: float}
-
-    def __init__(self):
-        self.data = {k: v.default for k, v in self.data_labels.items()}
-
-    def __setattr__(self, key, value):
-        if self.data is not None:
-            if key in self.data or key in self.data_labels:
-                assert not cmd_opts.freeze_settings, "changing settings is disabled"
-
-                info = opts.data_labels.get(key, None)
-                if info.do_not_save:
-                    return
-
-                comp_args = info.component_args if info else None
-                if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
-                    raise RuntimeError(f"not possible to set {key} because it is restricted")
-
-                if cmd_opts.hide_ui_dir_config and key in restricted_opts:
-                    raise RuntimeError(f"not possible to set {key} because it is restricted")
-
-                self.data[key] = value
-                return
-
-        return super(Options, self).__setattr__(key, value)
-
-    def __getattr__(self, item):
-        if self.data is not None:
-            if item in self.data:
-                return self.data[item]
-
-        if item in self.data_labels:
-            return self.data_labels[item].default
-
-        return super(Options, self).__getattribute__(item)
-
-    def set(self, key, value):
-        """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
-
-        oldval = self.data.get(key, None)
-        if oldval == value:
-            return False
-
-        if self.data_labels[key].do_not_save:
-            return False
-
-        try:
-            setattr(self, key, value)
-        except RuntimeError:
-            return False
-
-        if self.data_labels[key].onchange is not None:
-            try:
-                self.data_labels[key].onchange()
-            except Exception as e:
-                errors.display(e, f"changing setting {key} to {value}")
-                setattr(self, key, oldval)
-                return False
-
-        return True
-
-    def get_default(self, key):
-        """returns the default value for the key"""
-
-        data_label = self.data_labels.get(key)
-        if data_label is None:
-            return None
-
-        return data_label.default
-
-    def save(self, filename):
-        assert not cmd_opts.freeze_settings, "saving settings is disabled"
-
-        with open(filename, "w", encoding="utf8") as file:
-            json.dump(self.data, file, indent=4)
-
-    def same_type(self, x, y):
-        if x is None or y is None:
-            return True
-
-        type_x = self.typemap.get(type(x), type(x))
-        type_y = self.typemap.get(type(y), type(y))
-
-        return type_x == type_y
-
-    def load(self, filename):
-        with open(filename, "r", encoding="utf8") as file:
-            self.data = json.load(file)
-
-        # 1.1.1 quicksettings list migration
-        if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
-            self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
-
-        # 1.4.0 ui_reorder
-        if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
-            self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
-
-        bad_settings = 0
-        for k, v in self.data.items():
-            info = self.data_labels.get(k, None)
-            if info is not None and not self.same_type(info.default, v):
-                print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
-                bad_settings += 1
-
-        if bad_settings > 0:
-            print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
-
-    def onchange(self, key, func, call=True):
-        item = self.data_labels.get(key)
-        item.onchange = func
-
-        if call:
-            func()
-
-    def dumpjson(self):
-        d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
-        d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
-        d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
-        return json.dumps(d)
-
-    def add_option(self, key, info):
-        self.data_labels[key] = info
-
-    def reorder(self):
-        """reorder settings so that all items related to section always go together"""
-
-        section_ids = {}
-        settings_items = self.data_labels.items()
-        for _, item in settings_items:
-            if item.section not in section_ids:
-                section_ids[item.section] = len(section_ids)
-
-        self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
-
-    def cast_value(self, key, value):
-        """casts an arbitrary to the same type as this setting's value with key
-        Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
-        """
-
-        if value is None:
-            return None
-
-        default_value = self.data_labels[key].default
-        if default_value is None:
-            default_value = getattr(self, key, None)
-        if default_value is None:
-            return None
-
-        expected_type = type(default_value)
-        if expected_type == bool and value == "False":
-            value = False
-        else:
-            value = expected_type(value)
-
-        return value
 
+demo = None
 
+device = None
-opts = Options()
-if os.path.exists(config_filename):
-    opts.load(config_filename)
 
+weight_load_location = None
 
+xformers_available = False
 
-class Shared(sys.modules[__name__].__class__):
-    """
-    this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than
-    at program startup.
- """ +hypernetworks = {} - sd_model_val = None +loaded_hypernetworks = [] - @property - def sd_model(self): - import modules.sd_models +state = None - return modules.sd_models.model_data.get_sd_model() +prompt_styles = None - @sd_model.setter - def sd_model(self, value): - import modules.sd_models +interrogator = None - modules.sd_models.model_data.set_sd_model(value) +face_restorers = [] +options_templates = None +opts = None +restricted_opts = None -sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead -sys.modules[__name__].__class__ = Shared +sd_model: LatentDiffusion = None settings_components = None """assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" +tab_names = [] + latent_upscale_default_mode = "Latent" latent_upscale_modes = { "Latent": {"mode": "bilinear", "antialias": False}, @@ -852,121 +65,24 @@ progress_print_out = sys.stdout gradio_theme = gr.themes.Base() +total_tqdm = None -def reload_gradio_theme(theme_name=None): - global gradio_theme - if not theme_name: - theme_name = opts.gradio_theme - - default_theme_args = dict( - font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) - - if theme_name == "Default": - gradio_theme = gr.themes.Default(**default_theme_args) - else: - try: - theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes') - theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json') - if opts.gradio_themes_cache and os.path.exists(theme_cache_path): - gradio_theme = gr.themes.ThemeClass.load(theme_cache_path) - else: - os.makedirs(theme_cache_dir, exist_ok=True) - gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - gradio_theme.dump(theme_cache_path) - except Exception as e: - errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default(**default_theme_args) - - -class TotalTQDM: - def __init__(self): - self._tqdm = None - - def reset(self): - self._tqdm = tqdm.tqdm( - desc="Total progress", - total=state.job_count * state.sampling_steps, - position=1, - file=progress_print_out - ) - - def update(self): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.update() - - def updateTotal(self, new_total): - if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: - return - if self._tqdm is None: - self.reset() - self._tqdm.total = new_total - - def clear(self): - if self._tqdm is not None: - self._tqdm.refresh() - self._tqdm.close() - self._tqdm = None - - -total_tqdm = TotalTQDM() - -mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) -mem_mon.start() - - -def natural_sort_key(s, regex=re.compile('([0-9]+)')): - return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)] - - -def listfiles(dirname): - filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")] - return [file for file in filenames if os.path.isfile(file)] - - -def html_path(filename): - return os.path.join(script_path, "html", filename) - - -def html(filename): - path = html_path(filename) - - if os.path.exists(path): - with open(path, encoding="utf8") as file: - return file.read() - - return "" - - -def walk_files(path, allowed_extensions=None): - if not os.path.exists(path): - return - - if 
-        allowed_extensions = set(allowed_extensions)
-
-    items = list(os.walk(path, followlinks=True))
-    items = sorted(items, key=lambda x: natural_sort_key(x[0]))
-
-    for root, _, files in items:
-        for filename in sorted(files, key=natural_sort_key):
-            if allowed_extensions is not None:
-                _, ext = os.path.splitext(filename)
-                if ext not in allowed_extensions:
-                    continue
-
-            if not opts.list_hidden_files and ("/." in root or "\\." in root):
-                continue
+mem_mon = None
 
-            yield os.path.join(root, filename)
+options_section = options.options_section
+OptionInfo = options.OptionInfo
+OptionHTML = options.OptionHTML
 
+natural_sort_key = util.natural_sort_key
+listfiles = util.listfiles
+html_path = util.html_path
+html = util.html
+walk_files = util.walk_files
+ldm_print = util.ldm_print
 
-def ldm_print(*args, **kwargs):
-    if opts.hide_ldm_prints:
-        return
+reload_gradio_theme = shared_gradio_themes.reload_gradio_theme
 
-    print(*args, **kwargs)
+list_checkpoint_tiles = shared_items.list_checkpoint_tiles
+refresh_checkpoints = shared_items.refresh_checkpoints
+list_samplers = shared_items.list_samplers
+reload_hypernetworks = shared_items.reload_hypernetworks
-- 
cgit v1.2.1

From d456fb797ad9e1f6daddbdaf284ae34cfb2a0656 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 10 Aug 2023 16:04:49 -0400
Subject: fix: Properly return None when VAE hash is None

---
 modules/sd_vae.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 1db01992..fd9a1c2a 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -31,7 +31,9 @@ def get_loaded_vae_hash():
     if loaded_vae_file is None:
         return None
 
-    return hashes.sha256(loaded_vae_file, 'vae')[0:10]
+    sha256 = hashes.sha256(loaded_vae_file, 'vae')
+
+    return sha256[0:10] if sha256 else None
 
 
 def get_base_vae(model):
-- 
cgit v1.2.1
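The guard above matters because the webui's hash helper can return None when no hash is available, and slicing None raises TypeError before the function can report anything useful. A minimal sketch of the failure mode and the fix, with a hypothetical compute_hash standing in for the real cache-backed helper:

    from typing import Optional

    def compute_hash(path: str) -> Optional[str]:
        # stand-in for modules.hashes.sha256(): may return None
        # when the hash is unavailable
        return None

    def get_hash_prefix(path: str) -> Optional[str]:
        sha256 = compute_hash(path)
        # without the guard, None[0:10] would raise TypeError
        return sha256[0:10] if sha256 else None

    print(get_hash_prefix("model.vae.pt"))  # prints: None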
From 4fafc34e498130dcbb2d1a44fbc55fdba31e32d4 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 10 Aug 2023 23:42:58 -0400
Subject: Fix to make LoRA old method setting work

---
 extensions-builtin/Lora/networks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index bc722e90..7e3415ac 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -357,7 +357,7 @@ def network_forward(module, input, original_forward):
         if module is None:
             continue
 
-        y = module.forward(y, input)
+        y = module.forward(input, y)
 
     return y
-- 
cgit v1.2.1

From 77c52ea701bef8d436dd1f05253412807ddff42c Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 10 Aug 2023 18:43:27 +0300
Subject: fix accordion style on img2img

---
 style.css | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/style.css b/style.css
index 5163e53c..4cdce87c 100644
--- a/style.css
+++ b/style.css
@@ -192,7 +192,7 @@ button.custom-button{
     text-align: center;
 }
 
-div.gradio-accordion {
+div.block.gradio-accordion {
     border: 1px solid var(--block-border-color) !important;
     border-radius: 8px !important;
     margin: 2px 0;
-- 
cgit v1.2.1

From 3b2f51602dbd1a8a94706ae71943403f07539b1c Mon Sep 17 00:00:00 2001
From: AnyISalIn
Date: Fri, 11 Aug 2023 20:21:38 +0800
Subject: xyz_grid: support refiner_checkpoint and refiner_switch_at

Signed-off-by: AnyISalIn
---
 scripts/xyz_grid.py | 19 +++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index d37b428f..a45e6d61 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -85,6 +85,23 @@ def confirm_checkpoints(p, xs):
         if modules.sd_models.get_closet_checkpoint_match(x) is None:
             raise RuntimeError(f"Unknown checkpoint: {x}")
 
+
+def apply_refiner_checkpoint(p, x, xs):
+    if x == 'None':
+        p.override_settings['sd_refiner_checkpoint'] = 'None'
+        return
+
+    info = modules.sd_models.get_closet_checkpoint_match(x)
+    if info is None:
+        raise RuntimeError(f"Unknown checkpoint: {x}")
+    p.override_settings['sd_refiner_checkpoint'] = info.name
+
+
+def confirm_refiner_checkpoints(p, xs):
+    for x in xs:
+        if x == 'None':
+            continue
+        if modules.sd_models.get_closet_checkpoint_match(x) is None:
+            raise RuntimeError(f"Unknown checkpoint: {x}")
+
+
 def apply_clip_skip(p, x, xs):
     opts.data["CLIP_stop_at_last_layers"] = x
 
@@ -241,6 +258,8 @@ axis_options = [
     AxisOption("Token merging ratio", float, apply_override('token_merging_ratio')),
     AxisOption("Token merging ratio high-res", float, apply_override('token_merging_ratio_hr')),
     AxisOption("Always discard next-to-last sigma", str, apply_override('always_discard_next_to_last_sigma', boolean=True), choices=boolean_choice(reverse=True)),
+    AxisOption("Refiner checkpoint", str, apply_refiner_checkpoint, format_value=format_remove_path, confirm=confirm_refiner_checkpoints, cost=1.0, choices=lambda: ['None'] + sorted(sd_models.checkpoints_list, key=str.casefold)),
+    AxisOption("Refiner switch at", float, apply_override('sd_refiner_switch_at'))
 ]
-- 
cgit v1.2.1
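Both new axes funnel their values into p.override_settings, the same per-job mechanism the existing apply_override axes use, so the refiner settings are restored once the grid finishes. A reduced sketch of that pattern (the Processing stand-in here is hypothetical; the real object is the processing class from modules/processing.py):

    class Processing:
        def __init__(self):
            # per-job option overrides, applied during generation and reverted after
            self.override_settings = {}

    def apply_override(field):
        def fun(p, x, xs):
            p.override_settings[field] = x
        return fun

    p = Processing()
    apply_override('sd_refiner_switch_at')(p, 0.7, [0.5, 0.7, 0.9])
    print(p.override_settings)  # {'sd_refiner_switch_at': 0.7}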
From 7c9c19b2a23d28d22694320e2db8f1fc83971c5e Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Fri, 11 Aug 2023 11:32:12 -0400
Subject: Refactor postprocessing to use generator to resolve OOM issues

---
 modules/postprocessing.py | 63 +++++++++++++++++++++++------------------------
 1 file changed, 31 insertions(+), 32 deletions(-)

diff --git a/modules/postprocessing.py b/modules/postprocessing.py
index 136e9c88..cf04d38b 100644
--- a/modules/postprocessing.py
+++ b/modules/postprocessing.py
@@ -11,37 +11,32 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
 
     shared.state.begin(job="extras")
 
-    image_data = []
-    image_names = []
     outputs = []
 
-    if extras_mode == 1:
-        for img in image_folder:
-            if isinstance(img, Image.Image):
-                image = img
-                fn = ''
-            else:
-                image = Image.open(os.path.abspath(img.name))
-                fn = os.path.splitext(img.orig_name)[0]
-            image_data.append(image)
-            image_names.append(fn)
-    elif extras_mode == 2:
-        assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled'
-        assert input_dir, 'input directory not selected'
-
-        image_list = shared.listfiles(input_dir)
-        for filename in image_list:
-            try:
-                image = Image.open(filename)
-            except Exception:
-                continue
-            image_data.append(image)
-            image_names.append(filename)
-    else:
-        assert image, 'image not selected'
-
-        image_data.append(image)
-        image_names.append(None)
+    def get_images(extras_mode, image, image_folder, input_dir):
+        if extras_mode == 1:
+            for img in image_folder:
+                if isinstance(img, Image.Image):
+                    image = img
+                    fn = ''
+                else:
+                    image = Image.open(os.path.abspath(img.name))
+                    fn = os.path.splitext(img.orig_name)[0]
+                yield image, fn
+        elif extras_mode == 2:
+            assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled'
+            assert input_dir, 'input directory not selected'
+
+            image_list = shared.listfiles(input_dir)
+            for filename in image_list:
+                try:
+                    image = Image.open(filename)
+                except Exception:
+                    continue
+                yield image, filename
+        else:
+            assert image, 'image not selected'
+            yield image, None
 
     if extras_mode == 2 and output_dir != '':
         outpath = output_dir
@@ -50,14 +45,16 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
 
     infotext = ''
 
-    for image, name in zip(image_data, image_names):
+    for image_data, name in get_images(extras_mode, image, image_folder, input_dir):
+        image_data: Image.Image
+
         shared.state.textinfo = name
 
-        parameters, existing_pnginfo = images.read_info_from_image(image)
+        parameters, existing_pnginfo = images.read_info_from_image(image_data)
         if parameters:
             existing_pnginfo["parameters"] = parameters
 
-        pp = scripts_postprocessing.PostprocessedImage(image.convert("RGB"))
+        pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB"))
 
         scripts.scripts_postproc.run(pp, args)
 
@@ -78,6 +75,8 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
         if extras_mode != 2 or show_extras_results:
             outputs.append(pp.image)
 
+        image_data.close()
+
     devices.torch_gc()
 
     return outputs, ui_common.plaintext_to_html(infotext), ''
-- 
cgit v1.2.1
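The point of the rewrite above is that images are now decoded lazily: the old code opened every file up front and kept the whole list alive for the duration of the batch, while the generator keeps at most one image in memory and image_data.close() releases it before the next iteration. A minimal sketch of the same pattern outside the webui (the file names are made up):

    from PIL import Image

    def iter_images(paths):
        # yield one image at a time instead of accumulating a list,
        # so peak memory stays at a single decoded image
        for path in paths:
            try:
                img = Image.open(path)
            except OSError:
                continue
            yield img, path

    for img, name in iter_images(["a.png", "b.png"]):
        img.convert("RGB")  # process the image here
        img.close()         # release buffers before the next one is opened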
From af27b716e53671c52308d4e101214b0fd4fd5e80 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Fri, 11 Aug 2023 12:22:11 -0400
Subject: Fix color correction by converting image to RGB

---
 modules/processing.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/processing.py b/modules/processing.py
index c048ca25..131c4c3c 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -57,7 +57,7 @@ def apply_color_correction(correction, original_image):
 
     image = blendLayers(image, original_image, BlendType.LUMINOSITY)
 
-    return image
+    return image.convert('RGB')
 
 
 def apply_overlay(image, paste_loc, index, overlays):
-- 
cgit v1.2.1

From f57bc1a21ba3979061752bd7b66e881bd25cc64f Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sat, 12 Aug 2023 12:06:31 +0900
Subject: disable extensions installer with arg

---
 modules/launch_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 2782872e..65eb684f 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -243,7 +243,7 @@ def list_extensions(settings_file):
     disabled_extensions = set(settings.get('disabled_extensions', []))
     disable_all_extensions = settings.get('disable_all_extensions', 'none')
 
-    if disable_all_extensions != 'none':
+    if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions:
         return []
 
     return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]
-- 
cgit v1.2.1

From 64311faa6848d641cc452115e4e1eb47d2a7b519 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 12 Aug 2023 12:39:59 +0300
Subject: put refiner into main UI, into the new accordions section

add VAE from main model into infotext, not from refiner model
option to make scripts UI without gr.Group
fix inconsistencies with refiner when usings samplers that do more denoising than steps
---
 modules/processing.py                 | 22 ++++++++-----
 modules/processing_scripts/refiner.py | 55 +++++++++++++++++++++++++++++++++
 modules/scripts.py                    | 24 ++++++++++-----
 modules/sd_models.py                  |  3 ++
 modules/sd_samplers_cfg_denoiser.py   |  6 +++-
 modules/sd_samplers_common.py         | 40 ++++++++++++++----------
 modules/sd_samplers_kdiffusion.py     |  3 +-
 modules/shared_items.py               |  4 +--
 modules/shared_options.py             |  2 --
 modules/ui.py                         | 58 ++++++++++++++++++---------------
 modules/ui_components.py              | 18 ++++++++---
 style.css                             | 32 +++++++++++--------
 12 files changed, 188 insertions(+), 79 deletions(-)
 create mode 100644 modules/processing_scripts/refiner.py

diff --git a/modules/processing.py b/modules/processing.py
index 131c4c3c..5996cbac 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -373,9 +373,10 @@ class StableDiffusionProcessing:
         negative_prompts = prompt_parser.SdConditioning(self.negative_prompts, width=self.width, height=self.height, is_negative_prompt=True)
 
         sampler_config = sd_samplers.find_sampler_config(self.sampler_name)
-        self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1
-        self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data)
-        self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, self.steps * self.step_multiplier, [self.cached_c], self.extra_network_data)
+        total_steps = sampler_config.total_steps(self.steps) if sampler_config else self.steps
+        self.step_multiplier = total_steps // self.steps
+        self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data)
+        self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data)
 
     def get_conds(self):
         return self.c, self.uc
@@ -579,8 +580,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Size": f"{p.width}x{p.height}",
         "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
         "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
-        "VAE hash": sd_vae.get_loaded_vae_hash() if opts.add_model_hash_to_info else None,
-        "VAE": sd_vae.get_loaded_vae_name() if opts.add_model_name_to_info else None,
+        "VAE hash": p.loaded_vae_hash if opts.add_model_hash_to_info else None,
+        "VAE": p.loaded_vae_name if opts.add_model_name_to_info else None,
         "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
         "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
         "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
@@ -669,6 +670,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     if p.tiling is None:
         p.tiling = opts.tiling
 
+    p.loaded_vae_name = sd_vae.get_loaded_vae_name()
+    p.loaded_vae_hash = sd_vae.get_loaded_vae_hash()
+
     modules.sd_hijack.model_hijack.apply_circular(p.tiling)
     modules.sd_hijack.model_hijack.clear_comments()
 
@@ -1188,8 +1192,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         hr_prompts = prompt_parser.SdConditioning(self.hr_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y)
         hr_negative_prompts = prompt_parser.SdConditioning(self.hr_negative_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y, is_negative_prompt=True)
 
-        self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data)
-        self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)
+        sampler_config = sd_samplers.find_sampler_config(self.hr_sampler_name or self.sampler_name)
+        steps = self.hr_second_pass_steps or self.steps
+        total_steps = sampler_config.total_steps(steps) if sampler_config else steps
+
+        self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, total_steps, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data)
+        self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, total_steps, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)
 
     def setup_conds(self):
         super().setup_conds()
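The conditioning-cache change above exists because second-order samplers call the denoiser twice per sampling step, so caches keyed on the UI step count alone would be wrong for them. A small sketch of the arithmetic, assuming only the second_order flag from the sampler config:

    def total_steps(steps: int, second_order: bool) -> int:
        # a second-order sampler makes two denoiser calls per sampling step
        return steps * 2 if second_order else steps

    steps = 20
    calls = total_steps(steps, second_order=True)
    step_multiplier = calls // steps
    print(calls, step_multiplier)  # 40 2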
diff --git a/modules/processing_scripts/refiner.py b/modules/processing_scripts/refiner.py
new file mode 100644
index 00000000..5a82991a
--- /dev/null
+++ b/modules/processing_scripts/refiner.py
@@ -0,0 +1,55 @@
+import gradio as gr
+
+from modules import scripts, sd_models
+from modules.ui_common import create_refresh_button
+from modules.ui_components import InputAccordion
+
+
+class ScriptRefiner(scripts.Script):
+    section = "accordions"
+    create_group = False
+
+    def __init__(self):
+        pass
+
+    def title(self):
+        return "Refiner"
+
+    def show(self, is_img2img):
+        return scripts.AlwaysVisible
+
+    def ui(self, is_img2img):
+        with InputAccordion(False, label="Refiner", elem_id=self.elem_id("enable")) as enable_refiner:
+            with gr.Row():
+                refiner_checkpoint = gr.Dropdown(label='Checkpoint', elem_id=self.elem_id("checkpoint"), choices=sd_models.checkpoint_tiles(), value='', tooltip="switch to another model in the middle of generation")
+                create_refresh_button(refiner_checkpoint, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, self.elem_id("checkpoint_refresh"))
+
+                refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the swtch to refiner model should happen; 1=never, 0.5=switch in the middle of generation")
+
+        def lookup_checkpoint(title):
+            info = sd_models.get_closet_checkpoint_match(title)
+            return None if info is None else info.title
+
+        self.infotext_fields = [
+            (enable_refiner, lambda d: 'Refiner' in d),
+            (refiner_checkpoint, lambda d: lookup_checkpoint(d.get('Refiner'))),
+            (refiner_switch_at, 'Refiner switch at'),
+        ]
+
+        return enable_refiner, refiner_checkpoint, refiner_switch_at
+
+    def before_process(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at):
+        # the actual implementation is in sd_samplers_common.py, apply_refiner
+
+        p.refiner_checkpoint_info = None
+        p.refiner_switch_at = None
+
+        if not enable_refiner or refiner_checkpoint in (None, "", "None"):
+            return
+
+        refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(refiner_checkpoint)
+        if refiner_checkpoint_info is None:
+            raise Exception(f'Could not find checkpoint with name {refiner_checkpoint}')
+
+        p.refiner_checkpoint_info = refiner_checkpoint_info
+        p.refiner_switch_at = refiner_switch_at
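The infotext_fields list above is how pasted generation parameters repopulate the UI: each pair maps a component to either an infotext key or a callable over the parsed parameter dict. A rough, self-contained illustration of that lookup (the parameter values here are invented for the example):

    params = {'Refiner': 'some_refiner_model', 'Refiner switch at': '0.8'}

    infotext_fields = [
        ('enable',    lambda d: 'Refiner' in d),  # checkbox: on iff the key is present
        ('switch_at', 'Refiner switch at'),       # plain key lookup
    ]

    for name, spec in infotext_fields:
        value = spec(params) if callable(spec) else params.get(spec)
        print(name, '=', value)
    # enable = True
    # switch_at = 0.8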
diff --git a/modules/scripts.py b/modules/scripts.py
index f7d060aa..51da732a 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -37,7 +37,10 @@ class Script:
     is_img2img = False
 
     group = None
-    """A gr.Group component that has all script's UI inside it"""
+    """A gr.Group component that has all script's UI inside it."""
+
+    create_group = True
+    """If False, for alwayson scripts, a group component will not be created."""
 
     infotext_fields = None
     """if set in ui(), this is a list of pairs of gradio component + text; the text will be used when
@@ -232,6 +235,7 @@ class Script:
         """
 
         pass
 
+
 current_basedir = paths.script_path
 
@@ -250,7 +254,7 @@ postprocessing_scripts_data = []
 ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"])
 
 
-def list_scripts(scriptdirname, extension):
+def list_scripts(scriptdirname, extension, *, include_extensions=True):
     scripts_list = []
 
     basedir = os.path.join(paths.script_path, scriptdirname)
@@ -258,8 +262,9 @@ def list_scripts(scriptdirname, extension):
         for filename in sorted(os.listdir(basedir)):
             scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename)))
 
-    for ext in extensions.active():
-        scripts_list += ext.list_files(scriptdirname, extension)
+    if include_extensions:
+        for ext in extensions.active():
+            scripts_list += ext.list_files(scriptdirname, extension)
 
     scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
 
@@ -288,7 +293,7 @@ def load_scripts():
     postprocessing_scripts_data.clear()
     script_callbacks.clear_callbacks()
 
-    scripts_list = list_scripts("scripts", ".py")
+    scripts_list = list_scripts("scripts", ".py") + list_scripts("modules/processing_scripts", ".py", include_extensions=False)
 
     syspath = sys.path
 
@@ -429,10 +434,13 @@ class ScriptRunner:
             if script.alwayson and script.section != section:
                 continue
 
-            with gr.Group(visible=script.alwayson) as group:
-                self.create_script_ui(script)
+            if script.create_group:
+                with gr.Group(visible=script.alwayson) as group:
+                    self.create_script_ui(script)
 
-            script.group = group
+                script.group = group
+            else:
+                self.create_script_ui(script)
 
     def prepare_ui(self):
         self.inputs = [None]
diff --git a/modules/sd_models.py b/modules/sd_models.py
index a178adca..f6fbdcd6 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -147,6 +147,9 @@ re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")
 
 
 def get_closet_checkpoint_match(search_string):
+    if not search_string:
+        return None
+
     checkpoint_info = checkpoint_aliases.get(search_string, None)
     if checkpoint_info is not None:
         return checkpoint_info
diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py
index a532e013..113425b2 100644
--- a/modules/sd_samplers_cfg_denoiser.py
+++ b/modules/sd_samplers_cfg_denoiser.py
@@ -45,6 +45,11 @@ class CFGDenoiser(torch.nn.Module):
         self.nmask = None
         self.init_latent = None
         self.steps = None
+        """number of steps as specified by user in UI"""
+
+        self.total_steps = None
+        """expected number of calls to denoiser calculated from self.steps and specifics of the selected sampler"""
+
         self.step = 0
         self.image_cfg_scale = None
         self.padded_cond_uncond = False
@@ -56,7 +61,6 @@ class CFGDenoiser(torch.nn.Module):
     def inner_model(self):
         raise NotImplementedError()
 
-
     def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
         denoised_uncond = x_out[-uncond.shape[0]:]
         denoised = torch.clone(denoised_uncond)
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 35c4d657..85f3c7e0 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -7,7 +7,16 @@ from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, s
 from modules.shared import opts, state
 import k_diffusion.sampling
 
-SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
+
+SamplerDataTuple = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
+
+
+class SamplerData(SamplerDataTuple):
+    def total_steps(self, steps):
+        if self.options.get("second_order", False):
+            steps = steps * 2
+
+        return steps
 
 
 def setup_img2img_steps(p, steps=None):
@@ -131,31 +140,26 @@ def replace_torchsde_browinan():
 
 replace_torchsde_browinan()
 
 
-def apply_refiner(sampler):
-    completed_ratio = sampler.step / sampler.steps
+def apply_refiner(cfg_denoiser):
+    completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps
+    refiner_switch_at = cfg_denoiser.p.refiner_switch_at
+    refiner_checkpoint_info = cfg_denoiser.p.refiner_checkpoint_info
 
-    if completed_ratio <= shared.opts.sd_refiner_switch_at:
+    if refiner_switch_at is not None and completed_ratio <= refiner_switch_at:
         return False
 
-    if shared.opts.sd_refiner_checkpoint == "None":
+    if refiner_checkpoint_info is None or shared.sd_model.sd_checkpoint_info == refiner_checkpoint_info:
         return False
 
-    if shared.sd_model.sd_checkpoint_info.title == shared.opts.sd_refiner_checkpoint:
-        return False
-
-    refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(shared.opts.sd_refiner_checkpoint)
-    if refiner_checkpoint_info is None:
-        raise Exception(f'Could not find checkpoint with name {shared.opts.sd_refiner_checkpoint}')
-
-    sampler.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title
-    sampler.p.extra_generation_params['Refiner switch at'] = shared.opts.sd_refiner_switch_at
+    cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title
+    cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at
 
     with sd_models.SkipWritingToConfig():
         sd_models.reload_model_weights(info=refiner_checkpoint_info)
 
     devices.torch_gc()
-
-    sampler.p.setup_conds()
-    sampler.update_inner_model()
+    cfg_denoiser.p.setup_conds()
+    cfg_denoiser.update_inner_model()
 
     return True
 
@@ -192,7 +196,7 @@ class Sampler:
         self.sampler_noises = None
         self.stop_at = None
         self.eta = None
-        self.config = None # set by the function calling the constructor
+        self.config: SamplerData = None # set by the function calling the constructor
         self.last_latent = None
         self.s_min_uncond = None
         self.s_churn = 0.0
@@ -208,6 +212,7 @@ class Sampler:
         self.p = None
         self.model_wrap_cfg = None
         self.sampler_extra_args = None
+        self.options = {}
 
     def callback_state(self, d):
         step = d['i']
@@ -220,6 +225,7 @@ class Sampler:
 
     def launch_sampling(self, steps, func):
         self.model_wrap_cfg.steps = steps
+        self.model_wrap_cfg.total_steps = self.config.total_steps(steps)
         state.sampling_steps = steps
         state.sampling_step = 0
 
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index d10fe12e..1f8e9c4b 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -64,9 +64,10 @@ class CFGDenoiserKDiffusion(sd_samplers_cfg_denoiser.CFGDenoiser):
 
 
 class KDiffusionSampler(sd_samplers_common.Sampler):
-    def __init__(self, funcname, sd_model):
+    def __init__(self, funcname, sd_model, options=None):
         super().__init__(funcname)
 
+        self.options = options or {}
         self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
 
         self.model_wrap_cfg = CFGDenoiserKDiffusion(self)
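The rewritten apply_refiner measures progress in denoiser calls (step / total_steps) rather than in UI steps, which keeps the switch point correct for samplers that denoise more times than the step count suggests. A compact sketch of just the switching decision:

    def should_switch(step: int, total_steps: int, switch_at) -> bool:
        # switch once the completed fraction of denoiser calls passes switch_at;
        # switch_at of None means the refiner is disabled
        if switch_at is None:
            return False
        return step / total_steps > switch_at

    print(should_switch(step=15, total_steps=40, switch_at=0.8))  # False (0.375)
    print(should_switch(step=33, total_steps=40, switch_at=0.8))  # True  (0.825)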
diff --git a/modules/shared_items.py b/modules/shared_items.py
index e4ec40a8..754166d2 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -69,8 +69,8 @@ def reload_hypernetworks():
 ui_reorder_categories_builtin_items = [
     "inpaint",
     "sampler",
+    "accordions",
     "checkboxes",
-    "hires_fix",
     "dimensions",
     "cfg",
     "seed",
@@ -86,7 +86,7 @@ def ui_reorder_categories():
     sections = {}
     for script in scripts.scripts_txt2img.scripts + scripts.scripts_img2img.scripts:
-        if isinstance(script.section, str):
+        if isinstance(script.section, str) and script.section not in ui_reorder_categories_builtin_items:
             sections[script.section] = 1
 
     yield from sections
diff --git a/modules/shared_options.py b/modules/shared_options.py
index 1e5b64ea..9ae51f18 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -140,8 +140,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
     "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
     "tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"),
-    "sd_refiner_checkpoint": OptionInfo("None", "Refiner checkpoint", gr.Dropdown, lambda: {"choices": ["None"] + shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints, infotext="Refiner").info("switch to another model in the middle of generation"),
-    "sd_refiner_switch_at": OptionInfo(1.0, "Refiner switch at", gr.Slider, {"minimum": 0.01, "maximum": 1.0, "step": 0.01}, infotext='Refiner switch at').info("fraction of sampling steps when the swtch to refiner model should happen; 1=never, 0.5=switch in the middle of generation"),
 }))
 
 options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
diff --git a/modules/ui.py b/modules/ui.py
index 05292734..3321b94d 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -438,35 +438,38 @@ def create_ui():
                         with FormRow(elem_classes="checkboxes-row", variant="compact"):
                             pass
 
-                elif category == "hires_fix":
-                    with InputAccordion(False, label="Hires. fix") as enable_hr:
-                        with enable_hr.extra():
-                            hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0)
+                elif category == "accordions":
+                    with gr.Row(elem_id="txt2img_accordions", elem_classes="accordions"):
+                        with InputAccordion(False, label="Hires. fix", elem_id="txt2img_hr") as enable_hr:
+                            with enable_hr.extra():
+                                hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0)
fix", elem_id="txt2img_hr") as enable_hr: + with enable_hr.extra(): + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0) - with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") - hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") - create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") + hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") + create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") - hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") + hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), 
value="Use same sampler") - with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: - with gr.Column(scale=80): - with gr.Row(): - hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) - with gr.Column(scale=80): - with gr.Row(): - hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: + with gr.Column(scale=80): + with gr.Row(): + hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"]) + with gr.Column(scale=80): + with gr.Row(): + hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"]) + + scripts.scripts_txt2img.setup_ui_for_section(category) elif category == "batch": if not opts.dimensions_and_batch_together: @@ -482,7 +485,7 @@ def create_ui(): with FormGroup(elem_id="txt2img_script_container"): custom_inputs = scripts.scripts_txt2img.setup_ui() - else: + if category not in {"accordions"}: scripts.scripts_txt2img.setup_ui_for_section(category) hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] @@ -794,6 +797,10 @@ def create_ui(): with FormRow(elem_classes="checkboxes-row", variant="compact"): pass + elif category == "accordions": + with gr.Row(elem_id="img2img_accordions", elem_classes="accordions"): + scripts.scripts_img2img.setup_ui_for_section(category) + elif category == "batch": if not opts.dimensions_and_batch_together: with FormRow(elem_id="img2img_column_batch"): @@ -836,7 +843,8 @@ def create_ui(): inputs=[], outputs=[inpaint_controls, mask_alpha], ) - else: + + if category not in {"accordions"}: scripts.scripts_img2img.setup_ui_for_section(category) img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) diff --git a/modules/ui_components.py b/modules/ui_components.py index bfe2fbd9..d08b2b99 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -87,13 +87,23 @@ class InputAccordion(gr.Checkbox): self.accordion_id = f"input-accordion-{InputAccordion.global_index}" InputAccordion.global_index += 1 - kwargs['elem_id'] = self.accordion_id + "-checkbox" - kwargs['visible'] = False - super().__init__(value, **kwargs) + kwargs_checkbox = { + **kwargs, + "elem_id": f"{self.accordion_id}-checkbox", + "visible": False, + } + super().__init__(value, **kwargs_checkbox) self.change(fn=None, _js='function(checked){ inputAccordionChecked("' + self.accordion_id + '", checked); }', inputs=[self]) - self.accordion = gr.Accordion(kwargs.get('label', 'Accordion'), open=value, elem_id=self.accordion_id, elem_classes=['input-accordion']) + kwargs_accordion = { + **kwargs, + "elem_id": self.accordion_id, + "label": kwargs.get('label', 'Accordion'), + "elem_classes": 
diff --git a/modules/ui_components.py b/modules/ui_components.py
index bfe2fbd9..d08b2b99 100644
--- a/modules/ui_components.py
+++ b/modules/ui_components.py
@@ -87,13 +87,23 @@ class InputAccordion(gr.Checkbox):
         self.accordion_id = f"input-accordion-{InputAccordion.global_index}"
         InputAccordion.global_index += 1
 
-        kwargs['elem_id'] = self.accordion_id + "-checkbox"
-        kwargs['visible'] = False
-        super().__init__(value, **kwargs)
+        kwargs_checkbox = {
+            **kwargs,
+            "elem_id": f"{self.accordion_id}-checkbox",
+            "visible": False,
+        }
+        super().__init__(value, **kwargs_checkbox)
 
         self.change(fn=None, _js='function(checked){ inputAccordionChecked("' + self.accordion_id + '", checked); }', inputs=[self])
 
-        self.accordion = gr.Accordion(kwargs.get('label', 'Accordion'), open=value, elem_id=self.accordion_id, elem_classes=['input-accordion'])
+        kwargs_accordion = {
+            **kwargs,
+            "elem_id": self.accordion_id,
+            "label": kwargs.get('label', 'Accordion'),
+            "elem_classes": ['input-accordion'],
+            "open": value,
+        }
+        self.accordion = gr.Accordion(**kwargs_accordion)
 
     def extra(self):
         """Allows you to put something into the label of the accordion.
diff --git a/style.css b/style.css
index 4cdce87c..260b1056 100644
--- a/style.css
+++ b/style.css
@@ -166,16 +166,6 @@ a{
     color: var(--button-secondary-text-color-hover);
 }
 
-.checkboxes-row{
-    margin-bottom: 0.5em;
-    margin-left: 0em;
-}
-.checkboxes-row > div{
-    flex: 0;
-    white-space: nowrap;
-    min-width: auto !important;
-}
-
 button.custom-button{
     border-radius: var(--button-large-radius);
     padding: var(--button-large-padding);
@@ -352,7 +342,7 @@ div.block.gradio-accordion {
 }
 
 div.dimensions-tools{
-    min-width: 0 !important;
+    min-width: 1.6em !important;
     max-width: fit-content;
     flex-direction: column;
     place-content: center;
@@ -1012,10 +1002,28 @@ div.block.gradio-box.popup-dialog > div:last-child, .popup-dialog > div:last-chi
 }
 
 div.block.input-accordion{
-    margin-bottom: 0.4em;
+
 }
 
 .input-accordion-extra{
     flex: 0 0 auto !important;
     margin: 0 0.5em 0 auto;
 }
+
+div.accordions > div.input-accordion{
+    min-width: fit-content !important;
+}
+
+div.accordions > div.gradio-accordion .label-wrap span{
+    white-space: nowrap;
+    margin-right: 0.25em;
+}
+
+div.accordions{
+    gap: 0.5em;
+}
+
+div.accordions > div.input-accordion.input-accordion-open{
+    flex: 1 auto;
+}
+
-- 
cgit v1.2.1
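InputAccordion is a checkbox whose accordion header doubles as the toggle, which is what lets Refiner and Hires. fix sit side by side as collapsible, enableable panels. A hedged usage sketch; it only runs inside the webui codebase, where modules.ui_components is importable:

    import gradio as gr
    from modules.ui_components import InputAccordion  # webui-internal component

    with gr.Blocks() as demo:
        # the header checkbox is the component's value; the open/closed state
        # follows it through the inputAccordionChecked() JS hook
        with InputAccordion(False, label="Refiner") as enable:
            switch_at = gr.Slider(minimum=0.01, maximum=1.0, value=0.8, step=0.01, label="Switch at")

        enable.change(fn=lambda on: print("refiner enabled:", on), inputs=[enable], outputs=[])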
From b293ed30610c040e621e1840d63047ae298f0650 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 12 Aug 2023 12:54:32 +0300
Subject: make it possible to use hires fix together with refiner

---
 modules/processing.py                 | 6 ++++++
 modules/processing_scripts/refiner.py | 2 +-
 modules/sd_samplers_common.py         | 3 +++
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/modules/processing.py b/modules/processing.py
index 5996cbac..6ad105d7 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1200,6 +1200,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, total_steps, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)
 
     def setup_conds(self):
+        if self.is_hr_pass:
+            # if we are in hr pass right now, the call is being made from the refiner, and we don't need to setup firstpass cons or switch model
+            self.hr_c = None
+            self.calculate_hr_conds()
+            return
+
         super().setup_conds()
 
         self.hr_uc = None
diff --git a/modules/processing_scripts/refiner.py b/modules/processing_scripts/refiner.py
index 5a82991a..773ec5d0 100644
--- a/modules/processing_scripts/refiner.py
+++ b/modules/processing_scripts/refiner.py
@@ -24,7 +24,7 @@ class ScriptRefiner(scripts.Script):
                 refiner_checkpoint = gr.Dropdown(label='Checkpoint', elem_id=self.elem_id("checkpoint"), choices=sd_models.checkpoint_tiles(), value='', tooltip="switch to another model in the middle of generation")
                 create_refresh_button(refiner_checkpoint, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, self.elem_id("checkpoint_refresh"))
 
-            refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the swtch to refiner model should happen; 1=never, 0.5=switch in the middle of generation")
+            refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation")
 
         def lookup_checkpoint(title):
             info = sd_models.get_closet_checkpoint_match(title)
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 85f3c7e0..40c7aae0 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -151,6 +151,9 @@ def apply_refiner(cfg_denoiser):
     if refiner_checkpoint_info is None or shared.sd_model.sd_checkpoint_info == refiner_checkpoint_info:
         return False
 
+    if getattr(cfg_denoiser.p, "enable_hr", False) and not cfg_denoiser.p.is_hr_pass:
+        return False
+
     cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title
     cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at
-- 
cgit v1.2.1
From c8d453e91561e57fbb76119f688a5eae9d1a6aef Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sat, 12 Aug 2023 22:14:19 +0900
Subject: bring back csv mode

---
 scripts/xyz_grid.py | 44 ++++++++++++++++++++++++++++----------------
 1 file changed, 28 insertions(+), 16 deletions(-)

diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index d37b428f..80cb3cfb 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -182,6 +182,10 @@ def str_permutations(x):
     """dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
     return x
 
+def list_to_csv_string(data_list):
+    with StringIO() as o:
+        csv.writer(o).writerow(data_list)
+        return o.getvalue().strip()
 
 class AxisOption:
     def __init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None):
@@ -377,6 +381,8 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d
 re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
 re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
 
+use_dropdown = True
+
 
 class Script(scripts.Script):
     def title(self):
@@ -430,28 +436,34 @@ class Script(scripts.Script):
             xz_swap_args = [x_type, x_values, x_values_dropdown, z_type, z_values, z_values_dropdown]
             swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args)
 
-        def fill(x_type):
-            axis = self.current_axis_options[x_type]
-            return axis.choices() if axis.choices else gr.update()
+        def fill(axis_type):
+            axis = self.current_axis_options[axis_type]
+            if axis.choices:
+                if use_dropdown:
+                    return gr.update(), axis.choices()
+                else:
+                    return list_to_csv_string(axis.choices()), gr.update()
+            else:
+                return gr.update(), gr.update()
 
-        fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values_dropdown])
-        fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values_dropdown])
-        fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values_dropdown])
+        fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values, x_values_dropdown])
+        fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values, y_values_dropdown])
+        fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values, z_values_dropdown])
 
-        def select_axis(axis_type,axis_values_dropdown):
+        def select_axis(axis_type, axis_values_dropdown):
             choices = self.current_axis_options[axis_type].choices
             has_choices = choices is not None
             current_values = axis_values_dropdown
             if has_choices:
                 choices = choices()
-                if isinstance(current_values,str):
+                if isinstance(current_values, str):
                     current_values = current_values.split(",")
                 current_values = list(filter(lambda x: x in choices, current_values))
-            return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices if has_choices else None,visible=has_choices,value=current_values)
+            return gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices or not use_dropdown), gr.update(choices=choices if has_choices else None, visible=has_choices and use_dropdown, value=current_values)
 
-        x_type.change(fn=select_axis, inputs=[x_type,x_values_dropdown], outputs=[fill_x_button,x_values,x_values_dropdown])
-        y_type.change(fn=select_axis, inputs=[y_type,y_values_dropdown], outputs=[fill_y_button,y_values,y_values_dropdown])
-        z_type.change(fn=select_axis, inputs=[z_type,z_values_dropdown], outputs=[fill_z_button,z_values,z_values_dropdown])
+        x_type.change(fn=select_axis, inputs=[x_type, x_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown])
+        y_type.change(fn=select_axis, inputs=[y_type, y_values_dropdown], outputs=[fill_y_button, y_values, y_values_dropdown])
+        z_type.change(fn=select_axis, inputs=[z_type, z_values_dropdown], outputs=[fill_z_button, z_values, z_values_dropdown])
 
         def get_dropdown_update_from_params(axis,params):
             val_key = f"{axis} Values"
@@ -484,7 +496,7 @@ class Script(scripts.Script):
         if opt.label == 'Nothing':
             return [0]
 
-        if opt.choices is not None:
+        if opt.choices is not None and use_dropdown:
            valslist = vals_dropdown
        else:
            valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x]
@@ -545,17 +557,17 @@ class Script(scripts.Script):
             return valslist
 
         x_opt = self.current_axis_options[x_type]
-        if x_opt.choices is not None:
+        if x_opt.choices is not None and use_dropdown:
             x_values = ",".join(x_values_dropdown)
         xs = process_axis(x_opt, x_values, x_values_dropdown)
 
         y_opt = self.current_axis_options[y_type]
-        if y_opt.choices is not None:
+        if y_opt.choices is not None and use_dropdown:
             y_values = ",".join(y_values_dropdown)
         ys = process_axis(y_opt, y_values, y_values_dropdown)
 
         z_opt = self.current_axis_options[z_type]
-        if z_opt.choices is not None:
+        if z_opt.choices is not None and use_dropdown:
             z_values = ",".join(z_values_dropdown)
         zs = process_axis(z_opt, z_values, z_values_dropdown)
-- 
cgit v1.2.1
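list_to_csv_string is the inverse of the csv.reader-based parsing the script already uses for textbox values, so axis values containing commas survive the dropdown-to-textbox round trip when dropdown mode is off. A quick demonstration:

    import csv
    from io import StringIO

    def list_to_csv_string(data_list):
        with StringIO() as o:
            csv.writer(o).writerow(data_list)
            return o.getvalue().strip()

    values = ['DPM++ 2M Karras', 'Euler a', 'model, with comma']
    s = list_to_csv_string(values)
    print(s)  # DPM++ 2M Karras,Euler a,"model, with comma"

    # parsing it back recovers the original list, commas and all
    print(next(csv.reader(StringIO(s))))  # ['DPM++ 2M Karras', 'Euler a', 'model, with comma']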
From d20eb11c9e210eb92b2c4821141f9647bfaaa9d2 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sat, 12 Aug 2023 22:24:00 +0900
Subject: format

---
 scripts/xyz_grid.py | 44 ++++++++++++++++++++++++--------------------
 1 file changed, 24 insertions(+), 20 deletions(-)

diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 80cb3cfb..d8101dca 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -175,18 +175,22 @@ def do_nothing(p, x, xs):
 def format_nothing(p, opt, x):
     return ""
 
+
 def format_remove_path(p, opt, x):
     return os.path.basename(x)
 
+
 def str_permutations(x):
     """dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
     return x
 
+
 def list_to_csv_string(data_list):
     with StringIO() as o:
         csv.writer(o).writerow(data_list)
         return o.getvalue().strip()
 
+
 class AxisOption:
     def __init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None):
         self.label = label
@@ -203,6 +207,7 @@ class AxisOptionImg2Img(AxisOption):
         super().__init__(*args, **kwargs)
         self.is_img2img = True
 
+
 class AxisOptionTxt2Img(AxisOption):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
@@ -290,11 +295,10 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
         cell_size = (processed_result.width, processed_result.height)
         if processed_result.images[0] is not None:
             cell_mode = processed_result.images[0].mode
-            #This corrects size in case of batches:
+            # This corrects size in case of batches:
             cell_size = processed_result.images[0].size
         processed_result.images[idx] = Image.new(cell_mode, cell_size)
 
-
     if first_axes_processed == 'x':
         for ix, x in enumerate(xs):
             if second_axes_processed == 'y':
@@ -352,9 +356,9 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
         if draw_legend:
             z_grid = images.draw_grid_annotations(z_grid, sub_grid_size[0], sub_grid_size[1], title_texts, [[images.GridAnnotation()]])
         processed_result.images.insert(0, z_grid)
-        #TODO: Deeper aspects of the program rely on grid info being misaligned between metadata arrays, which is not ideal.
-        #processed_result.all_prompts.insert(0, processed_result.all_prompts[0])
-        #processed_result.all_seeds.insert(0, processed_result.all_seeds[0])
+        # TODO: Deeper aspects of the program rely on grid info being misaligned between metadata arrays, which is not ideal.
+        # processed_result.all_prompts.insert(0, processed_result.all_prompts[0])
+        # processed_result.all_seeds.insert(0, processed_result.all_seeds[0])
         processed_result.infotexts.insert(0, processed_result.infotexts[0])
 
     return processed_result
@@ -396,19 +400,19 @@ class Script(scripts.Script):
             with gr.Row():
                 x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type"))
                 x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values"))
-                x_values_dropdown = gr.Dropdown(label="X values",visible=False,multiselect=True,interactive=True)
+                x_values_dropdown = gr.Dropdown(label="X values", visible=False, multiselect=True, interactive=True)
                 fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False)
 
             with gr.Row():
                 y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type"))
                 y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values"))
-                y_values_dropdown = gr.Dropdown(label="Y values",visible=False,multiselect=True,interactive=True)
+                y_values_dropdown = gr.Dropdown(label="Y values", visible=False, multiselect=True, interactive=True)
                 fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False)
 
             with gr.Row():
                 z_type = gr.Dropdown(label="Z type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type"))
                 z_values = gr.Textbox(label="Z values", lines=1, elem_id=self.elem_id("z_values"))
-                z_values_dropdown = gr.Dropdown(label="Z values",visible=False,multiselect=True,interactive=True)
+                z_values_dropdown = gr.Dropdown(label="Z values", visible=False, multiselect=True, interactive=True)
                 fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False)
 
             with gr.Row(variant="compact", elem_id="axis_options"):
@@ -465,22 +469,22 @@ class Script(scripts.Script):
         y_type.change(fn=select_axis, inputs=[y_type, y_values_dropdown], outputs=[fill_y_button, y_values, y_values_dropdown])
         z_type.change(fn=select_axis, inputs=[z_type, z_values_dropdown], outputs=[fill_z_button, z_values, z_values_dropdown])
get_dropdown_update_from_params(axis,params): + def get_dropdown_update_from_params(axis, params): val_key = f"{axis} Values" - vals = params.get(val_key,"") + vals = params.get(val_key, "") valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] - return gr.update(value = valslist) + return gr.update(value=valslist) self.infotext_fields = ( (x_type, "X Type"), (x_values, "X Values"), - (x_values_dropdown, lambda params:get_dropdown_update_from_params("X",params)), + (x_values_dropdown, lambda params: get_dropdown_update_from_params("X", params)), (y_type, "Y Type"), (y_values, "Y Values"), - (y_values_dropdown, lambda params:get_dropdown_update_from_params("Y",params)), + (y_values_dropdown, lambda params: get_dropdown_update_from_params("Y", params)), (z_type, "Z Type"), (z_values, "Z Values"), - (z_values_dropdown, lambda params:get_dropdown_update_from_params("Z",params)), + (z_values_dropdown, lambda params: get_dropdown_update_from_params("Z", params)), ) return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] @@ -515,8 +519,8 @@ class Script(scripts.Script): valslist_ext += list(range(start, end, step)) elif mc is not None: start = int(mc.group(1)) - end = int(mc.group(2)) - num = int(mc.group(3)) if mc.group(3) is not None else 1 + end = int(mc.group(2)) + num = int(mc.group(3)) if mc.group(3) is not None else 1 valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()] else: @@ -537,8 +541,8 @@ class Script(scripts.Script): valslist_ext += np.arange(start, end + step, step).tolist() elif mc is not None: start = float(mc.group(1)) - end = float(mc.group(2)) - num = int(mc.group(3)) if mc.group(3) is not None else 1 + end = float(mc.group(2)) + num = int(mc.group(3)) if mc.group(3) is not None else 1 valslist_ext += np.linspace(start=start, stop=end, num=num).tolist() else: @@ -572,7 +576,7 @@ class Script(scripts.Script): zs = process_axis(z_opt, z_values, z_values_dropdown) # this could be moved to common code, but unlikely to be ever triggered anywhere else - Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes + Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes grid_mp = round(len(xs) * len(ys) * len(zs) * p.width * p.height / 1000000) assert grid_mp < opts.img_max_size_mp, f'Error: Resulting grid would be too large ({grid_mp} MPixels) (max configured size is {opts.img_max_size_mp} MPixels)' @@ -732,7 +736,7 @@ class Script(scripts.Script): # Auto-save main and sub-grids: grid_count = z_count + 1 if z_count > 1 else 1 for g in range(grid_count): - #TODO: See previous comment about intentional data misalignment. + # TODO: See previous comment about intentional data misalignment. 
+                # TODO: See previous comment about intentional data misalignment.
                 adj_g = g-1 if g > 0 else g
                 images.save_image(processed.images[g], p.outpath_grids, "xyz_grid", info=processed.infotexts[g], extension=opts.grid_format, prompt=processed.all_prompts[adj_g], seed=processed.all_seeds[adj_g], grid=True, p=processed)
-- cgit v1.2.1

From fd617fad006bdaeb8061fae0dda642da6499c3df Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sat, 12 Aug 2023 22:24:59 +0900
Subject: Redundant character escape '\]' in RegExp

---
 scripts/xyz_grid.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index d8101dca..0ed0f262 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -382,8 +382,8 @@ class SharedSettingsStackHelper(object):
 re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
 re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
-re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
-re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
+re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*])?\s*")
+re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*])?\s*")
 
 use_dropdown = True
-- cgit v1.2.1
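The removed '\]' escapes are indeed redundant: a closing bracket only needs escaping inside a character class, so the patterns behave identically after this patch. For readers skimming the series, these four regular expressions implement the X/Y/Z plot value syntax: "1-5" for a plain range, "1-5 (+2)" for a stepped range, and "1-10 [5]" for an evenly spaced count. A minimal, self-contained sketch of the count form follows; it assumes only re and numpy and mirrors, but is not, the script's own expansion code:

import re
import numpy as np

# Same pattern as re_range_count above: "start-end [count]", e.g. "1-10 [5]".
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*])?\s*")

def expand_int_range(token):
    # Expand "1-10 [5]" into evenly spaced integers; pass other tokens through unchanged.
    mc = re_range_count.fullmatch(token)
    if mc is None or mc.group(3) is None:
        return [token]
    start, end, num = int(mc.group(1)), int(mc.group(2)), int(mc.group(3))
    return [int(x) for x in np.linspace(start=start, stop=end, num=num)]

print(expand_int_range("1-10 [5]"))  # [1, 3, 5, 7, 10]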
From 6aa26a26d5beb317d708c4fa85c38056347ea5d3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 12 Aug 2023 16:47:39 +0300
Subject: change quicksettings items to have variable width

---
 style.css | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/style.css b/style.css
index 260b1056..f936e9a0 100644
--- a/style.css
+++ b/style.css
@@ -386,14 +386,17 @@ div#extras_scale_to_tab div.form{
 }
 
 #quicksettings > div, #quicksettings > fieldset{
-    max-width: 24em;
-    min-width: 24em;
-    width: 24em;
+    max-width: 36em;
+    width: fit-content;
+    flex: auto;
     padding: 0;
     border: none;
     box-shadow: none;
     background: none;
 }
+#quicksettings > div.gradio-dropdown{
+    min-width: 24em !important;
+}
 
 #settings{
     display: block;
-- cgit v1.2.1

From f131f84e13d6f1d06ffc6ac75c100357d0e7b3e7 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sat, 12 Aug 2023 23:12:33 +0900
Subject: dropdown mode checkbox

---
 scripts/xyz_grid.py | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 0ed0f262..81597ade 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -385,8 +385,6 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d
 re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*])?\s*")
 re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*])?\s*")
 
-use_dropdown = True
-
 
 class Script(scripts.Script):
     def title(self):
@@ -422,6 +420,8 @@ class Script(scripts.Script):
             with gr.Column():
                 include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images"))
                 include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids"))
+            with gr.Column():
+                use_dropdown = gr.Checkbox(label='use dropdown', value=True, elem_id=self.elem_id("use_dropdown"))
             with gr.Column():
                 margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size"))
@@ -443,7 +443,7 @@ class Script(scripts.Script):
         def fill(axis_type):
             axis = self.current_axis_options[axis_type]
             if axis.choices:
-                if use_dropdown:
+                if use_dropdown.value:
                     return gr.update(), axis.choices()
                 else:
                     return list_to_csv_string(axis.choices()), gr.update()
@@ -463,12 +463,21 @@ class Script(scripts.Script):
                 if isinstance(current_values, str):
                     current_values = current_values.split(",")
                 current_values = list(filter(lambda x: x in choices, current_values))
-            return gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices or not use_dropdown), gr.update(choices=choices if has_choices else None, visible=has_choices and use_dropdown, value=current_values)
+            return gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices or not use_dropdown.value), gr.update(choices=choices if has_choices else None, visible=has_choices and use_dropdown.value, value=current_values)
 
         x_type.change(fn=select_axis, inputs=[x_type, x_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown])
         y_type.change(fn=select_axis, inputs=[y_type, y_values_dropdown], outputs=[fill_y_button, y_values, y_values_dropdown])
         z_type.change(fn=select_axis, inputs=[z_type, z_values_dropdown], outputs=[fill_z_button, z_values, z_values_dropdown])
 
+        def change_choice_mode(_use_dropdown, x_type, x_values_dropdown, y_type, y_values_dropdown, z_type, z_values_dropdown):
+            use_dropdown.value = _use_dropdown
+            _fill_x_button, _x_values, _x_values_dropdown = select_axis(x_type, x_values_dropdown)
+            _fill_y_button, _y_values, _y_values_dropdown = select_axis(y_type, y_values_dropdown)
+            _fill_z_button, _z_values, _z_values_dropdown = select_axis(z_type, z_values_dropdown)
+            return _fill_x_button, _x_values, _x_values_dropdown, _fill_y_button, _y_values, _y_values_dropdown, _fill_z_button, _z_values, _z_values_dropdown
+
+        use_dropdown.change(fn=change_choice_mode, inputs=[use_dropdown, x_type, x_values_dropdown, y_type, y_values_dropdown, z_type, z_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown, fill_y_button, y_values, y_values_dropdown, fill_z_button, z_values, z_values_dropdown])
+
         def get_dropdown_update_from_params(axis, params):
             val_key = f"{axis} Values"
             vals = params.get(val_key, "")
@@ -487,9 +496,9 @@ class Script(scripts.Script):
             (z_values_dropdown, lambda params: get_dropdown_update_from_params("Z", params)),
         )
 
-        return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size]
+        return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size, use_dropdown]
 
-    def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size):
+    def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size, use_dropdown):
         if not no_fixed_seeds:
             modules.processing.fix_seed(p)
-- cgit v1.2.1

From 7a68ac661574dc64f510b0c4cd3403443ca2f875 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Sat, 12 Aug 2023
23:40:05 +0900 Subject: rename to csv mode --- scripts/xyz_grid.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 81597ade..67798202 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -421,7 +421,7 @@ class Script(scripts.Script): include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images")) include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids")) with gr.Column(): - use_dropdown = gr.Checkbox(label='use dropdown', value=True, elem_id=self.elem_id("use_dropdown")) + csv_mode = gr.Checkbox(label='CSV mode', value=False, elem_id=self.elem_id("CSV mode")) with gr.Column(): margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size")) @@ -443,10 +443,10 @@ class Script(scripts.Script): def fill(axis_type): axis = self.current_axis_options[axis_type] if axis.choices: - if use_dropdown.value: - return gr.update(), axis.choices() - else: + if csv_mode.value: return list_to_csv_string(axis.choices()), gr.update() + else: + return gr.update(), axis.choices() else: return gr.update(), gr.update() @@ -463,20 +463,20 @@ class Script(scripts.Script): if isinstance(current_values, str): current_values = current_values.split(",") current_values = list(filter(lambda x: x in choices, current_values)) - return gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices or not use_dropdown.value), gr.update(choices=choices if has_choices else None, visible=has_choices and use_dropdown.value, value=current_values) + return gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices or csv_mode.value), gr.update(choices=choices if has_choices else None, visible=has_choices and not csv_mode.value, value=current_values) x_type.change(fn=select_axis, inputs=[x_type, x_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown]) y_type.change(fn=select_axis, inputs=[y_type, y_values_dropdown], outputs=[fill_y_button, y_values, y_values_dropdown]) z_type.change(fn=select_axis, inputs=[z_type, z_values_dropdown], outputs=[fill_z_button, z_values, z_values_dropdown]) - def change_choice_mode(_use_dropdown, x_type, x_values_dropdown, y_type, y_values_dropdown, z_type, z_values_dropdown): - use_dropdown.value = _use_dropdown + def change_choice_mode(_csv_mode, x_type, x_values_dropdown, y_type, y_values_dropdown, z_type, z_values_dropdown): + csv_mode.value = _csv_mode _fill_x_button, _x_values, _x_values_dropdown = select_axis(x_type, x_values_dropdown) _fill_y_button, _y_values, _y_values_dropdown = select_axis(y_type, y_values_dropdown) _fill_z_button, _z_values, _z_values_dropdown = select_axis(z_type, z_values_dropdown) return _fill_x_button, _x_values, _x_values_dropdown, _fill_y_button, _y_values, _y_values_dropdown, _fill_z_button, _z_values, _z_values_dropdown - use_dropdown.change(fn=change_choice_mode, inputs=[use_dropdown, x_type, x_values_dropdown, y_type, y_values_dropdown, z_type, z_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown, fill_y_button, y_values, y_values_dropdown, fill_z_button, z_values, z_values_dropdown]) + csv_mode.change(fn=change_choice_mode, inputs=[csv_mode, x_type, x_values_dropdown, y_type, y_values_dropdown, z_type, z_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown, fill_y_button, y_values, 
y_values_dropdown, fill_z_button, z_values, z_values_dropdown]) def get_dropdown_update_from_params(axis, params): val_key = f"{axis} Values" @@ -496,9 +496,9 @@ class Script(scripts.Script): (z_values_dropdown, lambda params: get_dropdown_update_from_params("Z", params)), ) - return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size, use_dropdown] + return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size, csv_mode] - def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size, use_dropdown): + def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size, csv_mode): if not no_fixed_seeds: modules.processing.fix_seed(p) @@ -509,7 +509,7 @@ class Script(scripts.Script): if opt.label == 'Nothing': return [0] - if opt.choices is not None and use_dropdown: + if opt.choices is not None and not csv_mode: valslist = vals_dropdown else: valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] @@ -570,17 +570,17 @@ class Script(scripts.Script): return valslist x_opt = self.current_axis_options[x_type] - if x_opt.choices is not None and use_dropdown: + if x_opt.choices is not None and not csv_mode: x_values = ",".join(x_values_dropdown) xs = process_axis(x_opt, x_values, x_values_dropdown) y_opt = self.current_axis_options[y_type] - if y_opt.choices is not None and use_dropdown: + if y_opt.choices is not None and not csv_mode: y_values = ",".join(y_values_dropdown) ys = process_axis(y_opt, y_values, y_values_dropdown) z_opt = self.current_axis_options[z_type] - if z_opt.choices is not None and use_dropdown: + if z_opt.choices is not None and not csv_mode: z_values = ",".join(z_values_dropdown) zs = process_axis(z_opt, z_values, z_values_dropdown) -- cgit v1.2.1 From f0b72b81211881e083c84cff585380bb70d17271 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 12 Aug 2023 17:46:13 +0300 Subject: move seed, variation seed and variation seed strength to a single row, dump resize seed from UI add a way for scripts to register a callback for before/after just a single component's creation --- modules/img2img.py | 8 +-- modules/processing_scripts/seed.py | 95 +++++++++++++++++++++++++++++ modules/scripts.py | 77 +++++++++++++++++++++++- modules/shared_items.py | 1 + modules/txt2img.py | 8 +-- modules/ui.py | 119 ++++--------------------------------- style.css | 9 ++- 7 files changed, 191 insertions(+), 126 deletions(-) create mode 100644 modules/processing_scripts/seed.py diff --git a/modules/img2img.py b/modules/img2img.py index c7bbbac8..ac9fd3f8 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -116,7 +116,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal process_images(p) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, 
mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -166,12 +166,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s prompt=prompt, negative_prompt=negative_prompt, styles=prompt_styles, - seed=seed, - subseed=subseed, - subseed_strength=subseed_strength, - seed_resize_from_h=seed_resize_from_h, - seed_resize_from_w=seed_resize_from_w, - seed_enable_extras=seed_enable_extras, sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py new file mode 100644 index 00000000..e0911bbe --- /dev/null +++ b/modules/processing_scripts/seed.py @@ -0,0 +1,95 @@ +import json + +import gradio as gr + +from modules import scripts, ui, errors +from modules.shared import cmd_opts +from modules.ui_components import ToolButton + + +class ScriptSeed(scripts.ScriptBuiltin): + section = "seed" + create_group = False + + def __init__(self): + self.seed = None + self.reuse_seed = None + self.reuse_subseed = None + + def title(self): + return "Seed" + + def show(self, is_img2img): + return scripts.AlwaysVisible + + def ui(self, is_img2img): + with gr.Row(elem_id=self.elem_id("seed_row")): + if cmd_opts.use_textbox_seed: + self.seed = gr.Textbox(label='Seed', value="", elem_id=self.elem_id("seed")) + else: + self.seed = gr.Number(label='Seed', value=-1, elem_id=self.elem_id("seed"), precision=0) + + random_seed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), label='Random seed') + reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), label='Reuse seed') + + subseed = gr.Number(label='Variation seed', value=-1, elem_id=self.elem_id("subseed"), precision=0) + + random_subseed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_subseed")) + reuse_subseed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_subseed")) + + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, 
step=0.01, elem_id=self.elem_id("subseed_strength"))
+
+        random_seed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("seed") + "')}", show_progress=False, inputs=[], outputs=[])
+        random_subseed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("subseed") + "')}", show_progress=False, inputs=[], outputs=[])
+
+        self.infotext_fields = [
+            (self.seed, "Seed"),
+            (subseed, "Variation seed"),
+            (subseed_strength, "Variation seed strength"),
+        ]
+
+        self.on_after_component(lambda x: connect_reuse_seed(self.seed, reuse_seed, x.component, False), elem_id=f'generation_info_{self.tabname}')
+        self.on_after_component(lambda x: connect_reuse_seed(self.seed, reuse_subseed, x.component, True), elem_id=f'generation_info_{self.tabname}')
+
+        return self.seed, subseed, subseed_strength
+
+    def before_process(self, p, seed, subseed, subseed_strength):
+        p.seed = seed
+
+        if subseed_strength > 0:
+            p.subseed = subseed
+            p.subseed_strength = subseed_strength
+
+
+def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, is_subseed):
+    """ Connects a 'reuse (sub)seed' button's click event so that it copies last used
+    (sub)seed value from generation info to the seed field. If copying subseed and subseed strength
+    was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
+
+    def copy_seed(gen_info_string: str, index):
+        res = -1
+
+        try:
+            gen_info = json.loads(gen_info_string)
+            index -= gen_info.get('index_of_first_image', 0)
+
+            if is_subseed and gen_info.get('subseed_strength', 0) > 0:
+                all_subseeds = gen_info.get('all_subseeds', [-1])
+                res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
+            else:
+                all_seeds = gen_info.get('all_seeds', [-1])
+                res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
+
+        except json.decoder.JSONDecodeError:
+            if gen_info_string:
+                errors.report(f"Error parsing JSON generation info: {gen_info_string}")
+
+        return [res, gr.update()]
+
+    reuse_seed.click(
+        fn=copy_seed,
+        _js="(x, y) => [x, selected_gallery_index()]",
+        show_progress=False,
+        inputs=[generation_info, seed],
+        outputs=[seed, seed]
+    )
diff --git a/modules/scripts.py b/modules/scripts.py
index 51da732a..66fbec0d 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -3,6 +3,7 @@ import re
 import sys
 import inspect
 from collections import namedtuple
+from dataclasses import dataclass
 
 import gradio as gr
 
@@ -21,6 +22,11 @@ class PostprocessBatchListArgs:
         self.images = images
 
 
+@dataclass
+class OnComponent:
+    component: gr.blocks.Block
+
+
 class Script:
     name = None
     """script's internal name derived from title"""
@@ -35,6 +41,7 @@ class Script:
 
     is_txt2img = False
     is_img2img = False
+    tabname = None
 
     group = None
     """A gr.Group component that has all script's UI inside it."""
@@ -55,6 +62,12 @@ class Script:
     api_info = None
     """Generated value of type modules.api.models.ScriptInfo with information about the script for API"""
 
+    on_before_component_elem_id = []
+    """list of callbacks to be called before a component with an elem_id is created"""
+
+    on_after_component_elem_id = []
+    """list of callbacks to be called after a component with an elem_id is created"""
+
     def title(self):
         """this function should return the title of the script. This is what will be displayed in the dropdown menu."""
 
@@ -215,6 +228,24 @@ class Script:
 
         pass
 
+    def on_before_component(self, callback, *, elem_id):
+        """
+        Calls callback before a component is created. The callback function is called with a single argument of type OnComponent.
+
+        This function is an alternative to before_component in that it also allows code to run before a component is created, but
+        it doesn't need to be called for every created component - just for the one you need.
+        """
+
+        self.on_before_component_elem_id.append((elem_id, callback))
+
+    def on_after_component(self, callback, *, elem_id):
+        """
+        Calls callback after a component is created. The callback function is called with a single argument of type OnComponent.
+        """
+
+        self.on_after_component_elem_id.append((elem_id, callback))
+
+
     def describe(self):
         """unused"""
         return ""
@@ -236,6 +267,17 @@ class Script:
 
         pass
 
 
+class ScriptBuiltin(Script):
+
+    def elem_id(self, item_id):
+        """helper function to generate id for a HTML element, constructs final id out of tab and user-supplied item_id"""
+
+        need_tabname = self.show(True) == self.show(False)
+        tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else ""
+
+        return f'{tabname}{item_id}'
+
+
 current_basedir = paths.script_path
 
@@ -354,10 +396,17 @@ class ScriptRunner:
         self.selectable_scripts = []
         self.alwayson_scripts = []
         self.titles = []
+        self.title_map = {}
         self.infotext_fields = []
         self.paste_field_names = []
         self.inputs = [None]
 
+        self.on_before_component_elem_id = {}
+        """dict of callbacks to be called before an element is created; key=elem_id, value=list of callbacks"""
+
+        self.on_after_component_elem_id = {}
+        """dict of callbacks to be called after an element is created; key=elem_id, value=list of callbacks"""
+
     def initialize_scripts(self, is_img2img):
         from modules import scripts_auto_postprocessing
 
@@ -372,6 +421,7 @@ class ScriptRunner:
             script.filename = script_data.path
             script.is_txt2img = not is_img2img
             script.is_img2img = is_img2img
+            script.tabname = "img2img" if is_img2img else "txt2img"
 
             visibility = script.show(script.is_img2img)
 
@@ -446,6 +496,8 @@ class ScriptRunner:
         self.inputs = [None]
 
     def setup_ui(self):
+        all_titles = [wrap_call(script.title, script.filename, "title") or script.filename for script in self.scripts]
+        self.title_map = {title.lower(): script for title, script in zip(all_titles, self.scripts)}
         self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.selectable_scripts]
 
         self.setup_ui_for_section(None)
@@ -492,6 +544,13 @@ class ScriptRunner:
         self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None'))))
         self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts])
 
+        for script in self.scripts:
+            for elem_id, callback in script.on_before_component_elem_id:
+                self.on_before_component_elem_id.get(elem_id, []).append((callback, script))
+
+            for elem_id, callback in script.on_after_component_elem_id:
+                self.on_after_component_elem_id.get(elem_id, []).append((callback, script))
+
         return self.inputs
 
     def run(self, p, *args):
@@ -585,6 +644,13 @@ class ScriptRunner:
             errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True)
 
     def before_component(self, component, **kwargs):
+        for callbacks in self.on_before_component_elem_id.get(kwargs.get("elem_id"), []):
+            for callback, script in callbacks:
+                try:
+                    callback(OnComponent(component=component))
+                except Exception:
+                    errors.report(f"Error running on_before_component: {script.filename}", exc_info=True)
+
         for script in self.scripts:
             try:
                 script.before_component(component, **kwargs)
@@ -592,12 +658,22 @@
class ScriptRunner: errors.report(f"Error running before_component: {script.filename}", exc_info=True) def after_component(self, component, **kwargs): + for callbacks in self.on_after_component_elem_id.get(component.elem_id, []): + for callback, script in callbacks: + try: + callback(OnComponent(component=component)) + except Exception: + errors.report(f"Error running on_after_component: {script.filename}", exc_info=True) + for script in self.scripts: try: script.after_component(component, **kwargs) except Exception: errors.report(f"Error running after_component: {script.filename}", exc_info=True) + def script(self, title): + return self.title_map.get(title.lower()) + def reload_sources(self, cache): for si, script in list(enumerate(self.scripts)): args_from = script.args_from @@ -616,7 +692,6 @@ class ScriptRunner: self.scripts[si].args_from = args_from self.scripts[si].args_to = args_to - def before_hr(self, p): for script in self.alwayson_scripts: try: diff --git a/modules/shared_items.py b/modules/shared_items.py index 754166d2..84d69c8d 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -73,6 +73,7 @@ ui_reorder_categories_builtin_items = [ "checkboxes", "dimensions", "cfg", + "denoising", "seed", "batch", "override_settings", diff --git a/modules/txt2img.py b/modules/txt2img.py index 5ea96bba..1ee592ad 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html import gradio as gr -def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): +def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) p = processing.StableDiffusionProcessingTxt2Img( @@ -19,12 +19,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step prompt=prompt, styles=prompt_styles, negative_prompt=negative_prompt, - seed=seed, - subseed=subseed, - subseed_strength=subseed_strength, - seed_resize_from_h=seed_resize_from_h, - seed_resize_from_w=seed_resize_from_w, - seed_enable_extras=seed_enable_extras, sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, diff --git a/modules/ui.py b/modules/ui.py index 3321b94d..a6b1f964 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1,5 +1,4 @@ import datetime -import json import mimetypes import os import sys @@ -13,7 +12,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, 
ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -142,45 +141,6 @@ def interrogate_deepbooru(image): return gr.update() if prompt is None else prompt -def create_seed_inputs(target_interface): - with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): - if cmd_opts.use_textbox_seed: - seed = gr.Textbox(label='Seed', value="", elem_id=f"{target_interface}_seed") - else: - seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed", precision=0) - - random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - - seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) - - # Components to show/hide based on the 'Extra' checkbox - seed_extras = [] - - with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: - seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed", precision=0) - random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") - reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") - - with FormRow(visible=False) as seed_extra_row_2: - seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") - - random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[]) - random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[]) - - def change_visibility(show): - return {comp: gr_show(show) for comp in seed_extras} - - seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras) - - return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox - - - def connect_clear_prompt(button): """Given clear button, prompt, and token_counter objects, setup clear prompt button click event""" button.click( @@ -191,39 +151,6 @@ def connect_clear_prompt(button): ) -def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed): - """ Connects a 'reuse (sub)seed' button's click event so that it copies last used - (sub)seed 
value from generation info the to the seed field. If copying subseed and subseed strength - was 0, i.e. no variation seed was used, it copies the normal seed value instead.""" - def copy_seed(gen_info_string: str, index): - res = -1 - - try: - gen_info = json.loads(gen_info_string) - index -= gen_info.get('index_of_first_image', 0) - - if is_subseed and gen_info.get('subseed_strength', 0) > 0: - all_subseeds = gen_info.get('all_subseeds', [-1]) - res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0] - else: - all_seeds = gen_info.get('all_seeds', [-1]) - res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - - except json.decoder.JSONDecodeError: - if gen_info_string: - errors.report(f"Error parsing JSON generation info: {gen_info_string}") - - return [res, gr_show(False)] - - reuse_seed.click( - fn=copy_seed, - _js="(x, y) => [x, selected_gallery_index()]", - show_progress=False, - inputs=[generation_info, dummy_component], - outputs=[seed, dummy_component] - ) - - def update_token_counter(text, steps): try: text, _ = extra_networks.parse_prompt(text) @@ -429,10 +356,8 @@ def create_ui(): batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") elif category == "cfg": - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') + with gr.Row(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): @@ -509,9 +434,6 @@ def create_ui(): txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - txt2img_args = dict( fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), _js="submit", @@ -525,8 +447,6 @@ def create_ui(): batch_count, batch_size, cfg_scale, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, height, width, enable_hr, @@ -577,15 +497,9 @@ def create_ui(): (steps, "Steps"), (sampler_name, "Sampler"), (cfg_scale, "CFG scale"), - (seed, "Seed"), (width, "Size-1"), (height, "Size-2"), (batch_size, "Batch size"), - (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d)), @@ -613,7 +527,7 @@ def create_ui(): steps, sampler_name, cfg_scale, - seed, + scripts.scripts_txt2img.script('Seed').seed, width, height, ] @@ -783,15 +697,13 @@ def create_ui(): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, 
elem_id="img2img_batch_size") - elif category == "cfg": - with FormGroup(): - with FormRow(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + elif category == "denoising": + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - elif category == "seed": - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') + elif category == "cfg": + with gr.Row(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) elif category == "checkboxes": with FormRow(elem_classes="checkboxes-row", variant="compact"): @@ -849,9 +761,6 @@ def create_ui(): img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) - connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) - connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) - img2img_args = dict( fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), _js="submit_img2img", @@ -878,8 +787,6 @@ def create_ui(): cfg_scale, image_cfg_scale, denoising_strength, - seed, - subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, selected_scale_tab, height, width, @@ -966,15 +873,9 @@ def create_ui(): (sampler_name, "Sampler"), (cfg_scale, "CFG scale"), (image_cfg_scale, "Image CFG scale"), - (seed, "Seed"), (width, "Size-1"), (height, "Size-2"), (batch_size, "Batch size"), - (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d), - (subseed, "Variation seed"), - (subseed_strength, "Variation seed strength"), - (seed_resize_from_w, "Seed resize from-1"), - (seed_resize_from_h, "Seed resize from-2"), (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()), (denoising_strength, "Denoising strength"), (mask_blur, "Mask blur"), diff --git a/style.css b/style.css index f936e9a0..92093b89 100644 --- a/style.css +++ b/style.css @@ -222,14 +222,18 @@ div.block.gradio-accordion { padding: 0.1em 0.75em; } +[id$=_seed], [id$=_subseed]{ + max-width: 10em; +} + [id$=_subseed_show]{ min-width: auto !important; flex-grow: 0 !important; display: flex; } -[id$=_subseed_show] label{ - margin-bottom: 0.5em; +[id$=_subseed_show] .label-wrap{ + margin: 0 0 0 0.5em; align-self: end; } @@ -1028,5 +1032,6 @@ div.accordions{ div.accordions > div.input-accordion.input-accordion-open{ flex: 1 auto; + flex-flow: column; } -- cgit v1.2.1 From 4e8690906c02f14a81974200775bfc81718a9250 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 12 Aug 2023 18:00:30 +0300 Subject: update seed/subseed HTML widths --- modules/processing_scripts/seed.py | 25 +++++++++++++++---------- style.css | 15 --------------- 2 files changed, 15 insertions(+), 25 deletions(-) diff 
From 4e8690906c02f14a81974200775bfc81718a9250 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 12 Aug 2023 18:00:30 +0300
Subject: update seed/subseed HTML widths

---
 modules/processing_scripts/seed.py | 25 +++++++++++++++----------
 style.css                          | 15 ---------------
 2 files changed, 15 insertions(+), 25 deletions(-)

diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py
index e0911bbe..1ec20339 100644
--- a/modules/processing_scripts/seed.py
+++ b/modules/processing_scripts/seed.py
@@ -24,20 +24,25 @@ class ScriptSeed(scripts.ScriptBuiltin):
 
     def ui(self, is_img2img):
         with gr.Row(elem_id=self.elem_id("seed_row")):
-            if cmd_opts.use_textbox_seed:
-                self.seed = gr.Textbox(label='Seed', value="", elem_id=self.elem_id("seed"))
-            else:
-                self.seed = gr.Number(label='Seed', value=-1, elem_id=self.elem_id("seed"), precision=0)
+            with gr.Column(scale=1, min_width=205):
+                with gr.Row():
+                    if cmd_opts.use_textbox_seed:
+                        self.seed = gr.Textbox(label='Seed', value="", elem_id=self.elem_id("seed"), min_width=100)
+                    else:
+                        self.seed = gr.Number(label='Seed', value=-1, elem_id=self.elem_id("seed"), min_width=100, precision=0)
 
-            random_seed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), label='Random seed')
-            reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), label='Reuse seed')
+                    random_seed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), label='Random seed')
+                    reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), label='Reuse seed')
 
-            subseed = gr.Number(label='Variation seed', value=-1, elem_id=self.elem_id("subseed"), precision=0)
+            with gr.Column(scale=1, min_width=205):
+                with gr.Row():
+                    subseed = gr.Number(label='Variation seed', value=-1, elem_id=self.elem_id("subseed"), min_width=100, precision=0)
 
-            random_subseed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_subseed"))
-            reuse_subseed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_subseed"))
+                    random_subseed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_subseed"))
+                    reuse_subseed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_subseed"))
 
-            subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=self.elem_id("subseed_strength"))
+            with gr.Column(scale=2, min_width=100):
+                subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=self.elem_id("subseed_strength"))
 
         random_seed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("seed") + "')}", show_progress=False, inputs=[], outputs=[])
         random_subseed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("subseed") + "')}", show_progress=False, inputs=[], outputs=[])
diff --git a/style.css b/style.css
index 92093b89..dc44d2cc 100644
--- a/style.css
+++ b/style.css
@@ -222,21 +222,6 @@ div.block.gradio-accordion {
     padding: 0.1em 0.75em;
 }
 
-[id$=_seed], [id$=_subseed]{
-    max-width: 10em;
-}
-
-[id$=_subseed_show]{
-    min-width: auto !important;
-    flex-grow: 0 !important;
-    display: flex;
-}
-
-[id$=_subseed_show] .label-wrap{
-    margin: 0 0 0 0.5em;
-    align-self: end;
-}
-
 .html-log .comments{
     padding-top: 0.5em;
 }
-- cgit v1.2.1

From 6816ad5ed806b9ada81b4fab82e21a9455fa5720 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 12 Aug 2023 18:36:30 +0300
Subject: fix broken reuse seed

---
 modules/processing_scripts/seed.py |  2 +-
 modules/scripts.py                 | 42 ++++++++++++++++++++++----------------
 2 files changed, 25 insertions(+), 19 deletions(-)

diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py
index 1ec20339..cc90775a 100644
--- a/modules/processing_scripts/seed.py
+++ b/modules/processing_scripts/seed.py
@@ -54,7 +54,7 @@ class ScriptSeed(scripts.ScriptBuiltin):
self.on_after_component(lambda x: connect_reuse_seed(self.seed, reuse_seed, x.component, False), elem_id=f'generation_info_{self.tabname}') - self.on_after_component(lambda x: connect_reuse_seed(self.seed, reuse_subseed, x.component, True), elem_id=f'generation_info_{self.tabname}') + self.on_after_component(lambda x: connect_reuse_seed(subseed, reuse_subseed, x.component, True), elem_id=f'generation_info_{self.tabname}') return self.seed, subseed, subseed_strength diff --git a/modules/scripts.py b/modules/scripts.py index 66fbec0d..c6459b45 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -62,10 +62,10 @@ class Script: api_info = None """Generated value of type modules.api.models.ScriptInfo with information about the script for API""" - on_before_component_elem_id = [] + on_before_component_elem_id = None """list of callbacks to be called before a component with an elem_id is created""" - on_after_component_elem_id = [] + on_after_component_elem_id = None """list of callbacks to be called after a component with an elem_id is created""" def title(self): @@ -235,6 +235,8 @@ class Script: This function is an alternative to before_component in that it also cllows to run before a component is created, but it doesn't require to be called for every created component - just for the one you need. """ + if self.on_before_component_elem_id is None: + self.on_before_component_elem_id = [] self.on_before_component_elem_id.append((elem_id, callback)) @@ -242,6 +244,8 @@ class Script: """ Calls callback after a component is created. The callback function is called with a single argument of type OnComponent. """ + if self.on_after_component_elem_id is None: + self.on_after_component_elem_id = [] self.on_after_component_elem_id.append((elem_id, callback)) @@ -545,11 +549,15 @@ class ScriptRunner: self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts]) for script in self.scripts: - for elem_id, callback in script.on_before_component_elem_id: - self.on_before_component_elem_id.get(elem_id, []).append((callback, script)) + for elem_id, callback in script.on_before_component_elem_id or []: + items = self.on_before_component_elem_id.get(elem_id, []) + items.append((callback, script)) + self.on_before_component_elem_id[elem_id] = items - for elem_id, callback in script.on_after_component_elem_id: - self.on_after_component_elem_id.get(elem_id, []).append((callback, script)) + for elem_id, callback in script.on_after_component_elem_id or []: + items = self.on_after_component_elem_id.get(elem_id, []) + items.append((callback, script)) + self.on_after_component_elem_id[elem_id] = items return self.inputs @@ -644,12 +652,11 @@ class ScriptRunner: errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True) def before_component(self, component, **kwargs): - for callbacks in self.on_before_component_elem_id.get(kwargs.get("elem_id"), []): - for callback, script in callbacks: - try: - callback(OnComponent(component=component)) - except Exception: - errors.report(f"Error running on_before_component: {script.filename}", exc_info=True) + for callback, script in self.on_before_component_elem_id.get(kwargs.get("elem_id"), []): + try: + callback(OnComponent(component=component)) + except Exception: + errors.report(f"Error running on_before_component: {script.filename}", exc_info=True) for script in self.scripts: try: @@ -658,12 +665,11 @@ class ScriptRunner: errors.report(f"Error running before_component: {script.filename}", 
exc_info=True) def after_component(self, component, **kwargs): - for callbacks in self.on_after_component_elem_id.get(component.elem_id, []): - for callback, script in callbacks: - try: - callback(OnComponent(component=component)) - except Exception: - errors.report(f"Error running on_after_component: {script.filename}", exc_info=True) + for callback, script in self.on_after_component_elem_id.get(component.elem_id, []): + try: + callback(OnComponent(component=component)) + except Exception: + errors.report(f"Error running on_after_component: {script.filename}", exc_info=True) for script in self.scripts: try: -- cgit v1.2.1 From 9d0ec135968d80420b84ca83f7958f5fc8e534c2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 12 Aug 2023 18:42:59 +0300 Subject: fix quicksettings on Chrome --- style.css | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/style.css b/style.css index dc44d2cc..716126ed 100644 --- a/style.css +++ b/style.css @@ -370,14 +370,13 @@ div#extras_scale_to_tab div.form{ /* settings */ #quicksettings { - width: fit-content; align-items: end; } #quicksettings > div, #quicksettings > fieldset{ max-width: 36em; width: fit-content; - flex: auto; + flex: 0 1 fit-content; padding: 0; border: none; box-shadow: none; -- cgit v1.2.1 From b2080756fcdc328292fc38998c06ccf23e53bd7e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 12 Aug 2023 19:03:33 +0300 Subject: make "send to" buttons into small tool buttons --- modules/ui_common.py | 14 +++++++++----- style.css | 4 ++-- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/modules/ui_common.py b/modules/ui_common.py index 99d19ff0..4c035f2a 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -137,13 +137,17 @@ Requested path was: {f} generation_info = None with gr.Column(): with gr.Row(elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"): - open_folder_button = gr.Button(folder_symbol, visible=not shared.cmd_opts.hide_ui_dir_config) + open_folder_button = ToolButton(folder_symbol, elem_id=f'{tabname}_open_folder', visible=not shared.cmd_opts.hide_ui_dir_config, tooltip="Open images output directory.") if tabname != "extras": - save = gr.Button('Save', elem_id=f'save_{tabname}') - save_zip = gr.Button('Zip', elem_id=f'save_zip_{tabname}') - - buttons = parameters_copypaste.create_buttons(["img2img", "inpaint", "extras"]) + save = ToolButton('💾', elem_id=f'save_{tabname}', tooltip=f"Save the image to a dedicated directory ({shared.opts.outdir_save}).") + save_zip = ToolButton('🗃️', elem_id=f'save_zip_{tabname}', tooltip=f"Save zip archive with images to a dedicated directory ({shared.opts.outdir_save})") + + buttons = { + 'img2img': ToolButton('🖼️', elem_id=f'{tabname}_send_to_img2img', tooltip="Send image and generation parameters to img2img tab."), + 'inpaint': ToolButton('🎨️', elem_id=f'{tabname}_send_to_inpaint', tooltip="Send image and generation parameters to img2img inpaint tab."), + 'extras': ToolButton('📐', elem_id=f'{tabname}_send_to_extras', tooltip="Send image and generation parameters to extras tab.") + } open_folder_button.click( fn=lambda: open_folder(shared.opts.outdir_samples or outdir), diff --git a/style.css b/style.css index 716126ed..dc528422 100644 --- a/style.css +++ b/style.css @@ -348,8 +348,8 @@ div#extras_scale_to_tab div.form{ z-index: 5; } -.image-buttons button{ - min-width: auto; +.image-buttons > .form{ + justify-content: center; } .infotext { -- cgit v1.2.1 From 
8d9ca46e0a09c414b0d07fa8a680de10c0e72ffe Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 13 Aug 2023 02:05:20 +0900 Subject: convert value when switching mode --- scripts/xyz_grid.py | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 67798202..bfaf9661 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -454,29 +454,36 @@ class Script(scripts.Script): fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values, y_values_dropdown]) fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values, z_values_dropdown]) - def select_axis(axis_type, axis_values_dropdown): + def select_axis(axis_type, axis_values, axis_values_dropdown): choices = self.current_axis_options[axis_type].choices has_choices = choices is not None - current_values = axis_values_dropdown + + current_values = axis_values + current_dropdown_values = axis_values_dropdown if has_choices: choices = choices() - if isinstance(current_values, str): - current_values = current_values.split(",") - current_values = list(filter(lambda x: x in choices, current_values)) - return gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices or csv_mode.value), gr.update(choices=choices if has_choices else None, visible=has_choices and not csv_mode.value, value=current_values) + if csv_mode.value: + current_dropdown_values = list(filter(lambda x: x in choices, current_dropdown_values)) + current_values = list_to_csv_string(current_dropdown_values) + else: + current_dropdown_values = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(axis_values)))] + current_dropdown_values = list(filter(lambda x: x in choices, current_dropdown_values)) + + return (gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices or csv_mode.value, value=current_values), + gr.update(choices=choices if has_choices else None, visible=has_choices and not csv_mode.value, value=current_dropdown_values)) - x_type.change(fn=select_axis, inputs=[x_type, x_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown]) - y_type.change(fn=select_axis, inputs=[y_type, y_values_dropdown], outputs=[fill_y_button, y_values, y_values_dropdown]) - z_type.change(fn=select_axis, inputs=[z_type, z_values_dropdown], outputs=[fill_z_button, z_values, z_values_dropdown]) + x_type.change(fn=select_axis, inputs=[x_type, x_values, x_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown]) + y_type.change(fn=select_axis, inputs=[y_type, y_values, y_values_dropdown], outputs=[fill_y_button, y_values, y_values_dropdown]) + z_type.change(fn=select_axis, inputs=[z_type, z_values, z_values_dropdown], outputs=[fill_z_button, z_values, z_values_dropdown]) - def change_choice_mode(_csv_mode, x_type, x_values_dropdown, y_type, y_values_dropdown, z_type, z_values_dropdown): + def change_choice_mode(_csv_mode, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown): csv_mode.value = _csv_mode - _fill_x_button, _x_values, _x_values_dropdown = select_axis(x_type, x_values_dropdown) - _fill_y_button, _y_values, _y_values_dropdown = select_axis(y_type, y_values_dropdown) - _fill_z_button, _z_values, _z_values_dropdown = select_axis(z_type, z_values_dropdown) + _fill_x_button, _x_values, _x_values_dropdown = select_axis(x_type, x_values, x_values_dropdown) + _fill_y_button, _y_values, _y_values_dropdown = 
select_axis(y_type, y_values, y_values_dropdown) + _fill_z_button, _z_values, _z_values_dropdown = select_axis(z_type, z_values, z_values_dropdown) return _fill_x_button, _x_values, _x_values_dropdown, _fill_y_button, _y_values, _y_values_dropdown, _fill_z_button, _z_values, _z_values_dropdown - csv_mode.change(fn=change_choice_mode, inputs=[csv_mode, x_type, x_values_dropdown, y_type, y_values_dropdown, z_type, z_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown, fill_y_button, y_values, y_values_dropdown, fill_z_button, z_values, z_values_dropdown]) + csv_mode.change(fn=change_choice_mode, inputs=[csv_mode, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown, fill_y_button, y_values, y_values_dropdown, fill_z_button, z_values, z_values_dropdown]) def get_dropdown_update_from_params(axis, params): val_key = f"{axis} Values" -- cgit v1.2.1 From 299eb543083668a945d6074753ea8e8f13e7d66d Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 13 Aug 2023 02:17:13 +0900 Subject: pass csv_mode --- scripts/xyz_grid.py | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index bfaf9661..254a75f4 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -440,21 +440,21 @@ class Script(scripts.Script): xz_swap_args = [x_type, x_values, x_values_dropdown, z_type, z_values, z_values_dropdown] swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args) - def fill(axis_type): + def fill(axis_type, csv_mode): axis = self.current_axis_options[axis_type] if axis.choices: - if csv_mode.value: + if csv_mode: return list_to_csv_string(axis.choices()), gr.update() else: return gr.update(), axis.choices() else: return gr.update(), gr.update() - fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values, x_values_dropdown]) - fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values, y_values_dropdown]) - fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values, z_values_dropdown]) + fill_x_button.click(fn=fill, inputs=[x_type, csv_mode], outputs=[x_values, x_values_dropdown]) + fill_y_button.click(fn=fill, inputs=[y_type, csv_mode], outputs=[y_values, y_values_dropdown]) + fill_z_button.click(fn=fill, inputs=[z_type, csv_mode], outputs=[z_values, z_values_dropdown]) - def select_axis(axis_type, axis_values, axis_values_dropdown): + def select_axis(axis_type, axis_values, axis_values_dropdown, csv_mode): choices = self.current_axis_options[axis_type].choices has_choices = choices is not None @@ -462,25 +462,24 @@ class Script(scripts.Script): current_dropdown_values = axis_values_dropdown if has_choices: choices = choices() - if csv_mode.value: + if csv_mode: current_dropdown_values = list(filter(lambda x: x in choices, current_dropdown_values)) current_values = list_to_csv_string(current_dropdown_values) else: current_dropdown_values = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(axis_values)))] current_dropdown_values = list(filter(lambda x: x in choices, current_dropdown_values)) - return (gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices or csv_mode.value, value=current_values), - gr.update(choices=choices if has_choices else None, visible=has_choices and not csv_mode.value, value=current_dropdown_values)) + return (gr.Button.update(visible=has_choices), 
gr.Textbox.update(visible=not has_choices or csv_mode, value=current_values), + gr.update(choices=choices if has_choices else None, visible=has_choices and not csv_mode, value=current_dropdown_values)) - x_type.change(fn=select_axis, inputs=[x_type, x_values, x_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown]) - y_type.change(fn=select_axis, inputs=[y_type, y_values, y_values_dropdown], outputs=[fill_y_button, y_values, y_values_dropdown]) - z_type.change(fn=select_axis, inputs=[z_type, z_values, z_values_dropdown], outputs=[fill_z_button, z_values, z_values_dropdown]) + x_type.change(fn=select_axis, inputs=[x_type, x_values, x_values_dropdown, csv_mode], outputs=[fill_x_button, x_values, x_values_dropdown]) + y_type.change(fn=select_axis, inputs=[y_type, y_values, y_values_dropdown, csv_mode], outputs=[fill_y_button, y_values, y_values_dropdown]) + z_type.change(fn=select_axis, inputs=[z_type, z_values, z_values_dropdown, csv_mode], outputs=[fill_z_button, z_values, z_values_dropdown]) - def change_choice_mode(_csv_mode, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown): - csv_mode.value = _csv_mode - _fill_x_button, _x_values, _x_values_dropdown = select_axis(x_type, x_values, x_values_dropdown) - _fill_y_button, _y_values, _y_values_dropdown = select_axis(y_type, y_values, y_values_dropdown) - _fill_z_button, _z_values, _z_values_dropdown = select_axis(z_type, z_values, z_values_dropdown) + def change_choice_mode(csv_mode, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown): + _fill_x_button, _x_values, _x_values_dropdown = select_axis(x_type, x_values, x_values_dropdown, csv_mode) + _fill_y_button, _y_values, _y_values_dropdown = select_axis(y_type, y_values, y_values_dropdown, csv_mode) + _fill_z_button, _z_values, _z_values_dropdown = select_axis(z_type, z_values, z_values_dropdown, csv_mode) return _fill_x_button, _x_values, _x_values_dropdown, _fill_y_button, _y_values, _y_values_dropdown, _fill_z_button, _z_values, _z_values_dropdown csv_mode.change(fn=change_choice_mode, inputs=[csv_mode, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown, fill_y_button, y_values, y_values_dropdown, fill_z_button, z_values, z_values_dropdown]) -- cgit v1.2.1 From dc5b5ee9c6beef9fa6ac9e8f9041063075e8d1e6 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 13 Aug 2023 02:21:04 +0900 Subject: properly convert this into CSV string --- scripts/xyz_grid.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 254a75f4..0350e52a 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -577,17 +577,17 @@ class Script(scripts.Script): x_opt = self.current_axis_options[x_type] if x_opt.choices is not None and not csv_mode: - x_values = ",".join(x_values_dropdown) + x_values = list_to_csv_string(x_values_dropdown) xs = process_axis(x_opt, x_values, x_values_dropdown) y_opt = self.current_axis_options[y_type] if y_opt.choices is not None and not csv_mode: - y_values = ",".join(y_values_dropdown) + y_values = list_to_csv_string(y_values_dropdown) ys = process_axis(y_opt, y_values, y_values_dropdown) z_opt = self.current_axis_options[z_type] if z_opt.choices is not None and not csv_mode: - z_values = ",".join(z_values_dropdown) + z_values = 
list_to_csv_string(z_values_dropdown) zs = process_axis(z_opt, z_values, z_values_dropdown) # this could be moved to common code, but unlikely to be ever triggered anywhere else -- cgit v1.2.1 From bd4da4474bef5c9c1f690c62b971704ee73d2860 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sun, 13 Aug 2023 02:27:39 +0800 Subject: Add extra norm module into built-in lora ext refer to LyCORIS 1.9.0.dev6 add new option and module for training norm layer (Which is reported to be good for style) --- extensions-builtin/Lora/network.py | 7 ++- extensions-builtin/Lora/network_norm.py | 29 ++++++++++++ extensions-builtin/Lora/networks.py | 64 ++++++++++++++++++++++---- extensions-builtin/Lora/scripts/lora_script.py | 16 +++++++ 4 files changed, 105 insertions(+), 11 deletions(-) create mode 100644 extensions-builtin/Lora/network_norm.py diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py index 0a18d69e..b7b89061 100644 --- a/extensions-builtin/Lora/network.py +++ b/extensions-builtin/Lora/network.py @@ -133,7 +133,7 @@ class NetworkModule: return 1.0 - def finalize_updown(self, updown, orig_weight, output_shape): + def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): if self.bias is not None: updown = updown.reshape(self.bias.shape) updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype) @@ -145,7 +145,10 @@ class NetworkModule: if orig_weight.size().numel() == updown.size().numel(): updown = updown.reshape(orig_weight.shape) - return updown * self.calc_scale() * self.multiplier() + if ex_bias is None: + ex_bias = 0 + + return updown * self.calc_scale() * self.multiplier(), ex_bias * self.multiplier() def calc_updown(self, target): raise NotImplementedError() diff --git a/extensions-builtin/Lora/network_norm.py b/extensions-builtin/Lora/network_norm.py new file mode 100644 index 00000000..dab8b684 --- /dev/null +++ b/extensions-builtin/Lora/network_norm.py @@ -0,0 +1,29 @@ +import network + + +class ModuleTypeNorm(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["w_norm", "b_norm"]): + return NetworkModuleNorm(net, weights) + + return None + + +class NetworkModuleNorm(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + print("NetworkModuleNorm") + + self.w_norm = weights.w.get("w_norm") + self.b_norm = weights.w.get("b_norm") + + def calc_updown(self, orig_weight): + output_shape = self.w_norm.shape + updown = self.w_norm.to(orig_weight.device, dtype=orig_weight.dtype) + + if self.b_norm is not None: + ex_bias = self.b_norm.to(orig_weight.device, dtype=orig_weight.dtype) + else: + ex_bias = None + + return self.finalize_updown(updown, orig_weight, output_shape, ex_bias) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 7e3415ac..74cefe43 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -7,6 +7,7 @@ import network_hada import network_ia3 import network_lokr import network_full +import network_norm import torch from typing import Union @@ -19,6 +20,7 @@ module_types = [ network_ia3.ModuleTypeIa3(), network_lokr.ModuleTypeLokr(), network_full.ModuleTypeFull(), + network_norm.ModuleTypeNorm(), ] @@ -31,6 +33,8 @@ suffix_conversion = { "resnets": { "conv1": "in_layers_2", "conv2": "out_layers_3", + "norm1": "in_layers_0", + 
"norm2": "out_layers_0", "time_emb_proj": "emb_layers_1", "conv_shortcut": "skip_connection", } @@ -258,20 +262,25 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No purge_networks_from_memory() -def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]): +def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]): weights_backup = getattr(self, "network_weights_backup", None) + bias_backup = getattr(self, "network_bias_backup", None) - if weights_backup is None: + if weights_backup is None and bias_backup is None: return - if isinstance(self, torch.nn.MultiheadAttention): - self.in_proj_weight.copy_(weights_backup[0]) - self.out_proj.weight.copy_(weights_backup[1]) - else: - self.weight.copy_(weights_backup) + if weights_backup is not None: + if isinstance(self, torch.nn.MultiheadAttention): + self.in_proj_weight.copy_(weights_backup[0]) + self.out_proj.weight.copy_(weights_backup[1]) + else: + self.weight.copy_(weights_backup) + if bias_backup is not None: + self.bias.copy_(bias_backup) -def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]): + +def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]): """ Applies the currently selected set of networks to the weights of torch layer self. If weights already have this particular set of networks applied, does nothing. @@ -294,6 +303,11 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn self.network_weights_backup = weights_backup + bias_backup = getattr(self, "network_bias_backup", None) + if bias_backup is None and getattr(self, 'bias', None) is not None: + bias_backup = self.bias.to(devices.cpu, copy=True) + self.network_bias_backup = bias_backup + if current_names != wanted_names: network_restore_weights_from_backup(self) @@ -301,13 +315,15 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn module = net.modules.get(network_layer_name, None) if module is not None and hasattr(self, 'weight'): with torch.no_grad(): - updown = module.calc_updown(self.weight) + updown, ex_bias = module.calc_updown(self.weight) if len(self.weight.shape) == 4 and self.weight.shape[1] == 9: # inpainting model. 
zero pad updown to make channel[1] 4 to 9 updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) self.weight += updown + if getattr(self, 'bias', None) is not None: + self.bias += ex_bias continue module_q = net.modules.get(network_layer_name + "_q_proj", None) @@ -397,6 +413,36 @@ def network_Conv2d_load_state_dict(self, *args, **kwargs): return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs) +def network_GroupNorm_forward(self, input): + if shared.opts.lora_functional: + return network_forward(self, input, torch.nn.GroupNorm_forward_before_network) + + network_apply_weights(self) + + return torch.nn.GroupNorm_forward_before_network(self, input) + + +def network_GroupNorm_load_state_dict(self, *args, **kwargs): + network_reset_cached_weight(self) + + return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs) + + +def network_LayerNorm_forward(self, input): + if shared.opts.lora_functional: + return network_forward(self, input, torch.nn.LayerNorm_forward_before_network) + + network_apply_weights(self) + + return torch.nn.LayerNorm_forward_before_network(self, input) + + +def network_LayerNorm_load_state_dict(self, *args, **kwargs): + network_reset_cached_weight(self) + + return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs) + + def network_MultiheadAttention_forward(self, *args, **kwargs): network_apply_weights(self) diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index 6ab8b6e7..dc307f8c 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -40,6 +40,18 @@ if not hasattr(torch.nn, 'Conv2d_forward_before_network'): if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'): torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict +if not hasattr(torch.nn, 'GroupNorm_forward_before_network'): + torch.nn.GroupNorm_forward_before_network = torch.nn.GroupNorm.forward + +if not hasattr(torch.nn, 'GroupNorm_load_state_dict_before_network'): + torch.nn.GroupNorm_load_state_dict_before_network = torch.nn.GroupNorm._load_from_state_dict + +if not hasattr(torch.nn, 'LayerNorm_forward_before_network'): + torch.nn.LayerNorm_forward_before_network = torch.nn.LayerNorm.forward + +if not hasattr(torch.nn, 'LayerNorm_load_state_dict_before_network'): + torch.nn.LayerNorm_load_state_dict_before_network = torch.nn.LayerNorm._load_from_state_dict + if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'): torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward @@ -50,6 +62,10 @@ torch.nn.Linear.forward = networks.network_Linear_forward torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict torch.nn.Conv2d.forward = networks.network_Conv2d_forward torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict +torch.nn.GroupNorm.forward = networks.network_GroupNorm_forward +torch.nn.GroupNorm._load_from_state_dict = networks.network_GroupNorm_load_state_dict +torch.nn.LayerNorm.forward = networks.network_LayerNorm_forward +torch.nn.LayerNorm._load_from_state_dict = networks.network_LayerNorm_load_state_dict torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict -- cgit v1.2.1 From a2b83050965a1a117f2762d3b5fa8b4841777e8f Mon Sep 17 00:00:00 2001 From: 
Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sun, 13 Aug 2023 02:35:04 +0800 Subject: return None if no ex_bias --- extensions-builtin/Lora/network.py | 6 +++--- extensions-builtin/Lora/networks.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py index b7b89061..d8e8dfb7 100644 --- a/extensions-builtin/Lora/network.py +++ b/extensions-builtin/Lora/network.py @@ -145,10 +145,10 @@ class NetworkModule: if orig_weight.size().numel() == updown.size().numel(): updown = updown.reshape(orig_weight.shape) - if ex_bias is None: - ex_bias = 0 + if ex_bias is not None: + ex_bias = ex_bias * self.multiplier() - return updown * self.calc_scale() * self.multiplier(), ex_bias * self.multiplier() + return updown * self.calc_scale() * self.multiplier(), ex_bias def calc_updown(self, target): raise NotImplementedError() diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 74cefe43..ba621139 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -322,7 +322,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) self.weight += updown - if getattr(self, 'bias', None) is not None: + if ex_bias is not None and getattr(self, 'bias', None) is not None: self.bias += ex_bias continue -- cgit v1.2.1 From 5881dcb8873b3f87b9c6545e9cb8d1d77023f4fe Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sun, 13 Aug 2023 02:36:02 +0800 Subject: remove debug print --- extensions-builtin/Lora/network_norm.py | 1 - 1 file changed, 1 deletion(-) diff --git a/extensions-builtin/Lora/network_norm.py b/extensions-builtin/Lora/network_norm.py index dab8b684..ce450158 100644 --- a/extensions-builtin/Lora/network_norm.py +++ b/extensions-builtin/Lora/network_norm.py @@ -12,7 +12,6 @@ class ModuleTypeNorm(network.ModuleType): class NetworkModuleNorm(network.NetworkModule): def __init__(self, net: network.Network, weights: network.NetworkWeights): super().__init__(net, weights) - print("NetworkModuleNorm") self.w_norm = weights.w.get("w_norm") self.b_norm = weights.w.get("b_norm") -- cgit v1.2.1 From fa9370b7411166c19e8e386400dc4e6082f47b2d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 13 Aug 2023 06:07:30 +0300 Subject: add refiner to StableDiffusionProcessing class write out correct model name in infotext, rather than the refiner model --- modules/processing.py | 38 +++++++++++++++++++++++++++-------- modules/processing_scripts/refiner.py | 16 +++++---------- modules/sd_samplers_common.py | 2 +- 3 files changed, 36 insertions(+), 20 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 6ad105d7..b47ddaa8 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -111,7 +111,7 @@ class StableDiffusionProcessing: cached_uc = [None, None] cached_c = [None, None] - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, 
do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, refiner_checkpoint: str = None, refiner_switch_at: float = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -153,10 +153,14 @@ class StableDiffusionProcessing: self.s_noise = s_noise if s_noise is not None else opts.s_noise self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts} self.override_settings_restore_afterwards = override_settings_restore_afterwards + self.refiner_checkpoint = refiner_checkpoint + self.refiner_switch_at = refiner_switch_at + self.is_using_inpainting_conditioning = False self.disable_extra_networks = False self.token_merging_ratio = 0 self.token_merging_ratio_hr = 0 + self.refiner_checkpoint_info = None if not seed_enable_extras: self.subseed = -1 @@ -191,6 +195,11 @@ class StableDiffusionProcessing: self.user = None + self.sd_model_name = None + self.sd_model_hash = None + self.sd_vae_name = None + self.sd_vae_hash = None + @property def sd_model(self): return shared.sd_model @@ -408,7 +417,10 @@ class Processed: self.batch_size = p.batch_size self.restore_faces = p.restore_faces self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None - self.sd_model_hash = shared.sd_model.sd_model_hash + self.sd_model_name = p.sd_model_name + self.sd_model_hash = p.sd_model_hash + self.sd_vae_name = p.sd_vae_name + self.sd_vae_hash = p.sd_vae_hash self.seed_resize_from_w = p.seed_resize_from_w self.seed_resize_from_h = p.seed_resize_from_h self.denoising_strength = getattr(p, 'denoising_strength', None) @@ -459,7 +471,10 @@ class Processed: "batch_size": self.batch_size, "restore_faces": self.restore_faces, "face_restoration_model": self.face_restoration_model, + "sd_model_name": self.sd_model_name, "sd_model_hash": self.sd_model_hash, + "sd_vae_name": self.sd_vae_name, + "sd_vae_hash": self.sd_vae_hash, "seed_resize_from_w": 
self.seed_resize_from_w, "seed_resize_from_h": self.seed_resize_from_h, "denoising_strength": self.denoising_strength, @@ -578,10 +593,10 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index], "Face restoration": opts.face_restoration_model if p.restore_faces else None, "Size": f"{p.width}x{p.height}", - "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), - "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra), - "VAE hash": p.loaded_vae_hash if opts.add_model_hash_to_info else None, - "VAE": p.loaded_vae_name if opts.add_model_name_to_info else None, + "Model hash": p.sd_model_hash if opts.add_model_hash_to_info else None, + "Model": p.sd_model_name if opts.add_model_name_to_info else None, + "VAE hash": p.sd_vae_hash if opts.add_model_hash_to_info else None, + "VAE": p.sd_vae_name if opts.add_model_name_to_info else None, "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])), "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength), "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), @@ -670,8 +685,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.tiling is None: p.tiling = opts.tiling - p.loaded_vae_name = sd_vae.get_loaded_vae_name() - p.loaded_vae_hash = sd_vae.get_loaded_vae_hash() + if p.refiner_checkpoint not in (None, "", "None"): + p.refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(p.refiner_checkpoint) + if p.refiner_checkpoint_info is None: + raise Exception(f'Could not find checkpoint with name {p.refiner_checkpoint}') + + p.sd_model_name = shared.sd_model.sd_checkpoint_info.name_for_extra + p.sd_model_hash = shared.sd_model.sd_model_hash + p.sd_vae_name = sd_vae.get_loaded_vae_name() + p.sd_vae_hash = sd_vae.get_loaded_vae_hash() modules.sd_hijack.model_hijack.apply_circular(p.tiling) modules.sd_hijack.model_hijack.clear_comments() diff --git a/modules/processing_scripts/refiner.py b/modules/processing_scripts/refiner.py index 773ec5d0..7b946d05 100644 --- a/modules/processing_scripts/refiner.py +++ b/modules/processing_scripts/refiner.py @@ -41,15 +41,9 @@ class ScriptRefiner(scripts.Script): def before_process(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at): # the actual implementation is in sd_samplers_common.py, apply_refiner - p.refiner_checkpoint_info = None - p.refiner_switch_at = None - if not enable_refiner or refiner_checkpoint in (None, "", "None"): - return - - refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(refiner_checkpoint) - if refiner_checkpoint_info is None: - raise Exception(f'Could not find checkpoint with name {refiner_checkpoint}') - - p.refiner_checkpoint_info = refiner_checkpoint_info - p.refiner_switch_at = refiner_switch_at + p.refiner_checkpoint_info = None + p.refiner_switch_at = None + else: + p.refiner_checkpoint = refiner_checkpoint + p.refiner_switch_at = refiner_switch_at diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 40c7aae0..380cdd5f 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -145,7 +145,7 @@ def apply_refiner(cfg_denoiser): refiner_switch_at = 
cfg_denoiser.p.refiner_switch_at refiner_checkpoint_info = cfg_denoiser.p.refiner_checkpoint_info - if refiner_switch_at is not None and completed_ratio <= refiner_switch_at: + if refiner_switch_at is not None and completed_ratio < refiner_switch_at: return False if refiner_checkpoint_info is None or shared.sd_model.sd_checkpoint_info == refiner_checkpoint_info: -- cgit v1.2.1 From 0e3bac8132e63a9adfca0223b4791f353ecb1057 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 13 Aug 2023 04:12:37 +0900 Subject: rephrase and move --- scripts/xyz_grid.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 0350e52a..da0e48aa 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -420,10 +420,11 @@ class Script(scripts.Script): with gr.Column(): include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images")) include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids")) - with gr.Column(): - csv_mode = gr.Checkbox(label='CSV mode', value=False, elem_id=self.elem_id("CSV mode")) with gr.Column(): margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size")) + with gr.Column(): + csv_mode = gr.Checkbox(label='Use text inputs instead of dropdowns', value=False, elem_id=self.elem_id("csv_mode")) + with gr.Row(variant="compact", elem_id="swap_axes"): swap_xy_axes_button = gr.Button(value="Swap X/Y axes", elem_id="xy_grid_swap_axes_button") -- cgit v1.2.1 From 599f61a1e0bddf463dd3c6adb84509b3d9db1941 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 13 Aug 2023 08:24:16 +0300 Subject: use dataclass for StableDiffusionProcessing --- modules/processing.py | 318 +++++++++++++++++++++++------------------- modules/sd_samplers_common.py | 5 +- 2 files changed, 176 insertions(+), 147 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index b47ddaa8..007a4e05 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -1,9 +1,11 @@ +from __future__ import annotations import json import logging import math import os import sys import hashlib +from dataclasses import dataclass, field import torch import numpy as np @@ -11,7 +13,7 @@ from PIL import Image, ImageOps import random import cv2 from skimage import exposure -from typing import Any, Dict, List +from typing import Any import modules.sd_hijack from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng @@ -104,106 +106,126 @@ def txt2img_image_conditioning(sd_model, x, width, height): return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) +@dataclass(repr=False) class StableDiffusionProcessing: - """ - The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing - """ + sd_model: object = None + outpath_samples: str = None + outpath_grids: str = None + prompt: str = "" + prompt_for_display: str = None + negative_prompt: str = "" + styles: list[str] = field(default_factory=list) + seed: int = -1 + subseed: int = -1 + subseed_strength: float = 0 + seed_resize_from_h: int = -1 + seed_resize_from_w: int = -1 + seed_enable_extras: bool = True + sampler_name: str = None + batch_size: int = 1 + n_iter: int = 1 
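# A minimal sketch of the conversion pattern this commit applies (an illustrative
# toy class, not code from the patch): caller-facing parameters become plain
# dataclass fields with defaults, internal state is declared with field(init=False)
# so it is excluded from the generated __init__, and defaults that depend on
# runtime options are resolved in __post_init__, replacing the hand-written
# constructor above.
from dataclasses import dataclass, field

@dataclass(repr=False)
class ExampleProcessing:
    steps: int = 50                 # caller-facing parameter, as in the fields here
    s_churn: float = None           # None means "defer to the global opts value"
    sampler: object = field(default=None, init=False)  # internal state, not an init arg

    def __post_init__(self):
        # resolve option-dependent defaults once, after field assignment
        self.s_churn = self.s_churn if self.s_churn is not None else 0.0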
+ steps: int = 50 + cfg_scale: float = 7.0 + width: int = 512 + height: int = 512 + restore_faces: bool = None + tiling: bool = None + do_not_save_samples: bool = False + do_not_save_grid: bool = False + extra_generation_params: dict[str, Any] = None + overlay_images: list = None + eta: float = None + do_not_reload_embeddings: bool = False + denoising_strength: float = 0 + ddim_discretize: str = None + s_min_uncond: float = None + s_churn: float = None + s_tmax: float = None + s_tmin: float = None + s_noise: float = None + override_settings: dict[str, Any] = None + override_settings_restore_afterwards: bool = True + sampler_index: int = None + refiner_checkpoint: str = None + refiner_switch_at: float = None + token_merging_ratio = 0 + token_merging_ratio_hr = 0 + disable_extra_networks: bool = False + + script_args: list = None + cached_uc = [None, None] cached_c = [None, None] - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, refiner_checkpoint: str = None, refiner_switch_at: float = None, script_args: list = None): - if sampler_index is not None: + sampler: sd_samplers_common.Sampler | None = field(default=None, init=False) + is_using_inpainting_conditioning: bool = field(default=False, init=False) + paste_to: tuple | None = field(default=None, init=False) + + is_hr_pass: bool = field(default=False, init=False) + + c: tuple = field(default=None, init=False) + uc: tuple = field(default=None, init=False) + + rng: rng.ImageRNG | None = field(default=None, init=False) + step_multiplier: int = field(default=1, init=False) + color_corrections: list = field(default=None, init=False) + + scripts: list = field(default=None, init=False) + all_prompts: list = field(default=None, init=False) + all_negative_prompts: list = field(default=None, init=False) + all_seeds: list = field(default=None, init=False) + all_subseeds: list = field(default=None, init=False) + iteration: int = field(default=0, init=False) + main_prompt: str = field(default=None, init=False) + main_negative_prompt: str = field(default=None, init=False) + + prompts: list = field(default=None, init=False) + negative_prompts: list = field(default=None, init=False) + seeds: list = field(default=None, init=False) + subseeds: list = field(default=None, init=False) + extra_network_data: dict = field(default=None, init=False) + + user: str = field(default=None, init=False) + + sd_model_name: str = field(default=None, init=False) + sd_model_hash: str = field(default=None, init=False) + sd_vae_name: str = field(default=None, init=False) + sd_vae_hash: str = field(default=None, init=False) + + def 
__post_init__(self): + if self.sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) - self.outpath_samples: str = outpath_samples - self.outpath_grids: str = outpath_grids - self.prompt: str = prompt - self.prompt_for_display: str = None - self.negative_prompt: str = (negative_prompt or "") - self.styles: list = styles or [] - self.seed: int = seed - self.subseed: int = subseed - self.subseed_strength: float = subseed_strength - self.seed_resize_from_h: int = seed_resize_from_h - self.seed_resize_from_w: int = seed_resize_from_w - self.sampler_name: str = sampler_name - self.batch_size: int = batch_size - self.n_iter: int = n_iter - self.steps: int = steps - self.cfg_scale: float = cfg_scale - self.width: int = width - self.height: int = height - self.restore_faces: bool = restore_faces - self.tiling: bool = tiling - self.do_not_save_samples: bool = do_not_save_samples - self.do_not_save_grid: bool = do_not_save_grid - self.extra_generation_params: dict = extra_generation_params or {} - self.overlay_images = overlay_images - self.eta = eta - self.do_not_reload_embeddings = do_not_reload_embeddings - self.paste_to = None - self.color_corrections = None - self.denoising_strength: float = denoising_strength self.sampler_noise_scheduler_override = None - self.ddim_discretize = ddim_discretize or opts.ddim_discretize - self.s_min_uncond = s_min_uncond or opts.s_min_uncond - self.s_churn = s_churn or opts.s_churn - self.s_tmin = s_tmin or opts.s_tmin - self.s_tmax = (s_tmax if s_tmax is not None else opts.s_tmax) or float('inf') - self.s_noise = s_noise if s_noise is not None else opts.s_noise - self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts} - self.override_settings_restore_afterwards = override_settings_restore_afterwards - self.refiner_checkpoint = refiner_checkpoint - self.refiner_switch_at = refiner_switch_at - - self.is_using_inpainting_conditioning = False - self.disable_extra_networks = False - self.token_merging_ratio = 0 - self.token_merging_ratio_hr = 0 + self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond + self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn + self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin + self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf') + self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise + + self.extra_generation_params = self.extra_generation_params or {} + self.override_settings = self.override_settings or {} + self.script_args = self.script_args or {} + self.refiner_checkpoint_info = None - if not seed_enable_extras: + if not self.seed_enable_extras: self.subseed = -1 self.subseed_strength = 0 self.seed_resize_from_h = 0 self.seed_resize_from_w = 0 - self.scripts = None - self.script_args = script_args - self.all_prompts = None - self.all_negative_prompts = None - self.all_seeds = None - self.all_subseeds = None - self.iteration = 0 - self.is_hr_pass = False - self.sampler = None - self.main_prompt = None - self.main_negative_prompt = None - - self.prompts = None - self.negative_prompts = None - self.extra_network_data = None - self.seeds = None - self.subseeds = None - - self.step_multiplier = 1 self.cached_uc = StableDiffusionProcessing.cached_uc self.cached_c = StableDiffusionProcessing.cached_c - self.uc = None - self.c = None - self.rng: rng.ImageRNG = 
None - - self.user = None - - self.sd_model_name = None - self.sd_model_hash = None - self.sd_vae_name = None - self.sd_vae_hash = None @property def sd_model(self): return shared.sd_model + @sd_model.setter + def sd_model(self, value): + pass + def txt2img_image_conditioning(self, x, width=None, height=None): self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'} @@ -932,49 +954,51 @@ def old_hires_fix_first_pass_dimensions(width, height): return width, height +@dataclass(repr=False) class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): - sampler = None + enable_hr: bool = False + denoising_strength: float = 0.75 + firstphase_width: int = 0 + firstphase_height: int = 0 + hr_scale: float = 2.0 + hr_upscaler: str = None + hr_second_pass_steps: int = 0 + hr_resize_x: int = 0 + hr_resize_y: int = 0 + hr_checkpoint_name: str = None + hr_sampler_name: str = None + hr_prompt: str = '' + hr_negative_prompt: str = '' + cached_hr_uc = [None, None] cached_hr_c = [None, None] - def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_checkpoint_name: str = None, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs): - super().__init__(**kwargs) - self.enable_hr = enable_hr - self.denoising_strength = denoising_strength - self.hr_scale = hr_scale - self.hr_upscaler = hr_upscaler - self.hr_second_pass_steps = hr_second_pass_steps - self.hr_resize_x = hr_resize_x - self.hr_resize_y = hr_resize_y - self.hr_upscale_to_x = hr_resize_x - self.hr_upscale_to_y = hr_resize_y - self.hr_checkpoint_name = hr_checkpoint_name - self.hr_checkpoint_info = None - self.hr_sampler_name = hr_sampler_name - self.hr_prompt = hr_prompt - self.hr_negative_prompt = hr_negative_prompt - self.all_hr_prompts = None - self.all_hr_negative_prompts = None - self.latent_scale_mode = None - - if firstphase_width != 0 or firstphase_height != 0: + hr_checkpoint_info: dict = field(default=None, init=False) + hr_upscale_to_x: int = field(default=0, init=False) + hr_upscale_to_y: int = field(default=0, init=False) + truncate_x: int = field(default=0, init=False) + truncate_y: int = field(default=0, init=False) + applied_old_hires_behavior_to: tuple = field(default=None, init=False) + latent_scale_mode: dict = field(default=None, init=False) + hr_c: tuple | None = field(default=None, init=False) + hr_uc: tuple | None = field(default=None, init=False) + all_hr_prompts: list = field(default=None, init=False) + all_hr_negative_prompts: list = field(default=None, init=False) + hr_prompts: list = field(default=None, init=False) + hr_negative_prompts: list = field(default=None, init=False) + hr_extra_network_data: list = field(default=None, init=False) + + def __post_init__(self): + super().__post_init__() + + if self.firstphase_width != 0 or self.firstphase_height != 0: self.hr_upscale_to_x = self.width self.hr_upscale_to_y = self.height - self.width = firstphase_width - self.height = firstphase_height - - self.truncate_x = 0 - self.truncate_y = 0 - self.applied_old_hires_behavior_to = None - - self.hr_prompts = None - self.hr_negative_prompts = None - self.hr_extra_network_data = None + self.width = self.firstphase_width + self.height = self.firstphase_height self.cached_hr_uc = StableDiffusionProcessingTxt2Img.cached_hr_uc self.cached_hr_c = 
StableDiffusionProcessingTxt2Img.cached_hr_c - self.hr_c = None - self.hr_uc = None def calculate_target_resolution(self): if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): @@ -1252,7 +1276,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return super().get_conds() - def parse_extra_network_prompts(self): res = super().parse_extra_network_prompts() @@ -1265,32 +1288,37 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return res +@dataclass(repr=False) class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): - sampler = None - - def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = None, mask_blur_x: int = 4, mask_blur_y: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs): - super().__init__(**kwargs) - - self.init_images = init_images - self.resize_mode: int = resize_mode - self.denoising_strength: float = denoising_strength - self.image_cfg_scale: float = image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None - self.init_latent = None - self.image_mask = mask - self.latent_mask = None - self.mask_for_overlay = None - self.mask_blur_x = mask_blur_x - self.mask_blur_y = mask_blur_y - if mask_blur is not None: - self.mask_blur = mask_blur - self.inpainting_fill = inpainting_fill - self.inpaint_full_res = inpaint_full_res - self.inpaint_full_res_padding = inpaint_full_res_padding - self.inpainting_mask_invert = inpainting_mask_invert - self.initial_noise_multiplier = opts.initial_noise_multiplier if initial_noise_multiplier is None else initial_noise_multiplier + init_images: list = None + resize_mode: int = 0 + denoising_strength: float = 0.75 + image_cfg_scale: float = None + mask: Any = None + mask_blur_x: int = 4 + mask_blur_y: int = 4 + mask_blur: int = None + inpainting_fill: int = 0 + inpaint_full_res: bool = True + inpaint_full_res_padding: int = 0 + inpainting_mask_invert: int = 0 + initial_noise_multiplier: float = None + latent_mask: Image = None + + image_mask: Any = field(default=None, init=False) + + nmask: torch.Tensor = field(default=None, init=False) + image_conditioning: torch.Tensor = field(default=None, init=False) + init_img_hash: str = field(default=None, init=False) + mask_for_overlay: Image = field(default=None, init=False) + init_latent: torch.Tensor = field(default=None, init=False) + + def __post_init__(self): + super().__post_init__() + + self.image_mask = self.mask self.mask = None - self.nmask = None - self.image_conditioning = None + self.initial_noise_multiplier = opts.initial_noise_multiplier if self.initial_noise_multiplier is None else self.initial_noise_multiplier @property def mask_blur(self): @@ -1300,15 +1328,13 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): @mask_blur.setter def mask_blur(self, value): - self.mask_blur_x = value - self.mask_blur_y = value - - @mask_blur.deleter - def mask_blur(self): - del self.mask_blur_x - del self.mask_blur_y + if isinstance(value, int): + self.mask_blur_x = value + self.mask_blur_y = value def init(self, all_prompts, all_seeds, all_subseeds): + self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None + self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) 
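# Aside: the mask_blur property in the hunk above lets the dataclass keep a single
# mask_blur parameter while the real state lives in mask_blur_x/mask_blur_y. A
# standalone sketch of the idea (the getter body is an assumption about the
# unchanged context lines; the setter mirrors the patch):
class MaskSettings:
    def __init__(self):
        self.mask_blur_x = 4
        self.mask_blur_y = 4

    @property
    def mask_blur(self):
        # a single blur value only exists when both axes agree
        return self.mask_blur_x if self.mask_blur_x == self.mask_blur_y else None

    @mask_blur.setter
    def mask_blur(self, value):
        # ignore non-int assignments (e.g. the dataclass's None default)
        # instead of clobbering the per-axis values
        if isinstance(value, int):
            self.mask_blur_x = value
            self.mask_blur_y = value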
crop_region = None diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 380cdd5f..09d1e11e 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -305,5 +305,8 @@ class Sampler: current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) + def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + raise NotImplementedError() - + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): + raise NotImplementedError() -- cgit v1.2.1 From 7fa5ee54b15904bef6598800df76ba1291d44ec6 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 13 Aug 2023 02:32:54 -0400 Subject: Support search and display of hashes for all extra network items --- extensions-builtin/Lora/ui_extra_networks_lora.py | 3 ++- modules/ui_extra_networks_checkpoints.py | 1 + modules/ui_extra_networks_hypernets.py | 6 +++++- modules/ui_extra_networks_textual_inversion.py | 3 ++- modules/ui_extra_networks_user_metadata.py | 4 +++- 5 files changed, 13 insertions(+), 4 deletions(-) diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index 3629e5c0..55409a78 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -25,9 +25,10 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): item = { "name": name, "filename": lora_on_disk.filename, + "shorthash": lora_on_disk.shorthash, "preview": self.find_preview(path), "description": self.find_description(path), - "search_term": self.search_terms_from_path(lora_on_disk.filename), + "search_term": self.search_terms_from_path(lora_on_disk.filename) + " " + (lora_on_disk.hash or ""), "local_preview": f"{path}.{shared.opts.samples_format}", "metadata": lora_on_disk.metadata, "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)}, diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index 77885022..ebb5249f 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -19,6 +19,7 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): return { "name": checkpoint.name_for_extra, "filename": checkpoint.filename, + "shorthash": checkpoint.shorthash, "preview": self.find_preview(path), "description": self.find_description(path), "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""), diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py index 514a4562..4cedf085 100644 --- a/modules/ui_extra_networks_hypernets.py +++ b/modules/ui_extra_networks_hypernets.py @@ -2,6 +2,7 @@ import os from modules import shared, ui_extra_networks from modules.ui_extra_networks import quote_js +from modules.hashes import sha256_from_cache class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): @@ -14,13 +15,16 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage): def create_item(self, name, index=None, enable_filter=True): full_path = shared.hypernetworks[name] path, ext = os.path.splitext(full_path) + sha256 = sha256_from_cache(full_path, f'hypernet/{name}') + shorthash = sha256[0:10] if sha256 else None return { 
"name": name, "filename": full_path, + "shorthash": shorthash, "preview": self.find_preview(path), "description": self.find_description(path), - "search_term": self.search_terms_from_path(path), + "search_term": self.search_terms_from_path(path) + " " + (sha256 or ""), "prompt": quote_js(f""), "local_preview": f"{path}.preview.{shared.opts.samples_format}", "sort_keys": {'default': index, **self.get_sort_keys(path + ext)}, diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py index 73134698..55ef0ea7 100644 --- a/modules/ui_extra_networks_textual_inversion.py +++ b/modules/ui_extra_networks_textual_inversion.py @@ -19,9 +19,10 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): return { "name": name, "filename": embedding.filename, + "shorthash": embedding.shorthash, "preview": self.find_preview(path), "description": self.find_description(path), - "search_term": self.search_terms_from_path(embedding.filename), + "search_term": self.search_terms_from_path(embedding.filename) + " " + (embedding.hash or ""), "prompt": quote_js(embedding.name), "local_preview": f"{path}.preview.{shared.opts.samples_format}", "sort_keys": {'default': index, **self.get_sort_keys(embedding.filename)}, diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py index cda471e4..b11622a1 100644 --- a/modules/ui_extra_networks_user_metadata.py +++ b/modules/ui_extra_networks_user_metadata.py @@ -93,11 +93,13 @@ class UserMetadataEditor: item = self.page.items.get(name, {}) try: filename = item["filename"] + shorthash = item.get("shorthash", None) stats = os.stat(filename) params = [ ('Filename: ', os.path.basename(filename)), ('File size: ', sysinfo.pretty_bytes(stats.st_size)), + ('Hash: ', shorthash), ('Modified: ', datetime.datetime.fromtimestamp(stats.st_mtime).strftime('%Y-%m-%d %H:%M')), ] @@ -115,7 +117,7 @@ class UserMetadataEditor: errors.display(e, f"reading metadata info for {name}") params = [] - table = '' + "".join(f"" for name, value in params) + '' + table = '' + "".join(f"" for name, value in params if value is not None) + '' return html.escape(name), user_metadata.get('description', ''), table, self.get_card_html(name), user_metadata.get('notes', '') -- cgit v1.2.1 From 822597db49218de17e105e62075096284dfcfd41 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 13 Aug 2023 04:16:48 -0400 Subject: Encode batches separately Significantly reduces VRAM. This makes encoding more inline with how decoding currently functions. 
--- modules/sd_samplers_common.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 09d1e11e..f9d034ca 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -92,7 +92,15 @@ def images_tensor_to_samples(image, approximation=None, model=None): model = shared.sd_model image = image.to(shared.device, dtype=devices.dtype_vae) image = image * 2 - 1 - x_latent = model.get_first_stage_encoding(model.encode_first_stage(image)) + if len(image) > 1: + x_latent = torch.stack([ + model.get_first_stage_encoding( + model.encode_first_stage(torch.unsqueeze(img, 0)) + )[0] + for img in image + ]) + else: + x_latent = model.get_first_stage_encoding(model.encode_first_stage(image)) return x_latent -- cgit v1.2.1 From 69f49c8d394220331eaa6609825b477eed60e0f4 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 13 Aug 2023 04:40:34 -0400 Subject: Clear sampler before decoding images More significant VRAM reduction. --- modules/processing.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/processing.py b/modules/processing.py index 007a4e05..16104f92 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -1192,6 +1192,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) + self.sampler = None + devices.torch_gc() + decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True) self.is_hr_pass = False -- cgit v1.2.1 From 1ae9dacb4b036a6cb4b5fb9b9ff030962f43908e Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 13 Aug 2023 07:57:29 -0400 Subject: Add DPM-Solver++(3M) SDE --- modules/launch_utils.py | 2 +- modules/sd_samplers_kdiffusion.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 65eb684f..e30fbac8 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -319,7 +319,7 @@ def prepare_environment(): stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf") stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "5c10deee76adad0032b412294130090932317a87") - k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "c9fe758757e022f05ca5a53fa8fac28889e4f1cf") + k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "ab527a9a6d347f364e3d185ba6d714e22d80cb3c") codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 1f8e9c4b..a48a563f 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -22,6 +22,9 @@ samplers_k_diffusion = [ ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), + ('DPM++ 3M SDE', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde'], {"brownian_noise": True}), + ('DPM++ 3M SDE Karras', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_ka'], {'scheduler': 'karras', 
"brownian_noise": True}), + ('DPM++ 3M SDE Exponential', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), -- cgit v1.2.1 From 60a74051656e1e430aa7b466cfee8c13c6dc1a12 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 13 Aug 2023 08:06:40 -0400 Subject: Update description of eta setting --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 9ae51f18..96db759b 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -285,7 +285,7 @@ options_templates.update(options_section(('ui', "Live previews"), { options_templates.update(options_section(('sampler-params', "Sampler parameters"), { "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(), "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unperdictable results"), - "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; applies to Euler a and other samplers that have a in them"), + "eta_ancestral": OptionInfo(1.0, "Eta for k-diffusion samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; currently only applies to ancestral samplers (i.e. 
Euler a) and SDE samplers"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}, infotext='Sigma churn').info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}, infotext='Sigma tmin').info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), -- cgit v1.2.1 From d8419762c1454ba51baa710d9ce8e762efc056ef Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 13 Aug 2023 15:07:37 +0300 Subject: Lora: output warnings in UI rather than fail for unfitting loras; switch to logging for error output in console --- extensions-builtin/Lora/extra_networks_lora.py | 12 ++++- extensions-builtin/Lora/networks.py | 61 ++++++++++++++++---------- extensions-builtin/Lora/scripts/lora_script.py | 6 +-- modules/processing.py | 13 +++--- 4 files changed, 58 insertions(+), 34 deletions(-) diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py index ba2945c6..32e32cab 100644 --- a/extensions-builtin/Lora/extra_networks_lora.py +++ b/extensions-builtin/Lora/extra_networks_lora.py @@ -1,4 +1,4 @@ -from modules import extra_networks, shared +from modules import extra_networks, shared, sd_hijack import networks @@ -6,9 +6,14 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork): def __init__(self): super().__init__('lora') + self.errors = {} + """mapping of network names to the number of errors the network had during operation""" + def activate(self, p, params_list): additional = shared.opts.sd_lora + self.errors.clear() + if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional): p.all_prompts = [x + f"" for x in p.all_prompts] params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) @@ -56,4 +61,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork): p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes) def deactivate(self, p): - pass + if self.errors: + p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items())) + + self.errors.clear() diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index ba621139..c252ed9e 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -1,3 +1,4 @@ +import logging import os import re @@ -194,7 +195,7 @@ def load_network(name, network_on_disk): net.modules[key] = net_module if keys_failed_to_match: - print(f"Failed to match keys when loading network {network_on_disk.filename}: {keys_failed_to_match}") + logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}") return net @@ -207,7 +208,6 @@ def purge_networks_from_memory(): devices.torch_gc() - def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None): already_loaded = {} @@ -248,7 +248,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No if net is None: failed_to_load_networks.append(name) - print(f"Couldn't find network with name {name}") + logging.info(f"Couldn't find network with name {name}") continue net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0 @@ -257,7 +257,7 @@ 
def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No loaded_networks.append(net) if failed_to_load_networks: - sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks)) + sd_hijack.model_hijack.comments.append("Networks not found: " + ", ".join(failed_to_load_networks)) purge_networks_from_memory() @@ -314,17 +314,22 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn for net in loaded_networks: module = net.modules.get(network_layer_name, None) if module is not None and hasattr(self, 'weight'): - with torch.no_grad(): - updown, ex_bias = module.calc_updown(self.weight) + try: + with torch.no_grad(): + updown, ex_bias = module.calc_updown(self.weight) - if len(self.weight.shape) == 4 and self.weight.shape[1] == 9: - # inpainting model. zero pad updown to make channel[1] 4 to 9 - updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) + if len(self.weight.shape) == 4 and self.weight.shape[1] == 9: + # inpainting model. zero pad updown to make channel[1] 4 to 9 + updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) - self.weight += updown - if ex_bias is not None and getattr(self, 'bias', None) is not None: - self.bias += ex_bias - continue + self.weight += updown + if ex_bias is not None and getattr(self, 'bias', None) is not None: + self.bias += ex_bias + except RuntimeError as e: + logging.debug(f"Network {net.name} layer {network_layer_name}: {e}") + extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 + + continue module_q = net.modules.get(network_layer_name + "_q_proj", None) module_k = net.modules.get(network_layer_name + "_k_proj", None) @@ -332,21 +337,28 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn module_out = net.modules.get(network_layer_name + "_out_proj", None) if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out: - with torch.no_grad(): - updown_q = module_q.calc_updown(self.in_proj_weight) - updown_k = module_k.calc_updown(self.in_proj_weight) - updown_v = module_v.calc_updown(self.in_proj_weight) - updown_qkv = torch.vstack([updown_q, updown_k, updown_v]) - updown_out = module_out.calc_updown(self.out_proj.weight) - - self.in_proj_weight += updown_qkv - self.out_proj.weight += updown_out - continue + try: + with torch.no_grad(): + updown_q = module_q.calc_updown(self.in_proj_weight) + updown_k = module_k.calc_updown(self.in_proj_weight) + updown_v = module_v.calc_updown(self.in_proj_weight) + updown_qkv = torch.vstack([updown_q, updown_k, updown_v]) + updown_out = module_out.calc_updown(self.out_proj.weight) + + self.in_proj_weight += updown_qkv + self.out_proj.weight += updown_out + + except RuntimeError as e: + logging.debug(f"Network {net.name} layer {network_layer_name}: {e}") + extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 + + continue if module is None: continue - print(f'failed to calculate network weights for layer {network_layer_name}') + logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation") + extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1 self.network_current_names = wanted_names @@ -519,6 +531,7 @@ def infotext_pasted(infotext, params): if added: params["Prompt"] += "\n" + "".join(added) +extra_network_lora = None available_networks = {} available_network_aliases = {} diff --git 
a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index dc307f8c..4c6e774a 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -23,9 +23,9 @@ def unload(): def before_ui(): ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora()) - extra_network = extra_networks_lora.ExtraNetworkLora() - extra_networks.register_extra_network(extra_network) - extra_networks.register_extra_network_alias(extra_network, "lyco") + networks.extra_network_lora = extra_networks_lora.ExtraNetworkLora() + extra_networks.register_extra_network(networks.extra_network_lora) + extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco") if not hasattr(torch.nn, 'Linear_forward_before_network'): diff --git a/modules/processing.py b/modules/processing.py index 007a4e05..10749aa2 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -157,6 +157,7 @@ class StableDiffusionProcessing: cached_uc = [None, None] cached_c = [None, None] + comments: dict = None sampler: sd_samplers_common.Sampler | None = field(default=None, init=False) is_using_inpainting_conditioning: bool = field(default=False, init=False) paste_to: tuple | None = field(default=None, init=False) @@ -196,6 +197,8 @@ class StableDiffusionProcessing: if self.sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) + self.comments = {} + self.sampler_noise_scheduler_override = None self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn @@ -226,6 +229,9 @@ class StableDiffusionProcessing: def sd_model(self, value): pass + def comment(self, text): + self.comments[text] = 1 + def txt2img_image_conditioning(self, x, width=None, height=None): self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'} @@ -429,7 +435,7 @@ class Processed: self.subseed = subseed self.subseed_strength = p.subseed_strength self.info = info - self.comments = comments + self.comments = "".join(f"{comment}\n" for comment in p.comments) self.width = p.width self.height = p.height self.sampler_name = p.sampler_name @@ -720,8 +726,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: modules.sd_hijack.model_hijack.apply_circular(p.tiling) modules.sd_hijack.model_hijack.clear_comments() - comments = {} - p.setup_prompts() if type(seed) == list: @@ -801,7 +805,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.setup_conds() for comment in model_hijack.comments: - comments[comment] = 1 + p.comment(comment) p.extra_generation_params.update(model_hijack.extra_generation_params) @@ -930,7 +934,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: images_list=output_images, seed=p.all_seeds[0], info=infotexts[0], - comments="".join(f"{comment}\n" for comment in comments), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts, -- cgit v1.2.1 From d1a70c3f0534b88665d55bdac1e6b48a63f7f035 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 13 Aug 2023 08:22:24 -0400 Subject: Add s_noise param to more samplers --- modules/sd_samplers_common.py | 8 ++++---- modules/sd_samplers_kdiffusion.py | 6 ++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff 
--git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 09d1e11e..d2fb21f4 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -276,19 +276,19 @@ class Sampler: s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf s_noise = getattr(opts, 's_noise', p.s_noise) - if s_churn != self.s_churn: + if 's_churn' in extra_params_kwargs and s_churn != self.s_churn: extra_params_kwargs['s_churn'] = s_churn p.s_churn = s_churn p.extra_generation_params['Sigma churn'] = s_churn - if s_tmin != self.s_tmin: + if 's_tmin' in extra_params_kwargs and s_tmin != self.s_tmin: extra_params_kwargs['s_tmin'] = s_tmin p.s_tmin = s_tmin p.extra_generation_params['Sigma tmin'] = s_tmin - if s_tmax != self.s_tmax: + if 's_tmax' in extra_params_kwargs and s_tmax != self.s_tmax: extra_params_kwargs['s_tmax'] = s_tmax p.s_tmax = s_tmax p.extra_generation_params['Sigma tmax'] = s_tmax - if s_noise != self.s_noise: + if 's_noise' in extra_params_kwargs and s_noise != self.s_noise: extra_params_kwargs['s_noise'] = s_noise p.s_noise = s_noise p.extra_generation_params['Sigma noise'] = s_noise diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index a48a563f..9f5dfd6d 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -45,6 +45,12 @@ sampler_extra_params = { 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], + 'sample_dpm_fast': ['s_noise'], + 'sample_dpm_2_ancestral': ['s_noise'], + 'sample_dpmpp_2s_ancestral': ['s_noise'], + 'sample_dpmpp_sde': ['s_noise'], + 'sample_dpmpp_2m_sde': ['s_noise'], + 'sample_dpmpp_3m_sde': ['s_noise'], } k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} -- cgit v1.2.1 From f4757032e7a0663abe2695c95048fdfff3fc5e2f Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 13 Aug 2023 08:24:28 -0400 Subject: Fix s_noise description --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 9ae51f18..279e9f54 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -290,7 +290,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}, infotext='Sigma churn').info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}, infotext='Sigma tmin').info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}, infotext='Sigma tmax').info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}, infotext='Sigma noise').info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'), + 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}, infotext='Sigma noise').info('amount of additional noise to counteract loss of detail during sampling'), 'k_sched_type': 
OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}, infotext='Schedule type').info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number, infotext='Schedule max sigma').info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number, infotext='Schedule min sigma').info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), -- cgit v1.2.1 From ac790fc49b314761a7ad711989d7cc88bba64578 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 13 Aug 2023 08:46:07 -0400 Subject: Discard penultimate sigma for DPM-Solver++(3M) SDE --- modules/sd_samplers_kdiffusion.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index a48a563f..c8b81fa1 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -22,9 +22,9 @@ samplers_k_diffusion = [ ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM++ 3M SDE', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde'], {"brownian_noise": True}), - ('DPM++ 3M SDE Karras', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('DPM++ 3M SDE Exponential', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), + ('DPM++ 3M SDE', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde'], {'discard_next_to_last_sigma': True, "brownian_noise": True}), + ('DPM++ 3M SDE Karras', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "brownian_noise": True}), + ('DPM++ 3M SDE Exponential', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_exp'], {'scheduler': 'exponential', 'discard_next_to_last_sigma': True, "brownian_noise": True}), ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), -- cgit v1.2.1 From 525b55b1e9dc589a1133a8031ed3b1646e5cccff Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 13 Aug 2023 09:08:34 -0400 Subject: Restore extra_params that was lost in merge --- modules/sd_samplers_kdiffusion.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 1f8e9c4b..9a89e7fa 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -67,6 +67,8 @@ class KDiffusionSampler(sd_samplers_common.Sampler): def __init__(self, funcname, sd_model, options=None): super().__init__(funcname) + self.extra_params = sampler_extra_params.get(funcname, []) + self.options = options or {} self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname) -- cgit v1.2.1 From db40d26d086b6e980bd28ce7eb3008ec09b1f0e8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 13 Aug 2023 16:38:10 +0300 Subject: linter --- extensions-builtin/Lora/extra_networks_lora.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py index 32e32cab..005ff32c 100644 --- a/extensions-builtin/Lora/extra_networks_lora.py +++ b/extensions-builtin/Lora/extra_networks_lora.py @@ -1,4 +1,4 @@ -from modules import extra_networks, shared, sd_hijack +from modules import extra_networks, shared import networks -- cgit v1.2.1 From 3163d1269af7f9fd95382e58bb1581fd741b5119 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 13 Aug 2023 16:51:21 +0300 Subject: fix for the broken run_git calls --- modules/launch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index e30fbac8..4fc254a2 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -173,9 +173,9 @@ def git_clone(url, dir, name, commithash=None): if current_hash == commithash: return - run_git('fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}", autofix=False) + run_git(dir, name, 'fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}", autofix=False) - run_git('checkout', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) + run_git(dir, name, 'checkout', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) return -- cgit v1.2.1 From abfa4ad8bc995dcaf832c07a7cf75b6e295a8ca9 Mon Sep 17 00:00:00 2001 From: brkirch Date: Mon, 8 May 2023 18:16:01 -0400 Subject: Use fixed size for sub-quadratic chunking on MPS Even if this causes chunks to be much smaller, performance isn't significantly impacted. This will usually reduce memory usage but should also help with poor performance when free memory is low. 
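As a rough, self-contained sketch of the threshold logic this patch introduces — the helper name pick_chunk_threshold_bytes is invented here for illustration, and bytes_per_token plus the available-VRAM figure stand in for values computed inside sub_quad_attention():

import platform

def pick_chunk_threshold_bytes(device_type, bytes_per_token, available_vram):
    if device_type == 'mps':
        # fixed base of 268435456 bytes, doubled on i386, otherwise scaled
        # by the per-token byte size, as in the hunk below; chunks end up
        # smaller, but low-memory behavior improves
        return 268435456 * (2 if platform.processor() == 'i386' else bytes_per_token)
    # other backends: keep scaling with currently available VRAM
    return int(available_vram * 0.7)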
--- modules/sd_hijack_optimizations.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 0e810eec..b3e71270 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -1,6 +1,7 @@ from __future__ import annotations import math import psutil +import platform import torch from torch import einsum @@ -427,7 +428,10 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_ qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens if chunk_threshold is None: - chunk_threshold_bytes = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7) + if q.device.type == 'mps': + chunk_threshold_bytes = 268435456 * (2 if platform.processor() == 'i386' else bytes_per_token) + else: + chunk_threshold_bytes = int(get_available_vram() * 0.7) elif chunk_threshold == 0: chunk_threshold_bytes = None else: -- cgit v1.2.1 From 87dd685224b5f7dbbd832fc73cc08e7e470c9f28 Mon Sep 17 00:00:00 2001 From: brkirch Date: Sun, 21 May 2023 05:00:27 -0400 Subject: Make sub-quadratic the default for MPS --- modules/sd_hijack_optimizations.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index b3e71270..7f9e328d 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -95,7 +95,10 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem): class SdOptimizationSubQuad(SdOptimization): name = "sub-quadratic" cmd_opt = "opt_sub_quad_attention" - priority = 10 + + @property + def priority(self): + return 1000 if shared.device.type == 'mps' else 10 def apply(self): ldm.modules.attention.CrossAttention.forward = sub_quad_attention_forward @@ -121,7 +124,7 @@ class SdOptimizationInvokeAI(SdOptimization): @property def priority(self): - return 1000 if not torch.cuda.is_available() else 10 + return 1000 if shared.device.type != 'mps' and not torch.cuda.is_available() else 10 def apply(self): ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_invokeAI -- cgit v1.2.1 From 2489252099c299bed49a9d4a39a4ead73b6b6f10 Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 25 Jul 2023 03:03:06 -0400 Subject: `torch.empty` can create issues; use `torch.zeros` For MPS, using a tensor created with `torch.empty()` can cause `torch.baddbmm()` to include NaNs in the tensor it returns, even though `beta=0`. However, with a tensor of shape [1,1,1], there should be a negligible performance difference between `torch.empty()` and `torch.zeros()` anyway, so it's better to just use `torch.zeros()` for this and avoid unnecessarily creating issues. 
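The change itself is a one-token swap; a minimal runnable sketch of the resulting pattern, with toy shapes standing in for the chunked attention tensors:

import torch

q = torch.randn(2, 8, 16)   # (batch * heads, q_tokens, channels)
k = torch.randn(2, 8, 16)   # (batch * heads, k_tokens, channels)
scale = q.shape[-1] ** -0.5

# With beta=0 the first tensor only fixes the dtype/device of the result,
# but on MPS an uninitialized torch.empty() here could surface as NaNs; a
# [1, 1, 1] torch.zeros() costs essentially the same and is safe.
attn_weights = torch.baddbmm(
    torch.zeros(1, 1, 1, device=q.device, dtype=q.dtype),
    q,
    k.transpose(1, 2),
    alpha=scale,
    beta=0,
)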
--- modules/sub_quadratic_attention.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py index 497568eb..ae4ee4bb 100644 --- a/modules/sub_quadratic_attention.py +++ b/modules/sub_quadratic_attention.py @@ -58,7 +58,7 @@ def _summarize_chunk( scale: float, ) -> AttnChunk: attn_weights = torch.baddbmm( - torch.empty(1, 1, 1, device=query.device, dtype=query.dtype), + torch.zeros(1, 1, 1, device=query.device, dtype=query.dtype), query, key.transpose(1,2), alpha=scale, @@ -121,7 +121,7 @@ def _get_attention_scores_no_kv_chunking( scale: float, ) -> Tensor: attn_scores = torch.baddbmm( - torch.empty(1, 1, 1, device=query.device, dtype=query.dtype), + torch.zeros(1, 1, 1, device=query.device, dtype=query.dtype), query, key.transpose(1,2), alpha=scale, -- cgit v1.2.1 From 9058620cec2788495d295f4e68ef2932d6d700e6 Mon Sep 17 00:00:00 2001 From: brkirch Date: Sat, 12 Aug 2023 04:44:16 -0400 Subject: `git checkout` with commit hash --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 4fc254a2..e77baa52 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -175,7 +175,7 @@ def git_clone(url, dir, name, commithash=None): run_git(dir, name, 'fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}", autofix=False) - run_git(dir, name, 'checkout', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) + run_git(dir, name, f'checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) return -- cgit v1.2.1 From f4dbb0c820344798e3481d4104618b95594a3d10 Mon Sep 17 00:00:00 2001 From: brkirch Date: Thu, 20 Jul 2023 01:44:45 -0400 Subject: Change the repositories origin URLs when necessary --- modules/launch_utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index e77baa52..9eda7c9d 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -173,6 +173,9 @@ def git_clone(url, dir, name, commithash=None): if current_hash == commithash: return + if run_git(dir, name, 'config --get remote.origin.url', None, f"Couldn't determine {name}'s origin URL", live=False).strip() != url: + run_git(dir, name, f'remote set-url origin "{url}"', None, f"Failed to set {name}'s origin URL", live=False) + run_git(dir, name, 'fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}", autofix=False) run_git(dir, name, f'checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) -- cgit v1.2.1 From 232c931f4082ea73bbaca8f77469cfea9d5db459 Mon Sep 17 00:00:00 2001 From: brkirch Date: Mon, 7 Aug 2023 10:33:43 -0400 Subject: Mac k-diffusion workarounds are no longer needed --- webui-macos-env.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/webui-macos-env.sh b/webui-macos-env.sh index 6354e73b..24bc5c42 100644 --- a/webui-macos-env.sh +++ b/webui-macos-env.sh @@ -12,8 +12,6 @@ fi export install_dir="$HOME" export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" export TORCH_COMMAND="pip install torch==2.0.1 torchvision==0.15.2" -export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git" -export 
K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71" export PYTORCH_ENABLE_MPS_FALLBACK=1 #################################################################### -- cgit v1.2.1 From 5df535b7c2374c3324485faaea62fbdbffc71f71 Mon Sep 17 00:00:00 2001 From: brkirch Date: Mon, 7 Aug 2023 10:20:10 -0400 Subject: Remove duplicate code for torchsde randn --- modules/mac_specific.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/mac_specific.py b/modules/mac_specific.py index bce527cc..89256c5b 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -52,9 +52,6 @@ def cumsum_fix(input, cumsum_func, *args, **kwargs): if has_mps: - # MPS fix for randn in torchsde - CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps') - if platform.mac_ver()[0].startswith("13.2."): # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124) CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760) -- cgit v1.2.1 From 2035cbbd5d6e7678450c701fce1a5de7d8bd7084 Mon Sep 17 00:00:00 2001 From: brkirch Date: Sat, 12 Aug 2023 06:01:36 -0400 Subject: Fix DDIM and PLMS samplers on MPS --- modules/sd_samplers_timesteps_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py index 48d7e649..d32e3521 100644 --- a/modules/sd_samplers_timesteps_impl.py +++ b/modules/sd_samplers_timesteps_impl.py @@ -11,7 +11,7 @@ from modules.models.diffusion.uni_pc import uni_pc def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0): alphas_cumprod = model.inner_model.inner_model.alphas_cumprod alphas = alphas_cumprod[timesteps] - alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64) + alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' else torch.float32) sqrt_one_minus_alphas = torch.sqrt(1 - alphas) sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy())) @@ -42,7 +42,7 @@ def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta= def plms(model, x, timesteps, extra_args=None, callback=None, disable=None): alphas_cumprod = model.inner_model.inner_model.alphas_cumprod alphas = alphas_cumprod[timesteps] - alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64) + alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' else torch.float32) sqrt_one_minus_alphas = torch.sqrt(1 - alphas) extra_args = {} if extra_args is None else extra_args -- cgit v1.2.1 From f093c9d39d0fe9951a8f5c570027cecc68778ef2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 13 Aug 2023 17:31:10 +0300 Subject: fix broken XYZ plot seeds add new callback for scripts to be used before processing --- modules/processing.py | 32 ++++++++++++++++++++++++++++++-- 
modules/processing_scripts/seed.py | 2 +- modules/scripts.py | 17 ++++++++++++++++- 3 files changed, 47 insertions(+), 4 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index fdf49359..74366655 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -152,7 +152,9 @@ class StableDiffusionProcessing: token_merging_ratio_hr = 0 disable_extra_networks: bool = False - script_args: list = None + scripts_value: scripts.ScriptRunner = field(default=None, init=False) + script_args_value: list = field(default=None, init=False) + scripts_setup_complete: bool = field(default=False, init=False) cached_uc = [None, None] cached_c = [None, None] @@ -171,7 +173,6 @@ class StableDiffusionProcessing: step_multiplier: int = field(default=1, init=False) color_corrections: list = field(default=None, init=False) - scripts: list = field(default=None, init=False) all_prompts: list = field(default=None, init=False) all_negative_prompts: list = field(default=None, init=False) all_seeds: list = field(default=None, init=False) @@ -229,6 +230,33 @@ class StableDiffusionProcessing: def sd_model(self, value): pass + @property + def scripts(self): + return self.scripts_value + + @scripts.setter + def scripts(self, value): + self.scripts_value = value + + if self.scripts_value and self.script_args_value and not self.scripts_setup_complete: + self.setup_scripts() + + @property + def script_args(self): + return self.script_args_value + + @script_args.setter + def script_args(self, value): + self.script_args_value = value + + if self.scripts_value and self.script_args_value and not self.scripts_setup_complete: + self.setup_scripts() + + def setup_scripts(self): + self.scripts_setup_complete = True + + self.scripts.setup_scrips(self) + def comment(self, text): self.comments[text] = 1 diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py index cc90775a..96b44dfb 100644 --- a/modules/processing_scripts/seed.py +++ b/modules/processing_scripts/seed.py @@ -58,7 +58,7 @@ class ScriptSeed(scripts.ScriptBuiltin): return self.seed, subseed, subseed_strength - def before_process(self, p, seed, subseed, subseed_strength): + def setup(self, p, seed, subseed, subseed_strength): p.seed = seed if subseed_strength > 0: diff --git a/modules/scripts.py b/modules/scripts.py index c6459b45..d4a9da94 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -106,9 +106,16 @@ class Script: pass + def setup(self, p, *args): + """For AlwaysVisible scripts, this function is called when the processing object is set up, before any processing starts. + args contains all values returned by components from ui(). + """ + pass + + def before_process(self, p, *args): """ - This function is called very early before processing begins for AlwaysVisible scripts. + This function is called very early during processing begins for AlwaysVisible scripts. You can modify the processing object (p) here, inject hooks, etc. 
args contains all values returned by components from ui() """ @@ -706,6 +713,14 @@ class ScriptRunner: except Exception: errors.report(f"Error running before_hr: {script.filename}", exc_info=True) + def setup_scrips(self, p): + for script in self.alwayson_scripts: + try: + script_args = p.script_args[script.args_from:script.args_to] + script.setup(p, *script_args) + except Exception: + errors.report(f"Error running setup: {script.filename}", exc_info=True) + scripts_txt2img: ScriptRunner = None scripts_img2img: ScriptRunner = None -- cgit v1.2.1 From 09ff5b5416e9e989cf2ddb2bab9129e27ed23f14 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Mon, 14 Aug 2023 01:03:49 +0900 Subject: Fix typo in launch_utils.py existance -> existence --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index e1c9cfbe..8d2256ee 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -291,7 +291,7 @@ def prepare_environment(): blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") try: - # the existance of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution + # the existence of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution os.remove(os.path.join(script_path, "tmp", "restart")) os.environ.setdefault('SD_WEBUI_RESTARTING', '1') except OSError: -- cgit v1.2.1 From 16781ba09abe1494993f819b91ea0b88c48903b7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 13 Aug 2023 20:15:20 +0300 Subject: fix 2 for git code botched by previous PRs --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 4fc254a2..e77baa52 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -175,7 +175,7 @@ def git_clone(url, dir, name, commithash=None): run_git(dir, name, 'fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}", autofix=False) - run_git(dir, name, 'checkout', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) + run_git(dir, name, f'checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True) return -- cgit v1.2.1 From 007ecfbb29771aa7cdcf0263ab1811bc75fa5446 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 13 Aug 2023 21:01:13 +0300 Subject: also use setup callback for the refiner instead of before_process --- modules/processing_scripts/refiner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing_scripts/refiner.py b/modules/processing_scripts/refiner.py index 7b946d05..3c5b37d2 100644 --- a/modules/processing_scripts/refiner.py +++ b/modules/processing_scripts/refiner.py @@ -38,7 +38,7 @@ class ScriptRefiner(scripts.Script): return enable_refiner, refiner_checkpoint, refiner_switch_at - def before_process(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at): + def setup(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at): # the actual implementation is in sd_samplers_common.py, apply_refiner if not enable_refiner or refiner_checkpoint in (None, "", "None"): -- cgit v1.2.1 From 0ea61a74be5e9666a16de8b92b1d55ed0b678a16 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf 
<59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:46:36 +0800 Subject: add res(dpmdd 2m sde heun) and reorder the sampler list --- modules/sd_samplers_kdiffusion.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 0bacfe8d..c140d2d0 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -8,10 +8,6 @@ from modules.shared import opts import modules.shared as shared samplers_k_diffusion = [ - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), ('Euler', 'sample_euler', ['k_euler'], {}), ('LMS', 'sample_lms', ['k_lms'], {}), @@ -20,8 +16,15 @@ samplers_k_diffusion = [ ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), + ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), + ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), + ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), + ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), + ('DPM++ 2M SDE Heun', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun'], {"brownian_noise": True, "solver_type": "heun"}), + ('DPM++ 2M SDE Heun Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun_ka'], {'scheduler': 'karras', "brownian_noise": True, "solver_type": "heun"}), + ('DPM++ 2M SDE Heun Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun_exp'], {'scheduler': 'exponential', "brownian_noise": True, "solver_type": "heun"}), ('DPM++ 3M SDE', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde'], {'discard_next_to_last_sigma': True, "brownian_noise": True}), ('DPM++ 3M SDE Karras', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "brownian_noise": True}), ('DPM++ 3M SDE Exponential', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_exp'], {'scheduler': 'exponential', 'discard_next_to_last_sigma': True, "brownian_noise": True}), @@ -161,6 +164,9 @@ class KDiffusionSampler(sd_samplers_common.Sampler): noise_sampler = self.create_noise_sampler(x, sigmas, p) extra_params_kwargs['noise_sampler'] = noise_sampler + if self.config.options.get('solver_type', None) == 'heun': + extra_params_kwargs['solver_type'] = 'heun' + self.model_wrap_cfg.init_latent = x self.last_latent = x self.sampler_extra_args = { @@ -202,6 +208,9 @@ class KDiffusionSampler(sd_samplers_common.Sampler): noise_sampler = 
self.create_noise_sampler(x, sigmas, p) extra_params_kwargs['noise_sampler'] = noise_sampler + if self.config.options.get('solver_type', None) == 'heun': + extra_params_kwargs['solver_type'] = 'heun' + self.last_latent = x self.sampler_extra_args = { 'cond': conditioning, @@ -210,6 +219,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler): 'cond_scale': p.cfg_scale, 's_min_uncond': self.s_min_uncond } + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) if self.model_wrap_cfg.padded_cond_uncond: -- cgit v1.2.1 From d9cc27cb29926c9cc5dce331da8fbaf996cf4973 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 14 Aug 2023 13:32:51 +0800 Subject: Fix MHA updown err and support ex-bias for no-bias layer --- extensions-builtin/Lora/networks.py | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index ba621139..1645b822 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -277,7 +277,15 @@ def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Li self.weight.copy_(weights_backup) if bias_backup is not None: - self.bias.copy_(bias_backup) + if isinstance(self, torch.nn.MultiheadAttention): + self.out_proj.bias.copy_(bias_backup) + else: + self.bias.copy_(bias_backup) + else: + if isinstance(self, torch.nn.MultiheadAttention): + self.out_proj.bias = None + else: + self.bias = None def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]): @@ -305,7 +313,12 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn bias_backup = getattr(self, "network_bias_backup", None) if bias_backup is None and getattr(self, 'bias', None) is not None: - bias_backup = self.bias.to(devices.cpu, copy=True) + if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None: + bias_backup = self.out_proj.bias.to(devices.cpu, copy=True) + elif getattr(self, 'bias', None) is not None: + bias_backup = self.bias.to(devices.cpu, copy=True) + else: + bias_backup = None self.network_bias_backup = bias_backup if current_names != wanted_names: @@ -322,8 +335,11 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) self.weight += updown - if ex_bias is not None and getattr(self, 'bias', None) is not None: - self.bias += ex_bias + if ex_bias is not None and hasattr(self, 'bias'): + if self.bias is None: + self.bias = torch.nn.Parameter(ex_bias) + else: + self.bias += ex_bias continue module_q = net.modules.get(network_layer_name + "_q_proj", None) @@ -333,14 +349,19 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out: with torch.no_grad(): - updown_q = module_q.calc_updown(self.in_proj_weight) - updown_k = module_k.calc_updown(self.in_proj_weight) - updown_v = module_v.calc_updown(self.in_proj_weight) + updown_q, _ = module_q.calc_updown(self.in_proj_weight) + updown_k, _ = module_k.calc_updown(self.in_proj_weight) + updown_v, _ = module_v.calc_updown(self.in_proj_weight) updown_qkv = 
torch.vstack([updown_q, updown_k, updown_v]) - updown_out = module_out.calc_updown(self.out_proj.weight) + updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight) self.in_proj_weight += updown_qkv self.out_proj.weight += updown_out + if ex_bias is not None: + if self.out_proj.bias is None: + self.out_proj.bias = torch.nn.Parameter(ex_bias) + else: + self.out_proj.bias += ex_bias continue if module is None: -- cgit v1.2.1 From aeb76ef174bc8a1904b25ca0b0b5009395f07d96 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 14 Aug 2023 08:49:02 +0300 Subject: repair DDIM/PLMS/UniPC batches --- modules/sd_samplers_timesteps.py | 5 ++--- modules/sd_samplers_timesteps_impl.py | 18 ++++++++++-------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py index 16572c7e..6aed2974 100644 --- a/modules/sd_samplers_timesteps.py +++ b/modules/sd_samplers_timesteps.py @@ -51,10 +51,9 @@ class CFGDenoiserTimesteps(CFGDenoiser): self.alphas = shared.sd_model.alphas_cumprod def get_pred_x0(self, x_in, x_out, sigma): - ts = int(sigma.item()) + ts = sigma.to(dtype=int) - s_in = x_in.new_ones([x_in.shape[0]]) - a_t = self.alphas[ts].item() * s_in + a_t = self.alphas[ts][:, None, None, None] sqrt_one_minus_at = (1 - a_t).sqrt() pred_x0 = (x_in - sqrt_one_minus_at * x_out) / a_t.sqrt() diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py index d32e3521..a72daafd 100644 --- a/modules/sd_samplers_timesteps_impl.py +++ b/modules/sd_samplers_timesteps_impl.py @@ -16,16 +16,17 @@ def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta= sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy())) extra_args = {} if extra_args is None else extra_args - s_in = x.new_ones([x.shape[0]]) + s_in = x.new_ones((x.shape[0])) + s_x = x.new_ones((x.shape[0], 1, 1, 1)) for i in tqdm.trange(len(timesteps) - 1, disable=disable): index = len(timesteps) - 1 - i e_t = model(x, timesteps[index].item() * s_in, **extra_args) - a_t = alphas[index].item() * s_in - a_prev = alphas_prev[index].item() * s_in - sigma_t = sigmas[index].item() * s_in - sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_in + a_t = alphas[index].item() * s_x + a_prev = alphas_prev[index].item() * s_x + sigma_t = sigmas[index].item() * s_x + sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_x pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() dir_xt = (1. 
- a_prev - sigma_t ** 2).sqrt() * e_t @@ -47,13 +48,14 @@ def plms(model, x, timesteps, extra_args=None, callback=None, disable=None): extra_args = {} if extra_args is None else extra_args s_in = x.new_ones([x.shape[0]]) + s_x = x.new_ones((x.shape[0], 1, 1, 1)) old_eps = [] def get_x_prev_and_pred_x0(e_t, index): # select parameters corresponding to the currently considered timestep - a_t = alphas[index].item() * s_in - a_prev = alphas_prev[index].item() * s_in - sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_in + a_t = alphas[index].item() * s_x + a_prev = alphas_prev[index].item() * s_x + sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_x # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() -- cgit v1.2.1 From cda2f0a1620c3b49bb3408c30796160ed29bc87d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 14 Aug 2023 08:49:39 +0300 Subject: make on_before_component/on_after_component possible earlier --- modules/scripts.py | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index d4a9da94..cbdac2b5 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -239,6 +239,8 @@ class Script: """ Calls callback before a component is created. The callback function is called with a single argument of type OnComponent. + May be called in show() or ui() - but it may be too late in latter as some components may already be created. + This function is an alternative to before_component in that it also cllows to run before a component is created, but it doesn't require to be called for every created component - just for the one you need. """ @@ -445,6 +447,28 @@ class ScriptRunner: self.scripts.append(script) self.selectable_scripts.append(script) + self.apply_on_before_component_callbacks() + + def apply_on_before_component_callbacks(self): + for script in self.scripts: + on_before = script.on_before_component_elem_id or [] + on_after = script.on_after_component_elem_id or [] + + for elem_id, callback in on_before: + if elem_id not in self.on_before_component_elem_id: + self.on_before_component_elem_id[elem_id] = [] + + self.on_before_component_elem_id[elem_id].append((callback, script)) + + for elem_id, callback in on_after: + if elem_id not in self.on_after_component_elem_id: + self.on_after_component_elem_id[elem_id] = [] + + self.on_after_component_elem_id[elem_id].append((callback, script)) + + on_before.clear() + on_after.clear() + def create_script_ui(self, script): import modules.api.models as api_models @@ -555,16 +579,7 @@ class ScriptRunner: self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None')))) self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts]) - for script in self.scripts: - for elem_id, callback in script.on_before_component_elem_id or []: - items = self.on_before_component_elem_id.get(elem_id, []) - items.append((callback, script)) - self.on_before_component_elem_id[elem_id] = items - - for elem_id, callback in script.on_after_component_elem_id or []: - items = self.on_after_component_elem_id.get(elem_id, []) - items.append((callback, script)) - self.on_after_component_elem_id[elem_id] = items + self.apply_on_before_component_callbacks() return self.inputs -- cgit v1.2.1 From aa26f8eb40c2f99adb6752668b1fad1b5ae0158f Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 14 
Aug 2023 13:50:53 +0800 Subject: Put frequently used sampler back --- modules/sd_samplers_kdiffusion.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index c140d2d0..67853ff1 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -8,6 +8,10 @@ from modules.shared import opts import modules.shared as shared samplers_k_diffusion = [ + ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), + ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), + ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), ('Euler', 'sample_euler', ['k_euler'], {}), ('LMS', 'sample_lms', ['k_lms'], {}), @@ -16,12 +20,8 @@ samplers_k_diffusion = [ ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), - ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), ('DPM++ 2M SDE Heun', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun'], {"brownian_noise": True, "solver_type": "heun"}), ('DPM++ 2M SDE Heun Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun_ka'], {'scheduler': 'karras', "brownian_noise": True, "solver_type": "heun"}), ('DPM++ 2M SDE Heun Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun_exp'], {'scheduler': 'exponential', "brownian_noise": True, "solver_type": "heun"}), -- cgit v1.2.1 From f70ded89365f71d42b6a60a561e8fccfdd25c159 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 14 Aug 2023 13:53:40 +0800 Subject: remove "if bias exist" check --- extensions-builtin/Lora/networks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 96d14344..22fdff4a 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -312,7 +312,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn self.network_weights_backup = weights_backup bias_backup = getattr(self, "network_bias_backup", None) - if bias_backup is None and getattr(self, 'bias', None) is not None: + if bias_backup is None: if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None: bias_backup = self.out_proj.bias.to(devices.cpu, copy=True) elif getattr(self, 'bias', None) is not 
None: -- cgit v1.2.1 From c1a31ec9f75c8dfe4ddcb0061f06e2704db98359 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 14 Aug 2023 08:59:15 +0300 Subject: revert to applying mask before denoising for k-diffusion, like it was before --- modules/sd_samplers_cfg_denoiser.py | 6 +++++- modules/sd_samplers_timesteps.py | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 113425b2..bc9b97e4 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -56,6 +56,7 @@ class CFGDenoiser(torch.nn.Module): self.sampler = sampler self.model_wrap = None self.p = None + self.mask_before_denoising = False @property def inner_model(self): @@ -104,7 +105,7 @@ class CFGDenoiser(torch.nn.Module): assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - if self.mask is not None: + if self.mask_before_denoising and self.mask is not None: x = self.init_latent * self.mask + self.nmask * x batch_size = len(conds_list) @@ -206,6 +207,9 @@ class CFGDenoiser(torch.nn.Module): else: denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + if not self.mask_before_denoising and self.mask is not None: + denoised = self.init_latent * self.mask + self.nmask * denoised + self.sampler.last_latent = self.get_pred_x0(torch.cat([x_in[i:i + 1] for i in denoised_image_indexes]), torch.cat([x_out[i:i + 1] for i in denoised_image_indexes]), sigma) if opts.live_preview_content == "Prompt": diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py index 6aed2974..c1f534ed 100644 --- a/modules/sd_samplers_timesteps.py +++ b/modules/sd_samplers_timesteps.py @@ -49,6 +49,7 @@ class CFGDenoiserTimesteps(CFGDenoiser): super().__init__(sampler) self.alphas = shared.sd_model.alphas_cumprod + self.mask_before_denoising = True def get_pred_x0(self, x_in, x_out, sigma): ts = sigma.to(dtype=int) -- cgit v1.2.1 From c7c16f805c9ea0da42d1d993f2ea7bda48beba76 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 14 Aug 2023 09:48:40 +0300 Subject: repair /docs page --- modules/api/models.py | 9 +++++---- modules/processing.py | 3 ++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/modules/api/models.py b/modules/api/models.py index 800c9b93..6a574771 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -50,10 +50,12 @@ class PydanticModelGenerator: additional_fields = None, ): def field_type_generator(k, v): - # field_type = str if not overrides.get(k) else overrides[k]["type"] - # print(k, v.annotation, v.default) field_type = v.annotation + if field_type == 'Image': + # images are sent as base64 strings via API + field_type = 'str' + return Optional[field_type] def merge_class_params(class_): @@ -63,7 +65,6 @@ class PydanticModelGenerator: parameters = {**parameters, **inspect.signature(classes.__init__).parameters} return parameters - self._model_name = model_name self._class_data = merge_class_params(class_instance) @@ -72,7 +73,7 @@ class PydanticModelGenerator: field=underscore(k), field_alias=k, field_type=field_type_generator(k, v), - field_value=v.default + field_value=None if isinstance(v.default, property) else v.default ) for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED ] diff --git a/modules/processing.py b/modules/processing.py index 74366655..69d365b8 100755 --- 
a/modules/processing.py +++ b/modules/processing.py @@ -114,7 +114,7 @@ class StableDiffusionProcessing: prompt: str = "" prompt_for_display: str = None negative_prompt: str = "" - styles: list[str] = field(default_factory=list) + styles: list[str] = None seed: int = -1 subseed: int = -1 subseed_strength: float = 0 @@ -199,6 +199,7 @@ class StableDiffusionProcessing: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) self.comments = {} + self.styles = [] self.sampler_noise_scheduler_override = None self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond -- cgit v1.2.1 From b39d9364d8a3be1ec8a4bae34fc8ae1840609101 Mon Sep 17 00:00:00 2001 From: whitebell Date: Mon, 14 Aug 2023 15:58:38 +0900 Subject: Fix typo in shared_options.py unperdictable -> unpredictable --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 7f6c3658..fc0de61f 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -284,7 +284,7 @@ options_templates.update(options_section(('ui', "Live previews"), { options_templates.update(options_section(('sampler-params', "Sampler parameters"), { "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(), - "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unperdictable results"), + "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unpredictable results"), "eta_ancestral": OptionInfo(1.0, "Eta for k-diffusion samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; currently only applies to ancestral samplers (i.e. 
Euler a) and SDE samplers"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}, infotext='Sigma churn').info('amount of stochasticity; only applies to Euler, Heun, and DPM2'), -- cgit v1.2.1 From abbecb3e7363e422d6840fbb5746c74fd453ead5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 14 Aug 2023 10:15:10 +0300 Subject: further repair the /docs page to not break styles with the attempted fix --- modules/processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 69d365b8..f34ba48a 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -199,7 +199,9 @@ class StableDiffusionProcessing: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) self.comments = {} - self.styles = [] + + if self.styles is None: + self.styles = [] self.sampler_noise_scheduler_override = None self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond -- cgit v1.2.1 From f3b96d4998d8ca376d33efa7a4454e8c28e24255 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 14 Aug 2023 10:22:52 +0300 Subject: return seed controls UI to how it was before --- modules/processing_scripts/seed.py | 47 +++++++++++++++++++++++--------------- style.css | 15 ++++++++++++ 2 files changed, 44 insertions(+), 18 deletions(-) diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py index 96b44dfb..6ce3b2fc 100644 --- a/modules/processing_scripts/seed.py +++ b/modules/processing_scripts/seed.py @@ -24,47 +24,58 @@ class ScriptSeed(scripts.ScriptBuiltin): def ui(self, is_img2img): with gr.Row(elem_id=self.elem_id("seed_row")): - with gr.Column(scale=1, min_width=205): - with gr.Row(): - if cmd_opts.use_textbox_seed: - self.seed = gr.Textbox(label='Seed', value="", elem_id=self.elem_id("seed"), min_width=100) - else: - self.seed = gr.Number(label='Seed', value=-1, elem_id=self.elem_id("seed"), min_width=100, precision=0) - - random_seed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), label='Random seed') - reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), label='Reuse seed') + if cmd_opts.use_textbox_seed: + self.seed = gr.Textbox(label='Seed', value="", elem_id=self.elem_id("seed"), min_width=100) + else: + self.seed = gr.Number(label='Seed', value=-1, elem_id=self.elem_id("seed"), min_width=100, precision=0) - with gr.Column(scale=1, min_width=205): - with gr.Row(): - subseed = gr.Number(label='Variation seed', value=-1, elem_id=self.elem_id("subseed"), min_width=100, precision=0) + random_seed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), label='Random seed') + reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), label='Reuse seed') - random_subseed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_subseed")) - reuse_subseed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_subseed")) + seed_checkbox = gr.Checkbox(label='Extra', elem_id=self.elem_id("subseed_show"), value=False) - with gr.Column(scale=2, min_width=100): + with gr.Group(visible=False, elem_id=self.elem_id("seed_extras")) as seed_extras: + with gr.Row(elem_id=self.elem_id("subseed_row")): + subseed = gr.Number(label='Variation seed', value=-1, 
elem_id=self.elem_id("subseed"), precision=0) + random_subseed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_subseed")) + reuse_subseed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_subseed")) subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=self.elem_id("subseed_strength")) + with gr.Row(elem_id=self.elem_id("seed_resize_from_row")): + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=self.elem_id("seed_resize_from_w")) + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=self.elem_id("seed_resize_from_h")) + random_seed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("seed") + "')}", show_progress=False, inputs=[], outputs=[]) random_subseed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("subseed") + "')}", show_progress=False, inputs=[], outputs=[]) + seed_checkbox.change(lambda x: gr.update(visible=x), show_progress=False, inputs=[seed_checkbox], outputs=[seed_extras]) + self.infotext_fields = [ (self.seed, "Seed"), + (seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d), (subseed, "Variation seed"), (subseed_strength, "Variation seed strength"), + (seed_resize_from_w, "Seed resize from-1"), + (seed_resize_from_h, "Seed resize from-2"), ] self.on_after_component(lambda x: connect_reuse_seed(self.seed, reuse_seed, x.component, False), elem_id=f'generation_info_{self.tabname}') self.on_after_component(lambda x: connect_reuse_seed(subseed, reuse_subseed, x.component, True), elem_id=f'generation_info_{self.tabname}') - return self.seed, subseed, subseed_strength + return self.seed, seed_checkbox, subseed, subseed_strength, seed_resize_from_w, seed_resize_from_h - def setup(self, p, seed, subseed, subseed_strength): + def setup(self, p, seed, seed_checkbox, subseed, subseed_strength, seed_resize_from_w, seed_resize_from_h): p.seed = seed - if subseed_strength > 0: + if seed_checkbox and subseed_strength > 0: p.subseed = subseed p.subseed_strength = subseed_strength + if seed_checkbox and seed_resize_from_w > 0 and seed_resize_from_h > 0: + p.seed_resize_from_w = seed_resize_from_w + p.seed_resize_from_h = seed_resize_from_h + + def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, is_subseed): """ Connects a 'reuse (sub)seed' button's click event so that it copies last used diff --git a/style.css b/style.css index dc528422..bdf0635a 100644 --- a/style.css +++ b/style.css @@ -222,6 +222,21 @@ div.block.gradio-accordion { padding: 0.1em 0.75em; } +[id$=_subseed_show]{ + min-width: auto !important; + flex-grow: 0 !important; + display: flex; +} + +[id$=_subseed_show] label{ + margin-bottom: 0.65em; + align-self: end; +} + +[id$=_seed_extras] > div{ + gap: 0.5em; +} + .html-log .comments{ padding-top: 0.5em; } -- cgit v1.2.1 From 353c876172a48c5044130249370c9303e611dd8b Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 14 Aug 2023 10:43:18 +0300 Subject: fix API always using -1 as seed --- modules/api/api.py | 2 ++ modules/processing.py | 4 +++- modules/processing_scripts/refiner.py | 2 +- modules/processing_scripts/seed.py | 2 +- modules/scripts.py | 12 +++++++++--- 5 files changed, 16 insertions(+), 6 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 908c4514..fb2c2ce9 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -330,6 +330,7 @@ 
From 353c876172a48c5044130249370c9303e611dd8b Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 14 Aug 2023 10:43:18 +0300
Subject: fix API always using -1 as seed

---
 modules/api/api.py                    |  2 ++
 modules/processing.py                 |  4 +++-
 modules/processing_scripts/refiner.py |  2 +-
 modules/processing_scripts/seed.py    |  2 +-
 modules/scripts.py                    | 12 +++++++++---
 5 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/modules/api/api.py b/modules/api/api.py
index 908c4514..fb2c2ce9 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -330,6 +330,7 @@ class Api:
         with self.queue_lock:
             with closing(StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)) as p:
+                p.is_api = True
                 p.scripts = script_runner
                 p.outpath_grids = opts.outdir_txt2img_grids
                 p.outpath_samples = opts.outdir_txt2img_samples
@@ -390,6 +391,7 @@ class Api:
         with self.queue_lock:
             with closing(StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)) as p:
                 p.init_images = [decode_base64_to_image(x) for x in init_images]
+                p.is_api = True
                 p.scripts = script_runner
                 p.outpath_grids = opts.outdir_img2img_grids
                 p.outpath_samples = opts.outdir_img2img_samples
diff --git a/modules/processing.py b/modules/processing.py
index f34ba48a..b1eac2ab 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -194,6 +194,8 @@ class StableDiffusionProcessing:
     sd_vae_name: str = field(default=None, init=False)
     sd_vae_hash: str = field(default=None, init=False)
 
+    is_api: bool = field(default=False, init=False)
+
     def __post_init__(self):
         if self.sampler_index is not None:
             print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
@@ -258,7 +260,7 @@ class StableDiffusionProcessing:
     def setup_scripts(self):
         self.scripts_setup_complete = True
 
-        self.scripts.setup_scrips(self)
+        self.scripts.setup_scrips(self, is_ui=not self.is_api)
 
     def comment(self, text):
         self.comments[text] = 1
diff --git a/modules/processing_scripts/refiner.py b/modules/processing_scripts/refiner.py
index 3c5b37d2..b389c4ef 100644
--- a/modules/processing_scripts/refiner.py
+++ b/modules/processing_scripts/refiner.py
@@ -5,7 +5,7 @@ from modules.ui_common import create_refresh_button
 from modules.ui_components import InputAccordion
 
 
-class ScriptRefiner(scripts.Script):
+class ScriptRefiner(scripts.ScriptBuiltinUI):
     section = "accordions"
     create_group = False
 
diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py
index 6ce3b2fc..6b6ff987 100644
--- a/modules/processing_scripts/seed.py
+++ b/modules/processing_scripts/seed.py
@@ -7,7 +7,7 @@ from modules.shared import cmd_opts
 from modules.ui_components import ToolButton
 
 
-class ScriptSeed(scripts.ScriptBuiltin):
+class ScriptSeed(scripts.ScriptBuiltinUI):
     section = "seed"
     create_group = False
 
diff --git a/modules/scripts.py b/modules/scripts.py
index cbdac2b5..fcab5d3a 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -68,6 +68,9 @@ class Script:
     on_after_component_elem_id = None
     """list of callbacks to be called after a component with an elem_id is created"""
 
+    setup_for_ui_only = False
+    """If true, the script setup will only be run in Gradio UI, not in API"""
+
     def title(self):
         """this function should return the title of the script. This is what will be displayed in the dropdown menu."""
@@ -258,7 +261,6 @@ class Script:
 
         self.on_after_component_elem_id.append((elem_id, callback))
 
-
     def describe(self):
         """unused"""
         return ""
@@ -280,7 +282,8 @@ class Script:
         pass
 
 
-class ScriptBuiltin(Script):
+class ScriptBuiltinUI(Script):
+    setup_for_ui_only = True
 
     def elem_id(self, item_id):
         """helper function to generate id for a HTML element, constructs final id out of tab and user-supplied item_id"""
@@ -728,8 +731,11 @@ class ScriptRunner:
             except Exception:
                 errors.report(f"Error running before_hr: {script.filename}", exc_info=True)
 
-    def setup_scrips(self, p):
+    def setup_scrips(self, p, *, is_ui=True):
         for script in self.alwayson_scripts:
+            if not is_ui and script.setup_for_ui_only:
+                continue
+
             try:
                 script_args = p.script_args[script.args_from:script.args_to]
                 script.setup(p, *script_args)
--
cgit v1.2.1

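The gating above is small but easy to miss: a script can declare `setup_for_ui_only = True`, and the runner then skips its `setup()` for API requests. A self-contained sketch of the same pattern (the class names here mirror the patch; the rest is illustrative):

```python
# Minimal sketch of the UI-only setup gating: API-created processing objects
# skip setup() for scripts that only make sense with Gradio defaults, so the
# seed script cannot overwrite an API-supplied seed with -1.
class Script:
    setup_for_ui_only = False

    def setup(self, p, *args):
        pass

class ScriptRunner:
    def __init__(self, scripts):
        self.alwayson_scripts = scripts

    def setup_scrips(self, p, *, is_ui=True):  # spelling kept as in the patch
        for script in self.alwayson_scripts:
            if not is_ui and script.setup_for_ui_only:
                continue  # API request: leave p's fields untouched
            script.setup(p)
```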
From 99ab3d43a71e3f66e57d3cd2013b97c97e7ab69b Mon Sep 17 00:00:00 2001
From: Robert Barron
Date: Mon, 14 Aug 2023 00:41:36 -0700
Subject: hires prompt timeline: merge to latest, slightly simplify

---
 modules/processing.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 6d3c9365..75f1d66f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -394,7 +394,7 @@ class StableDiffusionProcessing:
         self.main_prompt = self.all_prompts[0]
         self.main_negative_prompt = self.all_negative_prompts[0]
 
-    def cached_params(self, required_prompts, steps, hires_steps, extra_network_data, use_old_scheduling):
+    def cached_params(self, required_prompts, steps, extra_network_data, hires_steps=None, use_old_scheduling=False):
         """Returns parameters that invalidate the cond cache if changed"""
 
         return (
@@ -424,7 +424,7 @@ class StableDiffusionProcessing:
         caches is a list with items described above.
         """
 
-        cached_params = self.cached_params(required_prompts, steps, hires_steps, extra_network_data, shared.opts.use_old_scheduling)
+        cached_params = self.cached_params(required_prompts, steps, extra_network_data, hires_steps, shared.opts.use_old_scheduling)
 
         for cache in caches:
             if cache[0] is not None and cached_params == cache[0]:
@@ -447,8 +447,8 @@ class StableDiffusionProcessing:
         self.step_multiplier = total_steps // self.steps
         self.firstpass_steps = total_steps
 
-        self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data, None)
-        self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data, None)
+        self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data)
+        self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data)
 
     def get_conds(self):
         return self.c, self.uc
--
cgit v1.2.1

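The commit above moves `hires_steps` and `use_old_scheduling` into keyword arguments with defaults, which only works because the cond cache is keyed on a tuple of those values. A rough sketch of the caching pattern, with assumed names and a much-simplified key:

```python
# Hedged sketch of the cond-cache idea: the cache key is a tuple of everything
# that should invalidate cached conditioning, so optional parameters need
# stable defaults to keep keys comparable between the first and second pass.
def get_conds_with_caching(compute, prompts, steps, cache, extra_network_data, hires_steps=None):
    key = (tuple(prompts), steps, hires_steps, repr(extra_network_data))
    if cache[0] == key:
        return cache[1]              # reuse previously computed conditioning
    value = compute(prompts, steps)  # expensive model call in the real code
    cache[0], cache[1] = key, value
    return value

cache = [None, None]
result = get_conds_with_caching(lambda p, s: f"conds({p},{s})", ["a cat"], 20, cache, {})
```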
""" - cached_params = self.cached_params(required_prompts, steps, hires_steps, extra_network_data, shared.opts.use_old_scheduling) + cached_params = self.cached_params(required_prompts, steps, extra_network_data, hires_steps, shared.opts.use_old_scheduling) for cache in caches: if cache[0] is not None and cached_params == cache[0]: @@ -447,8 +447,8 @@ class StableDiffusionProcessing: self.step_multiplier = total_steps // self.steps self.firstpass_steps = total_steps - self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data, None) - self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data, None ) + self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data) + self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data) def get_conds(self): return self.c, self.uc -- cgit v1.2.1 From 6bfd4dfecfd03bb74e157d2b81484276875cc0ad Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 14 Aug 2023 12:07:38 +0300 Subject: add second_order to samplers that mistakenly didn't have it --- modules/sd_samplers_kdiffusion.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 67853ff1..08b9e740 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -16,8 +16,8 @@ samplers_k_diffusion = [ ('Euler', 'sample_euler', ['k_euler'], {}), ('LMS', 'sample_lms', ['k_lms'], {}), ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True, "second_order": True}), + ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), @@ -34,7 +34,7 @@ samplers_k_diffusion = [ ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), - ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}), + ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras', "second_order": True}), ] -- cgit v1.2.1 From a2e213bc7ba29495559c9e53b734047ea879b343 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 14 Aug 2023 18:50:22 +0900 Subject: separate Extra options --- .../extra-options-section/scripts/extra_options_section.py | 12 +++++++----- 1 file changed, 7 
From a2e213bc7ba29495559c9e53b734047ea879b343 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Mon, 14 Aug 2023 18:50:22 +0900
Subject: separate Extra options

---
 .../extra-options-section/scripts/extra_options_section.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index 588b64d2..983f87ff 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -22,22 +22,23 @@ class ExtraOptionsSection(scripts.Script):
         self.comps = []
         self.setting_names = []
         self.infotext_fields = []
+        extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img
 
         mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping}
 
         with gr.Blocks() as interface:
-            with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group():
+            with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and extra_options else gr.Group():
 
-                row_count = math.ceil(len(shared.opts.extra_options) / shared.opts.extra_options_cols)
+                row_count = math.ceil(len(extra_options) / shared.opts.extra_options_cols)
 
                 for row in range(row_count):
                     with gr.Row():
                         for col in range(shared.opts.extra_options_cols):
                             index = row * shared.opts.extra_options_cols + col
-                            if index >= len(shared.opts.extra_options):
+                            if index >= len(extra_options):
                                 break
 
-                            setting_name = shared.opts.extra_options[index]
+                            setting_name = extra_options[index]
 
                             with FormColumn():
                                 comp = ui_settings.create_setting_component(setting_name)
@@ -64,7 +65,8 @@ class ExtraOptionsSection(scripts.Script):
 
 
 shared.options_templates.update(shared.options_section(('ui', "User interface"), {
-    "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_reload_ui(),
+    "extra_options_txt2img": shared.OptionInfo([], "Options in main UI - txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(),
+    "extra_options_img2img": shared.OptionInfo([], "Options in main UI - img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(),
     "extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(),
     "extra_options_accordion": shared.OptionInfo(False, "Options in main UI - place into an accordion").needs_reload_ui()
 }))
--
cgit v1.2.1

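The row/column math in that script is worth seeing in isolation: options fill rows left to right, and the last row may be partially filled. A standalone check:

```python
import math

# Standalone illustration of the layout loop above: ceil-division gives the row
# count, and the index guard stops the last row early.
def layout(options, cols):
    rows = math.ceil(len(options) / cols)
    return [[options[r * cols + c] for c in range(cols) if r * cols + c < len(options)]
            for r in range(rows)]

print(layout(["a", "b", "c", "d", "e"], 2))  # [['a', 'b'], ['c', 'd'], ['e']]
```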
From bc63339df3d489de9d1b10c936c87be32bd68790 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Wed, 26 Jul 2023 23:05:16 -0400
Subject: Update hash for SD XL Repo

---
 modules/launch_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 449a8755..444f16fa 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -321,7 +321,7 @@ def prepare_environment():
     blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
 
     stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
-    stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "5c10deee76adad0032b412294130090932317a87")
+    stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "45c443b316737a4ab6e40413d7794a7f5657c19f")
     k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "ab527a9a6d347f364e3d185ba6d714e22d80cb3c")
     codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
     blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
--
cgit v1.2.1


From f23e5ce2daad86d891a5ecdf3f3bad43d7a09c1a Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 14 Aug 2023 17:58:54 +0300
Subject: revert changed inpainting mask conditioning calculation after #12311

---
 modules/processing.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/processing.py b/modules/processing.py
index b1eac2ab..c983d001 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1499,7 +1499,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
             elif self.inpainting_fill == 3:
                 self.init_latent = self.init_latent * self.mask
 
-        self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)
+        self.image_conditioning = self.img2img_image_conditioning(image * 2 - 1, self.init_latent, image_mask)
 
     def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
         x = self.rng.next()
--
cgit v1.2.1


From 5daf7983d141d3c79c03cc65238194383e6334c8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 14 Aug 2023 19:27:04 +0300
Subject: when refreshing cards in extra networks UI, do not discard user's custom resolution

---
 javascript/extraNetworks.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index 897ebeba..3bc723d3 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -332,7 +332,7 @@ function extraNetworksRefreshSingleCard(page, tabname, name) {
             newDiv.innerHTML = data.html;
             var newCard = newDiv.firstElementChild;
 
-            newCard.style = '';
+            newCard.style.display = '';
             card.parentElement.insertBefore(newCard, card);
             card.parentElement.removeChild(card);
         }
--
cgit v1.2.1

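The `image * 2 - 1` restored in the inpainting revert above is the usual range conversion for diffusion conditioning: decoded images live in [0, 1], while the conditioning path expects [-1, 1]. A small illustration with stand-in tensors:

```python
import torch

# Illustration only: map an image tensor from [0, 1] to [-1, 1] before it is
# used to build the inpainting image conditioning.
image = torch.rand(1, 3, 8, 8)        # stand-in for a decoded init image in [0, 1]
conditioning_input = image * 2 - 1    # now in [-1, 1]
assert conditioning_input.min() >= -1 and conditioning_input.max() <= 1
```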
From 45be87afc6b173ebda94f427b8d396a2842ddf3c Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 14 Aug 2023 21:48:05 +0300
Subject: correctly add Eta DDIM to infotext when it's 1.0 and do not add it
 when it's 0.0.

---
 modules/sd_samplers_common.py    | 3 ++-
 modules/sd_samplers_timesteps.py | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 07fc4434..8886de7e 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -217,6 +217,7 @@ class Sampler:
 
         self.eta_option_field = 'eta_ancestral'
         self.eta_infotext_field = 'Eta'
+        self.eta_default = 1.0
 
         self.conditioning_key = shared.sd_model.model.conditioning_key
 
@@ -273,7 +274,7 @@ class Sampler:
                 extra_params_kwargs[param_name] = getattr(p, param_name)
 
         if 'eta' in inspect.signature(self.func).parameters:
-            if self.eta != 1.0:
+            if self.eta != self.eta_default:
                 p.extra_generation_params[self.eta_infotext_field] = self.eta
 
             extra_params_kwargs['eta'] = self.eta
diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py
index c1f534ed..66e83ff7 100644
--- a/modules/sd_samplers_timesteps.py
+++ b/modules/sd_samplers_timesteps.py
@@ -76,6 +76,7 @@ class CompVisSampler(sd_samplers_common.Sampler):
 
         self.eta_option_field = 'eta_ddim'
         self.eta_infotext_field = 'Eta DDIM'
+        self.eta_default = 0.0
 
         self.model_wrap_cfg = CFGDenoiserTimesteps(self)
--
cgit v1.2.1

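The rule the commit establishes is simple: eta is only written into the infotext when it differs from the sampler family's default (1.0 for k-diffusion ancestral samplers, 0.0 for DDIM-style timestep samplers). A compact restatement:

```python
# Restatement of the infotext rule above; the function itself is illustrative.
def eta_infotext(eta: float, eta_default: float, field: str) -> dict:
    return {field: eta} if eta != eta_default else {}

assert eta_infotext(1.0, 0.0, "Eta DDIM") == {"Eta DDIM": 1.0}  # non-default: recorded
assert eta_infotext(0.0, 0.0, "Eta DDIM") == {}                 # default: omitted
```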
"None", "none"): continue + if modules.sd_models.get_closet_checkpoint_match(x) is None: raise RuntimeError(f"Unknown checkpoint: {x}") @@ -267,8 +259,8 @@ axis_options = [ AxisOption("Token merging ratio", float, apply_override('token_merging_ratio')), AxisOption("Token merging ratio high-res", float, apply_override('token_merging_ratio_hr')), AxisOption("Always discard next-to-last sigma", str, apply_override('always_discard_next_to_last_sigma', boolean=True), choices=boolean_choice(reverse=True)), - AxisOption("Refiner checkpoint", str, apply_refiner_checkpoint, format_value=format_remove_path, confirm=confirm_refiner_checkpoints, cost=1.0, choices=lambda: ['None'] + sorted(sd_models.checkpoints_list, key=str.casefold)), - AxisOption("Refiner switch at", float, apply_override('sd_refiner_switch_at')) + AxisOption("Refiner checkpoint", str, apply_field('refiner_checkpoint'), format_value=format_remove_path, confirm=confirm_checkpoints_or_none, cost=1.0, choices=lambda: ['None'] + sorted(sd_models.checkpoints_list, key=str.casefold)), + AxisOption("Refiner switch at", float, apply_field('refiner_switch_at')), ] -- cgit v1.2.1 From 79d4e81984171a047d6c71d97a67dda7dd87c43c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 15 Aug 2023 08:46:17 +0300 Subject: fix processing error that happens if batch_size is not a multiple of how many prompts/negative prompts there are #12509 --- modules/processing.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 25a19a77..1d098302 100755 --- a/modules/processing.py +++ b/modules/processing.py @@ -382,13 +382,18 @@ class StableDiffusionProcessing: def setup_prompts(self): if type(self.prompt) == list: self.all_prompts = self.prompt + elif type(self.negative_prompt) == list: + self.all_prompts = [self.prompt] * len(self.negative_prompt) else: self.all_prompts = self.batch_size * self.n_iter * [self.prompt] if type(self.negative_prompt) == list: self.all_negative_prompts = self.negative_prompt else: - self.all_negative_prompts = self.batch_size * self.n_iter * [self.negative_prompt] + self.all_negative_prompts = [self.negative_prompt] * len(self.all_prompts) + + if len(self.all_prompts) != len(self.all_negative_prompts): + raise RuntimeError(f"Received a different number of prompts ({len(self.all_prompts)}) and negative prompts ({len(self.all_negative_prompts)})") self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts] self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts] -- cgit v1.2.1 From 371b24b17c1cf98c9068a4b585b93cc1610702dc Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Tue, 15 Aug 2023 02:19:19 -0400 Subject: Add extra img2img noise --- modules/sd_samplers_kdiffusion.py | 4 ++++ modules/sd_samplers_timesteps.py | 4 ++++ modules/shared_options.py | 3 ++- scripts/xyz_grid.py | 2 ++ 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 08b9e740..08866e41 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -145,6 +145,10 @@ class KDiffusionSampler(sd_samplers_common.Sampler): xi = x + noise * sigma_sched[0] + if opts.img2img_extra_noise > 0: + p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise + xi += noise * opts.img2img_extra_noise + 
From 371b24b17c1cf98c9068a4b585b93cc1610702dc Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Tue, 15 Aug 2023 02:19:19 -0400
Subject: Add extra img2img noise

---
 modules/sd_samplers_kdiffusion.py | 4 ++++
 modules/sd_samplers_timesteps.py  | 4 ++++
 modules/shared_options.py         | 3 ++-
 scripts/xyz_grid.py               | 2 ++
 4 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 08b9e740..08866e41 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -145,6 +145,10 @@ class KDiffusionSampler(sd_samplers_common.Sampler):
 
         xi = x + noise * sigma_sched[0]
 
+        if opts.img2img_extra_noise > 0:
+            p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise
+            xi += noise * opts.img2img_extra_noise
+
         extra_params_kwargs = self.initialize(p)
         parameters = inspect.signature(self.func).parameters
 
diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py
index c1f534ed..670e2151 100644
--- a/modules/sd_samplers_timesteps.py
+++ b/modules/sd_samplers_timesteps.py
@@ -103,6 +103,10 @@ class CompVisSampler(sd_samplers_common.Sampler):
 
         xi = x * sqrt_alpha_cumprod + noise * sqrt_one_minus_alpha_cumprod
 
+        if opts.img2img_extra_noise > 0:
+            p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise
+            xi += noise * opts.img2img_extra_noise * sqrt_alpha_cumprod
+
         extra_params_kwargs = self.initialize(p)
         parameters = inspect.signature(self.func).parameters
 
diff --git a/modules/shared_options.py b/modules/shared_options.py
index fc0de61f..79cbb92e 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -166,7 +166,8 @@ For img2img, VAE is used to process user's input image before the sampling, and
 
 options_templates.update(options_section(('img2img', "img2img"), {
     "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Conditional mask weight'),
-    "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}, infotext='Noise multiplier'),
+    "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.0, "maximum": 1.5, "step": 0.001}, infotext='Noise multiplier'),
+    "img2img_extra_noise": OptionInfo(0.0, "Extra noise multiplier for img2img and hires fix", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Extra noise').info("0 = disabled (default); should be lower than denoising strength"),
     "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
     "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"),
     "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}),
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index da0e48aa..e36bd3c9 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -241,6 +241,8 @@ axis_options = [
     AxisOption("Eta", float, apply_field("eta")),
     AxisOption("Clip skip", int, apply_clip_skip),
     AxisOption("Denoising", float, apply_field("denoising_strength")),
+    AxisOption("Initial noise multiplier", float, apply_field("initial_noise_multiplier")),
+    AxisOption("Extra noise", float, apply_override("img2img_extra_noise")),
     AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
     AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
     AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ['None'] + list(sd_vae.vae_dict)),
--
cgit v1.2.1

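The extra-noise feature mixes a second, smaller helping of noise into the already-noised img2img starting latent. A hedged sketch with illustrative values (the real amounts come from the sigma schedule and user settings):

```python
import torch

# Illustrative values only: normal img2img noising followed by the optional
# extra noise that the commit above adds.
x = torch.randn(1, 4, 64, 64)       # initial latent from the encoded image
noise = torch.randn_like(x)
sigma0 = 5.0                        # first sigma of the truncated schedule
extra_noise = 0.05                  # should stay below denoising strength

xi = x + noise * sigma0             # standard img2img starting point
xi = xi + noise * extra_noise       # the new optional extra noise
```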
From b0a6d61d733ec5f5c062025e26de7d74bc7eae93 Mon Sep 17 00:00:00 2001
From: NoCrypt <57245077+NoCrypt@users.noreply.github.com>
Date: Tue, 15 Aug 2023 13:22:44 +0700
Subject: Add NoCrypt/miku gradio theme

---
 modules/shared_gradio_themes.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/modules/shared_gradio_themes.py b/modules/shared_gradio_themes.py
index 485e89d5..822db0a9 100644
--- a/modules/shared_gradio_themes.py
+++ b/modules/shared_gradio_themes.py
@@ -36,7 +36,8 @@ gradio_hf_hub_themes = [
     "step-3-profit/Midnight-Deep",
     "Taithrah/Minimal",
     "ysharma/huggingface",
-    "ysharma/steampunk"
+    "ysharma/steampunk",
+    "NoCrypt/miku"
 ]
--
cgit v1.2.1


From 9ab52caf022f19c34ee4a1339fc3508addda4752 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 15 Aug 2023 09:50:57 +0300
Subject: update changelog file

---
 CHANGELOG.md | 96 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 96 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b18c6867..6eae6a4f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,99 @@
+## 1.6.0
+
+### Features:
+ * refiner support
+ * add NV option for Random number generator source setting, which allows to generate same pictures on CPU/AMD/Mac as on NVidia videocards
+ * add style editor dialog
+ * hires fix: add an option to use a different checkpoint for second pass
+ * option to keep multiple loaded models in memory
+ * new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential
+ * rework DDIM, PLMS, UniPC to use CFG denoiser same as in k-diffusion samplers:
+   * makes all of them work with img2img
+   * makes prompt composition possible (AND)
+   * makes them available for SDXL
+ * always show extra networks tabs in the UI
+ * use less RAM when creating models
+ * textual inversion inference support for SDXL
+ * extra networks UI: show metadata for SD checkpoints
+ * checkpoint merger: add metadata support
+ * prompt editing and attention: add support for whitespace after the number ([ red : green : 0.5 ]) (seed breaking change)
+ * VAE: allow selecting own VAE for each checkpoint (in user metadata editor)
+ * VAE: add selected VAE to infotext
+ * options in main UI: add own separate setting for txt2img and img2img, correctly read values from pasted infotext, add setting for column count
+
+
+### Minor:
+ * img2img batch: RAM savings, VRAM savings, .tif, .tiff in img2img batch
+ * postprocessing/extras: RAM savings
+ * XYZ: in the axis labels, remove pathnames from model filenames
+ * XYZ: support hires sampler
+ * XYZ: new option: use text inputs instead of dropdowns
+ * add gradio version warning
+ * sort list of VAE checkpoints
+ * use transparent white for mask in inpainting, along with an option to select the color
+ * move some settings to their own section: img2img, VAE
+ * add checkbox to show/hide dirs for extra networks
+ * Add TAESD(or more) options for all the VAE encode/decode operation
+ * gradio theme cache, new gradio themes, along with explanation that the user can input his own values
+ * sampler fixes/tweaks: s_tmax, s_churn, s_noise, s_tmax
+ * update README.md with correct instructions for Linux installation
+ * option to not save incomplete images, on by default
+ * enable cond cache by default
+ * git autofix for repos that are corrupted
+ * allow to open images in new browser tab by middle mouse button
+ * automatically open webui in browser when running "locally"
+ * put commonly used samplers on top, make DPM++ 2M Karras the default choice
+ * zoom and pan: option to auto-expand a wide image
+ * option to cache Lora networks in memory
+ * rework hires fix UI to use accordion
+ * face restoration and tiling moved to settings - use "Options in main UI" setting if you want them back
+ * change quicksettings items to have variable width
+ * Lora: add Norm module, add support for bias
+ * Lora: output warnings in UI rather than fail for unfitting loras; switch to logging for error output in console
+ * support search and display of hashes for all extra network items
+ * Add extra noise param for img2img operations
+
+### Extensions and API:
+ * gradio 3.39
+ * also bump versions for packages: transformers, GitPython, accelerate, scikit-image, timm, tomesd
+ * support tooltip kwarg for gradio elements: gr.Textbox(label='hello', tooltip='world')
+ * properly clear the total console progressbar when using txt2img and img2img from API
+ * add cmd_arg --disable-extensions all extra
+ * shared.py and webui.py split into many files
+ * add --loglevel commandline argument for logging
+ * add a custom UI element that combines accordion and checkbox
+ * avoid importing gradio in tests because it spams warnings
+ * put infotext label for setting into OptionInfo definition rather than in a separate list
+ * make `StableDiffusionProcessingImg2Img.mask_blur` a property, make more inline with PIL `GaussianBlur`
+ * option to make scripts UI without gr.Group
+ * add a way for scripts to register a callback for before/after just a single component's creation
+ * use dataclass for StableDiffusionProcessing
+
+### Bug Fixes:
+ * Don't crash if out of local storage quota for javascript localStorage
+ * fix memory leak when generation fails
+ * XYZ plot do not fail if an exception occurs
+ * update doggettx cross attention optimization to not use an unreasonable amount of memory in some edge cases -- suggestion by MorkTheOrk
+ * fix missing TI hash in infotext if generation uses both negative and positive TI
+ * localization fixes
+ * fix sdxl model invalid configuration after the hijack
+ * correctly toggle extras checkbox for infotext paste
+ * open raw sysinfo link in new page
+ * prompt parser: Account for empty field in alternating words syntax
+ * add tab and carriage return to invalid filename chars
+ * fix api only Lora not working
+ * fix options in main UI misbehaving when there's just one element
+ * make it possible to use a sampler from infotext even if it's hidden in the dropdown
+ * fix styles missing from the prompt in infotext when making a grid of batch of multiple images
+ * prevent bogus progress output in console when calculating hires fix dimensions
+ * fix --use-textbox-seed
+ * fix broken `Lora/Networks: use old method` option
+ * properly return `None` for VAE hash when using `--no-hashing`
+ * MPS/macOS fixes and optimizations
+ * add second_order to samplers that mistakenly didn't have it
+ * when refreshing cards in extra networks UI, do not discard user's custom resolution
+ * fix processing error that happens if batch_size is not a multiple of how many prompts/negative prompts there are #12509
+
 ## 1.5.1
 
 ### Minor:
--
cgit v1.2.1

From 215466282635972a59b9f094e7a64b81711b11a3 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Tue, 15 Aug 2023 03:23:44 -0400
Subject: Add PR refs to changelog

---
 CHANGELOG.md | 78 ++++++++++++++++++++++++++++++------------------------------
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6eae6a4f..e5986ed7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,70 +1,70 @@
 ## 1.6.0
 
 ### Features:
- * refiner support
+ * refiner support [#12371](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371)
  * add NV option for Random number generator source setting, which allows to generate same pictures on CPU/AMD/Mac as on NVidia videocards
  * add style editor dialog
- * hires fix: add an option to use a different checkpoint for second pass
- * option to keep multiple loaded models in memory
- * new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential
+ * hires fix: add an option to use a different checkpoint for second pass ([#12181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12181))
+ * option to keep multiple loaded models in memory ([#12227](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12227))
+ * new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential ([#12300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12300), [#12519](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12519), [#12542](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12542))
  * rework DDIM, PLMS, UniPC to use CFG denoiser same as in k-diffusion samplers:
    * makes all of them work with img2img
    * makes prompt composition possible (AND)
    * makes them available for SDXL
- * always show extra networks tabs in the UI
- * use less RAM when creating models
+ * always show extra networks tabs in the UI ([#11808](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11808))
+ * use less RAM when creating models ([#11958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11958))
  * textual inversion inference support for SDXL
  * extra networks UI: show metadata for SD checkpoints
  * checkpoint merger: add metadata support
- * prompt editing and attention: add support for whitespace after the number ([ red : green : 0.5 ]) (seed breaking change)
+ * prompt editing and attention: add support for whitespace after the number ([ red : green : 0.5 ]) (seed breaking change) ([#12177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12177))
  * VAE: allow selecting own VAE for each checkpoint (in user metadata editor)
  * VAE: add selected VAE to infotext
- * options in main UI: add own separate setting for txt2img and img2img, correctly read values from pasted infotext, add setting for column count
+ * options in main UI: add own separate setting for txt2img and img2img, correctly read values from pasted infotext, add setting for column count ([#12551](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12551))
 
 
 ### Minor:
- * img2img batch: RAM savings, VRAM savings, .tif, .tiff in img2img batch
- * postprocessing/extras: RAM savings
+ * img2img batch: RAM savings, VRAM savings, .tif, .tiff in img2img batch ([#12120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12120), [#12514](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12514), [#12515](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12515))
+ * postprocessing/extras: RAM savings ([#12479](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12479))
  * XYZ: in the axis labels, remove pathnames from model filenames
- * XYZ: support hires sampler
- * XYZ: new option: use text inputs instead of dropdowns
+ * XYZ: support hires sampler ([#12298](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12298))
+ * XYZ: new option: use text inputs instead of dropdowns ([#12491](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12491))
  * add gradio version warning
- * sort list of VAE checkpoints
- * use transparent white for mask in inpainting, along with an option to select the color
+ * sort list of VAE checkpoints ([#12297](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12297))
+ * use transparent white for mask in inpainting, along with an option to select the color ([#12326](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12326))
  * move some settings to their own section: img2img, VAE
  * add checkbox to show/hide dirs for extra networks
- * Add TAESD(or more) options for all the VAE encode/decode operation
- * gradio theme cache, new gradio themes, along with explanation that the user can input his own values
- * sampler fixes/tweaks: s_tmax, s_churn, s_noise, s_tmax
- * update README.md with correct instructions for Linux installation
- * option to not save incomplete images, on by default
+ * Add TAESD(or more) options for all the VAE encode/decode operation ([#12311](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12311))
+ * gradio theme cache, new gradio themes, along with explanation that the user can input his own values ([#12346](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12346), [#12355](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12355))
+ * sampler fixes/tweaks: s_tmax, s_churn, s_noise, s_tmax ([#12354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12354), [#12356](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12356), [#12357](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12357), [#12358](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12358), [#12375](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12375), [#12521](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12521))
+ * update README.md with correct instructions for Linux installation ([#12352](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12352))
+ * option to not save incomplete images, on by default ([#12338](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12338))
  * enable cond cache by default
- * git autofix for repos that are corrupted
- * allow to open images in new browser tab by middle mouse button
- * automatically open webui in browser when running "locally"
+ * git autofix for repos that are corrupted ([#12230](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12230))
+ * allow to open images in new browser tab by middle mouse button ([#12379](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12379))
+ * automatically open webui in browser when running "locally" ([#12254](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12254))
  * put commonly used samplers on top, make DPM++ 2M Karras the default choice
- * zoom and pan: option to auto-expand a wide image
+ * zoom and pan: option to auto-expand a wide image ([#12413](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12413))
  * option to cache Lora networks in memory
  * rework hires fix UI to use accordion
  * face restoration and tiling moved to settings - use "Options in main UI" setting if you want them back
  * change quicksettings items to have variable width
- * Lora: add Norm module, add support for bias
+ * Lora: add Norm module, add support for bias ([#12503](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12503))
  * Lora: output warnings in UI rather than fail for unfitting loras; switch to logging for error output in console
- * support search and display of hashes for all extra network items
- * Add extra noise param for img2img operations
+ * support search and display of hashes for all extra network items ([#12510](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12510))
+ * Add extra noise param for img2img operations ([#12564](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12564))
 
 ### Extensions and API:
  * gradio 3.39
  * also bump versions for packages: transformers, GitPython, accelerate, scikit-image, timm, tomesd
  * support tooltip kwarg for gradio elements: gr.Textbox(label='hello', tooltip='world')
  * properly clear the total console progressbar when using txt2img and img2img from API
- * add cmd_arg --disable-extensions all extra
+ * add cmd_arg --disable-extensions all extra ([#12294](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12294))
  * shared.py and webui.py split into many files
  * add --loglevel commandline argument for logging
  * add a custom UI element that combines accordion and checkbox
  * avoid importing gradio in tests because it spams warnings
  * put infotext label for setting into OptionInfo definition rather than in a separate list
- * make `StableDiffusionProcessingImg2Img.mask_blur` a property, make more inline with PIL `GaussianBlur`
+ * make `StableDiffusionProcessingImg2Img.mask_blur` a property, make more inline with PIL `GaussianBlur` ([#12470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12470))
  * option to make scripts UI without gr.Group
  * add a way for scripts to register a callback for before/after just a single component's creation
  * use dataclass for StableDiffusionProcessing
@@ -74,25 +74,25 @@
  * fix memory leak when generation fails
  * XYZ plot do not fail if an exception occurs
  * update doggettx cross attention optimization to not use an unreasonable amount of memory in some edge cases -- suggestion by MorkTheOrk
- * fix missing TI hash in infotext if generation uses both negative and positive TI
- * localization fixes
+ * fix missing TI hash in infotext if generation uses both negative and positive TI ([#12269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12269))
+ * localization fixes ([#12307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12307))
  * fix sdxl model invalid configuration after the hijack
- * correctly toggle extras checkbox for infotext paste
- * open raw sysinfo link in new page
- * prompt parser: Account for empty field in alternating words syntax
- * add tab and carriage return to invalid filename chars
- * fix api only Lora not working
+ * correctly toggle extras checkbox for infotext paste ([#12304](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12304))
+ * open raw sysinfo link in new page ([#12318](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12318))
+ * prompt parser: Account for empty field in alternating words syntax ([#12319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12319))
+ * add tab and carriage return to invalid filename chars ([#12327](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12327))
+ * fix api only Lora not working ([#12387](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12387))
  * fix options in main UI misbehaving when there's just one element
  * make it possible to use a sampler from infotext even if it's hidden in the dropdown
  * fix styles missing from the prompt in infotext when making a grid of batch of multiple images
  * prevent bogus progress output in console when calculating hires fix dimensions
  * fix --use-textbox-seed
- * fix broken `Lora/Networks: use old method` option
- * properly return `None` for VAE hash when using `--no-hashing`
- * MPS/macOS fixes and optimizations
+ * fix broken `Lora/Networks: use old method` option ([#12466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12466))
+ * properly return `None` for VAE hash when using `--no-hashing` ([#12463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12463))
+ * MPS/macOS fixes and optimizations ([#12526](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12526))
  * add second_order to samplers that mistakenly didn't have it
  * when refreshing cards in extra networks UI, do not discard user's custom resolution
- * fix processing error that happens if batch_size is not a multiple of how many prompts/negative prompts there are #12509
+ * fix processing error that happens if batch_size is not a multiple of how many prompts/negative prompts there are ([#12509](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12509))
 
 ## 1.5.1
--
cgit v1.2.1


From 54209c1639d0eb6519baa5c07872b85de0062a87 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Tue, 15 Aug 2023 06:29:39 -0400
Subject: Use the new SD VAE override setting

---
 modules/sd_vae.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index fd9a1c2a..4b98b3c2 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -160,7 +160,7 @@ def resolve_vae_from_user_metadata(checkpoint_file) -> VaeResolution:
 
 def resolve_vae_near_checkpoint(checkpoint_file) -> VaeResolution:
     vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file)
-    if vae_near_checkpoint is not None and (shared.opts.sd_vae_as_default or is_automatic):
+    if vae_near_checkpoint is not None and (not shared.opts.sd_vae_overrides_per_model_preferences or is_automatic):
         return VaeResolution(vae_near_checkpoint, 'found near the checkpoint')
 
     return VaeResolution(resolved=False)
--
cgit v1.2.1

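The sd_vae change just above flips an old "use VAE as default" flag to the new override setting. A loose sketch of the resulting precedence, with simplified, assumed names:

```python
# Simplified sketch (assumed names): a VAE file sitting next to the checkpoint
# wins only when the global override setting is off, or when the user's choice
# is "Automatic".
def resolve_vae(near_checkpoint_vae, user_choice, override_per_model_preferences):
    is_automatic = user_choice == "Automatic"
    if near_checkpoint_vae and (not override_per_model_preferences or is_automatic):
        return near_checkpoint_vae, "found near the checkpoint"
    return None, "unresolved"
```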
From f117bb64fc51a46e348e464145f053d0f7586ad2 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Tue, 15 Aug 2023 20:19:13 +0900
Subject: Update CHANGELOG.md

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e5986ed7..ea1c8b16 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -58,7 +58,7 @@
  * also bump versions for packages: transformers, GitPython, accelerate, scikit-image, timm, tomesd
  * support tooltip kwarg for gradio elements: gr.Textbox(label='hello', tooltip='world')
  * properly clear the total console progressbar when using txt2img and img2img from API
- * add cmd_arg --disable-extensions all extra ([#12294](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12294))
+ * add cmd_arg --disable-extra-extensions and --disable-all-extensions ([#12294](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12294))
  * shared.py and webui.py split into many files
  * add --loglevel commandline argument for logging
  * add a custom UI element that combines accordion and checkbox
--
cgit v1.2.1


From aa57a89a21d7c0e59a2006a642c3838264c35051 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 15 Aug 2023 23:41:46 +0800
Subject: full module with ex_bias

---
 extensions-builtin/Lora/network_full.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/extensions-builtin/Lora/network_full.py b/extensions-builtin/Lora/network_full.py
index 109b4c2c..bf6930e9 100644
--- a/extensions-builtin/Lora/network_full.py
+++ b/extensions-builtin/Lora/network_full.py
@@ -14,9 +14,14 @@ class NetworkModuleFull(network.NetworkModule):
         super().__init__(net, weights)
 
         self.weight = weights.w.get("diff")
+        self.ex_bias = weights.w.get("diff_b")
 
     def calc_updown(self, orig_weight):
         output_shape = self.weight.shape
         updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+        if self.ex_bias is not None:
+            ex_bias = self.ex_bias.to(orig_weight.device, dtype=orig_weight.dtype)
+        else:
+            ex_bias = None
 
-        return self.finalize_updown(updown, orig_weight, output_shape)
+        return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
--
cgit v1.2.1

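What the "full module with ex_bias" change amounts to when the network is merged: the stored `diff` is added to the layer weight and the optional `diff_b` to its bias. A minimal illustration with assumed stand-in tensors:

```python
import torch

# Minimal illustration (assumed tensors) of merging a "full" network module:
# "diff" patches the weight, and the new optional "diff_b" patches the bias.
weight = torch.zeros(4, 4)
bias = torch.zeros(4)
diff = torch.ones(4, 4) * 0.1    # weights.w["diff"] in the patch
diff_b = torch.ones(4) * 0.05    # weights.w["diff_b"]; may be absent (None)

merged_weight = weight + diff
merged_bias = bias + diff_b if diff_b is not None else bias
```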
From f01682ee01e81e8ef84fd6fffe8f7aa17233285d Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 15 Aug 2023 19:23:27 +0300
Subject: store patches for Lora in a specialized module

---
 extensions-builtin/Lora/lora_patches.py        | 31 +++++++++++++
 extensions-builtin/Lora/networks.py            | 32 +++++++------
 extensions-builtin/Lora/scripts/lora_script.py | 52 ++------------------
 modules/patches.py                             | 64 ++++++++++++++++++++++++++
 4 files changed, 118 insertions(+), 61 deletions(-)
 create mode 100644 extensions-builtin/Lora/lora_patches.py
 create mode 100644 modules/patches.py

diff --git a/extensions-builtin/Lora/lora_patches.py b/extensions-builtin/Lora/lora_patches.py
new file mode 100644
index 00000000..b394d8e9
--- /dev/null
+++ b/extensions-builtin/Lora/lora_patches.py
@@ -0,0 +1,31 @@
+import torch
+
+import networks
+from modules import patches
+
+
+class LoraPatches:
+    def __init__(self):
+        self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward)
+        self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict)
+        self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward)
+        self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict)
+        self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward)
+        self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict)
+        self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward)
+        self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict)
+        self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward)
+        self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict)
+
+    def undo(self):
+        self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward')
+        self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict')
+        self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward')
+        self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict')
+        self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward')
+        self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict')
+        self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward')
+        self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict')
+        self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward')
+        self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict')
+
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 22fdff4a..9fca36b6 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -2,6 +2,7 @@ import logging
 import os
 import re
 
+import lora_patches
 import network
 import network_lora
 import network_hada
@@ -418,74 +419,74 @@ def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
 
 def network_Linear_forward(self, input):
     if shared.opts.lora_functional:
-        return network_forward(self, input, torch.nn.Linear_forward_before_network)
+        return network_forward(self, input, originals.Linear_forward)
 
     network_apply_weights(self)
 
-    return torch.nn.Linear_forward_before_network(self, input)
+    return originals.Linear_forward(self, input)
 
 
 def network_Linear_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.Linear_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.Linear_load_state_dict(self, *args, **kwargs)
 
 
 def network_Conv2d_forward(self, input):
     if shared.opts.lora_functional:
-        return network_forward(self, input, torch.nn.Conv2d_forward_before_network)
+        return network_forward(self, input, originals.Conv2d_forward)
 
     network_apply_weights(self)
 
-    return torch.nn.Conv2d_forward_before_network(self, input)
+    return originals.Conv2d_forward(self, input)
 
 
 def network_Conv2d_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.Conv2d_load_state_dict(self, *args, **kwargs)
 
 
 def network_GroupNorm_forward(self, input):
     if shared.opts.lora_functional:
-        return network_forward(self, input, torch.nn.GroupNorm_forward_before_network)
+        return network_forward(self, input, originals.GroupNorm_forward)
 
     network_apply_weights(self)
 
-    return torch.nn.GroupNorm_forward_before_network(self, input)
+    return originals.GroupNorm_forward(self, input)
 
 
 def network_GroupNorm_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.GroupNorm_load_state_dict(self, *args, **kwargs)
 
 
 def network_LayerNorm_forward(self, input):
     if shared.opts.lora_functional:
-        return network_forward(self, input, torch.nn.LayerNorm_forward_before_network)
+        return network_forward(self, input, originals.LayerNorm_forward)
 
     network_apply_weights(self)
 
-    return torch.nn.LayerNorm_forward_before_network(self, input)
+    return originals.LayerNorm_forward(self, input)
 
 
 def network_LayerNorm_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.LayerNorm_load_state_dict(self, *args, **kwargs)
 
 
 def network_MultiheadAttention_forward(self, *args, **kwargs):
     network_apply_weights(self)
 
-    return torch.nn.MultiheadAttention_forward_before_network(self, *args, **kwargs)
+    return originals.MultiheadAttention_forward(self, *args, **kwargs)
 
 
 def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
     network_reset_cached_weight(self)
 
-    return torch.nn.MultiheadAttention_load_state_dict_before_network(self, *args, **kwargs)
+    return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
 
 
 def list_available_networks():
@@ -552,6 +553,9 @@ def infotext_pasted(infotext, params):
     if added:
         params["Prompt"] += "\n" + "".join(added)
 
+
+originals: lora_patches.LoraPatches = None
+
 extra_network_lora = None
 
 available_networks = {}
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 4c6e774a..546fb55e 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -7,17 +7,14 @@ from fastapi import FastAPI
 
 import network
 import networks
 import lora  # noqa:F401
+import lora_patches
 import extra_networks_lora
 import ui_extra_networks_lora
-from modules import script_callbacks, ui_extra_networks, extra_networks, shared
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared, patches
+
 
 def unload():
-    torch.nn.Linear.forward = torch.nn.Linear_forward_before_network
-    torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_network
-    torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_network
-    torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_network
-    torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_network
-    torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_network
+    networks.originals.undo()
 
 
 def before_ui():
@@ -28,46 +25,7 @@ def before_ui():
     extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco")
 
 
-if not hasattr(torch.nn, 'Linear_forward_before_network'):
-    torch.nn.Linear_forward_before_network = torch.nn.Linear.forward
-
-if not hasattr(torch.nn, 'Linear_load_state_dict_before_network'):
-    torch.nn.Linear_load_state_dict_before_network = torch.nn.Linear._load_from_state_dict
-
-if not hasattr(torch.nn, 'Conv2d_forward_before_network'):
-    torch.nn.Conv2d_forward_before_network = torch.nn.Conv2d.forward
-
-if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'):
-    torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict
-
-if not hasattr(torch.nn, 'GroupNorm_forward_before_network'):
-    torch.nn.GroupNorm_forward_before_network = torch.nn.GroupNorm.forward
-
-if not hasattr(torch.nn, 'GroupNorm_load_state_dict_before_network'):
-    torch.nn.GroupNorm_load_state_dict_before_network = torch.nn.GroupNorm._load_from_state_dict
-
-if not hasattr(torch.nn, 'LayerNorm_forward_before_network'):
-    torch.nn.LayerNorm_forward_before_network = torch.nn.LayerNorm.forward
-
-if not hasattr(torch.nn, 'LayerNorm_load_state_dict_before_network'):
-    torch.nn.LayerNorm_load_state_dict_before_network = torch.nn.LayerNorm._load_from_state_dict
-
-if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'):
-    torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward
-
-if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_network'):
-    torch.nn.MultiheadAttention_load_state_dict_before_network = torch.nn.MultiheadAttention._load_from_state_dict
-
-torch.nn.Linear.forward = networks.network_Linear_forward
-torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict
-torch.nn.Conv2d.forward = networks.network_Conv2d_forward
-torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict
-torch.nn.GroupNorm.forward = networks.network_GroupNorm_forward
-torch.nn.GroupNorm._load_from_state_dict = networks.network_GroupNorm_load_state_dict
-torch.nn.LayerNorm.forward = networks.network_LayerNorm_forward
-torch.nn.LayerNorm._load_from_state_dict = networks.network_LayerNorm_load_state_dict
-torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward
-torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict
+networks.originals = lora_patches.LoraPatches()
 
 script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
 script_callbacks.on_script_unloaded(unload)
diff --git a/modules/patches.py b/modules/patches.py
new file mode 100644
index 00000000..348235e7
--- /dev/null
+++ b/modules/patches.py
@@ -0,0 +1,64 @@
+from collections import defaultdict
+
+
+def patch(key, obj, field, replacement):
+    """Replaces a function in a module or a class.
+
+    Also stores the original function in this module, possible to be retrieved via original(key, obj, field).
+    If the function is already replaced by this caller (key), an exception is raised -- use undo() before that.
+
+    Arguments:
+        key: identifying information for who is doing the replacement. You can use __name__.
+        obj: the module or the class
+        field: name of the function as a string
+        replacement: the new function
+
+    Returns:
+        the original function
+    """
+
+    patch_key = (obj, field)
+    if patch_key in originals[key]:
+        raise RuntimeError(f"patch for {field} is already applied")
+
+    original_func = getattr(obj, field)
+    originals[key][patch_key] = original_func
+
+    setattr(obj, field, replacement)
+
+    return original_func
+
+
+def undo(key, obj, field):
+    """Undoes the replacement by the patch().
+
+    If the function is not replaced, raises an exception.
+
+    Arguments:
+        key: identifying information for who is doing the replacement. You can use __name__.
+        obj: the module or the class
+        field: name of the function as a string
+
+    Returns:
+        Always None
+    """
+
+    patch_key = (obj, field)
+
+    if patch_key not in originals[key]:
+        raise RuntimeError(f"there is no patch for {field} to undo")
+
+    original_func = originals[key].pop(patch_key)
+    setattr(obj, field, original_func)
+
+    return None
+
+
+def original(key, obj, field):
+    """Returns the original function for the patch created by the patch() function"""
+    patch_key = (obj, field)
+
+    return originals[key].get(patch_key, None)
+
+
+originals = defaultdict(dict)
--
cgit v1.2.1

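The new `modules/patches.py` is a small monkey-patch registry keyed by caller. A hypothetical usage example (my own class and function names, but the `patch`/`undo`/`original` calls match the API defined in the diff above):

```python
# Hypothetical usage of modules/patches: replace a method, reach the original
# from inside the replacement, and undo the patch later.
from modules import patches

class Greeter:
    def hello(self):
        return "hello"

def patched_hello(self):
    original = patches.original(__name__, Greeter, 'hello')  # look up the saved original
    return original(self) + ", patched"

patches.patch(__name__, Greeter, 'hello', patched_hello)
assert Greeter().hello() == "hello, patched"

patches.undo(__name__, Greeter, 'hello')  # restores the original method
assert Greeter().hello() == "hello"
```

Keying on `__name__` means two different callers can each patch the same method once, and a double patch from the same caller fails loudly instead of silently losing the saved original.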
From 85fcb7b8dfe7b3dd06931943f095c77f1043dc25 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 15 Aug 2023 19:24:55 +0300
Subject: lint

---
 extensions-builtin/Lora/scripts/lora_script.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 546fb55e..ef23968c 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -1,6 +1,5 @@
 import re

-import torch
 import gradio as gr
 from fastapi import FastAPI

@@ -10,7 +9,7 @@ import lora # noqa:F401
 import lora_patches
 import extra_networks_lora
 import ui_extra_networks_lora
-from modules import script_callbacks, ui_extra_networks, extra_networks, shared, patches
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared


 def unload():
--
cgit v1.2.1

From 5b28b7dbc7a339e51b42a05994293e17b958c36c Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Tue, 15 Aug 2023 13:38:37 -0400
Subject: RNG: Make all elements of shape `int`s

---
 modules/rng.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/rng.py b/modules/rng.py
index f927a318..9e8ba2ee 100644
--- a/modules/rng.py
+++ b/modules/rng.py
@@ -98,7 +98,7 @@ def slerp(val, low, high):

 class ImageRNG:
     def __init__(self, shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0):
-        self.shape = shape
+        self.shape = tuple(map(int, shape))
         self.seeds = seeds
         self.subseeds = subseeds
         self.subseed_strength = subseed_strength
--
cgit v1.2.1

From 0f77139253f5481d62f7c1eddc220355440b2d1f Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Tue, 15 Aug 2023 14:24:55 -0400
Subject: Fix inpaint upload for alpha masks, create reusable function

---
 modules/img2img.py    |  2 +-
 modules/processing.py | 10 +++++++++-
 modules/ui.py         |  2 +-
 3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/modules/img2img.py b/modules/img2img.py
index ac9fd3f8..328cb0e9 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -129,7 +129,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
         mask = None
     elif mode == 2:  # inpaint
         image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
-        mask = mask.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0)
+        mask = processing.create_binary_mask(mask)
         image = image.convert("RGB")
     elif mode == 3:  # inpaint sketch
         image = inpaint_color_sketch
diff --git a/modules/processing.py b/modules/processing.py
index 1d098302..e62db62f 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -81,6 +81,12 @@ def apply_overlay(image, paste_loc, index, overlays):

     return image

+def create_binary_mask(image):
+    if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255):
+        image = image.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0)
+    else:
+        image = image.convert('L')
+    return image

 def txt2img_image_conditioning(sd_model, x, width, height):
     if sd_model.model.conditioning_key in {'hybrid', 'concat'}:  # Inpainting models
@@ -1385,7 +1391,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         image_mask = self.image_mask

         if image_mask is not None:
-            image_mask = image_mask.convert('L')
+            # image_mask is passed in as RGBA by Gradio to support alpha masks,
+            # but we still want to support binary masks.
+            image_mask = create_binary_mask(image_mask)

             if self.inpainting_mask_invert:
                 image_mask = ImageOps.invert(image_mask)
diff --git a/modules/ui.py b/modules/ui.py
index a6b1f964..c98d9849 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -598,7 +598,7 @@ def create_ui():

                     with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
                         init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
-                        init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask")
+                        init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask")

                     with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
                         hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
--
cgit v1.2.1
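
Note: create_binary_mask() only thresholds the alpha channel when the RGBA
image actually uses transparency (getextrema()[-1] != (255, 255)); otherwise it
falls back to a plain grayscale conversion, which keeps existing binary masks
working. A standalone sketch of the same logic with Pillow, using a made-up
test image:

    from PIL import Image

    def create_binary_mask(image):
        # threshold the alpha channel at 128 only if some pixel is not fully opaque
        if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255):
            image = image.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0)
        else:
            image = image.convert('L')
        return image

    mask = Image.new('RGBA', (4, 4), (255, 0, 0, 100))  # alpha 100 everywhere
    print(create_binary_mask(mask).getextrema())        # (0, 0): alpha 100 <= 128 maps to black
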
From 70833919314be39eede4dfa044b28484b12e8c86 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Tue, 15 Aug 2023 14:44:13 -0400
Subject: CSS: Remove forced visible overflow for Gradio group child divs

---
 style.css | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/style.css b/style.css
index bdf0635a..d7f87f81 100644
--- a/style.css
+++ b/style.css
@@ -138,7 +138,7 @@ a{
 }

 /* gradio 3.39 puts a lot of overflow: hidden all over the place for an unknown reason. */
-.block.gradio-textbox, div.gradio-group, div.gradio-group div, div.gradio-dropdown{
+.block.gradio-textbox, div.gradio-group, div.gradio-dropdown{
     overflow: visible !important;
 }
--
cgit v1.2.1

From d9ddc5d4cd9e707c018d245572310ca6f6cdf1ef Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Wed, 16 Aug 2023 11:21:12 +0800
Subject: Remove wrong scale

---
 modules/sd_samplers_common.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 8886de7e..7dc79ea8 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -44,8 +44,7 @@ def samples_to_images_tensor(sample, approximation=None, model=None):
     elif approximation == 1:
         x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype)).detach()
     elif approximation == 3:
-        x_sample = sample * 1.5
-        x_sample = sd_vae_taesd.decoder_model()(x_sample.to(devices.device, devices.dtype)).detach()
+        x_sample = sd_vae_taesd.decoder_model()(sample.to(devices.device, devices.dtype)).detach()
         x_sample = x_sample * 2 - 1
     else:
         if model is None:
--
cgit v1.2.1

From 86221269f98ef9b21a6e6c9d04b86e2fb5cb33d3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 16 Aug 2023 09:55:35 +0300
Subject: RAM optimization round 2

---
 extensions-builtin/Lora/networks.py  |  5 +++-
 modules/sd_disable_initialization.py | 51 +++++++++++++++++++++++++++++-----
 2 files changed, 48 insertions(+), 8 deletions(-)

diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 9fca36b6..96f935b2 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -304,7 +304,10 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
     wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)

     weights_backup = getattr(self, "network_weights_backup", None)
-    if weights_backup is None:
+    if weights_backup is None and wanted_names != ():
+        if current_names != ():
+            raise RuntimeError("no backup weights found and current weights are not unchanged")
+
         if isinstance(self, torch.nn.MultiheadAttention):
             weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
         else:
diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index 695c5736..719eeb93 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -168,22 +168,59 @@ class LoadStateDictOnMeta(ReplaceHelper):
         device = self.device

         def load_from_state_dict(original, self, state_dict, prefix, *args, **kwargs):
-            params = [(name, param) for name, param in self._parameters.items() if param is not None and param.is_meta]
+            used_param_keys = []
+
+            for name, param in self._parameters.items():
+                if param is None:
+                    continue
+
+                key = prefix + name
+                sd_param = sd.pop(key, None)
+                if sd_param is not None:
+                    state_dict[key] = sd_param
+                    used_param_keys.append(key)

-            for name, param in params:
                 if param.is_meta:
-                    self._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device), requires_grad=param.requires_grad)
+                    dtype = sd_param.dtype if sd_param is not None else param.dtype
+                    self._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device, dtype=dtype), requires_grad=param.requires_grad)
+
+            for name in self._buffers:
+                key = prefix + name
+
+                sd_param = sd.pop(key, None)
+                if sd_param is not None:
+                    state_dict[key] = sd_param
+                    used_param_keys.append(key)

             original(self, state_dict, prefix, *args, **kwargs)

-            for name, _ in params:
-                key = prefix + name
-                if key in sd:
-                    del sd[key]
+            for key in used_param_keys:
+                state_dict.pop(key, None)
+
+        def load_state_dict(original, self, state_dict, strict=True):
+            """torch makes a lot of copies of the dictionary with weights, so just deleting entries from state_dict does not help
+            because the same values are stored in multiple copies of the dict. The trick used here is to give torch a dict with
+            all weights on meta device, i.e. deleted, and then it doesn't matter how many copies torch makes.
+
+            In _load_from_state_dict, the correct weight will be obtained from a single dict with the right weights (sd).
+
+            The dangerous thing about this is if _load_from_state_dict is not called (if some exotic module overloads
+            the function and does not call the original), the state dict will just fail to load because weights
+            would be on the meta device.
+            """
+
+            if state_dict == sd:
+                state_dict = {k: v.to(device="meta", dtype=v.dtype) for k, v in state_dict.items()}
+
+            original(self, state_dict, strict=strict)
+
+        module_load_state_dict = self.replace(torch.nn.Module, 'load_state_dict', lambda *args, **kwargs: load_state_dict(module_load_state_dict, *args, **kwargs))
+        module_load_from_state_dict = self.replace(torch.nn.Module, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(module_load_from_state_dict, *args, **kwargs))
         linear_load_from_state_dict = self.replace(torch.nn.Linear, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(linear_load_from_state_dict, *args, **kwargs))
         conv2d_load_from_state_dict = self.replace(torch.nn.Conv2d, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(conv2d_load_from_state_dict, *args, **kwargs))
         mha_load_from_state_dict = self.replace(torch.nn.MultiheadAttention, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(mha_load_from_state_dict, *args, **kwargs))
+        layer_norm_load_from_state_dict = self.replace(torch.nn.LayerNorm, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(layer_norm_load_from_state_dict, *args, **kwargs))
+        group_norm_load_from_state_dict = self.replace(torch.nn.GroupNorm, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(group_norm_load_from_state_dict, *args, **kwargs))

     def __exit__(self, exc_type, exc_val, exc_tb):
         self.restore()
--
cgit v1.2.1
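
Note: the docstring above is the heart of this optimization. Because torch may
duplicate the dict passed to load_state_dict(), the wrapper hands torch a dict
of meta tensors (shape and dtype only, no storage) and keeps the single real
dict `sd` on the side for _load_from_state_dict to draw from. A small
demonstration of why the meta copy is free, runnable on a reasonably recent
PyTorch:

    import torch

    sd = {"weight": torch.ones(2, 2)}  # the one dict that holds real data
    meta_sd = {k: v.to(device="meta", dtype=v.dtype) for k, v in sd.items()}

    print(meta_sd["weight"].is_meta)   # True: no storage, so extra copies cost nothing
    print(sd["weight"].sum())          # tensor(4.): real values still live in sd
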
From 0815c45bcdec0a2e5c60bdd5b33d95813d799c01 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 16 Aug 2023 10:44:17 +0300
Subject: send weights to target device instead of CPU memory

---
 modules/sd_models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/sd_models.py b/modules/sd_models.py
index f6fbdcd6..b01d44c5 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -579,7 +579,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):

     timer.record("create model")

-    with sd_disable_initialization.LoadStateDictOnMeta(state_dict, devices.cpu):
+    with sd_disable_initialization.LoadStateDictOnMeta(state_dict, devices.device):
         load_model_weights(sd_model, checkpoint_info, state_dict, timer)
     timer.record("load weights from state dict")
--
cgit v1.2.1

From 57e59c14c8a13a99d6422597d27d92ad10a51ca1 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 16 Aug 2023 11:28:00 +0300
Subject: Revert "send weights to target device instead of CPU memory"

This reverts commit 0815c45bcdec0a2e5c60bdd5b33d95813d799c01.

---
 modules/sd_models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/sd_models.py b/modules/sd_models.py
index b01d44c5..f6fbdcd6 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -579,7 +579,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):

     timer.record("create model")

-    with sd_disable_initialization.LoadStateDictOnMeta(state_dict, devices.device):
+    with sd_disable_initialization.LoadStateDictOnMeta(state_dict, devices.cpu):
         load_model_weights(sd_model, checkpoint_info, state_dict, timer)
     timer.record("load weights from state dict")
--
cgit v1.2.1

From eaba3d7349c6f0e151be66ade3fdc848d693a10d Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Wed, 16 Aug 2023 12:11:01 +0300
Subject: send weights to target device instead of CPU memory

---
 modules/sd_disable_initialization.py | 24 +++++++++++++++---------
 modules/sd_models.py                 | 17 ++++++++++++++++-
 2 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index 719eeb93..8863107a 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -155,10 +155,16 @@ class LoadStateDictOnMeta(ReplaceHelper):
         ```
     """

-    def __init__(self, state_dict, device):
+    def __init__(self, state_dict, device, weight_dtype_conversion=None):
         super().__init__()
         self.state_dict = state_dict
         self.device = device
+        self.weight_dtype_conversion = weight_dtype_conversion or {}
+        self.default_dtype = self.weight_dtype_conversion.get('')
+
+    def get_weight_dtype(self, key):
+        key_first_term, _ = key.split('.', 1)
+        return self.weight_dtype_conversion.get(key_first_term, self.default_dtype)

     def __enter__(self):
         if shared.cmd_opts.disable_model_loading_ram_optimization:
@@ -167,24 +173,24 @@ class LoadStateDictOnMeta(ReplaceHelper):
         sd = self.state_dict
         device = self.device

-        def load_from_state_dict(original, self, state_dict, prefix, *args, **kwargs):
+        def load_from_state_dict(original, module, state_dict, prefix, *args, **kwargs):
             used_param_keys = []

-            for name, param in self._parameters.items():
+            for name, param in module._parameters.items():
                 if param is None:
                     continue

                 key = prefix + name
                 sd_param = sd.pop(key, None)
                 if sd_param is not None:
-                    state_dict[key] = sd_param
+                    state_dict[key] = sd_param.to(dtype=self.get_weight_dtype(key))
                     used_param_keys.append(key)

                 if param.is_meta:
                     dtype = sd_param.dtype if sd_param is not None else param.dtype
-                    self._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device, dtype=dtype), requires_grad=param.requires_grad)
+                    module._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device, dtype=dtype), requires_grad=param.requires_grad)

-            for name in self._buffers:
+            for name in module._buffers:
                 key = prefix + name

                 sd_param = sd.pop(key, None)
@@ -192,12 +198,12 @@ class LoadStateDictOnMeta(ReplaceHelper):
                     state_dict[key] = sd_param
                     used_param_keys.append(key)

-            original(self, state_dict, prefix, *args, **kwargs)
+            original(module, state_dict, prefix, *args, **kwargs)

             for key in used_param_keys:
                 state_dict.pop(key, None)

-        def load_state_dict(original, self, state_dict, strict=True):
+        def load_state_dict(original, module, state_dict, strict=True):
             """torch makes a lot of copies of the dictionary with weights, so just deleting entries from state_dict does not help
             because the same values are stored in multiple copies of the dict. The trick used here is to give torch a dict with
             all weights on meta device, i.e. deleted, and then it doesn't matter how many copies torch makes.
@@ -212,7 +218,7 @@ class LoadStateDictOnMeta(ReplaceHelper):
             if state_dict == sd:
                 state_dict = {k: v.to(device="meta", dtype=v.dtype) for k, v in state_dict.items()}

-            original(self, state_dict, strict=strict)
+            original(module, state_dict, strict=strict)

         module_load_state_dict = self.replace(torch.nn.Module, 'load_state_dict', lambda *args, **kwargs: load_state_dict(module_load_state_dict, *args, **kwargs))
         module_load_from_state_dict = self.replace(torch.nn.Module, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(module_load_from_state_dict, *args, **kwargs))
diff --git a/modules/sd_models.py b/modules/sd_models.py
index f6fbdcd6..f912fe16 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -518,6 +518,13 @@ def send_model_to_cpu(m):
     devices.torch_gc()


+def model_target_device():
+    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+        return devices.cpu
+    else:
+        return devices.device
+
+
 def send_model_to_device(m):
     if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
         lowvram.setup_for_low_vram(m, shared.cmd_opts.medvram)
@@ -579,7 +586,15 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):

     timer.record("create model")

-    with sd_disable_initialization.LoadStateDictOnMeta(state_dict, devices.cpu):
+    if shared.cmd_opts.no_half:
+        weight_dtype_conversion = None
+    else:
+        weight_dtype_conversion = {
+            'first_stage_model': None,
+            '': torch.float16,
+        }
+
+    with sd_disable_initialization.LoadStateDictOnMeta(state_dict, device=model_target_device(), weight_dtype_conversion=weight_dtype_conversion):
         load_model_weights(sd_model, checkpoint_info, state_dict, timer)
     timer.record("load weights from state dict")
--
cgit v1.2.1

From 0cf85b24df4c5d46461baea78fb233d721d6e1b1 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Wed, 16 Aug 2023 20:18:46 +0900
Subject: auto add data-dir to gradio-allowed-path

---
 modules/cmd_args.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index b0a11538..f360f484 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -81,7 +81,7 @@ parser.add_argument("--gradio-auth", type=str, help='set gradio authentication l
"/path/to/auth/file" same auth format as --gradio-auth', default=None) parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything') parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything") -parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it") +parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it", default=[data_path]) parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv')) parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False) -- cgit v1.2.1 From e1a29266b29455cfd9bb6be207aba5abbd177b37 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 17 Aug 2023 00:24:24 +0900 Subject: return empty list if extensions_dir not exist --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 444f16fa..7e4d5a61 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -246,7 +246,7 @@ def list_extensions(settings_file): disabled_extensions = set(settings.get('disabled_extensions', [])) disable_all_extensions = settings.get('disable_all_extensions', 'none') - if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions: + if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions or not os.path.isdir(extensions_dir): return [] return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions] -- cgit v1.2.1 From 254be4eeb259fd3bd2452250fca1bd278fa248ff Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Wed, 16 Aug 2023 21:45:19 -0400 Subject: Add extra noise callback --- modules/script_callbacks.py | 26 ++++++++++++++++++++++++++ modules/sd_samplers_kdiffusion.py | 4 ++++ modules/sd_samplers_timesteps.py | 4 ++++ 3 files changed, 34 insertions(+) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 77ee55ee..fab23551 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -28,6 +28,15 @@ class ImageSaveParams: """dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'""" +class ExtraNoiseParams: + def __init__(self, noise, x): + self.noise = noise + """Random noise generated by the seed""" + + self.x = x + """Latent image representation of the image""" + + class CFGDenoiserParams: def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond): self.x = x @@ -100,6 +109,7 @@ callback_map = dict( callbacks_ui_settings=[], callbacks_before_image_saved=[], callbacks_image_saved=[], + callbacks_extra_noise=[], callbacks_cfg_denoiser=[], callbacks_cfg_denoised=[], callbacks_cfg_after_cfg=[], @@ -189,6 +199,14 @@ def image_saved_callback(params: ImageSaveParams): report_exception(c, 'image_saved_callback') +def extra_noise_callback(params: ExtraNoiseParams): + for c in callback_map['callbacks_extra_noise']: + try: + c.callback(params) + except Exception: + report_exception(c, 'callbacks_extra_noise') + + def 
 def cfg_denoiser_callback(params: CFGDenoiserParams):
     for c in callback_map['callbacks_cfg_denoiser']:
         try:
@@ -367,6 +385,14 @@ def on_image_saved(callback):
     add_callback(callback_map['callbacks_image_saved'], callback)


+def on_extra_noise(callback):
+    """register a function to be called before adding extra noise in img2img or hires fix;
+    The callback is called with one argument:
+        - params: ExtraNoiseParams - contains noise determined by seed and latent representation of image
+    """
+    add_callback(callback_map['callbacks_extra_noise'], callback)
+
+
 def on_cfg_denoiser(callback):
     """register a function to be called in the kdiffusion cfg_denoiser method after building the inner model inputs.
     The callback is called with one argument:
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 08866e41..b9e0d577 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -3,6 +3,7 @@ import inspect
 import k_diffusion.sampling
 from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser
 from modules.sd_samplers_cfg_denoiser import CFGDenoiser  # noqa: F401
+from modules.script_callbacks import ExtraNoiseParams, extra_noise_callback

 from modules.shared import opts
 import modules.shared as shared
@@ -147,6 +148,9 @@ class KDiffusionSampler(sd_samplers_common.Sampler):

             if opts.img2img_extra_noise > 0:
                 p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise
+                extra_noise_params = ExtraNoiseParams(noise, x)
+                extra_noise_callback(extra_noise_params)
+                noise = extra_noise_params.noise
                 xi += noise * opts.img2img_extra_noise

         extra_params_kwargs = self.initialize(p)
diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py
index 68aea454..7a6cbd46 100644
--- a/modules/sd_samplers_timesteps.py
+++ b/modules/sd_samplers_timesteps.py
@@ -3,6 +3,7 @@ import inspect
 import sys

 from modules import devices, sd_samplers_common, sd_samplers_timesteps_impl
 from modules.sd_samplers_cfg_denoiser import CFGDenoiser
+from modules.script_callbacks import ExtraNoiseParams, extra_noise_callback

 from modules.shared import opts
 import modules.shared as shared
@@ -106,6 +107,9 @@ class CompVisSampler(sd_samplers_common.Sampler):

             if opts.img2img_extra_noise > 0:
                 p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise
+                extra_noise_params = ExtraNoiseParams(noise, x)
+                extra_noise_callback(extra_noise_params)
+                noise = extra_noise_params.noise
                 xi += noise * opts.img2img_extra_noise * sqrt_alpha_cumprod

         extra_params_kwargs = self.initialize(p)
--
cgit v1.2.1
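
Note: with this callback, an extension script can inspect or rewrite the seed
noise before the two samplers above blend it into the latent. A hypothetical
registration sketch (the halving factor is arbitrary; params.x carries the
latent image for content-dependent adjustments):

    from modules import script_callbacks

    def my_extra_noise(params: script_callbacks.ExtraNoiseParams):
        # the samplers read params.noise back after the callback runs
        params.noise = params.noise * 0.5

    script_callbacks.on_extra_noise(my_extra_noise)
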
From 0dc74545c0b5510911757ed9f2be703aab58f014 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Thu, 17 Aug 2023 07:54:07 +0300
Subject: resolve the issue with loading fp16 checkpoints while using --no-half

---
 modules/sd_models.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/modules/sd_models.py b/modules/sd_models.py
index f912fe16..685585b1 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -343,7 +343,10 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
         model.to(memory_format=torch.channels_last)
         timer.record("apply channels_last")

-    if not shared.cmd_opts.no_half:
+    if shared.cmd_opts.no_half:
+        model.float()
+        timer.record("apply float()")
+    else:
         vae = model.first_stage_model
         depth_model = getattr(model, 'depth_model', None)

--
cgit v1.2.1

From 3003b10e0ab84abb6298285e707b3618ceec51dc Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 17 Aug 2023 18:10:55 -0400
Subject: Attempt to resolve NaN issue with unstable VAEs in fp32 mk2

---
 modules/sd_samplers_common.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 7dc79ea8..67deefa5 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -49,7 +49,8 @@ def samples_to_images_tensor(sample, approximation=None, model=None):
     else:
         if model is None:
             model = shared.sd_model
-        x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype))
+        with devices.without_autocast():  # fixes an issue with unstable VAEs that are flaky even in fp32
+            x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype))

     return x_sample
--
cgit v1.2.1

From 46e8898f654f5567a9704ab18c67be155158373a Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 17 Aug 2023 19:35:34 -0400
Subject: Fix img2img background color not being used

---
 modules/img2img.py | 6 ++----
 modules/ui.py      | 4 ++--
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/modules/img2img.py b/modules/img2img.py
index 328cb0e9..1519e132 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -122,15 +122,14 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
     is_batch = mode == 5

     if mode == 0:  # img2img
-        image = init_img.convert("RGB")
+        image = init_img
         mask = None
     elif mode == 1:  # img2img sketch
-        image = sketch.convert("RGB")
+        image = sketch
         mask = None
     elif mode == 2:  # inpaint
         image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
         mask = processing.create_binary_mask(mask)
-        image = image.convert("RGB")
     elif mode == 3:  # inpaint sketch
         image = inpaint_color_sketch
         orig = inpaint_color_sketch_orig or inpaint_color_sketch
@@ -139,7 +138,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
         mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
         blur = ImageFilter.GaussianBlur(mask_blur)
         image = Image.composite(image.filter(blur), orig, mask.filter(blur))
-        image = image.convert("RGB")
     elif mode == 4:  # inpaint upload mask
         image = init_img_inpaint
         mask = init_mask_inpaint
diff --git a/modules/ui.py b/modules/ui.py
index c98d9849..01f77849 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -575,7 +575,7 @@ def create_ui():
                         add_copy_image_controls('img2img', init_img)

                     with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
-                        sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
+                        sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
                         add_copy_image_controls('sketch', sketch)

                     with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
                         add_copy_image_controls('inpaint', init_img_with_mask)

                     with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
-                        inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
+                        inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
                         inpaint_color_sketch_orig = gr.State(None)
                         add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
--
cgit v1.2.1

From 3ce5fb8e5c3a6577172e89e964d570f8b31a998b Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 17 Aug 2023 20:03:26 -0400
Subject: Add option for faster live interrupt

---
 modules/sd_samplers_common.py | 2 +-
 modules/shared_options.py     | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 7dc79ea8..f0bc8e6a 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -36,7 +36,7 @@ approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD":

 def samples_to_images_tensor(sample, approximation=None, model=None):
     '''latents -> images [-1, 1]'''

-    if approximation is None:
+    if approximation is None or (shared.state.interrupted and opts.live_preview_fast_interrupt):
         approximation = approximation_indexes.get(opts.show_progress_type, 0)

     if approximation == 2:
diff --git a/modules/shared_options.py b/modules/shared_options.py
index 79cbb92e..3e4bcaef 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -281,6 +281,7 @@ options_templates.update(options_section(('ui', "Live previews"), {
     "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"),
     "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
     "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
+    "live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"),
 }))

 options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
--
cgit v1.2.1
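
Note: the live-interrupt option works by overriding the requested decode method
once the user has interrupted: instead of the full VAE, the cheap live-preview
decoder produces the final image. A simplified sketch of that decision, with
plain arguments standing in for modules.shared state:

    def pick_approximation(approximation, interrupted, fast_interrupt, preview_type):
        approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}
        if approximation is None or (interrupted and fast_interrupt):
            # fall back to whatever the live preview method is set to
            return approximation_indexes.get(preview_type, 0)
        return approximation

    # a full decode (0) was requested, but the run was interrupted: TAESD (3) is used instead
    print(pick_approximation(0, interrupted=True, fast_interrupt=True, preview_type="TAESD"))
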
From 13f1357b7f1e7721d73a56b20a9f5a61472eabba Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 17 Aug 2023 20:21:46 -0400
Subject: Make image viewer actually fit the whole page

---
 style.css | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/style.css b/style.css
index d7f87f81..44f970e8 100644
--- a/style.css
+++ b/style.css
@@ -609,13 +609,19 @@ table.popup-table .link{
     display: flex;
     gap: 1em;
     padding: 1em;
-    background-color: rgba(0,0,0,0.2);
+    background-color:rgba(0,0,0,0);
+    z-index: 1;
+    transition: 0.2s ease background-color;
+}
+.modalControls:hover {
+    background-color:rgba(0,0,0,0.9);
 }
 .modalClose {
     margin-left: auto;
 }
 .modalControls span{
     color: white;
+    text-shadow: 0px 0px 0.25em black;
     font-size: 35px;
     font-weight: bold;
     cursor: pointer;
@@ -640,6 +646,13 @@ table.popup-table .link{
     min-height: 0;
 }

+#modalImage{
+    position: absolute;
+    top: 50%;
+    left: 50%;
+    transform: translateX(-50%) translateY(-50%);
+}
+
 .modalPrev,
 .modalNext {
     cursor: pointer;
--
cgit v1.2.1

From 959f8b32d5994b981845bde051ca74139a12d78b Mon Sep 17 00:00:00 2001
From: Cade Schlaefli
Date: Thu, 17 Aug 2023 20:48:17 -0500
Subject: fix issues with model refresh

---
 modules/api/api.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/modules/api/api.py b/modules/api/api.py
index fb2c2ce9..51207648 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -24,7 +24,6 @@ from modules.textual_inversion.preprocess import preprocess
 from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
 from PIL import PngImagePlugin,Image
 from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights, checkpoint_aliases
-from modules.sd_vae import vae_dict
 from modules.sd_models_config import find_checkpoint_config_near_filename
 from modules.realesrgan_model import get_realesrgan_models
 from modules import devices
@@ -567,10 +566,12 @@ class Api:
         ]

     def get_sd_models(self):
-        return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
+        import modules.sd_models as sd_models
+        return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in sd_models.checkpoints_list.values()]

     def get_sd_vaes(self):
-        return [{"model_name": x, "filename": vae_dict[x]} for x in vae_dict.keys()]
+        import modules.sd_vae as sd_vae
+        return [{"model_name": x, "filename": sd_vae.vae_dict[x]} for x in sd_vae.vae_dict.keys()]

     def get_hypernetworks(self):
         return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
--
cgit v1.2.1

From f9c2216ffaa72e2c435e38ca221fd8707936a9d5 Mon Sep 17 00:00:00 2001
From: Cade Schlaefli
Date: Thu, 17 Aug 2023 21:14:14 -0500
Subject: remove unused import

---
 modules/api/api.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/api/api.py b/modules/api/api.py
index 51207648..da1fdbca 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -23,7 +23,7 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_
 from modules.textual_inversion.preprocess import preprocess
 from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
 from PIL import PngImagePlugin,Image
-from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights, checkpoint_aliases
+from modules.sd_models import unload_model_weights, reload_model_weights, checkpoint_aliases
 from modules.sd_models_config import find_checkpoint_config_near_filename
 from modules.realesrgan_model import get_realesrgan_models
 from modules import devices
--
cgit v1.2.1

From 8a1f32b6a5ce4dd5485fafd174eeeb142a877940 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Fri, 18 Aug 2023 14:04:46 +0900
Subject: image hash

---
 modules/images.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/modules/images.py b/modules/images.py
index 019c1d60..ea5df6dc 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -368,7 +368,8 @@ class FilenameGenerator:
         'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
         'user': lambda self: self.p.user,
         'vae_filename': lambda self: self.get_vae_filename(),
-        'none': lambda self: '',  # Overrides the default so you can get just the sequence number
+        'none': lambda self: '',  # Overrides the default, so you can get just the sequence number
+        'image_hash': lambda self, *args: self.image_hash(*args)  # accepts formats: [image_hash] default full hash
     }
     default_time_format = '%Y%m%d%H%M%S'

@@ -448,6 +449,10 @@ class FilenameGenerator:

         return sanitize_filename_part(formatted_time, replace_spaces=False)

+    def image_hash(self, *args):
+        length = int(args[0]) if (args and args[0] != "") else None
+        return hashlib.sha256(self.image.tobytes()).hexdigest()[0:length]
+
     def apply(self, x):
         res = ''
--
cgit v1.2.1
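
Note: the new [image_hash] filename pattern hashes the raw pixel data of the
saved image, and an optional numeric argument truncates the hex digest (the
exact bracket syntax for passing the argument is not shown in this excerpt).
The core of it is just:

    import hashlib
    from PIL import Image

    image = Image.new('RGB', (64, 64), (10, 20, 30))    # stand-in for a generated image

    full = hashlib.sha256(image.tobytes()).hexdigest()  # what plain [image_hash] produces
    short = full[0:8]                                   # a truncated variant, length 8
    print(full, short)
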
From a81dc43fcd99c8952654ee905f9875cea7ba8613 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Fri, 18 Aug 2023 15:07:40 +0900
Subject: negative_prompt full_prompt hash

---
 modules/images.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/modules/images.py b/modules/images.py
index ea5df6dc..a6b4fb1e 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -355,7 +355,9 @@ class FilenameGenerator:
         'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
         'datetime': lambda self, *args: self.datetime(*args),  # accepts formats: [datetime], [datetime], [datetime