-rw-r--r--  .eslintrc.js | 7
-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.yml | 129
-rw-r--r--  .github/workflows/on_pull_request.yaml | 2
-rw-r--r--  .github/workflows/run_tests.yaml | 10
-rw-r--r--  .gitignore | 1
-rw-r--r--  CHANGELOG.md | 322
-rw-r--r--  CITATION.cff | 7
-rw-r--r--  README.md | 24
-rw-r--r--  configs/alt-diffusion-m18-inference.yaml | 73
-rw-r--r--  configs/sd_xl_inpaint.yaml | 98
-rw-r--r--  extensions-builtin/Lora/extra_networks_lora.py | 10
-rw-r--r--  extensions-builtin/Lora/lora_logger.py | 33
-rw-r--r--  extensions-builtin/Lora/lora_patches.py | 31
-rw-r--r--  extensions-builtin/Lora/lyco_helpers.py | 47
-rw-r--r--  extensions-builtin/Lora/network.py | 10
-rw-r--r--  extensions-builtin/Lora/network_full.py | 9
-rw-r--r--  extensions-builtin/Lora/network_glora.py | 33
-rw-r--r--  extensions-builtin/Lora/network_hada.py | 12
-rw-r--r--  extensions-builtin/Lora/network_ia3.py | 2
-rw-r--r--  extensions-builtin/Lora/network_lokr.py | 18
-rw-r--r--  extensions-builtin/Lora/network_lora.py | 6
-rw-r--r--  extensions-builtin/Lora/network_norm.py | 28
-rw-r--r--  extensions-builtin/Lora/network_oft.py | 82
-rw-r--r--  extensions-builtin/Lora/networks.py | 261
-rw-r--r--  extensions-builtin/Lora/scripts/lora_script.py | 46
-rw-r--r--  extensions-builtin/Lora/ui_edit_user_metadata.py | 12
-rw-r--r--  extensions-builtin/Lora/ui_extra_networks_lora.py | 15
-rw-r--r--  extensions-builtin/ScuNET/scripts/scunet_model.py | 68
-rw-r--r--  extensions-builtin/ScuNET/scunet_model_arch.py | 268
-rw-r--r--  extensions-builtin/SwinIR/scripts/swinir_model.py | 156
-rw-r--r--  extensions-builtin/SwinIR/swinir_model_arch.py | 867
-rw-r--r--  extensions-builtin/SwinIR/swinir_model_arch_v2.py | 1017
-rw-r--r--  extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 216
-rw-r--r--  extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py | 1
-rw-r--r--  extensions-builtin/canvas-zoom-and-pan/style.css | 3
-rw-r--r--  extensions-builtin/extra-options-section/scripts/extra_options_section.py | 52
-rw-r--r--  extensions-builtin/hypertile/hypertile.py | 351
-rw-r--r--  extensions-builtin/hypertile/scripts/hypertile_script.py | 109
-rw-r--r--  extensions-builtin/hypertile/scripts/hypertile_xyz.py | 51
-rw-r--r--  extensions-builtin/mobile/javascript/mobile.js | 8
-rw-r--r--  extensions-builtin/soft-inpainting/scripts/soft_inpainting.py | 747
-rw-r--r--  javascript/dragdrop.js | 2
-rw-r--r--  javascript/edit-attention.js | 79
-rw-r--r--  javascript/extensions.js | 2
-rw-r--r--  javascript/extraNetworks.js | 174
-rw-r--r--  javascript/hints.js | 11
-rw-r--r--  javascript/imageviewer.js | 12
-rw-r--r--  javascript/inputAccordion.js | 68
-rw-r--r--  javascript/localStorage.js | 26
-rw-r--r--  javascript/localization.js | 43
-rw-r--r--  javascript/notification.js | 8
-rw-r--r--  javascript/progressbar.js | 67
-rw-r--r--  javascript/resizeHandle.js | 141
-rw-r--r--  javascript/settings.js | 71
-rw-r--r--  javascript/token-counters.js | 26
-rw-r--r--  javascript/ui.js | 112
-rw-r--r--  launch.py | 15
-rw-r--r--  modules/api/api.py | 269
-rw-r--r--  modules/api/models.py | 52
-rw-r--r--  modules/cache.py | 10
-rw-r--r--  modules/call_queue.py | 7
-rw-r--r--  modules/cmd_args.py | 19
-rw-r--r--  modules/codeformer/codeformer_arch.py | 276
-rw-r--r--  modules/codeformer/vqgan_arch.py | 435
-rw-r--r--  modules/codeformer_model.py | 158
-rw-r--r--  modules/config_states.py | 19
-rw-r--r--  modules/devices.py | 109
-rw-r--r--  modules/errors.py | 67
-rw-r--r--  modules/esrgan_model.py | 199
-rw-r--r--  modules/esrgan_model_arch.py | 465
-rw-r--r--  modules/extensions.py | 110
-rw-r--r--  modules/extra_networks.py | 77
-rw-r--r--  modules/extras.py | 39
-rw-r--r--  modules/face_restoration_utils.py | 180
-rw-r--r--  modules/fifo_lock.py | 37
-rw-r--r--  modules/gfpgan_model.py | 151
-rw-r--r--  modules/gitpython_hack.py | 2
-rw-r--r--  modules/gradio_extensons.py | 83
-rw-r--r--  modules/hat_model.py | 43
-rw-r--r--  modules/hypernetworks/hypernetwork.py | 9
-rw-r--r--  modules/images.py | 83
-rw-r--r--  modules/img2img.py | 92
-rw-r--r--  modules/import_hook.py | 11
-rw-r--r--  modules/infotext.py (renamed from modules/generation_parameters_copypaste.py) | 149
-rw-r--r--  modules/infotext_versions.py | 39
-rw-r--r--  modules/initialize.py | 165
-rw-r--r--  modules/initialize_util.py | 208
-rw-r--r--  modules/interrogate.py | 9
-rw-r--r--  modules/launch_utils.py | 162
-rw-r--r--  modules/localization.py | 24
-rw-r--r--  modules/logging_config.py | 41
-rw-r--r--  modules/lowvram.py | 21
-rw-r--r--  modules/mac_specific.py | 22
-rw-r--r--  modules/modelloader.py | 84
-rw-r--r--  modules/models/diffusion/ddpm_edit.py | 7
-rw-r--r--  modules/options.py | 308
-rw-r--r--  modules/patches.py | 64
-rw-r--r--  modules/paths.py | 3
-rw-r--r--  modules/paths_internal.py | 2
-rw-r--r--  modules/postprocessing.py | 149
-rw-r--r--  modules/processing.py | 1003
-rw-r--r--  modules/processing_scripts/refiner.py | 50
-rw-r--r--  modules/processing_scripts/seed.py | 112
-rw-r--r--  modules/progress.py | 75
-rw-r--r--  modules/prompt_parser.py | 67
-rw-r--r--  modules/realesrgan_model.py | 159
-rw-r--r--  modules/restart.py | 4
-rw-r--r--  modules/rng.py | 170
-rw-r--r--  modules/rng_philox.py | 102
-rw-r--r--  modules/script_callbacks.py | 35
-rw-r--r--  modules/scripts.py | 407
-rw-r--r--  modules/scripts_postprocessing.py | 86
-rw-r--r--  modules/sd_disable_initialization.py | 149
-rw-r--r--  modules/sd_hijack.py | 77
-rw-r--r--  modules/sd_hijack_clip.py | 4
-rw-r--r--  modules/sd_hijack_inpainting.py | 97
-rw-r--r--  modules/sd_hijack_optimizations.py | 17
-rw-r--r--  modules/sd_models.py | 414
-rw-r--r--  modules/sd_models_config.py | 14
-rw-r--r--  modules/sd_models_types.py | 34
-rw-r--r--  modules/sd_models_xl.py | 28
-rw-r--r--  modules/sd_samplers.py | 19
-rw-r--r--  modules/sd_samplers_cfg_denoiser.py | 247
-rw-r--r--  modules/sd_samplers_common.py | 270
-rw-r--r--  modules/sd_samplers_compvis.py | 224
-rw-r--r--  modules/sd_samplers_extra.py | 74
-rw-r--r--  modules/sd_samplers_kdiffusion.py | 382
-rw-r--r--  modules/sd_samplers_timesteps.py | 168
-rw-r--r--  modules/sd_samplers_timesteps_impl.py | 137
-rw-r--r--  modules/sd_unet.py | 16
-rw-r--r--  modules/sd_vae.py | 105
-rw-r--r--  modules/sd_vae_approx.py | 2
-rw-r--r--  modules/sd_vae_taesd.py | 52
-rw-r--r--  modules/shared.py | 880
-rw-r--r--  modules/shared_cmd_options.py | 18
-rw-r--r--  modules/shared_gradio_themes.py | 67
-rw-r--r--  modules/shared_init.py | 49
-rw-r--r--  modules/shared_items.py | 72
-rw-r--r--  modules/shared_options.py | 377
-rw-r--r--  modules/shared_state.py | 159
-rw-r--r--  modules/shared_total_tqdm.py | 37
-rw-r--r--  modules/styles.py | 141
-rw-r--r--  modules/sub_quadratic_attention.py | 8
-rw-r--r--  modules/sysinfo.py | 36
-rw-r--r--  modules/textual_inversion/autocrop.py | 239
-rw-r--r--  modules/textual_inversion/preprocess.py | 232
-rw-r--r--  modules/textual_inversion/textual_inversion.py | 83
-rw-r--r--  modules/textual_inversion/ui.py | 7
-rw-r--r--  modules/timer.py | 23
-rw-r--r--  modules/torch_utils.py | 17
-rw-r--r--  modules/txt2img.py | 23
-rw-r--r--  modules/ui.py | 993
-rw-r--r--  modules/ui_checkpoint_merger.py | 124
-rw-r--r--  modules/ui_common.py | 65
-rw-r--r--  modules/ui_components.py | 73
-rw-r--r--  modules/ui_extensions.py | 283
-rw-r--r--  modules/ui_extra_networks.py | 131
-rw-r--r--  modules/ui_extra_networks_checkpoints.py | 20
-rw-r--r--  modules/ui_extra_networks_checkpoints_user_metadata.py | 66
-rw-r--r--  modules/ui_extra_networks_hypernets.py | 21
-rw-r--r--  modules/ui_extra_networks_textual_inversion.py | 15
-rw-r--r--  modules/ui_extra_networks_user_metadata.py | 22
-rw-r--r--  modules/ui_gradio_extensions.py | 9
-rw-r--r--  modules/ui_loadsave.py | 43
-rw-r--r--  modules/ui_postprocessing.py | 22
-rw-r--r--  modules/ui_prompt_styles.py | 120
-rw-r--r--  modules/ui_settings.py | 63
-rw-r--r--  modules/ui_tempdir.py | 7
-rw-r--r--  modules/ui_toprow.py | 143
-rw-r--r--  modules/upscaler.py | 9
-rw-r--r--  modules/upscaler_utils.py | 140
-rw-r--r--  modules/util.py | 68
-rw-r--r--  modules/xlmr.py | 5
-rw-r--r--  modules/xlmr_m18.py | 166
-rw-r--r--  modules/xpu_specific.py | 59
-rw-r--r--  pyproject.toml | 1
-rw-r--r--  requirements.txt | 10
-rw-r--r--  requirements_versions.txt | 23
-rw-r--r--  script.js | 56
-rw-r--r--  scripts/postprocessing_caption.py | 30
-rw-r--r--  scripts/postprocessing_codeformer.py | 16
-rw-r--r--  scripts/postprocessing_create_flipped_copies.py | 32
-rw-r--r--  scripts/postprocessing_focal_crop.py | 54
-rw-r--r--  scripts/postprocessing_gfpgan.py | 13
-rw-r--r--  scripts/postprocessing_split_oversized.py | 71
-rw-r--r--  scripts/postprocessing_upscale.py | 14
-rw-r--r--  scripts/processing_autosized_crop.py | 64
-rw-r--r--  scripts/prompts_from_file.py | 32
-rw-r--r--  scripts/xyz_grid.py | 189
-rw-r--r--  style.css | 279
-rw-r--r--  test/conftest.py | 27
-rw-r--r--  test/test_face_restorers.py | 29
-rw-r--r--  test/test_files/two-faces.jpg | bin 0 -> 14768 bytes
-rw-r--r--  test/test_outputs/.gitkeep | 0
-rw-r--r--  test/test_torch_utils.py | 19
-rw-r--r--  webui-macos-env.sh | 4
-rw-r--r--  webui.bat | 5
-rw-r--r--  webui.py | 410
-rwxr-xr-x  webui.sh | 54
199 files changed, 12972 insertions, 8953 deletions
diff --git a/.eslintrc.js b/.eslintrc.js
index f33aca09..cf839769 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -74,6 +74,7 @@ module.exports = {
create_submit_args: "readonly",
restart_reload: "readonly",
updateInput: "readonly",
+ onEdit: "readonly",
//extraNetworks.js
requestGet: "readonly",
popup: "readonly",
@@ -87,5 +88,11 @@ module.exports = {
modalNextImage: "readonly",
// token-counters.js
setupTokenCounters: "readonly",
+ // localStorage.js
+ localSet: "readonly",
+ localGet: "readonly",
+ localRemove: "readonly",
+ // resizeHandle.js
+ setupResizeHandle: "writable"
}
};
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index d80b24e2..5876e941 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -1,35 +1,55 @@
name: Bug Report
-description: You think somethings is broken in the UI
+description: You think something is broken in the UI
title: "[Bug]: "
labels: ["bug-report"]
body:
+ - type: markdown
+ attributes:
+ value: |
+ > The title of the bug report should be short and descriptive.
+ > Use relevant keywords for searchability.
+ > Do not leave it blank, but also do not put an entire error log in it.
- type: checkboxes
attributes:
- label: Is there an existing issue for this?
- description: Please search to see if an issue already exists for the bug you encountered, and that it hasn't been fixed in a recent build/commit.
+ label: Checklist
+ description: |
+ Please perform basic debugging to see if extensions or configuration is the cause of the issue.
+ Basic debug procedure
+  1. Disable all third-party extensions - check if extension is the cause
+  2. Update extensions and webui - sometimes things just need to be updated
+  3. Backup and remove your config.json and ui-config.json - check if the issue is caused by bad configuration
+  4. Delete venv with third-party extensions disabled - sometimes extensions might cause wrong libraries to be installed
+  5. Try a fresh installation of webui in a different directory - see if a clean installation solves the issue
+ Before making an issue report, please check that the issue hasn't been reported recently.
options:
- - label: I have searched the existing issues and checked the recent builds/commits
- required: true
+ - label: The issue exists after disabling all extensions
+ - label: The issue exists on a clean installation of webui
+ - label: The issue is caused by an extension, but I believe it is caused by a bug in the webui
+ - label: The issue exists in the current version of the webui
+ - label: The issue has not been reported before recently
+ - label: The issue has been reported before but has not been fixed yet
- type: markdown
attributes:
value: |
- *Please fill this form with as much information as possible, don't forget to fill "What OS..." and "What browsers" and *provide screenshots if possible**
+ > Please fill this form with as much information as possible. Don't forget to fill in the "Sysinfo" and "What browsers" fields, and provide screenshots if possible
- type: textarea
id: what-did
attributes:
label: What happened?
description: Tell us what happened in a very clear and simple way
+ placeholder: |
+ txt2img is not working as intended.
validations:
required: true
- type: textarea
id: steps
attributes:
label: Steps to reproduce the problem
- description: Please provide us with precise step by step information on how to reproduce the bug
- value: |
- 1. Go to ....
- 2. Press ....
+ description: Please provide us with precise step by step instructions on how to reproduce the bug
+ placeholder: |
+ 1. Go to ...
+ 2. Press ...
3. ...
validations:
required: true
@@ -37,64 +57,9 @@ body:
id: what-should
attributes:
label: What should have happened?
- description: Tell what you think the normal behavior should be
- validations:
- required: true
- - type: input
- id: commit
- attributes:
- label: Version or Commit where the problem happens
- description: "Which webui version or commit are you running ? (Do not write *Latest Version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Version: v1.2.3** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)"
- validations:
- required: true
- - type: dropdown
- id: py-version
- attributes:
- label: What Python version are you running on ?
- multiple: false
- options:
- - Python 3.10.x
- - Python 3.11.x (above, no supported yet)
- - Python 3.9.x (below, no recommended)
- - type: dropdown
- id: platforms
- attributes:
- label: What platforms do you use to access the UI ?
- multiple: true
- options:
- - Windows
- - Linux
- - MacOS
- - iOS
- - Android
- - Other/Cloud
- - type: dropdown
- id: device
- attributes:
- label: What device are you running WebUI on?
- multiple: true
- options:
- - Nvidia GPUs (RTX 20 above)
- - Nvidia GPUs (GTX 16 below)
- - AMD GPUs (RX 6000 above)
- - AMD GPUs (RX 5000 below)
- - CPU
- - Other GPUs
- - type: dropdown
- id: cross_attention_opt
- attributes:
- label: Cross attention optimization
- description: What cross attention optimization are you using, Settings -> Optimizations -> Cross attention optimization
- multiple: false
- options:
- - Automatic
- - xformers
- - sdp-no-mem
- - sdp
- - Doggettx
- - V1
- - InvokeAI
- - "None "
+ description: Tell us what you think the normal behavior should be
+ placeholder: |
+ WebUI should ...
validations:
required: true
- type: dropdown
@@ -108,26 +73,25 @@ body:
- Brave
- Apple Safari
- Microsoft Edge
+ - Android
+ - iOS
+ - Other
- type: textarea
- id: cmdargs
- attributes:
- label: Command Line Arguments
- description: Are you using any launching parameters/command line arguments (modified webui-user .bat/.sh) ? If yes, please write them below. Write "No" otherwise.
- render: Shell
- validations:
- required: true
- - type: textarea
- id: extensions
+ id: sysinfo
attributes:
- label: List of extensions
- description: Are you using any extensions other than built-ins? If yes, provide a list, you can copy it at "Extensions" tab. Write "No" otherwise.
+ label: Sysinfo
+ description: System info file, generated by WebUI. You can generate it in settings, on the Sysinfo page. Drag the file into the field to upload it. If you submit your report without including the sysinfo file, the report will be closed. If needed, review the report to make sure it includes no personal information you don't want to share. If you can't start WebUI, you can use --dump-sysinfo commandline argument to generate the file.
+ placeholder: |
+ 1. Go to WebUI Settings -> Sysinfo -> Download system info.
+ If WebUI fails to launch, use --dump-sysinfo commandline argument to generate the file
+ 2. Upload the Sysinfo as an attached file. Do NOT paste it in as plain text.
validations:
required: true
- type: textarea
id: logs
attributes:
label: Console logs
- description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after your bug happened. If it's very long, provide a link to pastebin or similar service.
+ description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occurred. If it's very long, provide a link to pastebin or similar service.
render: Shell
validations:
required: true
@@ -135,4 +99,7 @@ body:
id: misc
attributes:
label: Additional information
- description: Please provide us with any relevant additional info or context.
+ description: |
+ Please provide us with any relevant additional info or context.
+ Examples:
+  I have updated my GPU driver recently.
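The sysinfo field above asks reporters to attach the file generated from Settings -> Sysinfo or via the --dump-sysinfo commandline argument. As a side note (not part of the template), a running instance also serves the report over HTTP at the /internal/sysinfo route referenced later in this diff's changelog; the snippet below is a minimal sketch that assumes the default local address and an unauthenticated server.
```python
# Minimal sketch: fetch the sysinfo report from a locally running instance.
# Assumes the default address 127.0.0.1:7860 and no --gradio-auth/--api-auth;
# the Settings page download or --dump-sysinfo remain the documented routes.
import urllib.request

URL = "http://127.0.0.1:7860/internal/sysinfo"

with urllib.request.urlopen(URL) as response:
    report = response.read().decode("utf-8")

with open("sysinfo.json", "w", encoding="utf-8") as f:
    f.write(report)

print(f"saved {len(report)} characters to sysinfo.json")
```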
diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml
index 78e608ee..9e44c806 100644
--- a/.github/workflows/on_pull_request.yaml
+++ b/.github/workflows/on_pull_request.yaml
@@ -20,7 +20,7 @@ jobs:
# not to have GHA download an (at the time of writing) 4 GB cache
# of PyTorch and other dependencies.
- name: Install Ruff
- run: pip install ruff==0.0.272
+ run: pip install ruff==0.1.6
- name: Run Ruff
run: ruff .
lint-js:
diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml
index 3dafaf8d..f42e4758 100644
--- a/.github/workflows/run_tests.yaml
+++ b/.github/workflows/run_tests.yaml
@@ -20,6 +20,12 @@ jobs:
cache-dependency-path: |
**/requirements*txt
launch.py
+ - name: Cache models
+ id: cache-models
+ uses: actions/cache@v3
+ with:
+ path: models
+ key: "2023-12-30"
- name: Install test dependencies
run: pip install wait-for-it -r requirements-test.txt
env:
@@ -33,6 +39,8 @@ jobs:
TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu
WEBUI_LAUNCH_LIVE_OUTPUT: "1"
PYTHONUNBUFFERED: "1"
+ - name: Print installed packages
+ run: pip freeze
- name: Start test server
run: >
python -m coverage run
@@ -49,7 +57,7 @@ jobs:
2>&1 | tee output.txt &
- name: Run tests
run: |
- wait-for-it --service 127.0.0.1:7860 -t 600
+ wait-for-it --service 127.0.0.1:7860 -t 20
python -m pytest -vv --junitxml=test/results.xml --cov . --cov-report=xml --verify-base-url test
- name: Kill test server
if: always()
diff --git a/.gitignore b/.gitignore
index 09734267..6790e9ee 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,3 +37,4 @@ notification.mp3
/node_modules
/package-lock.json
/.coverage*
+/test/test_outputs
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b18c6867..67429bbf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,325 @@
+## 1.7.0
+
+### Features:
+* settings tab rework: add search field, add categories, split UI settings page into many
+* add altdiffusion-m18 support ([#13364](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13364))
+* support inference with LyCORIS GLora networks ([#13610](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13610))
+* add lora-embedding bundle system ([#13568](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13568))
+* option to move prompt from top row into generation parameters
+* add support for SSD-1B ([#13865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13865))
+* support inference with OFT networks ([#13692](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13692))
+* script metadata and DAG sorting mechanism ([#13944](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13944))
+* support HyperTile optimization ([#13948](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13948))
+* add support for SD 2.1 Turbo ([#14170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14170))
+* remove Train->Preprocessing tab and put all its functionality into Extras tab
+* initial IPEX support for Intel Arc GPU ([#14171](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14171))
+
+### Minor:
+* allow reading model hash from images in img2img batch mode ([#12767](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12767))
+* add option to align with sgm repo's sampling implementation ([#12818](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818))
+* extra field for lora metadata viewer: `ss_output_name` ([#12838](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12838))
+* add action in settings page to calculate all SD checkpoint hashes ([#12909](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12909))
+* add button to copy prompt to style editor ([#12975](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12975))
+* add --skip-load-model-at-start option ([#13253](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13253))
+* write infotext to gif images
+* read infotext from gif images ([#13068](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13068))
+* allow configuring the initial state of InputAccordion in ui-config.json ([#13189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13189))
+* allow editing whitespace delimiters for ctrl+up/ctrl+down prompt editing ([#13444](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13444))
+* prevent accidentally closing popup dialogs ([#13480](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13480))
+* added option to play notification sound or not ([#13631](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13631))
+* show the preview image in the full screen image viewer if available ([#13459](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13459))
+* support for webui.settings.bat ([#13638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13638))
+* add an option to not print stack traces on ctrl+c
+* start/restart generation by Ctrl (Alt) + Enter ([#13644](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13644))
+* update prompts_from_file script to allow concatenating entries with the general prompt ([#13733](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13733))
+* added a visible checkbox to input accordion
+* added an option to hide all txt2img/img2img parameters in an accordion ([#13826](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13826))
+* added 'Path' sorting option for Extra network cards ([#13968](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13968))
+* enable prompt hotkeys in style editor ([#13931](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13931))
+* option to show batch img2img results in UI ([#14009](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14009))
+* infotext updates: add option to disregard certain infotext fields, add option to not include VAE in infotext, add explanation to infotext settings page, move some options to infotext settings page
+* add FP32 fallback support on sd_vae_approx ([#14046](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046))
+* support XYZ scripts / split hires path from unet ([#14126](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14126))
+* allow use of multiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125))
+
+### Extensions and API:
+* update gradio to 3.41.2
+* support installed extensions list api ([#12774](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12774))
+* update pnginfo API to return dict with parsed values
+* add noisy latent to `ExtraNoiseParams` for callback ([#12856](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12856))
+* show extension datetime in UTC ([#12864](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12864), [#12865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12865), [#13281](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13281))
+* add an option to choose how to combine hires fix and refiner
+* include program version in info response. ([#13135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13135))
+* sd_unet support for SDXL
+* patch DDPM.register_betas so that users can put given_betas in model yaml ([#13276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13276))
+* xyz_grid: add prepare ([#13266](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13266))
+* allow multiple localization files with same language in extensions ([#13077](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13077))
+* add onEdit function for js and rework token-counter.js to use it
+* fix the key error exception when processing override_settings keys ([#13567](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13567))
+* ability for extensions to return custom data via api in response.images ([#13463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13463))
+* call state.nextjob() before postprocess*() ([#13762](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13762))
+* add option to set notification sound volume ([#13884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13884))
+* update Ruff to 0.1.6 ([#14059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14059))
+* add Block component creation callback ([#14119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14119))
+* catch uncaught exception with ui creation scripts ([#14120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14120))
+* use extension name to determine whether an extension is installed in the index ([#14063](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14063))
+* update is_installed() from launch_utils.py to fix reinstalling already installed packages ([#14192](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14192))
+
+### Bug Fixes:
+* fix pix2pix producing bad results
+* fix defaults settings page breaking when any of main UI tabs are hidden
+* fix error that causes some extra networks to be disabled if both <lora:> and <lyco:> are present in the prompt
+* fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working
+* prevent duplicate resize handler ([#12795](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12795))
+* small typo: vae resolve bug ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12797))
+* hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12792))
+* don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12780))
+* fix style editing dialog breaking if it's opened in both img2img and txt2img tabs
+* hide --gradio-auth and --api-auth values from /internal/sysinfo report
+* add missing infotext for RNG in options ([#12819](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12819))
+* fix notification not playing when built-in webui tab is inactive ([#12834](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12834))
+* honor `--skip-install` for extension installers ([#12832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832))
+* don't print blank stdout in extension installers ([#12833](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12833), [#12855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12855))
+* get progressbar to display correctly in extensions tab
+* keep order in list of checkpoints when loading model that doesn't have a checksum
+* fix inpainting models in txt2img creating black pictures
+* fix generation params regex ([#12876](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12876))
+* fix batch img2img output dir with script ([#12926](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12926))
+* fix #13080 - Hypernetwork/TI preview generation ([#13084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13084))
+* fix bug with sigma min/max overrides. ([#12995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12995))
+* more accurate check for enabling cuDNN benchmark on 16XX cards ([#12924](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12924))
+* don't use multicond parser for negative prompt counter ([#13118](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13118))
+* fix data-sort-name containing spaces ([#13412](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13412))
+* update card on correct tab when editing metadata ([#13411](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13411))
+* fix viewing/editing metadata when filename contains an apostrophe ([#13395](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13395))
+* fix: --sd_model in "Prompts from file or textbox" script is not working ([#13302](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13302))
+* better Support for Portable Git ([#13231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13231))
+* fix issues when webui_dir is not work_dir ([#13210](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13210))
+* fix: lora-bias-backup don't reset cache ([#13178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13178))
+* account for customizable extra network separators when removing extra network text from the prompt ([#12877](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12877))
+* re fix batch img2img output dir with script ([#13170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13170))
+* fix `--ckpt-dir` path separator and option to use `short name` for checkpoint dropdown ([#13139](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13139))
+* consolidated allowed preview formats, fix extra network `.gif` not working as preview ([#13121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13121))
+* fix venv_dir=- environment variable not working as expected on linux ([#13469](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13469))
+* repair unload sd checkpoint button
+* edit-attention fixes ([#13533](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13533))
+* fix bug when using --gfpgan-models-path ([#13718](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13718))
+* properly apply sort order for extra network cards when selected from dropdown
+* fixes generation restart not working for some users when 'Ctrl+Enter' is pressed ([#13962](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13962))
+* thread safe extra network list_items ([#13014](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13014))
+* fix not able to exit metadata popup when pop up is too big ([#14156](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14156))
+* fix auto focal point crop for opencv >= 4.8 ([#14121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14121))
+* make 'use-cpu all' actually apply to 'all' ([#14131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14131))
+* extras tab batch: actually use original filename
+* make webui not crash when running with --disable-all-extensions option
+
+### Other:
+* non-local condition ([#12814](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12814))
+* fix minor typos ([#12827](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12827))
+* remove xformers Python version check ([#12842](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12842))
+* style: file-metadata word-break ([#12837](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12837))
+* revert SGM noise multiplier change for img2img because it breaks hires fix
+* do not change quicksettings dropdown option when value returned is `None` ([#12854](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12854))
+* [RC 1.6.0 - zoom is partly hidden] Update style.css ([#12839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12839))
+* chore: change extension time format ([#12851](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12851))
+* WEBUI.SH - Use torch 2.1.0 release candidate for Navi 3 ([#12929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12929))
+* add Fallback at images.read_info_from_image if exif data was invalid ([#13028](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13028))
+* update cmd arg description ([#12986](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12986))
+* fix: update shared.opts.data when add_option ([#12957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12957), [#13213](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13213))
+* restore missing tooltips ([#12976](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12976))
+* use default dropdown padding on mobile ([#12880](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12880))
+* put enable console prompts option into settings from commandline args ([#13119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13119))
+* fix some deprecated types ([#12846](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12846))
+* bump to torchsde==0.2.6 ([#13418](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13418))
+* update dragdrop.js ([#13372](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13372))
+* use OrderedDict as LRU cache (opt/bug) ([#13313](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13313))
+* XYZ: do not save sub grids if 'include sub grids' is not selected ([#13282](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13282))
+* initialize state.time_start before state.job_count ([#13229](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13229))
+* fix fieldname regex ([#13458](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13458))
+* change denoising_strength default to None. ([#13466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13466))
+* fix regression ([#13475](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13475))
+* fix IndexError ([#13630](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13630))
+* fix: checkpoints_loaded:{checkpoint:state_dict}, model.load_state_dict issue in dict value empty ([#13535](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13535))
+* update bug_report.yml ([#12991](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12991))
+* requirements_versions httpx==0.24.1 ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839))
+* fix parenthesis auto selection ([#13829](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13829))
+* fix #13796 ([#13797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13797))
+* corrected a typo in `modules/cmd_args.py` ([#13855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13855))
+* feat: fix randn found element of type float at pos 2 ([#14004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14004))
+* adds tqdm handler to logging_config.py for progress bar integration ([#13996](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13996))
+* hotfix: call shared.state.end() after postprocessing done ([#13977](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13977))
+* fix dependency address patch 1 ([#13929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13929))
+* save sysinfo as .json ([#14035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14035))
+* move exception_records related methods to errors.py ([#14084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14084))
+* compatibility ([#13936](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13936))
+* json.dump(ensure_ascii=False) ([#14108](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14108))
+* dir buttons start with / so only the correct dir will be shown and no… ([#13957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13957))
+* alternate implementation for unet forward replacement that does not depend on hijack being applied
+* re-add `keyedit_delimiters_whitespace` setting lost as part of commit e294e46 ([#14178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14178))
+* fix `save_samples` being checked early when saving masked composite ([#14177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14177))
+* slight optimization for mask and mask_composite ([#14181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14181))
+* add import_hook hack to work around basicsr/torchvision incompatibility ([#14186](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14186))
+
+## 1.6.1
+
+### Bug Fixes:
+ * fix an error causing the webui to fail to start ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839))
+
+## 1.6.0
+
+### Features:
+ * refiner support [#12371](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371)
+ * add NV option for Random number generator source setting, which allows generating the same pictures on CPU/AMD/Mac as on NVidia video cards
+ * add style editor dialog
+ * hires fix: add an option to use a different checkpoint for second pass ([#12181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12181))
+ * option to keep multiple loaded models in memory ([#12227](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12227))
+ * new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential ([#12300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12300), [#12519](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12519), [#12542](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12542))
+ * rework DDIM, PLMS, UniPC to use CFG denoiser same as in k-diffusion samplers:
+ * makes all of them work with img2img
+ * makes prompt composition possible (AND)
+ * makes them available for SDXL
+ * always show extra networks tabs in the UI ([#11808](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11808))
+ * use less RAM when creating models ([#11958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11958), [#12599](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12599))
+ * textual inversion inference support for SDXL
+ * extra networks UI: show metadata for SD checkpoints
+ * checkpoint merger: add metadata support
+ * prompt editing and attention: add support for whitespace after the number ([ red : green : 0.5 ]) (seed breaking change) ([#12177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12177))
+ * VAE: allow selecting own VAE for each checkpoint (in user metadata editor)
+ * VAE: add selected VAE to infotext
+ * options in main UI: add own separate setting for txt2img and img2img, correctly read values from pasted infotext, add setting for column count ([#12551](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12551))
+ * add resize handle to txt2img and img2img tabs, allowing you to change the amount of horizontal space given to generation parameters and the resulting image gallery ([#12687](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12687), [#12723](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12723))
+ * change default behavior for batching cond/uncond -- now it's on by default, and is disabled by a UI setting (Optimizations -> Batch cond/uncond) - if you are on lowvram/medvram and are getting OOM exceptions, you will need to enable it
+ * show current position in queue and make it so that requests are processed in the order of arrival ([#12707](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12707))
+ * add `--medvram-sdxl` flag that only enables `--medvram` for SDXL models
+ * prompt editing timeline has separate range for first pass and hires-fix pass (seed breaking change) ([#12457](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12457))
+
+### Minor:
+ * img2img batch: RAM savings, VRAM savings, .tif, .tiff in img2img batch ([#12120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12120), [#12514](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12514), [#12515](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12515))
+ * postprocessing/extras: RAM savings ([#12479](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12479))
+ * XYZ: in the axis labels, remove pathnames from model filenames
+ * XYZ: support hires sampler ([#12298](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12298))
+ * XYZ: new option: use text inputs instead of dropdowns ([#12491](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12491))
+ * add gradio version warning
+ * sort list of VAE checkpoints ([#12297](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12297))
+ * use transparent white for mask in inpainting, along with an option to select the color ([#12326](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12326))
+ * move some settings to their own section: img2img, VAE
+ * add checkbox to show/hide dirs for extra networks
+ * Add TAESD(or more) options for all the VAE encode/decode operation ([#12311](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12311))
+ * gradio theme cache, new gradio themes, along with explanation that the user can input his own values ([#12346](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12346), [#12355](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12355))
+ * sampler fixes/tweaks: s_churn, s_tmin, s_tmax, s_noise ([#12354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12354), [#12356](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12356), [#12357](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12357), [#12358](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12358), [#12375](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12375), [#12521](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12521))
+ * update README.md with correct instructions for Linux installation ([#12352](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12352))
+ * option to not save incomplete images, on by default ([#12338](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12338))
+ * enable cond cache by default
+ * git autofix for repos that are corrupted ([#12230](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12230))
+ * allow to open images in new browser tab by middle mouse button ([#12379](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12379))
+ * automatically open webui in browser when running "locally" ([#12254](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12254))
+ * put commonly used samplers on top, make DPM++ 2M Karras the default choice
+ * zoom and pan: option to auto-expand a wide image, improved integration ([#12413](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12413), [#12727](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12727))
+ * option to cache Lora networks in memory
+ * rework hires fix UI to use accordion
+ * face restoration and tiling moved to settings - use "Options in main UI" setting if you want them back
+ * change quicksettings items to have variable width
+ * Lora: add Norm module, add support for bias ([#12503](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12503))
+ * Lora: output warnings in UI rather than fail for unfitting loras; switch to logging for error output in console
+ * support search and display of hashes for all extra network items ([#12510](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12510))
+ * add extra noise param for img2img operations ([#12564](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12564))
+ * support for Lora with bias ([#12584](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12584))
+ * make interrupt quicker ([#12634](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12634))
+ * configurable gallery height ([#12648](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12648))
+ * make results column sticky ([#12645](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12645))
+ * more hash filename patterns ([#12639](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12639))
+ * make image viewer actually fit the whole page ([#12635](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12635))
+ * make progress bar work independently from live preview display which results in it being updated a lot more often
+ * forbid Full live preview method for medvram and add a setting to undo the forbidding
+ * make it possible to localize tooltips and placeholders
+ * add option to align with sgm repo's sampling implementation ([#12818](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818))
+ * Restore faces and Tiling generation parameters have been moved to settings out of main UI
+ * if you want to put them back into main UI, use `Options in main UI` setting on the UI page.
+
+### Extensions and API:
+ * gradio 3.41.2
+ * also bump versions for packages: transformers, GitPython, accelerate, scikit-image, timm, tomesd
+ * support tooltip kwarg for gradio elements: gr.Textbox(label='hello', tooltip='world')
+ * properly clear the total console progressbar when using txt2img and img2img from API
+ * add cmd_arg --disable-extra-extensions and --disable-all-extensions ([#12294](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12294))
+ * shared.py and webui.py split into many files
+ * add --loglevel commandline argument for logging
+ * add a custom UI element that combines accordion and checkbox
+ * avoid importing gradio in tests because it spams warnings
+ * put infotext label for setting into OptionInfo definition rather than in a separate list
+ * make `StableDiffusionProcessingImg2Img.mask_blur` a property, make more inline with PIL `GaussianBlur` ([#12470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12470))
+ * option to make scripts UI without gr.Group
+ * add a way for scripts to register a callback for before/after just a single component's creation
+ * use dataclass for StableDiffusionProcessing
+ * store patches for Lora in a specialized module instead of inside torch
+ * support http/https URLs in API ([#12663](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12663), [#12698](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12698))
+ * add extra noise callback ([#12616](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12616))
+ * dump current stack traces when exiting with SIGINT
+ * add type annotations for extra fields of shared.sd_model
+
+### Bug Fixes:
+ * Don't crash if out of local storage quota for javascript localStorage
+ * XYZ plot do not fail if an exception occurs
+ * fix missing TI hash in infotext if generation uses both negative and positive TI ([#12269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12269))
+ * localization fixes ([#12307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12307))
+ * fix sdxl model invalid configuration after the hijack
+ * correctly toggle extras checkbox for infotext paste ([#12304](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12304))
+ * open raw sysinfo link in new page ([#12318](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12318))
+ * prompt parser: Account for empty field in alternating words syntax ([#12319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12319))
+ * add tab and carriage return to invalid filename chars ([#12327](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12327))
+ * fix api only Lora not working ([#12387](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12387))
+ * fix options in main UI misbehaving when there's just one element
+ * make it possible to use a sampler from infotext even if it's hidden in the dropdown
+ * fix styles missing from the prompt in infotext when making a grid of a batch of multiple images
+ * prevent bogus progress output in console when calculating hires fix dimensions
+ * fix --use-textbox-seed
+ * fix broken `Lora/Networks: use old method` option ([#12466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12466))
+ * properly return `None` for VAE hash when using `--no-hashing` ([#12463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12463))
+ * MPS/macOS fixes and optimizations ([#12526](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12526))
+ * add second_order to samplers that mistakenly didn't have it
+ * when refreshing cards in extra networks UI, do not discard user's custom resolution
+ * fix processing error that happens if batch_size is not a multiple of how many prompts/negative prompts there are ([#12509](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12509))
+ * fix inpaint upload for alpha masks ([#12588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12588))
+ * fix exception when image sizes are not integers ([#12586](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12586))
+ * fix incorrect TAESD Latent scale ([#12596](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12596))
+ * auto add data-dir to gradio-allowed-path ([#12603](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12603))
+ * fix exception if extensions dir is missing ([#12607](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12607))
+ * fix issues with api model-refresh and vae-refresh ([#12638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12638))
+ * fix img2img background color for transparent images option not being used ([#12633](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12633))
+ * attempt to resolve NaN issue with unstable VAEs in fp32 mk2 ([#12630](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12630))
+ * implement missing undo hijack for SDXL
+ * fix xyz swap axes ([#12684](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12684))
+ * fix errors in backup/restore tab if any of config files are broken ([#12689](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12689))
+ * fix SD VAE switch error after model reuse ([#12685](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12685))
+ * fix trying to create images too large for the chosen format ([#12667](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12667))
+ * create Gradio temp directory if necessary ([#12717](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12717))
+ * prevent possible cache loss if exiting as it's being written by using an atomic operation to replace the cache with the new version
+ * set devices.dtype_unet correctly
+ * run RealESRGAN on GPU for non-CUDA devices ([#12737](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
+ * prevent extra network buttons being obscured by description for very small card sizes ([#12745](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12745))
+ * fix error that causes some extra networks to be disabled if both <lora:> and <lyco:> are present in the prompt
+ * fix defaults settings page breaking when any of main UI tabs are hidden
+ * fix incorrect save/display of new values in Defaults page in settings
+ * fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working
+ * fix an error that prevents VAE being reloaded after an option change if a VAE near the checkpoint exists ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12797))
+ * hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12792))
+ * don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12780))
+ * fix style editing dialog breaking if it's opened in both img2img and txt2img tabs
+ * fix a bug allowing users to bypass gradio and API authentication (reported by vysecurity)
+ * fix notification not playing when built-in webui tab is inactive ([#12834](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12834))
+ * honor `--skip-install` for extension installers ([#12832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832))
+ * don't print blank stdout in extension installers ([#12833](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12833), [#12855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12855))
+ * do not change quicksettings dropdown option when value returned is `None` ([#12854](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12854))
+ * get progressbar to display correctly in extensions tab
+
+
+## 1.5.2
+
+### Bug Fixes:
+ * fix memory leak when generation fails
+ * update doggettx cross attention optimization to not use an unreasonable amount of memory in some edge cases -- suggestion by MorkTheOrk
+
+
## 1.5.1
### Minor:
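Some of the changelog entries above are terse PR titles. For instance, the 1.7.0 "Other" item about using an OrderedDict as an LRU cache refers to the common pattern of evicting the oldest entry from an `OrderedDict` once a size limit is reached. The sketch below illustrates that pattern only; it is not the repository's actual caching code.
```python
from collections import OrderedDict


class LRUCache:
    """Illustrative OrderedDict-based LRU cache (not the webui implementation)."""

    def __init__(self, max_size=16):
        self.max_size = max_size
        self.data = OrderedDict()

    def get(self, key, default=None):
        if key not in self.data:
            return default
        self.data.move_to_end(key)            # mark as most recently used
        return self.data[key]

    def put(self, key, value):
        self.data[key] = value
        self.data.move_to_end(key)
        while len(self.data) > self.max_size:
            self.data.popitem(last=False)     # evict the least recently used entry


cache = LRUCache(max_size=2)
cache.put("a", 1)
cache.put("b", 2)
cache.get("a")      # "a" is now most recently used
cache.put("c", 3)   # evicts "b"
assert cache.get("b") is None
```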
diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 00000000..2c781aff
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,7 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+authors:
+ - given-names: AUTOMATIC1111
+title: "Stable Diffusion Web UI"
+date-released: 2022-08-22
+url: "https://github.com/AUTOMATIC1111/stable-diffusion-webui"
diff --git a/README.md b/README.md
index b796d150..9f9f33b1 100644
--- a/README.md
+++ b/README.md
@@ -78,7 +78,7 @@ A browser interface based on Gradio library for Stable Diffusion.
- Clip skip
- Hypernetworks
- Loras (same as Hypernetworks but more pretty)
-- A sparate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
+- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
- Can select to load a different VAE from settings screen
- Estimated completion time in progress bar
- API
@@ -88,19 +88,23 @@ A browser interface based on Gradio library for Stable Diffusion.
- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
- Now without any bad letters!
- Load checkpoints in safetensors format
-- Eased resolution restriction: generated image's domension must be a multiple of 8 rather than 64
+- Eased resolution restriction: generated image's dimensions must be a multiple of 8 rather than 64
- Now with a license!
- Reorder elements in the UI from settings screen
+- [Segmind Stable Diffusion](https://huggingface.co/segmind/SSD-1B) support
## Installation and Running
-Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
+Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for:
+- [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended)
+- [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
+- [Intel CPUs, Intel GPUs (both integrated and discrete)](https://github.com/openvinotoolkit/stable-diffusion-webui/wiki/Installation-on-Intel-Silicon) (external wiki page)
Alternatively, use online services (like Google Colab):
- [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)
### Installation on Windows 10/11 with NVidia-GPUs using release package
-1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract it's contents.
+1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents.
2. Run `update.bat`.
3. Run `run.bat`.
> For more details see [Install-and-Run-on-NVidia-GPUs](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs)
@@ -115,15 +119,17 @@ Alternatively, use online services (like Google Colab):
1. Install the dependencies:
```bash
# Debian-based:
-sudo apt install wget git python3 python3-venv
+sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0
# Red Hat-based:
-sudo dnf install wget git python3
+sudo dnf install wget git python3 gperftools-libs libglvnd-glx
+# openSUSE-based:
+sudo zypper install wget git python3 libtcmalloc4 libglvnd
# Arch-based:
sudo pacman -S wget git python3
```
2. Navigate to the directory you would like the webui to be installed and execute the following command:
```bash
-bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh)
+wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh
```
3. Run `webui.sh`.
4. Check `webui-user.sh` for options.
@@ -143,7 +149,7 @@ For the purposes of getting Google and other search engines to crawl the wiki, h
## Credits
Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file.
-- Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers
+- Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers
- k-diffusion - https://github.com/crowsonkb/k-diffusion.git
- GFPGAN - https://github.com/TencentARC/GFPGAN.git
- CodeFormer - https://github.com/sczhou/CodeFormer
@@ -169,5 +175,7 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al
- UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
- TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd
- LyCORIS - KohakuBlueleaf
+- Restart sampling - lambertae - https://github.com/Newbeeer/diffusion_restart_sampling
+- Hypertile - tfernd - https://github.com/tfernd/HyperTile
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
- (You)
diff --git a/configs/alt-diffusion-m18-inference.yaml b/configs/alt-diffusion-m18-inference.yaml
new file mode 100644
index 00000000..41a031d5
--- /dev/null
+++ b/configs/alt-diffusion-m18-inference.yaml
@@ -0,0 +1,73 @@
+model:
+ base_learning_rate: 1.0e-04
+ target: ldm.models.diffusion.ddpm.LatentDiffusion
+ params:
+ linear_start: 0.00085
+ linear_end: 0.0120
+ num_timesteps_cond: 1
+ log_every_t: 200
+ timesteps: 1000
+ first_stage_key: "jpg"
+ cond_stage_key: "txt"
+ image_size: 64
+ channels: 4
+ cond_stage_trainable: false # Note: different from the one we trained before
+ conditioning_key: crossattn
+ monitor: val/loss_simple_ema
+ scale_factor: 0.18215
+ use_ema: False
+
+ scheduler_config: # 10000 warmup steps
+ target: ldm.lr_scheduler.LambdaLinearScheduler
+ params:
+ warm_up_steps: [ 10000 ]
+ cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+ f_start: [ 1.e-6 ]
+ f_max: [ 1. ]
+ f_min: [ 1. ]
+
+ unet_config:
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+ params:
+ image_size: 32 # unused
+ in_channels: 4
+ out_channels: 4
+ model_channels: 320
+ attention_resolutions: [ 4, 2, 1 ]
+ num_res_blocks: 2
+ channel_mult: [ 1, 2, 4, 4 ]
+ num_head_channels: 64
+ use_spatial_transformer: True
+ use_linear_in_transformer: True
+ transformer_depth: 1
+ context_dim: 1024
+ use_checkpoint: True
+ legacy: False
+
+ first_stage_config:
+ target: ldm.models.autoencoder.AutoencoderKL
+ params:
+ embed_dim: 4
+ monitor: val/rec_loss
+ ddconfig:
+ double_z: true
+ z_channels: 4
+ resolution: 256
+ in_channels: 3
+ out_ch: 3
+ ch: 128
+ ch_mult:
+ - 1
+ - 2
+ - 4
+ - 4
+ num_res_blocks: 2
+ attn_resolutions: []
+ dropout: 0.0
+ lossconfig:
+ target: torch.nn.Identity
+
+ cond_stage_config:
+ target: modules.xlmr_m18.BertSeriesModelWithTransformation
+ params:
+ name: "XLMR-Large"
diff --git a/configs/sd_xl_inpaint.yaml b/configs/sd_xl_inpaint.yaml
new file mode 100644
index 00000000..3bad3721
--- /dev/null
+++ b/configs/sd_xl_inpaint.yaml
@@ -0,0 +1,98 @@
+model:
+ target: sgm.models.diffusion.DiffusionEngine
+ params:
+ scale_factor: 0.13025
+ disable_first_stage_autocast: True
+
+ denoiser_config:
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
+ params:
+ num_idx: 1000
+
+ weighting_config:
+ target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
+ scaling_config:
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
+ discretization_config:
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
+
+ network_config:
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
+ params:
+ adm_in_channels: 2816
+ num_classes: sequential
+ use_checkpoint: True
+ in_channels: 9
+ out_channels: 4
+ model_channels: 320
+ attention_resolutions: [4, 2]
+ num_res_blocks: 2
+ channel_mult: [1, 2, 4]
+ num_head_channels: 64
+ use_spatial_transformer: True
+ use_linear_in_transformer: True
+ transformer_depth: [1, 2, 10] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
+ context_dim: 2048
+ spatial_transformer_attn_type: softmax-xformers
+ legacy: False
+
+ conditioner_config:
+ target: sgm.modules.GeneralConditioner
+ params:
+ emb_models:
+ # crossattn cond
+ - is_trainable: False
+ input_key: txt
+ target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
+ params:
+ layer: hidden
+ layer_idx: 11
+ # crossattn and vector cond
+ - is_trainable: False
+ input_key: txt
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
+ params:
+ arch: ViT-bigG-14
+ version: laion2b_s39b_b160k
+ freeze: True
+ layer: penultimate
+ always_return_pooled: True
+ legacy: False
+ # vector cond
+ - is_trainable: False
+ input_key: original_size_as_tuple
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
+ params:
+ outdim: 256 # multiplied by two
+ # vector cond
+ - is_trainable: False
+ input_key: crop_coords_top_left
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
+ params:
+ outdim: 256 # multiplied by two
+ # vector cond
+ - is_trainable: False
+ input_key: target_size_as_tuple
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
+ params:
+ outdim: 256 # multiplied by two
+
+ first_stage_config:
+ target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
+ params:
+ embed_dim: 4
+ monitor: val/rec_loss
+ ddconfig:
+ attn_type: vanilla-xformers
+ double_z: true
+ z_channels: 4
+ resolution: 256
+ in_channels: 3
+ out_ch: 3
+ ch: 128
+ ch_mult: [1, 2, 4, 4]
+ num_res_blocks: 2
+ attn_resolutions: []
+ dropout: 0.0
+ lossconfig:
+ target: torch.nn.Identity
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index ba2945c6..005ff32c 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -6,9 +6,14 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
def __init__(self):
super().__init__('lora')
+ self.errors = {}
+ """mapping of network names to the number of errors the network had during operation"""
+
def activate(self, p, params_list):
additional = shared.opts.sd_lora
+ self.errors.clear()
+
if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
@@ -56,4 +61,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)
def deactivate(self, p):
- pass
+ if self.errors:
+ p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))
+
+ self.errors.clear()
diff --git a/extensions-builtin/Lora/lora_logger.py b/extensions-builtin/Lora/lora_logger.py
new file mode 100644
index 00000000..d51de297
--- /dev/null
+++ b/extensions-builtin/Lora/lora_logger.py
@@ -0,0 +1,33 @@
+import sys
+import copy
+import logging
+
+
+class ColoredFormatter(logging.Formatter):
+ COLORS = {
+ "DEBUG": "\033[0;36m", # CYAN
+ "INFO": "\033[0;32m", # GREEN
+ "WARNING": "\033[0;33m", # YELLOW
+ "ERROR": "\033[0;31m", # RED
+ "CRITICAL": "\033[0;37;41m", # WHITE ON RED
+ "RESET": "\033[0m", # RESET COLOR
+ }
+
+ def format(self, record):
+ colored_record = copy.copy(record)
+ levelname = colored_record.levelname
+ seq = self.COLORS.get(levelname, self.COLORS["RESET"])
+ colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
+ return super().format(colored_record)
+
+
+logger = logging.getLogger("lora")
+logger.propagate = False
+
+
+if not logger.handlers:
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(
+ ColoredFormatter("[%(name)s]-%(levelname)s: %(message)s")
+ )
+ logger.addHandler(handler)
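
Note that `format` colorizes a `copy.copy` of the record, so the ANSI-wrapped level name never leaks into other handlers that receive the same record. A minimal usage sketch (the messages are illustrative only):

```python
# Illustrative only: any module inside the Lora extension can import the shared logger.
from lora_logger import logger

logger.info("example message")      # level name printed green on a color-capable terminal
logger.warning("example warning")   # level name printed yellow
```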
diff --git a/extensions-builtin/Lora/lora_patches.py b/extensions-builtin/Lora/lora_patches.py
new file mode 100644
index 00000000..b394d8e9
--- /dev/null
+++ b/extensions-builtin/Lora/lora_patches.py
@@ -0,0 +1,31 @@
+import torch
+
+import networks
+from modules import patches
+
+
+class LoraPatches:
+ def __init__(self):
+ self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward)
+ self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict)
+ self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward)
+ self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict)
+ self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward)
+ self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict)
+ self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward)
+ self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict)
+ self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward)
+ self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict)
+
+ def undo(self):
+ self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward')
+ self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict')
+ self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward')
+ self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict')
+ self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward')
+ self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict')
+ self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward')
+ self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict')
+ self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward')
+ self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict')
+
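
LoraPatches routes all of the monkey-patching through `modules.patches`, which records the original callable and hands it back so `networks` can still call the unpatched forward; `undo()` then restores everything and clears the stored references. The snippet below is only a sketch of that patch/undo idea, assuming a registry keyed by (owner, field); it is not the actual `modules.patches` implementation:

```python
# Minimal sketch of a patch/undo registry, NOT the real modules.patches:
# remember the original attribute, install the replacement, and hand the
# original back so the caller can keep invoking it.
_originals = {}

def patch(key, obj, field, replacement):
    _originals[(key, obj, field)] = getattr(obj, field)
    setattr(obj, field, replacement)
    return _originals[(key, obj, field)]

def undo(key, obj, field):
    setattr(obj, field, _originals.pop((key, obj, field)))
    return None  # assigning the result back, as LoraPatches.undo() does, clears the stored reference
```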
diff --git a/extensions-builtin/Lora/lyco_helpers.py b/extensions-builtin/Lora/lyco_helpers.py
index 279b34bc..1679a0ce 100644
--- a/extensions-builtin/Lora/lyco_helpers.py
+++ b/extensions-builtin/Lora/lyco_helpers.py
@@ -19,3 +19,50 @@ def rebuild_cp_decomposition(up, down, mid):
up = up.reshape(up.size(0), -1)
down = down.reshape(down.size(0), -1)
return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)
+
+
+# copied from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/lokr.py
+def factorization(dimension: int, factor:int=-1) -> tuple[int, int]:
+ '''
+ Return a tuple of two values: the input dimension decomposed by the number closest to factor.
+ The second value is greater than or equal to the first value.
+
+ In LoRA with Kronecker product, the first value is a value for weight scale,
+ the second value is a value for weight.
+
+ Because of the non-commutative property, A⊗B ≠ B⊗A, the meanings of the two matrices are slightly different.
+
+ examples)
+ factor
+     -1              2               4               8               16              ...
+ 127 -> 1, 127   127 -> 1, 127   127 -> 1, 127   127 -> 1, 127   127 -> 1, 127
+ 128 -> 8, 16    128 -> 2, 64    128 -> 4, 32    128 -> 8, 16    128 -> 8, 16
+ 250 -> 10, 25   250 -> 2, 125   250 -> 2, 125   250 -> 5, 50    250 -> 10, 25
+ 360 -> 8, 45    360 -> 2, 180   360 -> 4, 90    360 -> 8, 45    360 -> 12, 30
+ 512 -> 16, 32   512 -> 2, 256   512 -> 4, 128   512 -> 8, 64    512 -> 16, 32
+ 1024 -> 32, 32  1024 -> 2, 512  1024 -> 4, 256  1024 -> 8, 128  1024 -> 16, 64
+ '''
+
+ if factor > 0 and (dimension % factor) == 0:
+ m = factor
+ n = dimension // factor
+ if m > n:
+ n, m = m, n
+ return m, n
+ if factor < 0:
+ factor = dimension
+ m, n = 1, dimension
+ length = m + n
+ while m<n:
+ new_m = m + 1
+ while dimension%new_m != 0:
+ new_m += 1
+ new_n = dimension // new_m
+ if new_m + new_n > length or new_m>factor:
+ break
+ else:
+ m, n = new_m, new_n
+ if m > n:
+ n, m = m, n
+ return m, n
+
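
A few hand-checked calls make the behaviour of `factorization` concrete; `network_oft.py` further down uses it as `block_size, num_blocks = factorization(out_dim, dim)`:

```python
# Hand-traced examples for the helper above (a quick sanity sketch, not part of the diff):
from lyco_helpers import factorization

assert factorization(512, 4)  == (4, 128)   # factor divides the dimension, so it is used directly
assert factorization(128, -1) == (8, 16)    # factor -1 picks the most balanced split
assert factorization(127, -1) == (1, 127)   # a prime dimension can only split trivially
```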
diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py
index 0a18d69e..a62e5eff 100644
--- a/extensions-builtin/Lora/network.py
+++ b/extensions-builtin/Lora/network.py
@@ -93,6 +93,7 @@ class Network: # LoraModule
self.unet_multiplier = 1.0
self.dyn_dim = None
self.modules = {}
+ self.bundle_embeddings = {}
self.mtime = None
self.mentioned_name = None
@@ -133,10 +134,10 @@ class NetworkModule:
return 1.0
- def finalize_updown(self, updown, orig_weight, output_shape):
+ def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
if self.bias is not None:
updown = updown.reshape(self.bias.shape)
- updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype)
+ updown += self.bias.to(orig_weight.device, dtype=updown.dtype)
updown = updown.reshape(output_shape)
if len(output_shape) == 4:
@@ -145,7 +146,10 @@ class NetworkModule:
if orig_weight.size().numel() == updown.size().numel():
updown = updown.reshape(orig_weight.shape)
- return updown * self.calc_scale() * self.multiplier()
+ if ex_bias is not None:
+ ex_bias = ex_bias * self.multiplier()
+
+ return updown * self.calc_scale() * self.multiplier(), ex_bias
def calc_updown(self, target):
raise NotImplementedError()
diff --git a/extensions-builtin/Lora/network_full.py b/extensions-builtin/Lora/network_full.py
index 109b4c2c..f221c95f 100644
--- a/extensions-builtin/Lora/network_full.py
+++ b/extensions-builtin/Lora/network_full.py
@@ -14,9 +14,14 @@ class NetworkModuleFull(network.NetworkModule):
super().__init__(net, weights)
self.weight = weights.w.get("diff")
+ self.ex_bias = weights.w.get("diff_b")
def calc_updown(self, orig_weight):
output_shape = self.weight.shape
- updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+ updown = self.weight.to(orig_weight.device)
+ if self.ex_bias is not None:
+ ex_bias = self.ex_bias.to(orig_weight.device)
+ else:
+ ex_bias = None
- return self.finalize_updown(updown, orig_weight, output_shape)
+ return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
diff --git a/extensions-builtin/Lora/network_glora.py b/extensions-builtin/Lora/network_glora.py
new file mode 100644
index 00000000..efe5c681
--- /dev/null
+++ b/extensions-builtin/Lora/network_glora.py
@@ -0,0 +1,33 @@
+
+import network
+
+class ModuleTypeGLora(network.ModuleType):
+ def create_module(self, net: network.Network, weights: network.NetworkWeights):
+ if all(x in weights.w for x in ["a1.weight", "a2.weight", "alpha", "b1.weight", "b2.weight"]):
+ return NetworkModuleGLora(net, weights)
+
+ return None
+
+# adapted from https://github.com/KohakuBlueleaf/LyCORIS
+class NetworkModuleGLora(network.NetworkModule):
+ def __init__(self, net: network.Network, weights: network.NetworkWeights):
+ super().__init__(net, weights)
+
+ if hasattr(self.sd_module, 'weight'):
+ self.shape = self.sd_module.weight.shape
+
+ self.w1a = weights.w["a1.weight"]
+ self.w1b = weights.w["b1.weight"]
+ self.w2a = weights.w["a2.weight"]
+ self.w2b = weights.w["b2.weight"]
+
+ def calc_updown(self, orig_weight):
+ w1a = self.w1a.to(orig_weight.device)
+ w1b = self.w1b.to(orig_weight.device)
+ w2a = self.w2a.to(orig_weight.device)
+ w2b = self.w2b.to(orig_weight.device)
+
+ output_shape = [w1a.size(0), w1b.size(1)]
+ updown = ((w2b @ w1b) + ((orig_weight.to(dtype = w1a.dtype) @ w2a) @ w1a))
+
+ return self.finalize_updown(updown, orig_weight, output_shape)
diff --git a/extensions-builtin/Lora/network_hada.py b/extensions-builtin/Lora/network_hada.py
index 5fcb0695..d95a0fd1 100644
--- a/extensions-builtin/Lora/network_hada.py
+++ b/extensions-builtin/Lora/network_hada.py
@@ -27,16 +27,16 @@ class NetworkModuleHada(network.NetworkModule):
self.t2 = weights.w.get("hada_t2")
def calc_updown(self, orig_weight):
- w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
- w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
- w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
- w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+ w1a = self.w1a.to(orig_weight.device)
+ w1b = self.w1b.to(orig_weight.device)
+ w2a = self.w2a.to(orig_weight.device)
+ w2b = self.w2b.to(orig_weight.device)
output_shape = [w1a.size(0), w1b.size(1)]
if self.t1 is not None:
output_shape = [w1a.size(1), w1b.size(1)]
- t1 = self.t1.to(orig_weight.device, dtype=orig_weight.dtype)
+ t1 = self.t1.to(orig_weight.device)
updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b)
output_shape += t1.shape[2:]
else:
@@ -45,7 +45,7 @@ class NetworkModuleHada(network.NetworkModule):
updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape)
if self.t2 is not None:
- t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype)
+ t2 = self.t2.to(orig_weight.device)
updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)
else:
updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape)
diff --git a/extensions-builtin/Lora/network_ia3.py b/extensions-builtin/Lora/network_ia3.py
index 7edc4249..96faeaf3 100644
--- a/extensions-builtin/Lora/network_ia3.py
+++ b/extensions-builtin/Lora/network_ia3.py
@@ -17,7 +17,7 @@ class NetworkModuleIa3(network.NetworkModule):
self.on_input = weights.w["on_input"].item()
def calc_updown(self, orig_weight):
- w = self.w.to(orig_weight.device, dtype=orig_weight.dtype)
+ w = self.w.to(orig_weight.device)
output_shape = [w.size(0), orig_weight.size(1)]
if self.on_input:
diff --git a/extensions-builtin/Lora/network_lokr.py b/extensions-builtin/Lora/network_lokr.py
index 340acdab..fcdaeafd 100644
--- a/extensions-builtin/Lora/network_lokr.py
+++ b/extensions-builtin/Lora/network_lokr.py
@@ -37,22 +37,22 @@ class NetworkModuleLokr(network.NetworkModule):
def calc_updown(self, orig_weight):
if self.w1 is not None:
- w1 = self.w1.to(orig_weight.device, dtype=orig_weight.dtype)
+ w1 = self.w1.to(orig_weight.device)
else:
- w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
- w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
+ w1a = self.w1a.to(orig_weight.device)
+ w1b = self.w1b.to(orig_weight.device)
w1 = w1a @ w1b
if self.w2 is not None:
- w2 = self.w2.to(orig_weight.device, dtype=orig_weight.dtype)
+ w2 = self.w2.to(orig_weight.device)
elif self.t2 is None:
- w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
- w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+ w2a = self.w2a.to(orig_weight.device)
+ w2b = self.w2b.to(orig_weight.device)
w2 = w2a @ w2b
else:
- t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype)
- w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
- w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+ t2 = self.t2.to(orig_weight.device)
+ w2a = self.w2a.to(orig_weight.device)
+ w2b = self.w2b.to(orig_weight.device)
w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)
output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)]
diff --git a/extensions-builtin/Lora/network_lora.py b/extensions-builtin/Lora/network_lora.py
index 26c0a72c..4cc40295 100644
--- a/extensions-builtin/Lora/network_lora.py
+++ b/extensions-builtin/Lora/network_lora.py
@@ -61,13 +61,13 @@ class NetworkModuleLora(network.NetworkModule):
return module
def calc_updown(self, orig_weight):
- up = self.up_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
- down = self.down_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+ up = self.up_model.weight.to(orig_weight.device)
+ down = self.down_model.weight.to(orig_weight.device)
output_shape = [up.size(0), down.size(1)]
if self.mid_model is not None:
# cp-decomposition
- mid = self.mid_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+ mid = self.mid_model.weight.to(orig_weight.device)
updown = lyco_helpers.rebuild_cp_decomposition(up, down, mid)
output_shape += mid.shape[2:]
else:
diff --git a/extensions-builtin/Lora/network_norm.py b/extensions-builtin/Lora/network_norm.py
new file mode 100644
index 00000000..d25afcbb
--- /dev/null
+++ b/extensions-builtin/Lora/network_norm.py
@@ -0,0 +1,28 @@
+import network
+
+
+class ModuleTypeNorm(network.ModuleType):
+ def create_module(self, net: network.Network, weights: network.NetworkWeights):
+ if all(x in weights.w for x in ["w_norm", "b_norm"]):
+ return NetworkModuleNorm(net, weights)
+
+ return None
+
+
+class NetworkModuleNorm(network.NetworkModule):
+ def __init__(self, net: network.Network, weights: network.NetworkWeights):
+ super().__init__(net, weights)
+
+ self.w_norm = weights.w.get("w_norm")
+ self.b_norm = weights.w.get("b_norm")
+
+ def calc_updown(self, orig_weight):
+ output_shape = self.w_norm.shape
+ updown = self.w_norm.to(orig_weight.device)
+
+ if self.b_norm is not None:
+ ex_bias = self.b_norm.to(orig_weight.device)
+ else:
+ ex_bias = None
+
+ return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
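
The norm deltas are consumed additively: `updown` is added to the normalization layer's weight and `ex_bias` to its bias (see the `network_apply_weights` changes further down). A toy illustration of that final step, not the webui apply path itself:

```python
# Toy illustration only: apply an additive (weight, bias) delta to a LayerNorm,
# mirroring how the (updown, ex_bias) pair produced above is ultimately used.
import torch

ln = torch.nn.LayerNorm(4)
w_norm = torch.full((4,), 0.1)    # stands in for weights.w["w_norm"]
b_norm = torch.full((4,), -0.2)   # stands in for weights.w["b_norm"]

with torch.no_grad():
    ln.weight += w_norm
    ln.bias += b_norm
```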
diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py
new file mode 100644
index 00000000..fa647020
--- /dev/null
+++ b/extensions-builtin/Lora/network_oft.py
@@ -0,0 +1,82 @@
+import torch
+import network
+from lyco_helpers import factorization
+from einops import rearrange
+
+
+class ModuleTypeOFT(network.ModuleType):
+ def create_module(self, net: network.Network, weights: network.NetworkWeights):
+ if all(x in weights.w for x in ["oft_blocks"]) or all(x in weights.w for x in ["oft_diag"]):
+ return NetworkModuleOFT(net, weights)
+
+ return None
+
+# Supports both kohya-ss' implementation of COFT https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py
+# and KohakuBlueleaf's implementation of OFT/COFT https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/diag_oft.py
+class NetworkModuleOFT(network.NetworkModule):
+ def __init__(self, net: network.Network, weights: network.NetworkWeights):
+
+ super().__init__(net, weights)
+
+ self.lin_module = None
+ self.org_module: list[torch.nn.Module] = [self.sd_module]
+
+ self.scale = 1.0
+
+ # kohya-ss
+ if "oft_blocks" in weights.w.keys():
+ self.is_kohya = True
+ self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size)
+ self.alpha = weights.w["alpha"] # alpha is constraint
+ self.dim = self.oft_blocks.shape[0] # lora dim
+ # LyCORIS
+ elif "oft_diag" in weights.w.keys():
+ self.is_kohya = False
+ self.oft_blocks = weights.w["oft_diag"]
+ # self.alpha is unused
+ self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size)
+
+ is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
+ is_conv = type(self.sd_module) in [torch.nn.Conv2d]
+ is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported
+
+ if is_linear:
+ self.out_dim = self.sd_module.out_features
+ elif is_conv:
+ self.out_dim = self.sd_module.out_channels
+ elif is_other_linear:
+ self.out_dim = self.sd_module.embed_dim
+
+ if self.is_kohya:
+ self.constraint = self.alpha * self.out_dim
+ self.num_blocks = self.dim
+ self.block_size = self.out_dim // self.dim
+ else:
+ self.constraint = None
+ self.block_size, self.num_blocks = factorization(self.out_dim, self.dim)
+
+ def calc_updown(self, orig_weight):
+ oft_blocks = self.oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype)
+ eye = torch.eye(self.block_size, device=self.oft_blocks.device)
+
+ if self.is_kohya:
+ block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix
+ norm_Q = torch.norm(block_Q.flatten())
+ new_norm_Q = torch.clamp(norm_Q, max=self.constraint)
+ block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
+ oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse())
+
+ R = oft_blocks.to(orig_weight.device, dtype=orig_weight.dtype)
+
+ # This errors out for MultiheadAttention, might need to be handled up-stream
+ merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
+ merged_weight = torch.einsum(
+ 'k n m, k n ... -> k m ...',
+ R,
+ merged_weight
+ )
+ merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
+
+ updown = merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight
+ output_shape = orig_weight.shape
+ return self.finalize_updown(updown, orig_weight, output_shape)
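
In the kohya-ss branch the update is a Cayley transform: `block_Q` is made skew-symmetric, its norm is clamped to the COFT constraint, and `(I + Q)(I - Q)^-1` then yields an orthogonal block that rotates the original weight. A small self-contained check of that property (not part of the diff):

```python
# Sanity check of the Cayley transform used above: for any skew-symmetric Q,
# R = (I + Q) @ inv(I - Q) is orthogonal, i.e. R @ R.T == I (up to float error).
import torch

block = torch.randn(4, 4)
Q = block - block.T                      # skew-symmetric, like oft_blocks - oft_blocks.transpose(1, 2)
eye = torch.eye(4)
R = (eye + Q) @ torch.linalg.inv(eye - Q)

assert torch.allclose(R @ R.T, eye, atol=1e-4)
```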
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 17cbe1bb..72ebd624 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -1,17 +1,26 @@
+import gradio as gr
+import logging
import os
import re
+import lora_patches
import network
import network_lora
+import network_glora
import network_hada
import network_ia3
import network_lokr
import network_full
+import network_norm
+import network_oft
import torch
from typing import Union
from modules import shared, devices, sd_models, errors, scripts, sd_hijack
+import modules.textual_inversion.textual_inversion as textual_inversion
+
+from lora_logger import logger
module_types = [
network_lora.ModuleTypeLora(),
@@ -19,6 +28,9 @@ module_types = [
network_ia3.ModuleTypeIa3(),
network_lokr.ModuleTypeLokr(),
network_full.ModuleTypeFull(),
+ network_norm.ModuleTypeNorm(),
+ network_glora.ModuleTypeGLora(),
+ network_oft.ModuleTypeOFT(),
]
@@ -31,6 +43,8 @@ suffix_conversion = {
"resnets": {
"conv1": "in_layers_2",
"conv2": "out_layers_3",
+ "norm1": "in_layers_0",
+ "norm2": "out_layers_0",
"time_emb_proj": "emb_layers_1",
"conv_shortcut": "skip_connection",
}
@@ -143,9 +157,20 @@ def load_network(name, network_on_disk):
is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping
matched_networks = {}
+ bundle_embeddings = {}
for key_network, weight in sd.items():
- key_network_without_network_parts, network_part = key_network.split(".", 1)
+ key_network_without_network_parts, _, network_part = key_network.partition(".")
+
+ if key_network_without_network_parts == "bundle_emb":
+ emb_name, vec_name = network_part.split(".", 1)
+ emb_dict = bundle_embeddings.get(emb_name, {})
+ if vec_name.split('.')[0] == 'string_to_param':
+ _, k2 = vec_name.split('.', 1)
+ emb_dict['string_to_param'] = {k2: weight}
+ else:
+ emb_dict[vec_name] = weight
+ bundle_embeddings[emb_name] = emb_dict
key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2)
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
@@ -168,6 +193,17 @@ def load_network(name, network_on_disk):
key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+ # kohya_ss OFT module
+ elif sd_module is None and "oft_unet" in key_network_without_network_parts:
+ key = key_network_without_network_parts.replace("oft_unet", "diffusion_model")
+ sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+
+ # KohakuBlueLeaf OFT module
+ if sd_module is None and "oft_diag" in key:
+ key = key_network_without_network_parts.replace("lora_unet", "diffusion_model")
+ key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
+ sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+
if sd_module is None:
keys_failed_to_match[key_network] = key
continue
@@ -189,18 +225,38 @@ def load_network(name, network_on_disk):
net.modules[key] = net_module
+ embeddings = {}
+ for emb_name, data in bundle_embeddings.items():
+ embedding = textual_inversion.create_embedding_from_data(data, emb_name, filename=network_on_disk.filename + "/" + emb_name)
+ embedding.loaded = None
+ embeddings[emb_name] = embedding
+
+ net.bundle_embeddings = embeddings
+
if keys_failed_to_match:
- print(f"Failed to match keys when loading network {network_on_disk.filename}: {keys_failed_to_match}")
+ logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}")
return net
+def purge_networks_from_memory():
+ while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0:
+ name = next(iter(networks_in_memory))
+ networks_in_memory.pop(name, None)
+
+ devices.torch_gc()
+
+
def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
+ emb_db = sd_hijack.model_hijack.embedding_db
already_loaded = {}
for net in loaded_networks:
if net.name in names:
already_loaded[net.name] = net
+ for emb_name, embedding in net.bundle_embeddings.items():
+ if embedding.loaded:
+ emb_db.register_embedding_by_name(None, shared.sd_model, emb_name)
loaded_networks.clear()
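
`purge_networks_from_memory` above, together with the pop-and-reinsert in `load_networks` in the next hunk, turns the plain `networks_in_memory` dict into a small least-recently-loaded cache: re-inserting a name moves it to the end of the insertion order, and `next(iter(...))` always names the oldest entry. A stand-alone sketch of that idiom (not the webui code):

```python
# Stand-alone sketch of the caching idiom: a plain dict keeps insertion order,
# so pop-and-reinsert moves a key to the end and next(iter(...)) is the oldest.
cache = {}

def remember(name, net, limit=2):
    cache.pop(name, None)                 # re-inserting moves the entry to the end
    cache[name] = net
    while len(cache) > limit:             # like purge_networks_from_memory with lora_in_memory_limit
        cache.pop(next(iter(cache)))      # evict the oldest entry

remember("a", 1); remember("b", 2); remember("a", 1); remember("c", 3)
assert list(cache) == ["a", "c"]
```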
@@ -212,15 +268,19 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
failed_to_load_networks = []
- for i, name in enumerate(names):
+ for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
net = already_loaded.get(name, None)
- network_on_disk = networks_on_disk[i]
-
if network_on_disk is not None:
+ if net is None:
+ net = networks_in_memory.get(name)
+
if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
try:
net = load_network(name, network_on_disk)
+
+ networks_in_memory.pop(name, None)
+ networks_in_memory[name] = net
except Exception as e:
errors.display(e, f"loading network {network_on_disk.filename}")
continue
@@ -231,7 +291,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
if net is None:
failed_to_load_networks.append(name)
- print(f"Couldn't find network with name {name}")
+ logging.info(f"Couldn't find network with name {name}")
continue
net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
@@ -239,24 +299,59 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0
loaded_networks.append(net)
+ for emb_name, embedding in net.bundle_embeddings.items():
+ if embedding.loaded is None and emb_name in emb_db.word_embeddings:
+ logger.warning(
+ f'Skip bundle embedding: "{emb_name}"'
+ ' as it was already loaded from embeddings folder'
+ )
+ continue
+
+ embedding.loaded = False
+ if emb_db.expected_shape == -1 or emb_db.expected_shape == embedding.shape:
+ embedding.loaded = True
+ emb_db.register_embedding(embedding, shared.sd_model)
+ else:
+ emb_db.skipped_embeddings[name] = embedding
+
if failed_to_load_networks:
- sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))
+ lora_not_found_message = f'Lora not found: {", ".join(failed_to_load_networks)}'
+ sd_hijack.model_hijack.comments.append(lora_not_found_message)
+ if shared.opts.lora_not_found_warning_console:
+ print(f'\n{lora_not_found_message}\n')
+ if shared.opts.lora_not_found_gradio_warning:
+ gr.Warning(lora_not_found_message)
+ purge_networks_from_memory()
-def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+
+def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
weights_backup = getattr(self, "network_weights_backup", None)
+ bias_backup = getattr(self, "network_bias_backup", None)
- if weights_backup is None:
+ if weights_backup is None and bias_backup is None:
return
- if isinstance(self, torch.nn.MultiheadAttention):
- self.in_proj_weight.copy_(weights_backup[0])
- self.out_proj.weight.copy_(weights_backup[1])
+ if weights_backup is not None:
+ if isinstance(self, torch.nn.MultiheadAttention):
+ self.in_proj_weight.copy_(weights_backup[0])
+ self.out_proj.weight.copy_(weights_backup[1])
+ else:
+ self.weight.copy_(weights_backup)
+
+ if bias_backup is not None:
+ if isinstance(self, torch.nn.MultiheadAttention):
+ self.out_proj.bias.copy_(bias_backup)
+ else:
+ self.bias.copy_(bias_backup)
else:
- self.weight.copy_(weights_backup)
+ if isinstance(self, torch.nn.MultiheadAttention):
+ self.out_proj.bias = None
+ else:
+ self.bias = None
-def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
"""
Applies the currently selected set of networks to the weights of torch layer self.
If weights already have this particular set of networks applied, does nothing.
@@ -271,7 +366,10 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)
weights_backup = getattr(self, "network_weights_backup", None)
- if weights_backup is None:
+ if weights_backup is None and wanted_names != ():
+ if current_names != ():
+ raise RuntimeError("no backup weights found and current weights are not unchanged")
+
if isinstance(self, torch.nn.MultiheadAttention):
weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
else:
@@ -279,21 +377,49 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
self.network_weights_backup = weights_backup
+ bias_backup = getattr(self, "network_bias_backup", None)
+ if bias_backup is None:
+ if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
+ bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
+ elif getattr(self, 'bias', None) is not None:
+ bias_backup = self.bias.to(devices.cpu, copy=True)
+ else:
+ bias_backup = None
+ self.network_bias_backup = bias_backup
+
if current_names != wanted_names:
network_restore_weights_from_backup(self)
for net in loaded_networks:
module = net.modules.get(network_layer_name, None)
if module is not None and hasattr(self, 'weight'):
- with torch.no_grad():
- updown = module.calc_updown(self.weight)
-
- if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
- # inpainting model. zero pad updown to make channel[1] 4 to 9
- updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
+ try:
+ with torch.no_grad():
+ if getattr(self, 'fp16_weight', None) is None:
+ weight = self.weight
+ bias = self.bias
+ else:
+ weight = self.fp16_weight.clone().to(self.weight.device)
+ bias = getattr(self, 'fp16_bias', None)
+ if bias is not None:
+ bias = bias.clone().to(self.bias.device)
+ updown, ex_bias = module.calc_updown(weight)
+
+ if len(weight.shape) == 4 and weight.shape[1] == 9:
+ # inpainting model. zero pad updown to make channel[1] 4 to 9
+ updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
+
+ self.weight.copy_((weight.to(dtype=updown.dtype) + updown).to(dtype=self.weight.dtype))
+ if ex_bias is not None and hasattr(self, 'bias'):
+ if self.bias is None:
+ self.bias = torch.nn.Parameter(ex_bias).to(self.weight.dtype)
+ else:
+ self.bias.copy_((bias + ex_bias).to(dtype=self.bias.dtype))
+ except RuntimeError as e:
+ logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+ extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
- self.weight += updown
- continue
+ continue
module_q = net.modules.get(network_layer_name + "_q_proj", None)
module_k = net.modules.get(network_layer_name + "_k_proj", None)
@@ -301,21 +427,33 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
module_out = net.modules.get(network_layer_name + "_out_proj", None)
if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
- with torch.no_grad():
- updown_q = module_q.calc_updown(self.in_proj_weight)
- updown_k = module_k.calc_updown(self.in_proj_weight)
- updown_v = module_v.calc_updown(self.in_proj_weight)
- updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
- updown_out = module_out.calc_updown(self.out_proj.weight)
-
- self.in_proj_weight += updown_qkv
- self.out_proj.weight += updown_out
- continue
+ try:
+ with torch.no_grad():
+ updown_q, _ = module_q.calc_updown(self.in_proj_weight)
+ updown_k, _ = module_k.calc_updown(self.in_proj_weight)
+ updown_v, _ = module_v.calc_updown(self.in_proj_weight)
+ updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
+ updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight)
+
+ self.in_proj_weight += updown_qkv
+ self.out_proj.weight += updown_out
+ if ex_bias is not None:
+ if self.out_proj.bias is None:
+ self.out_proj.bias = torch.nn.Parameter(ex_bias)
+ else:
+ self.out_proj.bias += ex_bias
+
+ except RuntimeError as e:
+ logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+ extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+ continue
if module is None:
continue
- print(f'failed to calculate network weights for layer {network_layer_name}')
+ logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation")
+ extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
self.network_current_names = wanted_names
@@ -342,7 +480,7 @@ def network_forward(module, input, original_forward):
if module is None:
continue
- y = module.forward(y, input)
+ y = module.forward(input, y)
return y
@@ -350,48 +488,79 @@ def network_forward(module, input, original_forward):
def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
self.network_current_names = ()
self.network_weights_backup = None
+ self.network_bias_backup = None
def network_Linear_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.Linear_forward_before_network)
+ return network_forward(self, input, originals.Linear_forward)
network_apply_weights(self)
- return torch.nn.Linear_forward_before_network(self, input)
+ return originals.Linear_forward(self, input)
def network_Linear_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.Linear_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.Linear_load_state_dict(self, *args, **kwargs)
def network_Conv2d_forward(self, input):
if shared.opts.lora_functional:
- return network_forward(self, input, torch.nn.Conv2d_forward_before_network)
+ return network_forward(self, input, originals.Conv2d_forward)
network_apply_weights(self)
- return torch.nn.Conv2d_forward_before_network(self, input)
+ return originals.Conv2d_forward(self, input)
def network_Conv2d_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.Conv2d_load_state_dict(self, *args, **kwargs)
+
+
+def network_GroupNorm_forward(self, input):
+ if shared.opts.lora_functional:
+ return network_forward(self, input, originals.GroupNorm_forward)
+
+ network_apply_weights(self)
+
+ return originals.GroupNorm_forward(self, input)
+
+
+def network_GroupNorm_load_state_dict(self, *args, **kwargs):
+ network_reset_cached_weight(self)
+
+ return originals.GroupNorm_load_state_dict(self, *args, **kwargs)
+
+
+def network_LayerNorm_forward(self, input):
+ if shared.opts.lora_functional:
+ return network_forward(self, input, originals.LayerNorm_forward)
+
+ network_apply_weights(self)
+
+ return originals.LayerNorm_forward(self, input)
+
+
+def network_LayerNorm_load_state_dict(self, *args, **kwargs):
+ network_reset_cached_weight(self)
+
+ return originals.LayerNorm_load_state_dict(self, *args, **kwargs)
def network_MultiheadAttention_forward(self, *args, **kwargs):
network_apply_weights(self)
- return torch.nn.MultiheadAttention_forward_before_network(self, *args, **kwargs)
+ return originals.MultiheadAttention_forward(self, *args, **kwargs)
def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)
- return torch.nn.MultiheadAttention_load_state_dict_before_network(self, *args, **kwargs)
+ return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
def list_available_networks():
@@ -459,9 +628,15 @@ def infotext_pasted(infotext, params):
params["Prompt"] += "\n" + "".join(added)
+originals: lora_patches.LoraPatches = None
+
+extra_network_lora = None
+
available_networks = {}
available_network_aliases = {}
loaded_networks = []
+loaded_bundle_embeddings = {}
+networks_in_memory = {}
available_network_hash_lookup = {}
forbidden_network_aliases = {}
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index cd28afc9..1518f7e5 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -1,57 +1,30 @@
import re
-import torch
import gradio as gr
from fastapi import FastAPI
import network
import networks
import lora # noqa:F401
+import lora_patches
import extra_networks_lora
import ui_extra_networks_lora
from modules import script_callbacks, ui_extra_networks, extra_networks, shared
+
def unload():
- torch.nn.Linear.forward = torch.nn.Linear_forward_before_network
- torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_network
- torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_network
- torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_network
- torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_network
- torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_network
+ networks.originals.undo()
def before_ui():
ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
- extra_network = extra_networks_lora.ExtraNetworkLora()
- extra_networks.register_extra_network(extra_network)
- extra_networks.register_extra_network_alias(extra_network, "lyco")
-
-
-if not hasattr(torch.nn, 'Linear_forward_before_network'):
- torch.nn.Linear_forward_before_network = torch.nn.Linear.forward
-
-if not hasattr(torch.nn, 'Linear_load_state_dict_before_network'):
- torch.nn.Linear_load_state_dict_before_network = torch.nn.Linear._load_from_state_dict
+ networks.extra_network_lora = extra_networks_lora.ExtraNetworkLora()
+ extra_networks.register_extra_network(networks.extra_network_lora)
+ extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco")
-if not hasattr(torch.nn, 'Conv2d_forward_before_network'):
- torch.nn.Conv2d_forward_before_network = torch.nn.Conv2d.forward
-if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'):
- torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict
-
-if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'):
- torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward
-
-if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_network'):
- torch.nn.MultiheadAttention_load_state_dict_before_network = torch.nn.MultiheadAttention._load_from_state_dict
-
-torch.nn.Linear.forward = networks.network_Linear_forward
-torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict
-torch.nn.Conv2d.forward = networks.network_Conv2d_forward
-torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict
-torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward
-torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict
+networks.originals = lora_patches.LoraPatches()
script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
script_callbacks.on_script_unloaded(unload)
@@ -65,6 +38,9 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra
"lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
"lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
"lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
+ "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),
+ "lora_not_found_warning_console": shared.OptionInfo(False, "Lora not found warning in console"),
+ "lora_not_found_gradio_warning": shared.OptionInfo(False, "Lora not found warning popup in webui"),
}))
@@ -121,3 +97,5 @@ def infotext_pasted(infotext, d):
script_callbacks.on_infotext_pasted(infotext_pasted)
+
+shared.opts.onchange("lora_in_memory_limit", networks.purge_networks_from_memory)
diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py
index 2ca997f7..3160aecf 100644
--- a/extensions-builtin/Lora/ui_edit_user_metadata.py
+++ b/extensions-builtin/Lora/ui_edit_user_metadata.py
@@ -54,12 +54,13 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
self.slider_preferred_weight = None
self.edit_notes = None
- def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, notes):
+ def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, negative_text, notes):
user_metadata = self.get_user_metadata(name)
user_metadata["description"] = desc
user_metadata["sd version"] = sd_version
user_metadata["activation text"] = activation_text
user_metadata["preferred weight"] = preferred_weight
+ user_metadata["negative text"] = negative_text
user_metadata["notes"] = notes
self.write_user_metadata(name, user_metadata)
@@ -70,6 +71,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
metadata = item.get("metadata") or {}
keys = {
+ 'ss_output_name': "Output name:",
'ss_sd_model_name': "Model:",
'ss_clip_skip': "Clip skip:",
'ss_network_module': "Kohya module:",
@@ -126,6 +128,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False),
user_metadata.get('activation text', ''),
float(user_metadata.get('preferred weight', 0.0)),
+ user_metadata.get('negative text', ''),
gr.update(visible=True if tags else False),
gr.update(value=self.generate_random_prompt_from_tags(tags), visible=True if tags else False),
]
@@ -161,13 +164,13 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
self.taginfo = gr.HighlightedText(label="Training dataset tags")
self.edit_activation_text = gr.Text(label='Activation text', info="Will be added to prompt along with Lora")
self.slider_preferred_weight = gr.Slider(label='Preferred weight', info="Set to 0 to disable", minimum=0.0, maximum=2.0, step=0.01)
-
+ self.edit_negative_text = gr.Text(label='Negative prompt', info="Will be added to negative prompts")
with gr.Row() as row_random_prompt:
with gr.Column(scale=8):
random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False)
with gr.Column(scale=1, min_width=120):
- generate_random_prompt = gr.Button('Generate').style(full_width=True, size="lg")
+ generate_random_prompt = gr.Button('Generate', size="lg", scale=1)
self.edit_notes = gr.TextArea(label='Notes', lines=4)
@@ -197,6 +200,7 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
self.taginfo,
self.edit_activation_text,
self.slider_preferred_weight,
+ self.edit_negative_text,
row_random_prompt,
random_prompt,
]
@@ -210,7 +214,9 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
self.select_sd_version,
self.edit_activation_text,
self.slider_preferred_weight,
+ self.edit_negative_text,
self.edit_notes,
]
+
self.setup_save_handler(self.button_save, self.save_lora_user_metadata, edited_components)
diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
index 3629e5c0..e714fac4 100644
--- a/extensions-builtin/Lora/ui_extra_networks_lora.py
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -17,6 +17,8 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
def create_item(self, name, index=None, enable_filter=True):
lora_on_disk = networks.available_networks.get(name)
+ if lora_on_disk is None:
+ return
path, ext = os.path.splitext(lora_on_disk.filename)
@@ -25,9 +27,10 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
item = {
"name": name,
"filename": lora_on_disk.filename,
+ "shorthash": lora_on_disk.shorthash,
"preview": self.find_preview(path),
"description": self.find_description(path),
- "search_term": self.search_terms_from_path(lora_on_disk.filename),
+ "search_term": self.search_terms_from_path(lora_on_disk.filename) + " " + (lora_on_disk.hash or ""),
"local_preview": f"{path}.{shared.opts.samples_format}",
"metadata": lora_on_disk.metadata,
"sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
@@ -42,6 +45,11 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
if activation_text:
item["prompt"] += " + " + quote_js(" " + activation_text)
+ negative_prompt = item["user_metadata"].get("negative text")
+ item["negative_prompt"] = quote_js("")
+ if negative_prompt:
+ item["negative_prompt"] = quote_js('(' + negative_prompt + ':1)')
+
sd_version = item["user_metadata"].get("sd version")
if sd_version in network.SdVersion.__members__:
item["sd_version"] = sd_version
@@ -65,9 +73,10 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
return item
def list_items(self):
- for index, name in enumerate(networks.available_networks):
+ # instantiate a list to protect against concurrent modification
+ names = list(networks.available_networks)
+ for index, name in enumerate(names):
item = self.create_item(name, index)
-
if item is not None:
yield item
diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py
index 167d2f64..f799cb76 100644
--- a/extensions-builtin/ScuNET/scripts/scunet_model.py
+++ b/extensions-builtin/ScuNET/scripts/scunet_model.py
@@ -3,14 +3,11 @@ import sys
import PIL.Image
import numpy as np
import torch
-from tqdm import tqdm
import modules.upscaler
from modules import devices, modelloader, script_callbacks, errors
-from scunet_model_arch import SCUNet
-
-from modules.modelloader import load_file_from_url
from modules.shared import opts
+from modules.upscaler_utils import tiled_upscale_2
class UpscalerScuNET(modules.upscaler.Upscaler):
@@ -42,47 +39,6 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
scalers.append(scaler_data2)
self.scalers = scalers
- @staticmethod
- @torch.no_grad()
- def tiled_inference(img, model):
- # test the image tile by tile
- h, w = img.shape[2:]
- tile = opts.SCUNET_tile
- tile_overlap = opts.SCUNET_tile_overlap
- if tile == 0:
- return model(img)
-
- device = devices.get_device_for('scunet')
- assert tile % 8 == 0, "tile size should be a multiple of window_size"
- sf = 1
-
- stride = tile - tile_overlap
- h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
- w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
- E = torch.zeros(1, 3, h * sf, w * sf, dtype=img.dtype, device=device)
- W = torch.zeros_like(E, dtype=devices.dtype, device=device)
-
- with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="ScuNET tiles") as pbar:
- for h_idx in h_idx_list:
-
- for w_idx in w_idx_list:
-
- in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
-
- out_patch = model(in_patch)
- out_patch_mask = torch.ones_like(out_patch)
-
- E[
- ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
- ].add_(out_patch)
- W[
- ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
- ].add_(out_patch_mask)
- pbar.update(1)
- output = E.div_(W)
-
- return output
-
def do_upscale(self, img: PIL.Image.Image, selected_file):
devices.torch_gc()
@@ -106,7 +62,16 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
_img[:, :, :h, :w] = torch_img # pad image
torch_img = _img
- torch_output = self.tiled_inference(torch_img, model).squeeze(0)
+ with torch.no_grad():
+ torch_output = tiled_upscale_2(
+ torch_img,
+ model,
+ tile_size=opts.SCUNET_tile,
+ tile_overlap=opts.SCUNET_tile_overlap,
+ scale=1,
+ device=devices.get_device_for('scunet'),
+ desc="ScuNET tiles",
+ ).squeeze(0)
torch_output = torch_output[:, :h * 1, :w * 1] # remove padding, if any
np_output: np.ndarray = torch_output.float().cpu().clamp_(0, 1).numpy()
del torch_img, torch_output
@@ -120,17 +85,10 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
device = devices.get_device_for('scunet')
if path.startswith("http"):
# TODO: this doesn't use `path` at all?
- filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth")
+ filename = modelloader.load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth")
else:
filename = path
- model = SCUNet(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
- model.load_state_dict(torch.load(filename), strict=True)
- model.eval()
- for _, v in model.named_parameters():
- v.requires_grad = False
- model = model.to(device)
-
- return model
+ return modelloader.load_spandrel_model(filename, device=device, expected_architecture='SCUNet')
def on_ui_settings():
diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py
deleted file mode 100644
index b51a8806..00000000
--- a/extensions-builtin/ScuNET/scunet_model_arch.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# -*- coding: utf-8 -*-
-import numpy as np
-import torch
-import torch.nn as nn
-from einops import rearrange
-from einops.layers.torch import Rearrange
-from timm.models.layers import trunc_normal_, DropPath
-
-
-class WMSA(nn.Module):
- """ Self-attention module in Swin Transformer
- """
-
- def __init__(self, input_dim, output_dim, head_dim, window_size, type):
- super(WMSA, self).__init__()
- self.input_dim = input_dim
- self.output_dim = output_dim
- self.head_dim = head_dim
- self.scale = self.head_dim ** -0.5
- self.n_heads = input_dim // head_dim
- self.window_size = window_size
- self.type = type
- self.embedding_layer = nn.Linear(self.input_dim, 3 * self.input_dim, bias=True)
-
- self.relative_position_params = nn.Parameter(
- torch.zeros((2 * window_size - 1) * (2 * window_size - 1), self.n_heads))
-
- self.linear = nn.Linear(self.input_dim, self.output_dim)
-
- trunc_normal_(self.relative_position_params, std=.02)
- self.relative_position_params = torch.nn.Parameter(
- self.relative_position_params.view(2 * window_size - 1, 2 * window_size - 1, self.n_heads).transpose(1,
- 2).transpose(
- 0, 1))
-
- def generate_mask(self, h, w, p, shift):
- """ generating the mask of SW-MSA
- Args:
- shift: shift parameters in CyclicShift.
- Returns:
- attn_mask: should be (1 1 w p p),
- """
- # supporting square.
- attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device)
- if self.type == 'W':
- return attn_mask
-
- s = p - shift
- attn_mask[-1, :, :s, :, s:, :] = True
- attn_mask[-1, :, s:, :, :s, :] = True
- attn_mask[:, -1, :, :s, :, s:] = True
- attn_mask[:, -1, :, s:, :, :s] = True
- attn_mask = rearrange(attn_mask, 'w1 w2 p1 p2 p3 p4 -> 1 1 (w1 w2) (p1 p2) (p3 p4)')
- return attn_mask
-
- def forward(self, x):
- """ Forward pass of Window Multi-head Self-attention module.
- Args:
- x: input tensor with shape of [b h w c];
- attn_mask: attention mask, fill -inf where the value is True;
- Returns:
- output: tensor shape [b h w c]
- """
- if self.type != 'W':
- x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
-
- x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
- h_windows = x.size(1)
- w_windows = x.size(2)
- # square validation
- # assert h_windows == w_windows
-
- x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size)
- qkv = self.embedding_layer(x)
- q, k, v = rearrange(qkv, 'b nw np (threeh c) -> threeh b nw np c', c=self.head_dim).chunk(3, dim=0)
- sim = torch.einsum('hbwpc,hbwqc->hbwpq', q, k) * self.scale
- # Adding learnable relative embedding
- sim = sim + rearrange(self.relative_embedding(), 'h p q -> h 1 1 p q')
- # Using Attn Mask to distinguish different subwindows.
- if self.type != 'W':
- attn_mask = self.generate_mask(h_windows, w_windows, self.window_size, shift=self.window_size // 2)
- sim = sim.masked_fill_(attn_mask, float("-inf"))
-
- probs = nn.functional.softmax(sim, dim=-1)
- output = torch.einsum('hbwij,hbwjc->hbwic', probs, v)
- output = rearrange(output, 'h b w p c -> b w p (h c)')
- output = self.linear(output)
- output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)
-
- if self.type != 'W':
- output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2))
-
- return output
-
- def relative_embedding(self):
- cord = torch.tensor(np.array([[i, j] for i in range(self.window_size) for j in range(self.window_size)]))
- relation = cord[:, None, :] - cord[None, :, :] + self.window_size - 1
- # negative is allowed
- return self.relative_position_params[:, relation[:, :, 0].long(), relation[:, :, 1].long()]
-
-
-class Block(nn.Module):
- def __init__(self, input_dim, output_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
- """ SwinTransformer Block
- """
- super(Block, self).__init__()
- self.input_dim = input_dim
- self.output_dim = output_dim
- assert type in ['W', 'SW']
- self.type = type
- if input_resolution <= window_size:
- self.type = 'W'
-
- self.ln1 = nn.LayerNorm(input_dim)
- self.msa = WMSA(input_dim, input_dim, head_dim, window_size, self.type)
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.ln2 = nn.LayerNorm(input_dim)
- self.mlp = nn.Sequential(
- nn.Linear(input_dim, 4 * input_dim),
- nn.GELU(),
- nn.Linear(4 * input_dim, output_dim),
- )
-
- def forward(self, x):
- x = x + self.drop_path(self.msa(self.ln1(x)))
- x = x + self.drop_path(self.mlp(self.ln2(x)))
- return x
-
-
-class ConvTransBlock(nn.Module):
- def __init__(self, conv_dim, trans_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
- """ SwinTransformer and Conv Block
- """
- super(ConvTransBlock, self).__init__()
- self.conv_dim = conv_dim
- self.trans_dim = trans_dim
- self.head_dim = head_dim
- self.window_size = window_size
- self.drop_path = drop_path
- self.type = type
- self.input_resolution = input_resolution
-
- assert self.type in ['W', 'SW']
- if self.input_resolution <= self.window_size:
- self.type = 'W'
-
- self.trans_block = Block(self.trans_dim, self.trans_dim, self.head_dim, self.window_size, self.drop_path,
- self.type, self.input_resolution)
- self.conv1_1 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)
- self.conv1_2 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)
-
- self.conv_block = nn.Sequential(
- nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False),
- nn.ReLU(True),
- nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False)
- )
-
- def forward(self, x):
- conv_x, trans_x = torch.split(self.conv1_1(x), (self.conv_dim, self.trans_dim), dim=1)
- conv_x = self.conv_block(conv_x) + conv_x
- trans_x = Rearrange('b c h w -> b h w c')(trans_x)
- trans_x = self.trans_block(trans_x)
- trans_x = Rearrange('b h w c -> b c h w')(trans_x)
- res = self.conv1_2(torch.cat((conv_x, trans_x), dim=1))
- x = x + res
-
- return x
-
-
-class SCUNet(nn.Module):
- # def __init__(self, in_nc=3, config=[2, 2, 2, 2, 2, 2, 2], dim=64, drop_path_rate=0.0, input_resolution=256):
- def __init__(self, in_nc=3, config=None, dim=64, drop_path_rate=0.0, input_resolution=256):
- super(SCUNet, self).__init__()
- if config is None:
- config = [2, 2, 2, 2, 2, 2, 2]
- self.config = config
- self.dim = dim
- self.head_dim = 32
- self.window_size = 8
-
- # drop path rate for each layer
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(config))]
-
- self.m_head = [nn.Conv2d(in_nc, dim, 3, 1, 1, bias=False)]
-
- begin = 0
- self.m_down1 = [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
- 'W' if not i % 2 else 'SW', input_resolution)
- for i in range(config[0])] + \
- [nn.Conv2d(dim, 2 * dim, 2, 2, 0, bias=False)]
-
- begin += config[0]
- self.m_down2 = [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
- 'W' if not i % 2 else 'SW', input_resolution // 2)
- for i in range(config[1])] + \
- [nn.Conv2d(2 * dim, 4 * dim, 2, 2, 0, bias=False)]
-
- begin += config[1]
- self.m_down3 = [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
- 'W' if not i % 2 else 'SW', input_resolution // 4)
- for i in range(config[2])] + \
- [nn.Conv2d(4 * dim, 8 * dim, 2, 2, 0, bias=False)]
-
- begin += config[2]
- self.m_body = [ConvTransBlock(4 * dim, 4 * dim, self.head_dim, self.window_size, dpr[i + begin],
- 'W' if not i % 2 else 'SW', input_resolution // 8)
- for i in range(config[3])]
-
- begin += config[3]
- self.m_up3 = [nn.ConvTranspose2d(8 * dim, 4 * dim, 2, 2, 0, bias=False), ] + \
- [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
- 'W' if not i % 2 else 'SW', input_resolution // 4)
- for i in range(config[4])]
-
- begin += config[4]
- self.m_up2 = [nn.ConvTranspose2d(4 * dim, 2 * dim, 2, 2, 0, bias=False), ] + \
- [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
- 'W' if not i % 2 else 'SW', input_resolution // 2)
- for i in range(config[5])]
-
- begin += config[5]
- self.m_up1 = [nn.ConvTranspose2d(2 * dim, dim, 2, 2, 0, bias=False), ] + \
- [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
- 'W' if not i % 2 else 'SW', input_resolution)
- for i in range(config[6])]
-
- self.m_tail = [nn.Conv2d(dim, in_nc, 3, 1, 1, bias=False)]
-
- self.m_head = nn.Sequential(*self.m_head)
- self.m_down1 = nn.Sequential(*self.m_down1)
- self.m_down2 = nn.Sequential(*self.m_down2)
- self.m_down3 = nn.Sequential(*self.m_down3)
- self.m_body = nn.Sequential(*self.m_body)
- self.m_up3 = nn.Sequential(*self.m_up3)
- self.m_up2 = nn.Sequential(*self.m_up2)
- self.m_up1 = nn.Sequential(*self.m_up1)
- self.m_tail = nn.Sequential(*self.m_tail)
- # self.apply(self._init_weights)
-
- def forward(self, x0):
-
- h, w = x0.size()[-2:]
- paddingBottom = int(np.ceil(h / 64) * 64 - h)
- paddingRight = int(np.ceil(w / 64) * 64 - w)
- x0 = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x0)
-
- x1 = self.m_head(x0)
- x2 = self.m_down1(x1)
- x3 = self.m_down2(x2)
- x4 = self.m_down3(x3)
- x = self.m_body(x4)
- x = self.m_up3(x + x4)
- x = self.m_up2(x + x3)
- x = self.m_up1(x + x2)
- x = self.m_tail(x + x1)
-
- x = x[..., :h, :w]
-
- return x
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py
index ae0d0e6a..8a555c79 100644
--- a/extensions-builtin/SwinIR/scripts/swinir_model.py
+++ b/extensions-builtin/SwinIR/scripts/swinir_model.py
@@ -1,20 +1,18 @@
+import logging
import sys
-import platform
import numpy as np
import torch
from PIL import Image
-from tqdm import tqdm
from modules import modelloader, devices, script_callbacks, shared
-from modules.shared import opts, state
-from swinir_model_arch import SwinIR
-from swinir_model_arch_v2 import Swin2SR
+from modules.shared import opts
from modules.upscaler import Upscaler, UpscalerData
+from modules.upscaler_utils import tiled_upscale_2
SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth"
-device_swinir = devices.get_device_for('swinir')
+logger = logging.getLogger(__name__)
class UpscalerSwinIR(Upscaler):
@@ -37,26 +35,29 @@ class UpscalerSwinIR(Upscaler):
scalers.append(model_data)
self.scalers = scalers
- def do_upscale(self, img, model_file):
- use_compile = hasattr(opts, 'SWIN_torch_compile') and opts.SWIN_torch_compile \
- and int(torch.__version__.split('.')[0]) >= 2 and platform.system() != "Windows"
+ def do_upscale(self, img: Image.Image, model_file: str) -> Image.Image:
current_config = (model_file, opts.SWIN_tile)
- if use_compile and self._cached_model_config == current_config:
+ device = self._get_device()
+
+ if self._cached_model_config == current_config:
model = self._cached_model
else:
- self._cached_model = None
try:
model = self.load_model(model_file)
except Exception as e:
print(f"Failed loading SwinIR model {model_file}: {e}", file=sys.stderr)
return img
- model = model.to(device_swinir, dtype=devices.dtype)
- if use_compile:
- model = torch.compile(model)
- self._cached_model = model
- self._cached_model_config = current_config
- img = upscale(img, model)
+ self._cached_model = model
+ self._cached_model_config = current_config
+
+ img = upscale(
+ img,
+ model,
+ tile=opts.SWIN_tile,
+ tile_overlap=opts.SWIN_tile_overlap,
+ device=device,
+ )
devices.torch_gc()
return img
@@ -69,69 +70,55 @@ class UpscalerSwinIR(Upscaler):
)
else:
filename = path
- if filename.endswith(".v2.pth"):
- model = Swin2SR(
- upscale=scale,
- in_chans=3,
- img_size=64,
- window_size=8,
- img_range=1.0,
- depths=[6, 6, 6, 6, 6, 6],
- embed_dim=180,
- num_heads=[6, 6, 6, 6, 6, 6],
- mlp_ratio=2,
- upsampler="nearest+conv",
- resi_connection="1conv",
- )
- params = None
- else:
- model = SwinIR(
- upscale=scale,
- in_chans=3,
- img_size=64,
- window_size=8,
- img_range=1.0,
- depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
- embed_dim=240,
- num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
- mlp_ratio=2,
- upsampler="nearest+conv",
- resi_connection="3conv",
- )
- params = "params_ema"
- pretrained_model = torch.load(filename)
- if params is not None:
- model.load_state_dict(pretrained_model[params], strict=True)
- else:
- model.load_state_dict(pretrained_model, strict=True)
- return model
+ model_descriptor = modelloader.load_spandrel_model(
+ filename,
+ device=self._get_device(),
+ dtype=devices.dtype,
+ expected_architecture="SwinIR",
+ )
+ if getattr(opts, 'SWIN_torch_compile', False):
+ try:
+ model_descriptor.model.compile()
+ except Exception:
+ logger.warning("Failed to compile SwinIR model, fallback to JIT", exc_info=True)
+ return model_descriptor
+
+ def _get_device(self):
+ return devices.get_device_for('swinir')
def upscale(
- img,
- model,
- tile=None,
- tile_overlap=None,
- window_size=8,
- scale=4,
+ img,
+ model,
+ *,
+ tile: int,
+ tile_overlap: int,
+ window_size=8,
+ scale=4,
+ device,
):
- tile = tile or opts.SWIN_tile
- tile_overlap = tile_overlap or opts.SWIN_tile_overlap
-
img = np.array(img)
img = img[:, :, ::-1]
img = np.moveaxis(img, 2, 0) / 255
img = torch.from_numpy(img).float()
- img = img.unsqueeze(0).to(device_swinir, dtype=devices.dtype)
+ img = img.unsqueeze(0).to(device, dtype=devices.dtype)
with torch.no_grad(), devices.autocast():
_, _, h_old, w_old = img.size()
h_pad = (h_old // window_size + 1) * window_size - h_old
w_pad = (w_old // window_size + 1) * window_size - w_old
img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, : h_old + h_pad, :]
img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, : w_old + w_pad]
- output = inference(img, model, tile, tile_overlap, window_size, scale)
+ output = tiled_upscale_2(
+ img,
+ model,
+ tile_size=tile,
+ tile_overlap=tile_overlap,
+ scale=scale,
+ device=device,
+ desc="SwinIR tiles",
+ )
output = output[..., : h_old * scale, : w_old * scale]
output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
if output.ndim == 3:
@@ -142,51 +129,12 @@ def upscale(
return Image.fromarray(output, "RGB")
-def inference(img, model, tile, tile_overlap, window_size, scale):
- # test the image tile by tile
- b, c, h, w = img.size()
- tile = min(tile, h, w)
- assert tile % window_size == 0, "tile size should be a multiple of window_size"
- sf = scale
-
- stride = tile - tile_overlap
- h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
- w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
- E = torch.zeros(b, c, h * sf, w * sf, dtype=devices.dtype, device=device_swinir).type_as(img)
- W = torch.zeros_like(E, dtype=devices.dtype, device=device_swinir)
-
- with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar:
- for h_idx in h_idx_list:
- if state.interrupted or state.skipped:
- break
-
- for w_idx in w_idx_list:
- if state.interrupted or state.skipped:
- break
-
- in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
- out_patch = model(in_patch)
- out_patch_mask = torch.ones_like(out_patch)
-
- E[
- ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
- ].add_(out_patch)
- W[
- ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
- ].add_(out_patch_mask)
- pbar.update(1)
- output = E.div_(W)
-
- return output
-
-
def on_ui_settings():
import gradio as gr
shared.opts.add_option("SWIN_tile", shared.OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")))
shared.opts.add_option("SWIN_tile_overlap", shared.OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}, section=('upscaling', "Upscaling")))
- if int(torch.__version__.split('.')[0]) >= 2 and platform.system() != "Windows": # torch.compile() require pytorch 2.0 or above, and not on Windows
- shared.opts.add_option("SWIN_torch_compile", shared.OptionInfo(False, "Use torch.compile to accelerate SwinIR.", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")).info("Takes longer on first run"))
+ shared.opts.add_option("SWIN_torch_compile", shared.OptionInfo(False, "Use torch.compile to accelerate SwinIR.", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")).info("Takes longer on first run"))
script_callbacks.on_ui_settings(on_ui_settings)
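
Editor's note: the inference() helper removed above implemented weighted overlap-tiling; the new code delegates that job to tiled_upscale_2 from modules.upscaler_utils, passing the tile size, overlap, scale factor, device and a progress description. A minimal, self-contained sketch of the same accumulation scheme (illustrative only, not the in-tree helper) is shown below.

import torch

def tiled_upscale_sketch(img, model, *, tile_size, tile_overlap, scale):
    # Split the (b, c, h, w) image into overlapping tiles, upscale each tile,
    # then average the overlapping regions via an accumulated weight map
    # (the E / W division in the removed code).
    b, c, h, w = img.shape
    tile = min(tile_size, h, w)
    stride = tile - tile_overlap
    h_idx = list(range(0, h - tile, stride)) + [h - tile]
    w_idx = list(range(0, w - tile, stride)) + [w - tile]
    result = torch.zeros(b, c, h * scale, w * scale, dtype=img.dtype, device=img.device)
    weights = torch.zeros_like(result)
    for hi in h_idx:
        for wi in w_idx:
            out_patch = model(img[..., hi:hi + tile, wi:wi + tile])
            result[..., hi * scale:(hi + tile) * scale, wi * scale:(wi + tile) * scale] += out_patch
            weights[..., hi * scale:(hi + tile) * scale, wi * scale:(wi + tile) * scale] += torch.ones_like(out_patch)
    return result / weights
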
diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py
deleted file mode 100644
index 93b93274..00000000
--- a/extensions-builtin/SwinIR/swinir_model_arch.py
+++ /dev/null
@@ -1,867 +0,0 @@
-# -----------------------------------------------------------------------------------
-# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
-# Originally Written by Ze Liu, Modified by Jingyun Liang.
-# -----------------------------------------------------------------------------------
-
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
-
-class Mlp(nn.Module):
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-def window_partition(x, window_size):
- """
- Args:
- x: (B, H, W, C)
- window_size (int): window size
-
- Returns:
- windows: (num_windows*B, window_size, window_size, C)
- """
- B, H, W, C = x.shape
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
- return windows
-
-
-def window_reverse(windows, window_size, H, W):
- """
- Args:
- windows: (num_windows*B, window_size, window_size, C)
- window_size (int): Window size
- H (int): Height of image
- W (int): Width of image
-
- Returns:
- x: (B, H, W, C)
- """
- B = int(windows.shape[0] / (H * W / window_size / window_size))
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
- return x
-
-
-class WindowAttention(nn.Module):
- r""" Window based multi-head self attention (W-MSA) module with relative position bias.
- It supports both of shifted and non-shifted window.
-
- Args:
- dim (int): Number of input channels.
- window_size (tuple[int]): The height and width of the window.
- num_heads (int): Number of attention heads.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
- attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
- proj_drop (float, optional): Dropout ratio of output. Default: 0.0
- """
-
- def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
-
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
-
- # define a parameter table of relative position bias
- self.relative_position_bias_table = nn.Parameter(
- torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
-
- # get pair-wise relative position index for each token inside the window
- coords_h = torch.arange(self.window_size[0])
- coords_w = torch.arange(self.window_size[1])
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
- relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
- relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
- relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
- relative_coords[:, :, 1] += self.window_size[1] - 1
- relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
- relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
- self.register_buffer("relative_position_index", relative_position_index)
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
-
- self.proj_drop = nn.Dropout(proj_drop)
-
- trunc_normal_(self.relative_position_bias_table, std=.02)
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x, mask=None):
- """
- Args:
- x: input features with shape of (num_windows*B, N, C)
- mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
- """
- B_, N, C = x.shape
- qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
-
- q = q * self.scale
- attn = (q @ k.transpose(-2, -1))
-
- relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
- self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
- relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
- attn = attn + relative_position_bias.unsqueeze(0)
-
- if mask is not None:
- nW = mask.shape[0]
- attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
- attn = attn.view(-1, self.num_heads, N, N)
- attn = self.softmax(attn)
- else:
- attn = self.softmax(attn)
-
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
- def extra_repr(self) -> str:
- return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
-
- def flops(self, N):
- # calculate flops for 1 window with token length of N
- flops = 0
- # qkv = self.qkv(x)
- flops += N * self.dim * 3 * self.dim
- # attn = (q @ k.transpose(-2, -1))
- flops += self.num_heads * N * (self.dim // self.num_heads) * N
- # x = (attn @ v)
- flops += self.num_heads * N * N * (self.dim // self.num_heads)
- # x = self.proj(x)
- flops += N * self.dim * self.dim
- return flops
-
-
-class SwinTransformerBlock(nn.Module):
- r""" Swin Transformer Block.
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- num_heads (int): Number of attention heads.
- window_size (int): Window size.
- shift_size (int): Shift size for SW-MSA.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
- mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
- act_layer=nn.GELU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
- if min(self.input_resolution) <= self.window_size:
- # if window size is larger than input resolution, we don't partition windows
- self.shift_size = 0
- self.window_size = min(self.input_resolution)
- assert 0 <= self.shift_size < self.window_size, "shift_size must be in 0-window_size"
-
- self.norm1 = norm_layer(dim)
- self.attn = WindowAttention(
- dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
- qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
-
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- if self.shift_size > 0:
- attn_mask = self.calculate_mask(self.input_resolution)
- else:
- attn_mask = None
-
- self.register_buffer("attn_mask", attn_mask)
-
- def calculate_mask(self, x_size):
- # calculate attention mask for SW-MSA
- H, W = x_size
- img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
- h_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- w_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- cnt = 0
- for h in h_slices:
- for w in w_slices:
- img_mask[:, h, w, :] = cnt
- cnt += 1
-
- mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
-
- return attn_mask
-
- def forward(self, x, x_size):
- H, W = x_size
- B, L, C = x.shape
- # assert L == H * W, "input feature has wrong size"
-
- shortcut = x
- x = self.norm1(x)
- x = x.view(B, H, W, C)
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- else:
- shifted_x = x
-
- # partition windows
- x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
-
- # W-MSA/SW-MSA (to be compatible with testing on images whose shapes are multiples of the window size)
- if self.input_resolution == x_size:
- attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
- else:
- attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
- shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
-
- # reverse cyclic shift
- if self.shift_size > 0:
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
- else:
- x = shifted_x
- x = x.view(B, H * W, C)
-
- # FFN
- x = shortcut + self.drop_path(x)
- x = x + self.drop_path(self.mlp(self.norm2(x)))
-
- return x
-
- def extra_repr(self) -> str:
- return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
- f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
-
- def flops(self):
- flops = 0
- H, W = self.input_resolution
- # norm1
- flops += self.dim * H * W
- # W-MSA/SW-MSA
- nW = H * W / self.window_size / self.window_size
- flops += nW * self.attn.flops(self.window_size * self.window_size)
- # mlp
- flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
- # norm2
- flops += self.dim * H * W
- return flops
-
-
-class PatchMerging(nn.Module):
- r""" Patch Merging Layer.
-
- Args:
- input_resolution (tuple[int]): Resolution of input feature.
- dim (int): Number of input channels.
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
- super().__init__()
- self.input_resolution = input_resolution
- self.dim = dim
- self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
- self.norm = norm_layer(4 * dim)
-
- def forward(self, x):
- """
- x: B, H*W, C
- """
- H, W = self.input_resolution
- B, L, C = x.shape
- assert L == H * W, "input feature has wrong size"
- assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
-
- x = x.view(B, H, W, C)
-
- x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
- x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
- x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
- x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
- x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
- x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
-
- x = self.norm(x)
- x = self.reduction(x)
-
- return x
-
- def extra_repr(self) -> str:
- return f"input_resolution={self.input_resolution}, dim={self.dim}"
-
- def flops(self):
- H, W = self.input_resolution
- flops = H * W * self.dim
- flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
- return flops
-
-
-class BasicLayer(nn.Module):
- """ A basic Swin Transformer layer for one stage.
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- """
-
- def __init__(self, dim, input_resolution, depth, num_heads, window_size,
- mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
- drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
-
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.depth = depth
- self.use_checkpoint = use_checkpoint
-
- # build blocks
- self.blocks = nn.ModuleList([
- SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
- num_heads=num_heads, window_size=window_size,
- shift_size=0 if (i % 2 == 0) else window_size // 2,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop, attn_drop=attn_drop,
- drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
- norm_layer=norm_layer)
- for i in range(depth)])
-
- # patch merging layer
- if downsample is not None:
- self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
- else:
- self.downsample = None
-
- def forward(self, x, x_size):
- for blk in self.blocks:
- if self.use_checkpoint:
- x = checkpoint.checkpoint(blk, x, x_size)
- else:
- x = blk(x, x_size)
- if self.downsample is not None:
- x = self.downsample(x)
- return x
-
- def extra_repr(self) -> str:
- return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
-
- def flops(self):
- flops = 0
- for blk in self.blocks:
- flops += blk.flops()
- if self.downsample is not None:
- flops += self.downsample.flops()
- return flops
-
-
-class RSTB(nn.Module):
- """Residual Swin Transformer Block (RSTB).
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- img_size: Input image size.
- patch_size: Patch size.
- resi_connection: The convolutional block before residual connection.
- """
-
- def __init__(self, dim, input_resolution, depth, num_heads, window_size,
- mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
- drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
- img_size=224, patch_size=4, resi_connection='1conv'):
- super(RSTB, self).__init__()
-
- self.dim = dim
- self.input_resolution = input_resolution
-
- self.residual_group = BasicLayer(dim=dim,
- input_resolution=input_resolution,
- depth=depth,
- num_heads=num_heads,
- window_size=window_size,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop, attn_drop=attn_drop,
- drop_path=drop_path,
- norm_layer=norm_layer,
- downsample=downsample,
- use_checkpoint=use_checkpoint)
-
- if resi_connection == '1conv':
- self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
- elif resi_connection == '3conv':
- # to save parameters and memory
- self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(dim // 4, dim, 3, 1, 1))
-
- self.patch_embed = PatchEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
- norm_layer=None)
-
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
- norm_layer=None)
-
- def forward(self, x, x_size):
- return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
-
- def flops(self):
- flops = 0
- flops += self.residual_group.flops()
- H, W = self.input_resolution
- flops += H * W * self.dim * self.dim * 9
- flops += self.patch_embed.flops()
- flops += self.patch_unembed.flops()
-
- return flops
-
-
-class PatchEmbed(nn.Module):
- r""" Image to Patch Embedding
-
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- if norm_layer is not None:
- self.norm = norm_layer(embed_dim)
- else:
- self.norm = None
-
- def forward(self, x):
- x = x.flatten(2).transpose(1, 2) # B Ph*Pw C
- if self.norm is not None:
- x = self.norm(x)
- return x
-
- def flops(self):
- flops = 0
- H, W = self.img_size
- if self.norm is not None:
- flops += H * W * self.embed_dim
- return flops
-
-
-class PatchUnEmbed(nn.Module):
- r""" Image to Patch Unembedding
-
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- def forward(self, x, x_size):
- B, HW, C = x.shape
- x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C
- return x
-
- def flops(self):
- flops = 0
- return flops
-
-
-class Upsample(nn.Sequential):
- """Upsample module.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
- """
-
- def __init__(self, scale, num_feat):
- m = []
- if (scale & (scale - 1)) == 0: # scale = 2^n
- for _ in range(int(math.log(scale, 2))):
- m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(2))
- elif scale == 3:
- m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(3))
- else:
- raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
- super(Upsample, self).__init__(*m)
-
-
-class UpsampleOneStep(nn.Sequential):
- """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
- Used in lightweight SR to save parameters.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
-
- """
-
- def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
- self.num_feat = num_feat
- self.input_resolution = input_resolution
- m = []
- m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
- m.append(nn.PixelShuffle(scale))
- super(UpsampleOneStep, self).__init__(*m)
-
- def flops(self):
- H, W = self.input_resolution
- flops = H * W * self.num_feat * 3 * 9
- return flops
-
-
-class SwinIR(nn.Module):
- r""" SwinIR
- A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
-
- Args:
- img_size (int | tuple(int)): Input image size. Default 64
- patch_size (int | tuple(int)): Patch size. Default: 1
- in_chans (int): Number of input image channels. Default: 3
- embed_dim (int): Patch embedding dimension. Default: 96
- depths (tuple(int)): Depth of each Swin Transformer layer.
- num_heads (tuple(int)): Number of attention heads in different layers.
- window_size (int): Window size. Default: 7
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
- qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
- drop_rate (float): Dropout rate. Default: 0
- attn_drop_rate (float): Attention dropout rate. Default: 0
- drop_path_rate (float): Stochastic depth rate. Default: 0.1
- norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
- ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
- patch_norm (bool): If True, add normalization after patch embedding. Default: True
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
- upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compression artifact reduction
- img_range: Image range. 1. or 255.
- upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
- resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
- """
-
- def __init__(self, img_size=64, patch_size=1, in_chans=3,
- embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
- window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
- drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
- norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
- use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
- **kwargs):
- super(SwinIR, self).__init__()
- num_in_ch = in_chans
- num_out_ch = in_chans
- num_feat = 64
- self.img_range = img_range
- if in_chans == 3:
- rgb_mean = (0.4488, 0.4371, 0.4040)
- self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
- else:
- self.mean = torch.zeros(1, 1, 1, 1)
- self.upscale = upscale
- self.upsampler = upsampler
- self.window_size = window_size
-
- #####################################################################################################
- ################################### 1, shallow feature extraction ###################################
- self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
-
- #####################################################################################################
- ################################### 2, deep feature extraction ######################################
- self.num_layers = len(depths)
- self.embed_dim = embed_dim
- self.ape = ape
- self.patch_norm = patch_norm
- self.num_features = embed_dim
- self.mlp_ratio = mlp_ratio
-
- # split image into non-overlapping patches
- self.patch_embed = PatchEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None)
- num_patches = self.patch_embed.num_patches
- patches_resolution = self.patch_embed.patches_resolution
- self.patches_resolution = patches_resolution
-
- # merge non-overlapping patches into image
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None)
-
- # absolute position embedding
- if self.ape:
- self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
- trunc_normal_(self.absolute_pos_embed, std=.02)
-
- self.pos_drop = nn.Dropout(p=drop_rate)
-
- # stochastic depth
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
-
- # build Residual Swin Transformer blocks (RSTB)
- self.layers = nn.ModuleList()
- for i_layer in range(self.num_layers):
- layer = RSTB(dim=embed_dim,
- input_resolution=(patches_resolution[0],
- patches_resolution[1]),
- depth=depths[i_layer],
- num_heads=num_heads[i_layer],
- window_size=window_size,
- mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate,
- drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
- norm_layer=norm_layer,
- downsample=None,
- use_checkpoint=use_checkpoint,
- img_size=img_size,
- patch_size=patch_size,
- resi_connection=resi_connection
-
- )
- self.layers.append(layer)
- self.norm = norm_layer(self.num_features)
-
- # build the last conv layer in deep feature extraction
- if resi_connection == '1conv':
- self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
- elif resi_connection == '3conv':
- # to save parameters and memory
- self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
-
- #####################################################################################################
- ################################ 3, high quality image reconstruction ################################
- if self.upsampler == 'pixelshuffle':
- # for classical SR
- self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
- self.upsample = Upsample(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- elif self.upsampler == 'pixelshuffledirect':
- # for lightweight SR (to save parameters)
- self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
- (patches_resolution[0], patches_resolution[1]))
- elif self.upsampler == 'nearest+conv':
- # for real-world SR (less artifacts)
- self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
- self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- if self.upscale == 4:
- self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
- else:
- # for image denoising and JPEG compression artifact reduction
- self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
-
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- @torch.jit.ignore
- def no_weight_decay(self):
- return {'absolute_pos_embed'}
-
- @torch.jit.ignore
- def no_weight_decay_keywords(self):
- return {'relative_position_bias_table'}
-
- def check_image_size(self, x):
- _, _, h, w = x.size()
- mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
- mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
- x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
- return x
-
- def forward_features(self, x):
- x_size = (x.shape[2], x.shape[3])
- x = self.patch_embed(x)
- if self.ape:
- x = x + self.absolute_pos_embed
- x = self.pos_drop(x)
-
- for layer in self.layers:
- x = layer(x, x_size)
-
- x = self.norm(x) # B L C
- x = self.patch_unembed(x, x_size)
-
- return x
-
- def forward(self, x):
- H, W = x.shape[2:]
- x = self.check_image_size(x)
-
- self.mean = self.mean.type_as(x)
- x = (x - self.mean) * self.img_range
-
- if self.upsampler == 'pixelshuffle':
- # for classical SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.conv_last(self.upsample(x))
- elif self.upsampler == 'pixelshuffledirect':
- # for lightweight SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.upsample(x)
- elif self.upsampler == 'nearest+conv':
- # for real-world SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
- if self.upscale == 4:
- x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
- x = self.conv_last(self.lrelu(self.conv_hr(x)))
- else:
- # for image denoising and JPEG compression artifact reduction
- x_first = self.conv_first(x)
- res = self.conv_after_body(self.forward_features(x_first)) + x_first
- x = x + self.conv_last(res)
-
- x = x / self.img_range + self.mean
-
- return x[:, :, :H*self.upscale, :W*self.upscale]
-
- def flops(self):
- flops = 0
- H, W = self.patches_resolution
- flops += H * W * 3 * self.embed_dim * 9
- flops += self.patch_embed.flops()
- for layer in self.layers:
- flops += layer.flops()
- flops += H * W * 3 * self.embed_dim * self.embed_dim
- flops += self.upsample.flops()
- return flops
-
-
-if __name__ == '__main__':
- upscale = 4
- window_size = 8
- height = (1024 // upscale // window_size + 1) * window_size
- width = (720 // upscale // window_size + 1) * window_size
- model = SwinIR(upscale=2, img_size=(height, width),
- window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
- embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
- print(model)
- print(height, width, model.flops() / 1e9)
-
- x = torch.randn((1, 3, height, width))
- x = model(x)
- print(x.shape)
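
Editor's note: both deleted architecture files define the same window_partition / window_reverse helpers; with loading now routed through spandrel (expected_architecture="SwinIR" earlier in this diff), the in-tree copies are redundant. For reference, the round-trip invariant those helpers rely on can be checked in isolation with the snippet below, copied from the deleted code with a small usage example appended.

import torch

def window_partition(x, window_size):
    # (B, H, W, C) -> (num_windows * B, window_size, window_size, C), as in the deleted file
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)

def window_reverse(windows, window_size, H, W):
    # exact inverse of window_partition
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)

x = torch.randn(2, 16, 16, 8)  # H and W divisible by the window size
windows = window_partition(x, 8)
assert torch.equal(window_reverse(windows, 8, 16, 16), x)
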
diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py
deleted file mode 100644
index dad22cca..00000000
--- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py
+++ /dev/null
@@ -1,1017 +0,0 @@
-# -----------------------------------------------------------------------------------
-# Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration, https://arxiv.org/abs/
-# Written by Conde and Choi et al.
-# -----------------------------------------------------------------------------------
-
-import math
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
-
-class Mlp(nn.Module):
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-def window_partition(x, window_size):
- """
- Args:
- x: (B, H, W, C)
- window_size (int): window size
- Returns:
- windows: (num_windows*B, window_size, window_size, C)
- """
- B, H, W, C = x.shape
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
- return windows
-
-
-def window_reverse(windows, window_size, H, W):
- """
- Args:
- windows: (num_windows*B, window_size, window_size, C)
- window_size (int): Window size
- H (int): Height of image
- W (int): Width of image
- Returns:
- x: (B, H, W, C)
- """
- B = int(windows.shape[0] / (H * W / window_size / window_size))
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
- return x
-
-class WindowAttention(nn.Module):
- r""" Window based multi-head self attention (W-MSA) module with relative position bias.
- It supports both of shifted and non-shifted window.
- Args:
- dim (int): Number of input channels.
- window_size (tuple[int]): The height and width of the window.
- num_heads (int): Number of attention heads.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
- proj_drop (float, optional): Dropout ratio of output. Default: 0.0
- pretrained_window_size (tuple[int]): The height and width of the window in pre-training.
- """
-
- def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.,
- pretrained_window_size=(0, 0)):
-
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.pretrained_window_size = pretrained_window_size
- self.num_heads = num_heads
-
- self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True)
-
- # mlp to generate continuous relative position bias
- self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True),
- nn.ReLU(inplace=True),
- nn.Linear(512, num_heads, bias=False))
-
- # get relative_coords_table
- relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
- relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
- relative_coords_table = torch.stack(
- torch.meshgrid([relative_coords_h,
- relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2
- if pretrained_window_size[0] > 0:
- relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1)
- relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1)
- else:
- relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)
- relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)
- relative_coords_table *= 8 # normalize to -8, 8
- relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
- torch.abs(relative_coords_table) + 1.0) / np.log2(8)
-
- self.register_buffer("relative_coords_table", relative_coords_table)
-
- # get pair-wise relative position index for each token inside the window
- coords_h = torch.arange(self.window_size[0])
- coords_w = torch.arange(self.window_size[1])
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
- relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
- relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
- relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
- relative_coords[:, :, 1] += self.window_size[1] - 1
- relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
- relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
- self.register_buffer("relative_position_index", relative_position_index)
-
- self.qkv = nn.Linear(dim, dim * 3, bias=False)
- if qkv_bias:
- self.q_bias = nn.Parameter(torch.zeros(dim))
- self.v_bias = nn.Parameter(torch.zeros(dim))
- else:
- self.q_bias = None
- self.v_bias = None
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x, mask=None):
- """
- Args:
- x: input features with shape of (num_windows*B, N, C)
- mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
- """
- B_, N, C = x.shape
- qkv_bias = None
- if self.q_bias is not None:
- qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
- qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
- qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
-
- # cosine attention
- attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
- logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01)).to(self.logit_scale.device)).exp()
- attn = attn * logit_scale
-
- relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
- relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
- self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
- relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
- relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
- attn = attn + relative_position_bias.unsqueeze(0)
-
- if mask is not None:
- nW = mask.shape[0]
- attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
- attn = attn.view(-1, self.num_heads, N, N)
- attn = self.softmax(attn)
- else:
- attn = self.softmax(attn)
-
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
- def extra_repr(self) -> str:
- return f'dim={self.dim}, window_size={self.window_size}, ' \
- f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}'
-
- def flops(self, N):
- # calculate flops for 1 window with token length of N
- flops = 0
- # qkv = self.qkv(x)
- flops += N * self.dim * 3 * self.dim
- # attn = (q @ k.transpose(-2, -1))
- flops += self.num_heads * N * (self.dim // self.num_heads) * N
- # x = (attn @ v)
- flops += self.num_heads * N * N * (self.dim // self.num_heads)
- # x = self.proj(x)
- flops += N * self.dim * self.dim
- return flops
-
-class SwinTransformerBlock(nn.Module):
- r""" Swin Transformer Block.
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- num_heads (int): Number of attention heads.
- window_size (int): Window size.
- shift_size (int): Shift size for SW-MSA.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- pretrained_window_size (int): Window size in pre-training.
- """
-
- def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
- mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
- act_layer=nn.GELU, norm_layer=nn.LayerNorm, pretrained_window_size=0):
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
- if min(self.input_resolution) <= self.window_size:
- # if window size is larger than input resolution, we don't partition windows
- self.shift_size = 0
- self.window_size = min(self.input_resolution)
- assert 0 <= self.shift_size < self.window_size, "shift_size must be in 0-window_size"
-
- self.norm1 = norm_layer(dim)
- self.attn = WindowAttention(
- dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
- qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
- pretrained_window_size=to_2tuple(pretrained_window_size))
-
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- if self.shift_size > 0:
- attn_mask = self.calculate_mask(self.input_resolution)
- else:
- attn_mask = None
-
- self.register_buffer("attn_mask", attn_mask)
-
- def calculate_mask(self, x_size):
- # calculate attention mask for SW-MSA
- H, W = x_size
- img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
- h_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- w_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- cnt = 0
- for h in h_slices:
- for w in w_slices:
- img_mask[:, h, w, :] = cnt
- cnt += 1
-
- mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
-
- return attn_mask
-
- def forward(self, x, x_size):
- H, W = x_size
- B, L, C = x.shape
- #assert L == H * W, "input feature has wrong size"
-
- shortcut = x
- x = x.view(B, H, W, C)
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- else:
- shifted_x = x
-
- # partition windows
- x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
-
- # W-MSA/SW-MSA (to be compatible with testing on images whose shapes are multiples of the window size)
- if self.input_resolution == x_size:
- attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
- else:
- attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
- shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
-
- # reverse cyclic shift
- if self.shift_size > 0:
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
- else:
- x = shifted_x
- x = x.view(B, H * W, C)
- x = shortcut + self.drop_path(self.norm1(x))
-
- # FFN
- x = x + self.drop_path(self.norm2(self.mlp(x)))
-
- return x
-
- def extra_repr(self) -> str:
- return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
- f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
-
- def flops(self):
- flops = 0
- H, W = self.input_resolution
- # norm1
- flops += self.dim * H * W
- # W-MSA/SW-MSA
- nW = H * W / self.window_size / self.window_size
- flops += nW * self.attn.flops(self.window_size * self.window_size)
- # mlp
- flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
- # norm2
- flops += self.dim * H * W
- return flops
-
-class PatchMerging(nn.Module):
- r""" Patch Merging Layer.
- Args:
- input_resolution (tuple[int]): Resolution of input feature.
- dim (int): Number of input channels.
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
- super().__init__()
- self.input_resolution = input_resolution
- self.dim = dim
- self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
- self.norm = norm_layer(2 * dim)
-
- def forward(self, x):
- """
- x: B, H*W, C
- """
- H, W = self.input_resolution
- B, L, C = x.shape
- assert L == H * W, "input feature has wrong size"
- assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
-
- x = x.view(B, H, W, C)
-
- x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
- x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
- x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
- x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
- x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
- x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
-
- x = self.reduction(x)
- x = self.norm(x)
-
- return x
-
- def extra_repr(self) -> str:
- return f"input_resolution={self.input_resolution}, dim={self.dim}"
-
- def flops(self):
- H, W = self.input_resolution
- flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
- flops += H * W * self.dim // 2
- return flops
-
-class BasicLayer(nn.Module):
- """ A basic Swin Transformer layer for one stage.
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- pretrained_window_size (int): Local window size in pre-training.
- """
-
- def __init__(self, dim, input_resolution, depth, num_heads, window_size,
- mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
- drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
- pretrained_window_size=0):
-
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.depth = depth
- self.use_checkpoint = use_checkpoint
-
- # build blocks
- self.blocks = nn.ModuleList([
- SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
- num_heads=num_heads, window_size=window_size,
- shift_size=0 if (i % 2 == 0) else window_size // 2,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- drop=drop, attn_drop=attn_drop,
- drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
- norm_layer=norm_layer,
- pretrained_window_size=pretrained_window_size)
- for i in range(depth)])
-
- # patch merging layer
- if downsample is not None:
- self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
- else:
- self.downsample = None
-
- def forward(self, x, x_size):
- for blk in self.blocks:
- if self.use_checkpoint:
- x = checkpoint.checkpoint(blk, x, x_size)
- else:
- x = blk(x, x_size)
- if self.downsample is not None:
- x = self.downsample(x)
- return x
-
- def extra_repr(self) -> str:
- return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
-
- def flops(self):
- flops = 0
- for blk in self.blocks:
- flops += blk.flops()
- if self.downsample is not None:
- flops += self.downsample.flops()
- return flops
-
- def _init_respostnorm(self):
- for blk in self.blocks:
- nn.init.constant_(blk.norm1.bias, 0)
- nn.init.constant_(blk.norm1.weight, 0)
- nn.init.constant_(blk.norm2.bias, 0)
- nn.init.constant_(blk.norm2.weight, 0)
-
-class PatchEmbed(nn.Module):
- r""" Image to Patch Embedding
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
- if norm_layer is not None:
- self.norm = norm_layer(embed_dim)
- else:
- self.norm = None
-
- def forward(self, x):
- B, C, H, W = x.shape
- # FIXME look at relaxing size constraints
- # assert H == self.img_size[0] and W == self.img_size[1],
- # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
- x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
- if self.norm is not None:
- x = self.norm(x)
- return x
-
- def flops(self):
- Ho, Wo = self.patches_resolution
- flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
- if self.norm is not None:
- flops += Ho * Wo * self.embed_dim
- return flops
-
-class RSTB(nn.Module):
- """Residual Swin Transformer Block (RSTB).
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- img_size: Input image size.
- patch_size: Patch size.
- resi_connection: The convolutional block before residual connection.
- """
-
- def __init__(self, dim, input_resolution, depth, num_heads, window_size,
- mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
- drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
- img_size=224, patch_size=4, resi_connection='1conv'):
- super(RSTB, self).__init__()
-
- self.dim = dim
- self.input_resolution = input_resolution
-
- self.residual_group = BasicLayer(dim=dim,
- input_resolution=input_resolution,
- depth=depth,
- num_heads=num_heads,
- window_size=window_size,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- drop=drop, attn_drop=attn_drop,
- drop_path=drop_path,
- norm_layer=norm_layer,
- downsample=downsample,
- use_checkpoint=use_checkpoint)
-
- if resi_connection == '1conv':
- self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
- elif resi_connection == '3conv':
- # to save parameters and memory
- self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(dim // 4, dim, 3, 1, 1))
-
- self.patch_embed = PatchEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=dim, embed_dim=dim,
- norm_layer=None)
-
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=dim, embed_dim=dim,
- norm_layer=None)
-
- def forward(self, x, x_size):
- return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
-
- def flops(self):
- flops = 0
- flops += self.residual_group.flops()
- H, W = self.input_resolution
- flops += H * W * self.dim * self.dim * 9
- flops += self.patch_embed.flops()
- flops += self.patch_unembed.flops()
-
- return flops
-
-class PatchUnEmbed(nn.Module):
- r""" Image to Patch Unembedding
-
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- def forward(self, x, x_size):
- B, HW, C = x.shape
- x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C
- return x
-
- def flops(self):
- flops = 0
- return flops
-
-
-class Upsample(nn.Sequential):
- """Upsample module.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
- """
-
- def __init__(self, scale, num_feat):
- m = []
- if (scale & (scale - 1)) == 0: # scale = 2^n
- for _ in range(int(math.log(scale, 2))):
- m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(2))
- elif scale == 3:
- m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(3))
- else:
- raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
- super(Upsample, self).__init__(*m)
-
-class Upsample_hf(nn.Sequential):
- """Upsample module.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
- """
-
- def __init__(self, scale, num_feat):
- m = []
- if (scale & (scale - 1)) == 0: # scale = 2^n
- for _ in range(int(math.log(scale, 2))):
- m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(2))
- elif scale == 3:
- m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(3))
- else:
- raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
- super(Upsample_hf, self).__init__(*m)
-
-
-class UpsampleOneStep(nn.Sequential):
- """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
- Used in lightweight SR to save parameters.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
-
- """
-
- def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
- self.num_feat = num_feat
- self.input_resolution = input_resolution
- m = []
- m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
- m.append(nn.PixelShuffle(scale))
- super(UpsampleOneStep, self).__init__(*m)
-
- def flops(self):
- H, W = self.input_resolution
- flops = H * W * self.num_feat * 3 * 9
- return flops
-
-
-
-class Swin2SR(nn.Module):
- r""" Swin2SR
- A PyTorch impl of : `Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration`.
-
- Args:
- img_size (int | tuple(int)): Input image size. Default 64
- patch_size (int | tuple(int)): Patch size. Default: 1
- in_chans (int): Number of input image channels. Default: 3
- embed_dim (int): Patch embedding dimension. Default: 96
- depths (tuple(int)): Depth of each Swin Transformer layer.
- num_heads (tuple(int)): Number of attention heads in different layers.
- window_size (int): Window size. Default: 7
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
- qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
- drop_rate (float): Dropout rate. Default: 0
- attn_drop_rate (float): Attention dropout rate. Default: 0
- drop_path_rate (float): Stochastic depth rate. Default: 0.1
- norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
- ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
- patch_norm (bool): If True, add normalization after patch embedding. Default: True
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
- upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
- img_range: Image range. 1. or 255.
- upsampler: The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
- resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
- """
-
- def __init__(self, img_size=64, patch_size=1, in_chans=3,
- embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
- window_size=7, mlp_ratio=4., qkv_bias=True,
- drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
- norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
- use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
- **kwargs):
- super(Swin2SR, self).__init__()
- num_in_ch = in_chans
- num_out_ch = in_chans
- num_feat = 64
- self.img_range = img_range
- if in_chans == 3:
- rgb_mean = (0.4488, 0.4371, 0.4040)
- self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
- else:
- self.mean = torch.zeros(1, 1, 1, 1)
- self.upscale = upscale
- self.upsampler = upsampler
- self.window_size = window_size
-
- #####################################################################################################
- ################################### 1, shallow feature extraction ###################################
- self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
-
- #####################################################################################################
- ################################### 2, deep feature extraction ######################################
- self.num_layers = len(depths)
- self.embed_dim = embed_dim
- self.ape = ape
- self.patch_norm = patch_norm
- self.num_features = embed_dim
- self.mlp_ratio = mlp_ratio
-
- # split image into non-overlapping patches
- self.patch_embed = PatchEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None)
- num_patches = self.patch_embed.num_patches
- patches_resolution = self.patch_embed.patches_resolution
- self.patches_resolution = patches_resolution
-
- # merge non-overlapping patches into image
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None)
-
- # absolute position embedding
- if self.ape:
- self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
- trunc_normal_(self.absolute_pos_embed, std=.02)
-
- self.pos_drop = nn.Dropout(p=drop_rate)
-
- # stochastic depth
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
-
- # build Residual Swin Transformer blocks (RSTB)
- self.layers = nn.ModuleList()
- for i_layer in range(self.num_layers):
- layer = RSTB(dim=embed_dim,
- input_resolution=(patches_resolution[0],
- patches_resolution[1]),
- depth=depths[i_layer],
- num_heads=num_heads[i_layer],
- window_size=window_size,
- mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
- drop=drop_rate, attn_drop=attn_drop_rate,
- drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
- norm_layer=norm_layer,
- downsample=None,
- use_checkpoint=use_checkpoint,
- img_size=img_size,
- patch_size=patch_size,
- resi_connection=resi_connection
-
- )
- self.layers.append(layer)
-
- if self.upsampler == 'pixelshuffle_hf':
- self.layers_hf = nn.ModuleList()
- for i_layer in range(self.num_layers):
- layer = RSTB(dim=embed_dim,
- input_resolution=(patches_resolution[0],
- patches_resolution[1]),
- depth=depths[i_layer],
- num_heads=num_heads[i_layer],
- window_size=window_size,
- mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
- drop=drop_rate, attn_drop=attn_drop_rate,
- drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
- norm_layer=norm_layer,
- downsample=None,
- use_checkpoint=use_checkpoint,
- img_size=img_size,
- patch_size=patch_size,
- resi_connection=resi_connection
-
- )
- self.layers_hf.append(layer)
-
- self.norm = norm_layer(self.num_features)
-
- # build the last conv layer in deep feature extraction
- if resi_connection == '1conv':
- self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
- elif resi_connection == '3conv':
- # to save parameters and memory
- self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
-
- #####################################################################################################
- ################################ 3, high quality image reconstruction ################################
- if self.upsampler == 'pixelshuffle':
- # for classical SR
- self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
- self.upsample = Upsample(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- elif self.upsampler == 'pixelshuffle_aux':
- self.conv_bicubic = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
- self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- self.conv_after_aux = nn.Sequential(
- nn.Conv2d(3, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
- self.upsample = Upsample(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
- elif self.upsampler == 'pixelshuffle_hf':
- self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
- self.upsample = Upsample(upscale, num_feat)
- self.upsample_hf = Upsample_hf(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- self.conv_first_hf = nn.Sequential(nn.Conv2d(num_feat, embed_dim, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
- self.conv_after_body_hf = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
- self.conv_before_upsample_hf = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
- self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
- elif self.upsampler == 'pixelshuffledirect':
- # for lightweight SR (to save parameters)
- self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
- (patches_resolution[0], patches_resolution[1]))
- elif self.upsampler == 'nearest+conv':
- # for real-world SR (less artifacts)
- assert self.upscale == 4, 'only support x4 now.'
- self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
- self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
- else:
- # for image denoising and JPEG compression artifact reduction
- self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
-
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- @torch.jit.ignore
- def no_weight_decay(self):
- return {'absolute_pos_embed'}
-
- @torch.jit.ignore
- def no_weight_decay_keywords(self):
- return {'relative_position_bias_table'}
-
- def check_image_size(self, x):
- _, _, h, w = x.size()
- mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
- mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
- x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
- return x
-
- def forward_features(self, x):
- x_size = (x.shape[2], x.shape[3])
- x = self.patch_embed(x)
- if self.ape:
- x = x + self.absolute_pos_embed
- x = self.pos_drop(x)
-
- for layer in self.layers:
- x = layer(x, x_size)
-
- x = self.norm(x) # B L C
- x = self.patch_unembed(x, x_size)
-
- return x
-
- def forward_features_hf(self, x):
- x_size = (x.shape[2], x.shape[3])
- x = self.patch_embed(x)
- if self.ape:
- x = x + self.absolute_pos_embed
- x = self.pos_drop(x)
-
- for layer in self.layers_hf:
- x = layer(x, x_size)
-
- x = self.norm(x) # B L C
- x = self.patch_unembed(x, x_size)
-
- return x
-
- def forward(self, x):
- H, W = x.shape[2:]
- x = self.check_image_size(x)
-
- self.mean = self.mean.type_as(x)
- x = (x - self.mean) * self.img_range
-
- if self.upsampler == 'pixelshuffle':
- # for classical SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.conv_last(self.upsample(x))
- elif self.upsampler == 'pixelshuffle_aux':
- bicubic = F.interpolate(x, size=(H * self.upscale, W * self.upscale), mode='bicubic', align_corners=False)
- bicubic = self.conv_bicubic(bicubic)
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- aux = self.conv_aux(x) # b, 3, LR_H, LR_W
- x = self.conv_after_aux(aux)
- x = self.upsample(x)[:, :, :H * self.upscale, :W * self.upscale] + bicubic[:, :, :H * self.upscale, :W * self.upscale]
- x = self.conv_last(x)
- aux = aux / self.img_range + self.mean
- elif self.upsampler == 'pixelshuffle_hf':
- # for classical SR with HF
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x_before = self.conv_before_upsample(x)
- x_out = self.conv_last(self.upsample(x_before))
-
- x_hf = self.conv_first_hf(x_before)
- x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf
- x_hf = self.conv_before_upsample_hf(x_hf)
- x_hf = self.conv_last_hf(self.upsample_hf(x_hf))
- x = x_out + x_hf
- x_hf = x_hf / self.img_range + self.mean
-
- elif self.upsampler == 'pixelshuffledirect':
- # for lightweight SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.upsample(x)
- elif self.upsampler == 'nearest+conv':
- # for real-world SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
- x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
- x = self.conv_last(self.lrelu(self.conv_hr(x)))
- else:
- # for image denoising and JPEG compression artifact reduction
- x_first = self.conv_first(x)
- res = self.conv_after_body(self.forward_features(x_first)) + x_first
- x = x + self.conv_last(res)
-
- x = x / self.img_range + self.mean
- if self.upsampler == "pixelshuffle_aux":
- return x[:, :, :H*self.upscale, :W*self.upscale], aux
-
- elif self.upsampler == "pixelshuffle_hf":
- x_out = x_out / self.img_range + self.mean
- return x_out[:, :, :H*self.upscale, :W*self.upscale], x[:, :, :H*self.upscale, :W*self.upscale], x_hf[:, :, :H*self.upscale, :W*self.upscale]
-
- else:
- return x[:, :, :H*self.upscale, :W*self.upscale]
-
- def flops(self):
- flops = 0
- H, W = self.patches_resolution
- flops += H * W * 3 * self.embed_dim * 9
- flops += self.patch_embed.flops()
- for layer in self.layers:
- flops += layer.flops()
- flops += H * W * 3 * self.embed_dim * self.embed_dim
- flops += self.upsample.flops()
- return flops
-
-
-if __name__ == '__main__':
- upscale = 4
- window_size = 8
- height = (1024 // upscale // window_size + 1) * window_size
- width = (720 // upscale // window_size + 1) * window_size
- model = Swin2SR(upscale=2, img_size=(height, width),
- window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
- embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
- print(model)
- print(height, width, model.flops() / 1e9)
-
- x = torch.randn((1, 3, height, width))
- x = model(x)
- print(x.shape)
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 30199dcd..45c7600a 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -12,8 +12,22 @@ onUiLoaded(async() => {
"Sketch": elementIDs.sketch
};
+
// Helper functions
// Get active tab
+
+ /**
+ * Waits for an element to be present in the DOM.
+ */
+ const waitForElement = (id) => new Promise(resolve => {
+ const checkForElement = () => {
+ const element = document.querySelector(id);
+ if (element) return resolve(element);
+ setTimeout(checkForElement, 100);
+ };
+ checkForElement();
+ });
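+
+ // Usage note: the integration code further down awaits UI pieces with, for example,
+ //     const el = await waitForElement("#txt2img_controlnet");
+ // (the selector is illustrative only). The promise never rejects; it polls every 100 ms
+ // until the element exists, so callers should only await IDs that will eventually appear.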
+
function getActiveTab(elements, all = false) {
const tabs = elements.img2imgTabs.querySelectorAll("button");
@@ -34,7 +48,7 @@ onUiLoaded(async() => {
// Wait until opts loaded
async function waitForOpts() {
- for (;;) {
+ for (; ;) {
if (window.opts && Object.keys(window.opts).length) {
return window.opts;
}
@@ -42,6 +56,11 @@ onUiLoaded(async() => {
}
}
+ // Detect whether the element has a horizontal scroll bar
+ function hasHorizontalScrollbar(element) {
+ return element.scrollWidth > element.clientWidth;
+ }
+
// Function for defining the "Ctrl", "Shift" and "Alt" keys
function isModifierKey(event, key) {
switch (key) {
@@ -201,7 +220,8 @@ onUiLoaded(async() => {
canvas_hotkey_overlap: "KeyO",
canvas_disabled_functions: [],
canvas_show_tooltip: true,
- canvas_blur_prompt: false
+ canvas_auto_expand: true,
+ canvas_blur_prompt: false,
};
const functionMap = {
@@ -249,7 +269,7 @@ onUiLoaded(async() => {
input?.addEventListener("input", () => restoreImgRedMask(elements));
}
- function applyZoomAndPan(elemId) {
+ function applyZoomAndPan(elemId, isExtension = true) {
const targetElement = gradioApp().querySelector(elemId);
if (!targetElement) {
@@ -361,6 +381,12 @@ onUiLoaded(async() => {
panY: 0
};
+ if (isExtension) {
+ targetElement.style.overflow = "hidden";
+ }
+
+ targetElement.isZoomed = false;
+
fixCanvas();
targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
@@ -371,8 +397,27 @@ onUiLoaded(async() => {
toggleOverlap("off");
fullScreenMode = false;
+ const closeBtn = targetElement.querySelector("button[aria-label='Remove Image']");
+ if (closeBtn) {
+ closeBtn.addEventListener("click", resetZoom);
+ }
+
+ if (canvas && isExtension) {
+ const parentElement = targetElement.closest('[id^="component-"]');
+ if (
+ canvas &&
+ parseFloat(canvas.style.width) > parentElement.offsetWidth &&
+ parseFloat(targetElement.style.width) > parentElement.offsetWidth
+ ) {
+ fitToElement();
+ return;
+ }
+
+ }
+
if (
canvas &&
+ !isExtension &&
parseFloat(canvas.style.width) > 865 &&
parseFloat(targetElement.style.width) > 865
) {
@@ -381,9 +426,6 @@ onUiLoaded(async() => {
}
targetElement.style.width = "";
- if (canvas) {
- targetElement.style.height = canvas.style.height;
- }
}
// Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
@@ -439,7 +481,7 @@ onUiLoaded(async() => {
// Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
function updateZoom(newZoomLevel, mouseX, mouseY) {
- newZoomLevel = Math.max(0.5, Math.min(newZoomLevel, 15));
+ newZoomLevel = Math.max(0.1, Math.min(newZoomLevel, 15));
elemData[elemId].panX +=
mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel;
@@ -450,6 +492,10 @@ onUiLoaded(async() => {
targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
toggleOverlap("on");
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
+ }
+
return newZoomLevel;
}
@@ -472,10 +518,12 @@ onUiLoaded(async() => {
fullScreenMode = false;
elemData[elemId].zoomLevel = updateZoom(
elemData[elemId].zoomLevel +
- (operation === "+" ? delta : -delta),
+ (operation === "+" ? delta : -delta),
zoomPosX - targetElement.getBoundingClientRect().left,
zoomPosY - targetElement.getBoundingClientRect().top
);
+
+ targetElement.isZoomed = true;
}
}
@@ -489,10 +537,19 @@ onUiLoaded(async() => {
//Reset Zoom
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+ let parentElement;
+
+ if (isExtension) {
+ parentElement = targetElement.closest('[id^="component-"]');
+ } else {
+ parentElement = targetElement.parentElement;
+ }
+
+
// Get element and screen dimensions
const elementWidth = targetElement.offsetWidth;
const elementHeight = targetElement.offsetHeight;
- const parentElement = targetElement.parentElement;
+
const screenWidth = parentElement.clientWidth;
const screenHeight = parentElement.clientHeight;
@@ -545,8 +602,12 @@ onUiLoaded(async() => {
if (!canvas) return;
- if (canvas.offsetWidth > 862) {
- targetElement.style.width = canvas.offsetWidth + "px";
+ if (canvas.offsetWidth > 862 || isExtension) {
+ targetElement.style.width = (canvas.offsetWidth + 2) + "px";
+ }
+
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
}
if (fullScreenMode) {
@@ -648,8 +709,48 @@ onUiLoaded(async() => {
mouseY = e.offsetY;
}
+ // Simulates fitting a long image onto the screen:
+ // if the image has a horizontal scroll bar, we briefly go fullscreen to reveal it, then shrink it to fit the element.
+ // The image is hidden while this happens and shown to the user once it is ready.
+
+ targetElement.isExpanded = false;
+ function autoExpand() {
+ const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
+ if (canvas) {
+ if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) {
+ targetElement.style.visibility = "hidden";
+ setTimeout(() => {
+ fitToScreen();
+ resetZoom();
+ targetElement.style.visibility = "visible";
+ targetElement.isExpanded = true;
+ }, 10);
+ }
+ }
+ }
+
targetElement.addEventListener("mousemove", getMousePosition);
+ //observers
+ // Creating an observer with a callback function to handle DOM changes
+ const observer = new MutationObserver((mutationsList, observer) => {
+ for (let mutation of mutationsList) {
+ // If the canvas's style attribute changed; from observation this only happens when the picture changes
+ if (mutation.type === 'attributes' && mutation.attributeName === 'style' &&
+ mutation.target.tagName.toLowerCase() === 'canvas') {
+ targetElement.isExpanded = false;
+ setTimeout(resetZoom, 10);
+ }
+ }
+ });
+
+ // Apply auto expand if enabled
+ if (hotkeysConfig.canvas_auto_expand) {
+ targetElement.addEventListener("mousemove", autoExpand);
+ // Set up an observer to track attribute changes
+ observer.observe(targetElement, {attributes: true, childList: true, subtree: true});
+ }
+
// Handle events only inside the targetElement
let isKeyDownHandlerAttached = false;
@@ -754,6 +855,11 @@ onUiLoaded(async() => {
if (isMoving && elemId === activeElement) {
updatePanPosition(e.movementX, e.movementY);
targetElement.style.pointerEvents = "none";
+
+ if (isExtension) {
+ targetElement.style.overflow = "visible";
+ }
+
} else {
targetElement.style.pointerEvents = "auto";
}
@@ -764,13 +870,93 @@ onUiLoaded(async() => {
isMoving = false;
};
+ // Extension-only check: reset zoom when the element no longer fits inside its parent container
+ function checkForOutBox() {
+ const parentElement = targetElement.closest('[id^="component-"]');
+ if (parentElement.offsetWidth < targetElement.offsetWidth && !targetElement.isExpanded) {
+ resetZoom();
+ targetElement.isExpanded = true;
+ }
+
+ if (parentElement.offsetWidth < targetElement.offsetWidth && elemData[elemId].zoomLevel == 1) {
+ resetZoom();
+ }
+
+ if (parentElement.offsetWidth < targetElement.offsetWidth && targetElement.offsetWidth * elemData[elemId].zoomLevel > parentElement.offsetWidth && elemData[elemId].zoomLevel < 1 && !targetElement.isZoomed) {
+ resetZoom();
+ }
+ }
+
+ if (isExtension) {
+ targetElement.addEventListener("mousemove", checkForOutBox);
+ }
+
+
+ window.addEventListener('resize', (e) => {
+ resetZoom();
+
+ if (isExtension) {
+ targetElement.isExpanded = false;
+ targetElement.isZoomed = false;
+ }
+ });
+
gradioApp().addEventListener("mousemove", handleMoveByKey);
+
+
}
- applyZoomAndPan(elementIDs.sketch);
- applyZoomAndPan(elementIDs.inpaint);
- applyZoomAndPan(elementIDs.inpaintSketch);
+ applyZoomAndPan(elementIDs.sketch, false);
+ applyZoomAndPan(elementIDs.inpaint, false);
+ applyZoomAndPan(elementIDs.inpaintSketch, false);
// Make the function global so that other extensions can take advantage of this solution
- window.applyZoomAndPan = applyZoomAndPan;
+ const applyZoomAndPanIntegration = async(id, elementIDs) => {
+ const mainEl = document.querySelector(id);
+ if (id.toLocaleLowerCase() === "none") {
+ for (const elementID of elementIDs) {
+ const el = await waitForElement(elementID);
+ if (!el) break;
+ applyZoomAndPan(elementID);
+ }
+ return;
+ }
+
+ if (!mainEl) return;
+ mainEl.addEventListener("click", async() => {
+ for (const elementID of elementIDs) {
+ const el = await waitForElement(elementID);
+ if (!el) break;
+ applyZoomAndPan(elementID);
+ }
+ }, {once: true});
+ };
+
+ window.applyZoomAndPan = applyZoomAndPan; // Takes a single element ID as its argument, for example applyZoomAndPan("#txt2img_controlnet_ControlNet_input_image")
+
+ window.applyZoomAndPanIntegration = applyZoomAndPanIntegration; // for any extension
+
+ /*
+ The function `applyZoomAndPanIntegration` takes two arguments:
+
+ 1. `id`: A string identifier for the element to which zoom and pan functionality will be applied on click.
+ If the `id` value is "none", the functionality will be applied to all elements specified in the second argument without a click event.
+
+ 2. `elementIDs`: An array of string identifiers for elements. Zoom and pan functionality will be applied to each of these elements on click of the element specified by the first argument.
+ If "none" is specified in the first argument, the functionality will be applied to each of these elements without a click event.
+
+ Example usage:
+ applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
+ In this example, zoom and pan functionality will be applied to the element with the identifier "txt2img_controlnet_ControlNet_input_image" upon clicking the element with the identifier "txt2img_controlnet".
+ */
+
+ // More examples
+ // Add integration with ControlNet txt2img One TAB
+ // applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
+
+ // Add integration with ControlNet txt2img Tabs
+ // applyZoomAndPanIntegration("#txt2img_controlnet",Array.from({ length: 10 }, (_, i) => `#txt2img_controlnet_ControlNet-${i}_input_image`));
+
+ // Add integration with Inpaint Anything
+ // applyZoomAndPanIntegration("None", ["#ia_sam_image", "#ia_sel_mask"]);
});
diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
index 380176ce..2d8d2d1c 100644
--- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
+++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
@@ -9,6 +9,7 @@ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"),
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
+ "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
"canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
}))
diff --git a/extensions-builtin/canvas-zoom-and-pan/style.css b/extensions-builtin/canvas-zoom-and-pan/style.css
index 6bcc9570..5d8054e6 100644
--- a/extensions-builtin/canvas-zoom-and-pan/style.css
+++ b/extensions-builtin/canvas-zoom-and-pan/style.css
@@ -61,3 +61,6 @@
to {opacity: 1;}
}
+.styler {
+ overflow:inherit !important;
+} \ No newline at end of file
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index a05e10d8..8aa901fd 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -1,5 +1,7 @@
+import math
+
import gradio as gr
-from modules import scripts, shared, ui_components, ui_settings
+from modules import scripts, shared, ui_components, ui_settings, infotext
from modules.ui_components import FormColumn
@@ -19,18 +21,39 @@ class ExtraOptionsSection(scripts.Script):
def ui(self, is_img2img):
self.comps = []
self.setting_names = []
+ self.infotext_fields = []
+ extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img
+ elem_id_tabname = "extra_options_" + ("img2img" if is_img2img else "txt2img")
+
+ mapping = {k: v for v, k in infotext.infotext_to_setting_name_mapping}
with gr.Blocks() as interface:
- with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group(), gr.Row():
- for setting_name in shared.opts.extra_options:
- with FormColumn():
- comp = ui_settings.create_setting_component(setting_name)
+ with gr.Accordion("Options", open=False, elem_id=elem_id_tabname) if shared.opts.extra_options_accordion and extra_options else gr.Group(elem_id=elem_id_tabname):
+
+ row_count = math.ceil(len(extra_options) / shared.opts.extra_options_cols)
+
+ for row in range(row_count):
+ with gr.Row():
+ for col in range(shared.opts.extra_options_cols):
+ index = row * shared.opts.extra_options_cols + col
+ if index >= len(extra_options):
+ break
+
+ setting_name = extra_options[index]
- self.comps.append(comp)
- self.setting_names.append(setting_name)
+ with FormColumn():
+ comp = ui_settings.create_setting_component(setting_name)
+
+ self.comps.append(comp)
+ self.setting_names.append(setting_name)
+
+ setting_infotext_name = mapping.get(setting_name)
+ if setting_infotext_name is not None:
+ self.infotext_fields.append((comp, setting_infotext_name))
def get_settings_values():
- return [ui_settings.get_value_for_setting(key) for key in self.setting_names]
+ res = [ui_settings.get_value_for_setting(key) for key in self.setting_names]
+ return res[0] if len(res) == 1 else res
interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False)
@@ -42,7 +65,14 @@ class ExtraOptionsSection(scripts.Script):
p.override_settings[name] = value
-shared.options_templates.update(shared.options_section(('ui', "User interface"), {
- "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_restart(),
- "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion")
+shared.options_templates.update(shared.options_section(('settings_in_ui', "Settings in UI", "ui"), {
+ "settings_in_ui": shared.OptionHTML("""
+This page allows you to add some settings to the main interface of txt2img and img2img tabs.
+"""),
+ "extra_options_txt2img": shared.OptionInfo([], "Settings for txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(),
+ "extra_options_img2img": shared.OptionInfo([], "Settings for img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(),
+ "extra_options_cols": shared.OptionInfo(1, "Number of columns for added settings", gr.Slider, {"step": 1, "minimum": 1, "maximum": 20}).info("displayed amount will depend on the actual browser window width").needs_reload_ui(),
+ "extra_options_accordion": shared.OptionInfo(False, "Place added settings into an accordion").needs_reload_ui()
}))
+
+
diff --git a/extensions-builtin/hypertile/hypertile.py b/extensions-builtin/hypertile/hypertile.py
new file mode 100644
index 00000000..0f40e2d3
--- /dev/null
+++ b/extensions-builtin/hypertile/hypertile.py
@@ -0,0 +1,351 @@
+"""
+Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE
+Warning: the patch works well only if the input image has a width and height that are multiples of 128
+Original author: @tfernd Github: https://github.com/tfernd/HyperTile
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Callable
+
+from functools import wraps, cache
+
+import math
+import torch.nn as nn
+import random
+
+from einops import rearrange
+
+
+@dataclass
+class HypertileParams:
+ depth = 0
+ layer_name = ""
+ tile_size: int = 0
+ swap_size: int = 0
+ aspect_ratio: float = 1.0
+ forward = None
+ enabled = False
+
+
+
+# TODO add SD-XL layers
+DEPTH_LAYERS = {
+ 0: [
+ # SD 1.5 U-Net (diffusers)
+ "down_blocks.0.attentions.0.transformer_blocks.0.attn1",
+ "down_blocks.0.attentions.1.transformer_blocks.0.attn1",
+ "up_blocks.3.attentions.0.transformer_blocks.0.attn1",
+ "up_blocks.3.attentions.1.transformer_blocks.0.attn1",
+ "up_blocks.3.attentions.2.transformer_blocks.0.attn1",
+ # SD 1.5 U-Net (ldm)
+ "input_blocks.1.1.transformer_blocks.0.attn1",
+ "input_blocks.2.1.transformer_blocks.0.attn1",
+ "output_blocks.9.1.transformer_blocks.0.attn1",
+ "output_blocks.10.1.transformer_blocks.0.attn1",
+ "output_blocks.11.1.transformer_blocks.0.attn1",
+ # SD 1.5 VAE
+ "decoder.mid_block.attentions.0",
+ "decoder.mid.attn_1",
+ ],
+ 1: [
+ # SD 1.5 U-Net (diffusers)
+ "down_blocks.1.attentions.0.transformer_blocks.0.attn1",
+ "down_blocks.1.attentions.1.transformer_blocks.0.attn1",
+ "up_blocks.2.attentions.0.transformer_blocks.0.attn1",
+ "up_blocks.2.attentions.1.transformer_blocks.0.attn1",
+ "up_blocks.2.attentions.2.transformer_blocks.0.attn1",
+ # SD 1.5 U-Net (ldm)
+ "input_blocks.4.1.transformer_blocks.0.attn1",
+ "input_blocks.5.1.transformer_blocks.0.attn1",
+ "output_blocks.6.1.transformer_blocks.0.attn1",
+ "output_blocks.7.1.transformer_blocks.0.attn1",
+ "output_blocks.8.1.transformer_blocks.0.attn1",
+ ],
+ 2: [
+ # SD 1.5 U-Net (diffusers)
+ "down_blocks.2.attentions.0.transformer_blocks.0.attn1",
+ "down_blocks.2.attentions.1.transformer_blocks.0.attn1",
+ "up_blocks.1.attentions.0.transformer_blocks.0.attn1",
+ "up_blocks.1.attentions.1.transformer_blocks.0.attn1",
+ "up_blocks.1.attentions.2.transformer_blocks.0.attn1",
+ # SD 1.5 U-Net (ldm)
+ "input_blocks.7.1.transformer_blocks.0.attn1",
+ "input_blocks.8.1.transformer_blocks.0.attn1",
+ "output_blocks.3.1.transformer_blocks.0.attn1",
+ "output_blocks.4.1.transformer_blocks.0.attn1",
+ "output_blocks.5.1.transformer_blocks.0.attn1",
+ ],
+ 3: [
+ # SD 1.5 U-Net (diffusers)
+ "mid_block.attentions.0.transformer_blocks.0.attn1",
+ # SD 1.5 U-Net (ldm)
+ "middle_block.1.transformer_blocks.0.attn1",
+ ],
+}
+# SD-XL layers, thanks to GitHub user @gel-crabs for the help
+DEPTH_LAYERS_XL = {
+ 0: [
+ # SD 1.5 U-Net (diffusers)
+ "down_blocks.0.attentions.0.transformer_blocks.0.attn1",
+ "down_blocks.0.attentions.1.transformer_blocks.0.attn1",
+ "up_blocks.3.attentions.0.transformer_blocks.0.attn1",
+ "up_blocks.3.attentions.1.transformer_blocks.0.attn1",
+ "up_blocks.3.attentions.2.transformer_blocks.0.attn1",
+ # SD 1.5 U-Net (ldm)
+ "input_blocks.4.1.transformer_blocks.0.attn1",
+ "input_blocks.5.1.transformer_blocks.0.attn1",
+ "output_blocks.3.1.transformer_blocks.0.attn1",
+ "output_blocks.4.1.transformer_blocks.0.attn1",
+ "output_blocks.5.1.transformer_blocks.0.attn1",
+ # SD 1.5 VAE
+ "decoder.mid_block.attentions.0",
+ "decoder.mid.attn_1",
+ ],
+ 1: [
+ # SD 1.5 U-Net (diffusers)
+ #"down_blocks.1.attentions.0.transformer_blocks.0.attn1",
+ #"down_blocks.1.attentions.1.transformer_blocks.0.attn1",
+ #"up_blocks.2.attentions.0.transformer_blocks.0.attn1",
+ #"up_blocks.2.attentions.1.transformer_blocks.0.attn1",
+ #"up_blocks.2.attentions.2.transformer_blocks.0.attn1",
+ # SD 1.5 U-Net (ldm)
+ "input_blocks.4.1.transformer_blocks.1.attn1",
+ "input_blocks.5.1.transformer_blocks.1.attn1",
+ "output_blocks.3.1.transformer_blocks.1.attn1",
+ "output_blocks.4.1.transformer_blocks.1.attn1",
+ "output_blocks.5.1.transformer_blocks.1.attn1",
+ "input_blocks.7.1.transformer_blocks.0.attn1",
+ "input_blocks.8.1.transformer_blocks.0.attn1",
+ "output_blocks.0.1.transformer_blocks.0.attn1",
+ "output_blocks.1.1.transformer_blocks.0.attn1",
+ "output_blocks.2.1.transformer_blocks.0.attn1",
+ "input_blocks.7.1.transformer_blocks.1.attn1",
+ "input_blocks.8.1.transformer_blocks.1.attn1",
+ "output_blocks.0.1.transformer_blocks.1.attn1",
+ "output_blocks.1.1.transformer_blocks.1.attn1",
+ "output_blocks.2.1.transformer_blocks.1.attn1",
+ "input_blocks.7.1.transformer_blocks.2.attn1",
+ "input_blocks.8.1.transformer_blocks.2.attn1",
+ "output_blocks.0.1.transformer_blocks.2.attn1",
+ "output_blocks.1.1.transformer_blocks.2.attn1",
+ "output_blocks.2.1.transformer_blocks.2.attn1",
+ "input_blocks.7.1.transformer_blocks.3.attn1",
+ "input_blocks.8.1.transformer_blocks.3.attn1",
+ "output_blocks.0.1.transformer_blocks.3.attn1",
+ "output_blocks.1.1.transformer_blocks.3.attn1",
+ "output_blocks.2.1.transformer_blocks.3.attn1",
+ "input_blocks.7.1.transformer_blocks.4.attn1",
+ "input_blocks.8.1.transformer_blocks.4.attn1",
+ "output_blocks.0.1.transformer_blocks.4.attn1",
+ "output_blocks.1.1.transformer_blocks.4.attn1",
+ "output_blocks.2.1.transformer_blocks.4.attn1",
+ "input_blocks.7.1.transformer_blocks.5.attn1",
+ "input_blocks.8.1.transformer_blocks.5.attn1",
+ "output_blocks.0.1.transformer_blocks.5.attn1",
+ "output_blocks.1.1.transformer_blocks.5.attn1",
+ "output_blocks.2.1.transformer_blocks.5.attn1",
+ "input_blocks.7.1.transformer_blocks.6.attn1",
+ "input_blocks.8.1.transformer_blocks.6.attn1",
+ "output_blocks.0.1.transformer_blocks.6.attn1",
+ "output_blocks.1.1.transformer_blocks.6.attn1",
+ "output_blocks.2.1.transformer_blocks.6.attn1",
+ "input_blocks.7.1.transformer_blocks.7.attn1",
+ "input_blocks.8.1.transformer_blocks.7.attn1",
+ "output_blocks.0.1.transformer_blocks.7.attn1",
+ "output_blocks.1.1.transformer_blocks.7.attn1",
+ "output_blocks.2.1.transformer_blocks.7.attn1",
+ "input_blocks.7.1.transformer_blocks.8.attn1",
+ "input_blocks.8.1.transformer_blocks.8.attn1",
+ "output_blocks.0.1.transformer_blocks.8.attn1",
+ "output_blocks.1.1.transformer_blocks.8.attn1",
+ "output_blocks.2.1.transformer_blocks.8.attn1",
+ "input_blocks.7.1.transformer_blocks.9.attn1",
+ "input_blocks.8.1.transformer_blocks.9.attn1",
+ "output_blocks.0.1.transformer_blocks.9.attn1",
+ "output_blocks.1.1.transformer_blocks.9.attn1",
+ "output_blocks.2.1.transformer_blocks.9.attn1",
+ ],
+ 2: [
+ # SD 1.5 U-Net (diffusers)
+ "mid_block.attentions.0.transformer_blocks.0.attn1",
+ # SD 1.5 U-Net (ldm)
+ "middle_block.1.transformer_blocks.0.attn1",
+ "middle_block.1.transformer_blocks.1.attn1",
+ "middle_block.1.transformer_blocks.2.attn1",
+ "middle_block.1.transformer_blocks.3.attn1",
+ "middle_block.1.transformer_blocks.4.attn1",
+ "middle_block.1.transformer_blocks.5.attn1",
+ "middle_block.1.transformer_blocks.6.attn1",
+ "middle_block.1.transformer_blocks.7.attn1",
+ "middle_block.1.transformer_blocks.8.attn1",
+ "middle_block.1.transformer_blocks.9.attn1",
+ ],
+ 3 : [] # TODO - separate layers for SD-XL
+}
+
+
+RNG_INSTANCE = random.Random()
+
+@cache
+def get_divisors(value: int, min_value: int, /, max_options: int = 1) -> list[int]:
+ """
+ Returns divisors x of value such that
+     x * min_value <= value,
+ in big -> small order; the number of divisors returned is limited by max_options
+ """
+ max_options = max(1, max_options) # at least 1 option should be returned
+ min_value = min(min_value, value)
+ divisors = [i for i in range(min_value, value + 1) if value % i == 0] # divisors in small -> big order
+ ns = [value // i for i in divisors[:max_options]] # has at least 1 element # big -> small order
+ return ns
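+
+# Worked example (numbers chosen purely for illustration): get_divisors(64, 32, max_options=3)
+# keeps the divisors of 64 that are >= 32, i.e. [32, 64], and returns [64 // 32, 64 // 64] = [2, 1];
+# every returned value n therefore satisfies n * 32 <= 64.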
+
+
+def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
+ """
+ Returns a random divisor x of value such that
+     x * min_value <= value;
+ if max_options is 1, the behavior is deterministic
+ """
+ ns = get_divisors(value, min_value, max_options=max_options) # get cached divisors
+ idx = RNG_INSTANCE.randint(0, len(ns) - 1)
+
+ return ns[idx]
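+
+# With the default max_options=1 only the first candidate is considered, so random_divisor(64, 32)
+# always returns 2 regardless of the RNG state; a larger max_options (the swap size) makes the
+# choice genuinely random among the first few candidates.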
+
+
+def set_hypertile_seed(seed: int) -> None:
+ RNG_INSTANCE.seed(seed)
+
+
+@cache
+def largest_tile_size_available(width: int, height: int) -> int:
+ """
+ Calculates the largest tile size available for a given width and height
+ Tile size is always a power of 2
+ """
+ gcd = math.gcd(width, height)
+ largest_tile_size_available = 1
+ while gcd % (largest_tile_size_available * 2) == 0:
+ largest_tile_size_available *= 2
+ return largest_tile_size_available
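+
+# Worked example (sizes are illustrative): largest_tile_size_available(1024, 768) takes
+# gcd(1024, 768) = 256 and returns 256, the largest power of two dividing that gcd;
+# largest_tile_size_available(1280, 720) has gcd 80 and therefore returns 16.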
+
+
+def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]:
+ """
+ Finds h and w such that h*w = hw and h/w = aspect_ratio
+ We check all possible divisors of hw and return the closest to the aspect ratio
+ """
+ divisors = [i for i in range(2, hw + 1) if hw % i == 0] # all divisors of hw
+ pairs = [(i, hw // i) for i in divisors] # all pairs of divisors of hw
+ ratios = [w/h for h, w in pairs] # all ratios of pairs of divisors of hw
+ closest_ratio = min(ratios, key=lambda x: abs(x - aspect_ratio)) # closest ratio to aspect_ratio
+ closest_pair = pairs[ratios.index(closest_ratio)] # closest pair of divisors to aspect_ratio
+ return closest_pair
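+
+# Worked example (illustrative numbers): iterative_closest_divisors(24, 1.5) enumerates the divisor
+# pairs of 24 and returns (4, 6), the pair whose 6 / 4 = 1.5 ratio is closest to the requested
+# aspect ratio.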
+
+
+@cache
+def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]:
+ """
+ Finds h and w such that h*w = hw and h/w = aspect_ratio
+ """
+ h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
+ # find h and w such that h*w = hw and h/w = aspect_ratio
+ if h * w != hw:
+ w_candidate = hw / h
+ # check if w is an integer
+ if not w_candidate.is_integer():
+ h_candidate = hw / w
+ # check if h is an integer
+ if not h_candidate.is_integer():
+ return iterative_closest_divisors(hw, aspect_ratio)
+ else:
+ h = int(h_candidate)
+ else:
+ w = int(w_candidate)
+ return h, w
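+
+# Worked example (illustrative numbers): find_hw_candidates(6144, 1.5) rounds sqrt(6144 * 1.5) = 96
+# and sqrt(6144 / 1.5) = 64; since 96 * 64 == 6144 it returns (96, 64) directly, without falling
+# back to iterative_closest_divisors.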
+
+
+def self_attn_forward(params: HypertileParams, scale_depth=True) -> Callable:
+
+ @wraps(params.forward)
+ def wrapper(*args, **kwargs):
+ if not params.enabled:
+ return params.forward(*args, **kwargs)
+
+ latent_tile_size = max(128, params.tile_size) // 8
+ x = args[0]
+
+ # VAE
+ if x.ndim == 4:
+ b, c, h, w = x.shape
+
+ nh = random_divisor(h, latent_tile_size, params.swap_size)
+ nw = random_divisor(w, latent_tile_size, params.swap_size)
+
+ if nh * nw > 1:
+ x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles
+
+ out = params.forward(x, *args[1:], **kwargs)
+
+ if nh * nw > 1:
+ out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw)
+
+ # U-Net
+ else:
+ hw: int = x.size(1)
+ h, w = find_hw_candidates(hw, params.aspect_ratio)
+ assert h * w == hw, f"Invalid aspect ratio {params.aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}"
+
+ factor = 2 ** params.depth if scale_depth else 1
+ nh = random_divisor(h, latent_tile_size * factor, params.swap_size)
+ nw = random_divisor(w, latent_tile_size * factor, params.swap_size)
+
+ if nh * nw > 1:
+ x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
+
+ out = params.forward(x, *args[1:], **kwargs)
+
+ if nh * nw > 1:
+ out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
+ out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)
+
+ return out
+
+ return wrapper
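+
+# Illustrative walkthrough (the numbers are an assumed example, not values fixed anywhere in this
+# module): for roughly a 768x512 generation the top-level U-Net self-attention sees
+# hw = 96 * 64 = 6144 tokens with aspect_ratio = 1.5, so find_hw_candidates gives (96, 64).
+# With tile_size = 256 (latent_tile_size = 32), swap_size = 1 and depth 0, random_divisor yields
+# nh = 3 and nw = 2, so the token grid is reshaped into 3 * 2 = 6 tiles of 32 x 32 tokens,
+# attention runs on each tile independently, and the tiles are merged back afterwards.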
+
+
+def hypertile_hook_model(model: nn.Module, width, height, *, enable=False, tile_size_max=128, swap_size=1, max_depth=3, is_sdxl=False):
+ hypertile_layers = getattr(model, "__webui_hypertile_layers", None)
+ if hypertile_layers is None:
+ if not enable:
+ return
+
+ hypertile_layers = {}
+ layers = DEPTH_LAYERS_XL if is_sdxl else DEPTH_LAYERS
+
+ for depth in range(4):
+ for layer_name, module in model.named_modules():
+ if any(layer_name.endswith(try_name) for try_name in layers[depth]):
+ params = HypertileParams()
+ module.__webui_hypertile_params = params
+ params.forward = module.forward
+ params.depth = depth
+ params.layer_name = layer_name
+ module.forward = self_attn_forward(params)
+
+ hypertile_layers[layer_name] = 1
+
+ model.__webui_hypertile_layers = hypertile_layers
+
+ aspect_ratio = width / height
+ tile_size = min(largest_tile_size_available(width, height), tile_size_max)
+
+ for layer_name, module in model.named_modules():
+ if layer_name in hypertile_layers:
+ params = module.__webui_hypertile_params
+
+ params.tile_size = tile_size
+ params.swap_size = swap_size
+ params.aspect_ratio = aspect_ratio
+ params.enabled = enable and params.depth <= max_depth
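+
+
+# Rough usage sketch (mirrors how scripts/hypertile_script.py wires this up; the argument values
+# are illustrative, not defaults defined in this module):
+#
+#     hypertile_hook_model(shared.sd_model.model, 1024, 768,
+#                          enable=True, tile_size_max=256, swap_size=3, max_depth=3,
+#                          is_sdxl=False)
+#
+# The first enabled call wraps the forward of every matching self-attention layer; later calls only
+# update each layer's HypertileParams, so the hook can be re-configured (or switched off with
+# enable=False) before every sampling pass without re-patching the model.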
diff --git a/extensions-builtin/hypertile/scripts/hypertile_script.py b/extensions-builtin/hypertile/scripts/hypertile_script.py
new file mode 100644
index 00000000..395d584b
--- /dev/null
+++ b/extensions-builtin/hypertile/scripts/hypertile_script.py
@@ -0,0 +1,109 @@
+import hypertile
+from modules import scripts, script_callbacks, shared
+from scripts.hypertile_xyz import add_axis_options
+
+
+class ScriptHypertile(scripts.Script):
+ name = "Hypertile"
+
+ def title(self):
+ return self.name
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def process(self, p, *args):
+ hypertile.set_hypertile_seed(p.all_seeds[0])
+
+ configure_hypertile(p.width, p.height, enable_unet=shared.opts.hypertile_enable_unet)
+
+ self.add_infotext(p)
+
+ def before_hr(self, p, *args):
+
+ enable = shared.opts.hypertile_enable_unet_secondpass or shared.opts.hypertile_enable_unet
+
+ # exclusive hypertile seed for the second pass
+ if enable:
+ hypertile.set_hypertile_seed(p.all_seeds[0])
+
+ configure_hypertile(p.hr_upscale_to_x, p.hr_upscale_to_y, enable_unet=enable)
+
+ if enable and not shared.opts.hypertile_enable_unet:
+ p.extra_generation_params["Hypertile U-Net second pass"] = True
+
+ self.add_infotext(p, add_unet_params=True)
+
+ def add_infotext(self, p, add_unet_params=False):
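+ # record only options that differ from their defaults so the infotext stays compact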
+ def option(name):
+ value = getattr(shared.opts, name)
+ default_value = shared.opts.get_default(name)
+ return None if value == default_value else value
+
+ if shared.opts.hypertile_enable_unet:
+ p.extra_generation_params["Hypertile U-Net"] = True
+
+ if shared.opts.hypertile_enable_unet or add_unet_params:
+ p.extra_generation_params["Hypertile U-Net max depth"] = option('hypertile_max_depth_unet')
+ p.extra_generation_params["Hypertile U-Net max tile size"] = option('hypertile_max_tile_unet')
+ p.extra_generation_params["Hypertile U-Net swap size"] = option('hypertile_swap_size_unet')
+
+ if shared.opts.hypertile_enable_vae:
+ p.extra_generation_params["Hypertile VAE"] = True
+ p.extra_generation_params["Hypertile VAE max depth"] = option('hypertile_max_depth_vae')
+ p.extra_generation_params["Hypertile VAE max tile size"] = option('hypertile_max_tile_vae')
+ p.extra_generation_params["Hypertile VAE swap size"] = option('hypertile_swap_size_vae')
+
+
+def configure_hypertile(width, height, enable_unet=True):
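+ # hook both the VAE (first_stage_model) and the U-Net; the VAE hook is gated by its own setting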
+ hypertile.hypertile_hook_model(
+ shared.sd_model.first_stage_model,
+ width,
+ height,
+ swap_size=shared.opts.hypertile_swap_size_vae,
+ max_depth=shared.opts.hypertile_max_depth_vae,
+ tile_size_max=shared.opts.hypertile_max_tile_vae,
+ enable=shared.opts.hypertile_enable_vae,
+ )
+
+ hypertile.hypertile_hook_model(
+ shared.sd_model.model,
+ width,
+ height,
+ swap_size=shared.opts.hypertile_swap_size_unet,
+ max_depth=shared.opts.hypertile_max_depth_unet,
+ tile_size_max=shared.opts.hypertile_max_tile_unet,
+ enable=enable_unet,
+ is_sdxl=shared.sd_model.is_sdxl
+ )
+
+
+def on_ui_settings():
+ import gradio as gr
+
+ options = {
+ "hypertile_explanation": shared.OptionHTML("""
+ <a href='https://github.com/tfernd/HyperTile'>Hypertile</a> optimizes the self-attention layer within U-Net and VAE models,
+ speeding up computation by a factor of roughly 1 to 4. The larger the generated image, the greater the
+ benefit.
+ """),
+
+ "hypertile_enable_unet": shared.OptionInfo(False, "Enable Hypertile U-Net", infotext="Hypertile U-Net").info("enables hypertile for all modes, including hires fix second pass; noticeable change in details of the generated picture"),
+ "hypertile_enable_unet_secondpass": shared.OptionInfo(False, "Enable Hypertile U-Net for hires fix second pass", infotext="Hypertile U-Net second pass").info("enables hypertile just for hires fix second pass - regardless of whether the above setting is enabled"),
+ "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"),
+ "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"),
+ "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile U-Net swap size"),
+
+ "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"),
+ "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"),
+ "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"),
+ "hypertile_swap_size_vae": shared.OptionInfo(3, "Hypertile VAE swap size ", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile VAE swap size"),
+ }
+
+ for name, opt in options.items():
+ opt.section = ('hypertile', "Hypertile")
+ shared.opts.add_option(name, opt)
+
+
+script_callbacks.on_ui_settings(on_ui_settings)
+script_callbacks.on_before_ui(add_axis_options)
diff --git a/extensions-builtin/hypertile/scripts/hypertile_xyz.py b/extensions-builtin/hypertile/scripts/hypertile_xyz.py
new file mode 100644
index 00000000..9e96ae3c
--- /dev/null
+++ b/extensions-builtin/hypertile/scripts/hypertile_xyz.py
@@ -0,0 +1,51 @@
+from modules import scripts
+from modules.shared import opts
+
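+# locate the built-in X/Y/Z grid script so its axis options can be extended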
+xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module
+
+def int_applier(value_name:str, min_range:int = -1, max_range:int = -1):
+ """
+ Returns a function that applies the given value to the given value_name in opts.data.
+ """
+ def validate(value_name:str, value:str):
+ value = int(value)
+ # validate value
+ if min_range != -1:
+ assert value >= min_range, f"Value {value} for {value_name} must be greater than or equal to {min_range}"
+ if max_range != -1:
+ assert value <= max_range, f"Value {value} for {value_name} must be less than or equal to {max_range}"
+ def apply_int(p, x, xs):
+ validate(value_name, x)
+ opts.data[value_name] = int(x)
+ return apply_int
+
+def bool_applier(value_name:str):
+ """
+ Returns a function that applies the given value to the given value_name in opts.data.
+ """
+ def validate(value_name:str, value:str):
+ assert value.lower() in ["true", "false"], f"Value {value} for {value_name} must be either true or false"
+ def apply_bool(p, x, xs):
+ validate(value_name, x)
+ value_boolean = x.lower() == "true"
+ opts.data[value_name] = value_boolean
+ return apply_bool
+
+def add_axis_options():
+ extra_axis_options = [
+ xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, bool_applier("hypertile_enable_unet"), choices=xyz_grid.boolean_choice(reverse=True)),
+ xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, bool_applier("hypertile_enable_unet_secondpass"), choices=xyz_grid.boolean_choice(reverse=True)),
+ xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, int_applier("hypertile_max_depth_unet", 0, 3), choices=lambda: [str(x) for x in range(4)]),
+ xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, int_applier("hypertile_max_tile_unet", 0, 512)),
+ xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, int_applier("hypertile_swap_size_unet", 0, 64)),
+ xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, bool_applier("hypertile_enable_vae"), choices=xyz_grid.boolean_choice(reverse=True)),
+ xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, int_applier("hypertile_max_depth_vae", 0, 3), choices=lambda: [str(x) for x in range(4)]),
+ xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)),
+ xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)),
+ ]
+ set_a = {opt.label for opt in xyz_grid.axis_options}
+ set_b = {opt.label for opt in extra_axis_options}
+ if set_a.intersection(set_b):
+ return
+
+ xyz_grid.axis_options.extend(extra_axis_options)
diff --git a/extensions-builtin/mobile/javascript/mobile.js b/extensions-builtin/mobile/javascript/mobile.js
index 12cae4b7..bff1aced 100644
--- a/extensions-builtin/mobile/javascript/mobile.js
+++ b/extensions-builtin/mobile/javascript/mobile.js
@@ -12,6 +12,8 @@ function isMobile() {
}
function reportWindowSize() {
+ if (gradioApp().querySelector('.toprow-compact-tools')) return; // not applicable for compact prompt layout
+
var currentlyMobile = isMobile();
if (currentlyMobile == isSetupForMobile) return;
isSetupForMobile = currentlyMobile;
@@ -20,7 +22,13 @@ function reportWindowSize() {
var button = gradioApp().getElementById(tab + '_generate_box');
var target = gradioApp().getElementById(currentlyMobile ? tab + '_results' : tab + '_actions_column');
target.insertBefore(button, target.firstElementChild);
+
+ gradioApp().getElementById(tab + '_results').classList.toggle('mobile', currentlyMobile);
}
}
window.addEventListener("resize", reportWindowSize);
+
+onUiLoaded(function() {
+ reportWindowSize();
+});
diff --git a/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py b/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py
new file mode 100644
index 00000000..d9024344
--- /dev/null
+++ b/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py
@@ -0,0 +1,747 @@
+import numpy as np
+import gradio as gr
+import math
+from modules.ui_components import InputAccordion
+import modules.scripts as scripts
+
+
+class SoftInpaintingSettings:
+ def __init__(self,
+ mask_blend_power,
+ mask_blend_scale,
+ inpaint_detail_preservation,
+ composite_mask_influence,
+ composite_difference_threshold,
+ composite_difference_contrast):
+ self.mask_blend_power = mask_blend_power
+ self.mask_blend_scale = mask_blend_scale
+ self.inpaint_detail_preservation = inpaint_detail_preservation
+ self.composite_mask_influence = composite_mask_influence
+ self.composite_difference_threshold = composite_difference_threshold
+ self.composite_difference_contrast = composite_difference_contrast
+
+ def add_generation_params(self, dest):
+ dest[enabled_gen_param_label] = True
+ dest[gen_param_labels.mask_blend_power] = self.mask_blend_power
+ dest[gen_param_labels.mask_blend_scale] = self.mask_blend_scale
+ dest[gen_param_labels.inpaint_detail_preservation] = self.inpaint_detail_preservation
+ dest[gen_param_labels.composite_mask_influence] = self.composite_mask_influence
+ dest[gen_param_labels.composite_difference_threshold] = self.composite_difference_threshold
+ dest[gen_param_labels.composite_difference_contrast] = self.composite_difference_contrast
+
+
+# ------------------- Methods -------------------
+
+def processing_uses_inpainting(p):
+ # TODO: Figure out a better way to determine if inpainting is being used by p
+ if getattr(p, "image_mask", None) is not None:
+ return True
+
+ if getattr(p, "mask", None) is not None:
+ return True
+
+ if getattr(p, "nmask", None) is not None:
+ return True
+
+ return False
+
+
+def latent_blend(settings, a, b, t):
+ """
+ Interpolates two latent image representations according to the parameter t,
+ where the interpolated vectors' magnitudes are also interpolated separately.
+ The "detail_preservation" factor biases the magnitude interpolation towards
+ the larger of the two magnitudes.
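+
+ In effect: interp = lerp(a, b, t), and the per-pixel magnitude of interp is rescaled
+ to (lerp(|a|^p, |b|^p, t))^(1/p), where p is inpaint_detail_preservation.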
+ """
+ import torch
+
+ # NOTE: We use inplace operations wherever possible.
+
+ # [4][w][h] to [1][4][w][h]
+ t2 = t.unsqueeze(0)
+ # [4][w][h] to [1][1][w][h] - the [4] seems redundant.
+ t3 = t[0].unsqueeze(0).unsqueeze(0)
+
+ one_minus_t2 = 1 - t2
+ one_minus_t3 = 1 - t3
+
+ # Linearly interpolate the image vectors.
+ a_scaled = a * one_minus_t2
+ b_scaled = b * t2
+ image_interp = a_scaled
+ image_interp.add_(b_scaled)
+ result_type = image_interp.dtype
+ del a_scaled, b_scaled, t2, one_minus_t2
+
+ # Calculate the magnitude of the interpolated vectors. (We will remove this magnitude.)
+ # 64-bit operations are used here to allow large exponents.
+ current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(torch.float64).add_(0.00001)
+
+ # Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1).
+ a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
+ settings.inpaint_detail_preservation) * one_minus_t3
+ b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
+ settings.inpaint_detail_preservation) * t3
+ desired_magnitude = a_magnitude
+ desired_magnitude.add_(b_magnitude).pow_(1 / settings.inpaint_detail_preservation)
+ del a_magnitude, b_magnitude, t3, one_minus_t3
+
+ # Change the linearly interpolated image vectors' magnitudes to the value we want.
+ # This is the last 64-bit operation.
+ image_interp_scaling_factor = desired_magnitude
+ image_interp_scaling_factor.div_(current_magnitude)
+ image_interp_scaling_factor = image_interp_scaling_factor.to(result_type)
+ image_interp_scaled = image_interp
+ image_interp_scaled.mul_(image_interp_scaling_factor)
+ del current_magnitude
+ del desired_magnitude
+ del image_interp
+ del image_interp_scaling_factor
+ del result_type
+
+ return image_interp_scaled
+
+
+def get_modified_nmask(settings, nmask, sigma):
+ """
+ Converts a negative mask representing the transparency of the original latent vectors being overlaid
+ to a mask that is scaled according to the denoising strength for this step.
+
+ Where:
+ 0 = fully opaque, infinite density, fully masked
+ 1 = fully transparent, zero density, fully unmasked
+
+ We bring this transparency to a power, as this allows one to simulate N blending operations
+ where N can be any positive real value. Using this, one can control the balance of influence between
+ the denoiser and the original latents according to the sigma value.
+
+ NOTE: "mask" is not used
+ """
+ import torch
+ return torch.pow(nmask, (sigma ** settings.mask_blend_power) * settings.mask_blend_scale)
+
+
+def apply_adaptive_masks(
+ settings: SoftInpaintingSettings,
+ nmask,
+ latent_orig,
+ latent_processed,
+ overlay_images,
+ width, height,
+ paste_to):
+ import torch
+ import modules.processing as proc
+ import modules.images as images
+ from PIL import Image, ImageOps, ImageFilter
+
+ # TODO: Bias the blending according to the latent mask, add adjustable parameter for bias control.
+ latent_mask = nmask[0].float()
+ # convert the original mask into a form we use to scale distances for thresholding
+ mask_scalar = 1 - (torch.clamp(latent_mask, min=0, max=1) ** (settings.mask_blend_scale / 2))
+ mask_scalar = (0.5 * (1 - settings.composite_mask_influence)
+ + mask_scalar * settings.composite_mask_influence)
+ mask_scalar = mask_scalar / (1.00001 - mask_scalar)
+ mask_scalar = mask_scalar.cpu().numpy()
+
+ latent_distance = torch.norm(latent_processed - latent_orig, p=2, dim=1)
+
+ kernel, kernel_center = get_gaussian_kernel(stddev_radius=1.5, max_radius=2)
+
+ masks_for_overlay = []
+
+ for i, (distance_map, overlay_image) in enumerate(zip(latent_distance, overlay_images)):
+ converted_mask = distance_map.float().cpu().numpy()
+ converted_mask = weighted_histogram_filter(converted_mask, kernel, kernel_center,
+ percentile_min=0.9, percentile_max=1, min_width=1)
+ converted_mask = weighted_histogram_filter(converted_mask, kernel, kernel_center,
+ percentile_min=0.25, percentile_max=0.75, min_width=1)
+
+ # The distance at which opacity of original decreases to 50%
+ half_weighted_distance = settings.composite_difference_threshold * mask_scalar
+ converted_mask = converted_mask / half_weighted_distance
+
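+ # soft threshold: opacity falls to 0.5 at the weighted distance, with falloff sharpness set by composite_difference_contrast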
+ converted_mask = 1 / (1 + converted_mask ** settings.composite_difference_contrast)
+ converted_mask = smootherstep(converted_mask)
+ converted_mask = 1 - converted_mask
+ converted_mask = 255. * converted_mask
+ converted_mask = converted_mask.astype(np.uint8)
+ converted_mask = Image.fromarray(converted_mask)
+ converted_mask = images.resize_image(2, converted_mask, width, height)
+ converted_mask = proc.create_binary_mask(converted_mask, round=False)
+
+ # Remove aliasing artifacts using a gaussian blur.
+ converted_mask = converted_mask.filter(ImageFilter.GaussianBlur(radius=4))
+
+ # Expand the mask to fit the whole image if needed.
+ if paste_to is not None:
+ converted_mask = proc.uncrop(converted_mask,
+ (overlay_image.width, overlay_image.height),
+ paste_to)
+
+ masks_for_overlay.append(converted_mask)
+
+ image_masked = Image.new('RGBa', (overlay_image.width, overlay_image.height))
+ image_masked.paste(overlay_image.convert("RGBA").convert("RGBa"),
+ mask=ImageOps.invert(converted_mask.convert('L')))
+
+ overlay_images[i] = image_masked.convert('RGBA')
+
+ return masks_for_overlay
+
+
+def apply_masks(
+ settings,
+ nmask,
+ overlay_images,
+ width, height,
+ paste_to):
+ import torch
+ import modules.processing as proc
+ import modules.images as images
+ from PIL import Image, ImageOps, ImageFilter
+
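+ # unlike apply_adaptive_masks, a single mask derived directly from nmask is shared by all overlay images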
+ converted_mask = nmask[0].float()
+ converted_mask = torch.clamp(converted_mask, min=0, max=1).pow_(settings.mask_blend_scale / 2)
+ converted_mask = 255. * converted_mask
+ converted_mask = converted_mask.cpu().numpy().astype(np.uint8)
+ converted_mask = Image.fromarray(converted_mask)
+ converted_mask = images.resize_image(2, converted_mask, width, height)
+ converted_mask = proc.create_binary_mask(converted_mask, round=False)
+
+ # Remove aliasing artifacts using a gaussian blur.
+ converted_mask = converted_mask.filter(ImageFilter.GaussianBlur(radius=4))
+
+ # Expand the mask to fit the whole image if needed.
+ if paste_to is not None:
+ converted_mask = proc.uncrop(converted_mask,
+ (width, height),
+ paste_to)
+
+ masks_for_overlay = []
+
+ for i, overlay_image in enumerate(overlay_images):
+ masks_for_overlay.append(converted_mask)
+
+ image_masked = Image.new('RGBa', (overlay_image.width, overlay_image.height))
+ image_masked.paste(overlay_image.convert("RGBA").convert("RGBa"),
+ mask=ImageOps.invert(converted_mask.convert('L')))
+
+ overlay_images[i] = image_masked.convert('RGBA')
+
+ return masks_for_overlay
+
+
+def weighted_histogram_filter(img, kernel, kernel_center, percentile_min=0.0, percentile_max=1.0, min_width=1.0):
+ """
+ Generalized convolution filter capable of applying
+ weighted mean, median, maximum, and minimum filters
+ parametrically using an arbitrary kernel.
+
+ Args:
+ img (nparray):
+ The image, a 2-D array of floats, to which the filter is being applied.
+ kernel (nparray):
+ The kernel, a 2-D array of floats.
+ kernel_center (nparray):
+ The kernel center coordinate, a 1-D array with two elements.
+ percentile_min (float):
+ The lower bound of the histogram window used by the filter,
+ from 0 to 1.
+ percentile_max (float):
+ The upper bound of the histogram window used by the filter,
+ from 0 to 1.
+ min_width (float):
+ The minimum size of the histogram window bounds, in weight units.
+ Must be greater than 0.
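+
+ For example, percentile_min=0.0 with percentile_max=1.0 gives a weighted mean,
+ percentile_min=percentile_max=0.5 approximates a weighted median, and a window
+ near 1.0 (e.g. 0.9 to 1.0) approximates a weighted maximum.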
+
+ Returns:
+ (nparray): A filtered copy of the input image "img", a 2-D array of floats.
+ """
+
+ # Converts an index tuple into a vector.
+ def vec(x):
+ return np.array(x)
+
+ kernel_min = -kernel_center
+ kernel_max = vec(kernel.shape) - kernel_center
+
+ def weighted_histogram_filter_single(idx):
+ idx = vec(idx)
+ min_index = np.maximum(0, idx + kernel_min)
+ max_index = np.minimum(vec(img.shape), idx + kernel_max)
+ window_shape = max_index - min_index
+
+ class WeightedElement:
+ """
+ An element of the histogram, its weight
+ and bounds.
+ """
+
+ def __init__(self, value, weight):
+ self.value: float = value
+ self.weight: float = weight
+ self.window_min: float = 0.0
+ self.window_max: float = 1.0
+
+ # Collect the values in the image as WeightedElements,
+ # weighted by their corresponding kernel values.
+ values = []
+ for window_tup in np.ndindex(tuple(window_shape)):
+ window_index = vec(window_tup)
+ image_index = window_index + min_index
+ centered_kernel_index = image_index - idx
+ kernel_index = centered_kernel_index + kernel_center
+ element = WeightedElement(img[tuple(image_index)], kernel[tuple(kernel_index)])
+ values.append(element)
+
+ def sort_key(x: WeightedElement):
+ return x.value
+
+ values.sort(key=sort_key)
+
+ # Calculate the height of the stack (sum)
+ # and the range each sample occupies in the stack
+ sum = 0
+ for i in range(len(values)):
+ values[i].window_min = sum
+ sum += values[i].weight
+ values[i].window_max = sum
+
+ # Calculate what range of this stack ("window")
+ # we want to get the weighted average across.
+ window_min = sum * percentile_min
+ window_max = sum * percentile_max
+ window_width = window_max - window_min
+
+ # Ensure the window is within the stack and at least a certain size.
+ if window_width < min_width:
+ window_center = (window_min + window_max) / 2
+ window_min = window_center - min_width / 2
+ window_max = window_center + min_width / 2
+
+ if window_max > sum:
+ window_max = sum
+ window_min = sum - min_width
+
+ if window_min < 0:
+ window_min = 0
+ window_max = min_width
+
+ value = 0
+ value_weight = 0
+
+ # Get the weighted average of all the samples
+ # that overlap with the window, weighted
+ # by the size of their overlap.
+ for i in range(len(values)):
+ if window_min >= values[i].window_max:
+ continue
+ if window_max <= values[i].window_min:
+ break
+
+ s = max(window_min, values[i].window_min)
+ e = min(window_max, values[i].window_max)
+ w = e - s
+
+ value += values[i].value * w
+ value_weight += w
+
+ return value / value_weight if value_weight != 0 else 0
+
+ img_out = img.copy()
+
+ # Apply the kernel operation over each pixel.
+ for index in np.ndindex(img.shape):
+ img_out[index] = weighted_histogram_filter_single(index)
+
+ return img_out
+
+
+def smoothstep(x):
+ """
+ The smoothstep function, input should be clamped to 0-1 range.
+ Turns a diagonal line (f(x) = x) into a sigmoid-like curve.
+ """
+ return x * x * (3 - 2 * x)
+
+
+def smootherstep(x):
+ """
+ The smootherstep function, input should be clamped to 0-1 range.
+ Turns a diagonal line (f(x) = x) into a sigmoid-like curve.
+ """
+ return x * x * x * (x * (6 * x - 15) + 10)
+
+
+def get_gaussian_kernel(stddev_radius=1.0, max_radius=2):
+ """
+ Creates a Gaussian kernel with thresholded edges.
+
+ Args:
+ stddev_radius (float):
+ Standard deviation of the gaussian kernel, in pixels.
+ max_radius (int):
+ The size of the filter kernel. The number of pixels is (max_radius*2+1) ** 2.
+ The kernel is thresholded so that any values one pixel beyond this radius
+ are weighted at 0.
+
+ Returns:
+ (nparray, nparray): A kernel array (shape: (N, N)), its center coordinate (shape: (2))
+ """
+
+ # Evaluates a 0-1 normalized gaussian function for a given square distance from the mean.
+ def gaussian(sqr_mag):
+ return math.exp(-sqr_mag / (stddev_radius * stddev_radius))
+
+ # Helper function for converting a tuple to an array.
+ def vec(x):
+ return np.array(x)
+
+ """
+ Since a gaussian is unbounded, we need to limit ourselves
+ to a finite range.
+ We taper the tails to zero at the edge of that range
+ while preserving the maximum value of 1 at the mean.
+ """
+ zero_radius = max_radius + 1.0
+ gauss_zero = gaussian(zero_radius * zero_radius)
+ gauss_kernel_scale = 1 / (1 - gauss_zero)
+
+ def gaussian_kernel_func(coordinate):
+ x = coordinate[0] ** 2.0 + coordinate[1] ** 2.0
+ x = gaussian(x)
+ x -= gauss_zero
+ x *= gauss_kernel_scale
+ x = max(0.0, x)
+ return x
+
+ size = max_radius * 2 + 1
+ kernel_center = max_radius
+ kernel = np.zeros((size, size))
+
+ for index in np.ndindex(kernel.shape):
+ kernel[index] = gaussian_kernel_func(vec(index) - kernel_center)
+
+ return kernel, kernel_center
+
+
+# ------------------- Constants -------------------
+
+
+default = SoftInpaintingSettings(1, 0.5, 4, 0, 0.5, 2)
+
+enabled_ui_label = "Soft inpainting"
+enabled_gen_param_label = "Soft inpainting enabled"
+enabled_el_id = "soft_inpainting_enabled"
+
+ui_labels = SoftInpaintingSettings(
+ "Schedule bias",
+ "Preservation strength",
+ "Transition contrast boost",
+ "Mask influence",
+ "Difference threshold",
+ "Difference contrast")
+
+ui_info = SoftInpaintingSettings(
+ "Shifts when preservation of original content occurs during denoising.",
+ "How strongly partially masked content should be preserved.",
+ "Amplifies the contrast that may be lost in partially masked regions.",
+ "How strongly the original mask should bias the difference threshold.",
+ "How much an image region can change before the original pixels are not blended in anymore.",
+ "How sharp the transition should be between blended and not blended.")
+
+gen_param_labels = SoftInpaintingSettings(
+ "Soft inpainting schedule bias",
+ "Soft inpainting preservation strength",
+ "Soft inpainting transition contrast boost",
+ "Soft inpainting mask influence",
+ "Soft inpainting difference threshold",
+ "Soft inpainting difference contrast")
+
+el_ids = SoftInpaintingSettings(
+ "mask_blend_power",
+ "mask_blend_scale",
+ "inpaint_detail_preservation",
+ "composite_mask_influence",
+ "composite_difference_threshold",
+ "composite_difference_contrast")
+
+
+# ------------------- Script -------------------
+
+
+class Script(scripts.Script):
+ def __init__(self):
+ self.section = "inpaint"
+ self.masks_for_overlay = None
+ self.overlay_images = None
+
+ def title(self):
+ return "Soft Inpainting"
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible if is_img2img else False
+
+ def ui(self, is_img2img):
+ if not is_img2img:
+ return
+
+ with InputAccordion(False, label=enabled_ui_label, elem_id=enabled_el_id) as soft_inpainting_enabled:
+ with gr.Group():
+ gr.Markdown(
+ """
+ Soft inpainting allows you to **seamlessly blend original content with inpainted content** according to the mask opacity.
+ **High _Mask blur_** values are recommended!
+ """)
+
+ power = \
+ gr.Slider(label=ui_labels.mask_blend_power,
+ info=ui_info.mask_blend_power,
+ minimum=0,
+ maximum=8,
+ step=0.1,
+ value=default.mask_blend_power,
+ elem_id=el_ids.mask_blend_power)
+ scale = \
+ gr.Slider(label=ui_labels.mask_blend_scale,
+ info=ui_info.mask_blend_scale,
+ minimum=0,
+ maximum=8,
+ step=0.05,
+ value=default.mask_blend_scale,
+ elem_id=el_ids.mask_blend_scale)
+ detail = \
+ gr.Slider(label=ui_labels.inpaint_detail_preservation,
+ info=ui_info.inpaint_detail_preservation,
+ minimum=1,
+ maximum=32,
+ step=0.5,
+ value=default.inpaint_detail_preservation,
+ elem_id=el_ids.inpaint_detail_preservation)
+
+ gr.Markdown(
+ """
+ ### Pixel Composite Settings
+ """)
+
+ mask_inf = \
+ gr.Slider(label=ui_labels.composite_mask_influence,
+ info=ui_info.composite_mask_influence,
+ minimum=0,
+ maximum=1,
+ step=0.05,
+ value=default.composite_mask_influence,
+ elem_id=el_ids.composite_mask_influence)
+
+ dif_thresh = \
+ gr.Slider(label=ui_labels.composite_difference_threshold,
+ info=ui_info.composite_difference_threshold,
+ minimum=0,
+ maximum=8,
+ step=0.25,
+ value=default.composite_difference_threshold,
+ elem_id=el_ids.composite_difference_threshold)
+
+ dif_contr = \
+ gr.Slider(label=ui_labels.composite_difference_contrast,
+ info=ui_info.composite_difference_contrast,
+ minimum=0,
+ maximum=8,
+ step=0.25,
+ value=default.composite_difference_contrast,
+ elem_id=el_ids.composite_difference_contrast)
+
+ with gr.Accordion("Help", open=False):
+ gr.Markdown(
+ f"""
+ ### {ui_labels.mask_blend_power}
+
+ The blending strength of original content is scaled proportionally with the decreasing noise level values at each step (sigmas).
+ This ensures that the influence of the denoiser and original content preservation is roughly balanced at each step.
+ This balance can be shifted using this parameter, controlling whether earlier or later steps have stronger preservation.
+
+ - **Below 1**: Stronger preservation near the end (with low sigma)
+ - **1**: Balanced (proportional to sigma)
+ - **Above 1**: Stronger preservation in the beginning (with high sigma)
+ """)
+ gr.Markdown(
+ f"""
+ ### {ui_labels.mask_blend_scale}
+
+ Skews whether partially masked image regions should be more likely to preserve the original content or favor inpainted content.
+ This may need to be adjusted depending on the {ui_labels.mask_blend_power}, CFG Scale, prompt and Denoising strength.
+
+ - **Low values**: Favors generated content.
+ - **High values**: Favors original content.
+ """)
+ gr.Markdown(
+ f"""
+ ### {ui_labels.inpaint_detail_preservation}
+
+ This parameter controls how the original latent vectors and denoised latent vectors are interpolated.
+ With higher values, the magnitude of the resulting blended vector will be closer to the maximum of the two interpolated vectors.
+ This can prevent the loss of contrast that occurs with linear interpolation.
+
+ - **Low values**: Softer blending, details may fade.
+ - **High values**: Stronger contrast, may over-saturate colors.
+ """)
+
+ gr.Markdown(
+ """
+ ## Pixel Composite Settings
+
+ Masks are generated based on how much a part of the image changed after denoising.
+ These masks are used to blend the original and final images together.
+ If the difference is low, the original pixels are used instead of the pixels returned by the inpainting process.
+ """)
+
+ gr.Markdown(
+ f"""
+ ### {ui_labels.composite_mask_influence}
+
+ This parameter controls how much the mask should bias this sensitivity to difference.
+
+ - **0**: Ignore the mask, only consider differences in image content.
+ - **1**: Follow the mask closely despite image content changes.
+ """)
+
+ gr.Markdown(
+ f"""
+ ### {ui_labels.composite_difference_threshold}
+
+ This value represents the difference at which the original pixels will have less than 50% opacity.
+
+ - **Low values**: Two image patches must be almost the same in order to retain original pixels.
+ - **High values**: Two image patches can be very different and still retain original pixels.
+ """)
+
+ gr.Markdown(
+ f"""
+ ### {ui_labels.composite_difference_contrast}
+
+ This value represents the contrast between the opacity of the original and inpainted content.
+
+ - **Low values**: The blend will be more gradual and have longer transitions, but may cause ghosting.
+ - **High values**: Ghosting will be less common, but transitions may be very sudden.
+ """)
+
+ self.infotext_fields = [(soft_inpainting_enabled, enabled_gen_param_label),
+ (power, gen_param_labels.mask_blend_power),
+ (scale, gen_param_labels.mask_blend_scale),
+ (detail, gen_param_labels.inpaint_detail_preservation),
+ (mask_inf, gen_param_labels.composite_mask_influence),
+ (dif_thresh, gen_param_labels.composite_difference_threshold),
+ (dif_contr, gen_param_labels.composite_difference_contrast)]
+
+ self.paste_field_names = []
+ for _, field_name in self.infotext_fields:
+ self.paste_field_names.append(field_name)
+
+ return [soft_inpainting_enabled,
+ power,
+ scale,
+ detail,
+ mask_inf,
+ dif_thresh,
+ dif_contr]
+
+ def process(self, p, enabled, power, scale, detail_preservation, mask_inf, dif_thresh, dif_contr):
+ if not enabled:
+ return
+
+ if not processing_uses_inpainting(p):
+ return
+
+ # Shut off the rounding it normally does.
+ p.mask_round = False
+
+ settings = SoftInpaintingSettings(power, scale, detail_preservation, mask_inf, dif_thresh, dif_contr)
+
+ # p.extra_generation_params["Mask rounding"] = False
+ settings.add_generation_params(p.extra_generation_params)
+
+ def on_mask_blend(self, p, mba: scripts.MaskBlendArgs, enabled, power, scale, detail_preservation, mask_inf,
+ dif_thresh, dif_contr):
+ if not enabled:
+ return
+
+ if not processing_uses_inpainting(p):
+ return
+
+ if mba.is_final_blend:
+ mba.blended_latent = mba.current_latent
+ return
+
+ settings = SoftInpaintingSettings(power, scale, detail_preservation, mask_inf, dif_thresh, dif_contr)
+
+ # todo: Why is sigma 2D? Both values are the same.
+ mba.blended_latent = latent_blend(settings,
+ mba.init_latent,
+ mba.current_latent,
+ get_modified_nmask(settings, mba.nmask, mba.sigma[0]))
+
+ def post_sample(self, p, ps: scripts.PostSampleArgs, enabled, power, scale, detail_preservation, mask_inf,
+ dif_thresh, dif_contr):
+ if not enabled:
+ return
+
+ if not processing_uses_inpainting(p):
+ return
+
+ nmask = getattr(p, "nmask", None)
+ if nmask is None:
+ return
+
+ from modules import images
+ from modules.shared import opts
+
+ settings = SoftInpaintingSettings(power, scale, detail_preservation, mask_inf, dif_thresh, dif_contr)
+
+ # since the original code puts holes in the existing overlay images,
+ # we have to rebuild them.
+ self.overlay_images = []
+ for img in p.init_images:
+
+ image = images.flatten(img, opts.img2img_background_color)
+
+ if p.paste_to is None and p.resize_mode != 3:
+ image = images.resize_image(p.resize_mode, image, p.width, p.height)
+
+ self.overlay_images.append(image.convert('RGBA'))
+
+ if len(p.init_images) == 1:
+ self.overlay_images = self.overlay_images * p.batch_size
+
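+ # if the sampler already decoded to image space, fall back to the simple nmask-based masks;
+ # otherwise build adaptive masks from the latent difference between original and processed samples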
+ if getattr(ps.samples, 'already_decoded', False):
+ self.masks_for_overlay = apply_masks(settings=settings,
+ nmask=nmask,
+ overlay_images=self.overlay_images,
+ width=p.width,
+ height=p.height,
+ paste_to=p.paste_to)
+ else:
+ self.masks_for_overlay = apply_adaptive_masks(settings=settings,
+ nmask=nmask,
+ latent_orig=p.init_latent,
+ latent_processed=ps.samples,
+ overlay_images=self.overlay_images,
+ width=p.width,
+ height=p.height,
+ paste_to=p.paste_to)
+
+ def postprocess_maskoverlay(self, p, ppmo: scripts.PostProcessMaskOverlayArgs, enabled, power, scale,
+ detail_preservation, mask_inf, dif_thresh, dif_contr):
+ if not enabled:
+ return
+
+ if not processing_uses_inpainting(p):
+ return
+
+ if self.masks_for_overlay is None:
+ return
+
+ if self.overlay_images is None:
+ return
+
+ ppmo.mask_for_overlay = self.masks_for_overlay[ppmo.index]
+ ppmo.overlay_image = self.overlay_images[ppmo.index]
diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js
index 5803daea..d680daf5 100644
--- a/javascript/dragdrop.js
+++ b/javascript/dragdrop.js
@@ -119,7 +119,7 @@ window.addEventListener('paste', e => {
}
const firstFreeImageField = visibleImageFields
- .filter(el => el.querySelector('input[type=file]'))?.[0];
+ .filter(el => !el.querySelector('img'))?.[0];
dropReplaceImage(
firstFreeImageField ?
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js
index 8906c892..688c2f11 100644
--- a/javascript/edit-attention.js
+++ b/javascript/edit-attention.js
@@ -18,37 +18,43 @@ function keyupEditAttention(event) {
const before = text.substring(0, selectionStart);
let beforeParen = before.lastIndexOf(OPEN);
if (beforeParen == -1) return false;
- let beforeParenClose = before.lastIndexOf(CLOSE);
- while (beforeParenClose !== -1 && beforeParenClose > beforeParen) {
- beforeParen = before.lastIndexOf(OPEN, beforeParen - 1);
- beforeParenClose = before.lastIndexOf(CLOSE, beforeParenClose - 1);
- }
+
+ let beforeClosingParen = before.lastIndexOf(CLOSE);
+ if (beforeClosingParen != -1 && beforeClosingParen > beforeParen) return false;
// Find closing parenthesis around current cursor
const after = text.substring(selectionStart);
let afterParen = after.indexOf(CLOSE);
if (afterParen == -1) return false;
- let afterParenOpen = after.indexOf(OPEN);
- while (afterParenOpen !== -1 && afterParen > afterParenOpen) {
- afterParen = after.indexOf(CLOSE, afterParen + 1);
- afterParenOpen = after.indexOf(OPEN, afterParenOpen + 1);
- }
- if (beforeParen === -1 || afterParen === -1) return false;
+
+ let afterOpeningParen = after.indexOf(OPEN);
+ if (afterOpeningParen != -1 && afterOpeningParen < afterParen) return false;
// Set the selection to the text between the parenthesis
const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen);
- const lastColon = parenContent.lastIndexOf(":");
- selectionStart = beforeParen + 1;
- selectionEnd = selectionStart + lastColon;
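+ // if the content already ends with an explicit weight like ":1.2", select only the text before it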
+ if (/.*:-?[\d.]+/s.test(parenContent)) {
+ const lastColon = parenContent.lastIndexOf(":");
+ selectionStart = beforeParen + 1;
+ selectionEnd = selectionStart + lastColon;
+ } else {
+ selectionStart = beforeParen + 1;
+ selectionEnd = selectionStart + parenContent.length;
+ }
+
target.setSelectionRange(selectionStart, selectionEnd);
return true;
}
function selectCurrentWord() {
if (selectionStart !== selectionEnd) return false;
- const delimiters = opts.keyedit_delimiters + " \r\n\t";
+ const whitespace_delimiters = {"Tab": "\t", "Carriage Return": "\r", "Line Feed": "\n"};
+ let delimiters = opts.keyedit_delimiters;
+
+ for (let i of opts.keyedit_delimiters_whitespace) {
+ delimiters += whitespace_delimiters[i];
+ }
- // seek backward until to find beggining
+ // seek backward to find beginning
while (!delimiters.includes(text[selectionStart - 1]) && selectionStart > 0) {
selectionStart--;
}
@@ -63,7 +69,7 @@ function keyupEditAttention(event) {
}
// If the user hasn't selected anything, let's select their current parenthesis block or word
- if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')')) {
+ if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')') && !selectCurrentParenthesisBlock('[', ']')) {
selectCurrentWord();
}
@@ -71,33 +77,54 @@ function keyupEditAttention(event) {
var closeCharacter = ')';
var delta = opts.keyedit_precision_attention;
+ var start = selectionStart > 0 ? text[selectionStart - 1] : "";
+ var end = text[selectionEnd];
- if (selectionStart > 0 && text[selectionStart - 1] == '<') {
+ if (start == '<') {
closeCharacter = '>';
delta = opts.keyedit_precision_extra;
- } else if (selectionStart == 0 || text[selectionStart - 1] != "(") {
+ } else if (start == '(' && end == ')' || start == '[' && end == ']') { // convert old-style (((emphasis)))
+ let numParen = 0;
+
+ while (text[selectionStart - numParen - 1] == start && text[selectionEnd + numParen] == end) {
+ numParen++;
+ }
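+ // each nesting level of () multiplies attention by 1.1, and each level of [] divides it by 1.1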
+ if (start == "[") {
+ weight = (1 / 1.1) ** numParen;
+ } else {
+ weight = 1.1 ** numParen;
+ }
+
+ weight = Math.round(weight / opts.keyedit_precision_attention) * opts.keyedit_precision_attention;
+
+ text = text.slice(0, selectionStart - numParen) + "(" + text.slice(selectionStart, selectionEnd) + ":" + weight + ")" + text.slice(selectionEnd + numParen);
+ selectionStart -= numParen - 1;
+ selectionEnd -= numParen - 1;
+ } else if (start != '(') {
// do not include spaces at the end
while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') {
- selectionEnd -= 1;
+ selectionEnd--;
}
+
if (selectionStart == selectionEnd) {
return;
}
text = text.slice(0, selectionStart) + "(" + text.slice(selectionStart, selectionEnd) + ":1.0)" + text.slice(selectionEnd);
- selectionStart += 1;
- selectionEnd += 1;
+ selectionStart++;
+ selectionEnd++;
}
- var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1;
- var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + 1 + end));
+ if (text[selectionEnd] != ':') return;
+ var weightLength = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1;
+ var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + weightLength));
if (isNaN(weight)) return;
weight += isPlus ? delta : -delta;
weight = parseFloat(weight.toPrecision(12));
- if (String(weight).length == 1) weight += ".0";
+ if (Number.isInteger(weight)) weight += ".0";
if (closeCharacter == ')' && weight == 1) {
var endParenPos = text.substring(selectionEnd).indexOf(')');
@@ -105,7 +132,7 @@ function keyupEditAttention(event) {
selectionStart--;
selectionEnd--;
} else {
- text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end);
+ text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + weightLength);
}
target.focus();
diff --git a/javascript/extensions.js b/javascript/extensions.js
index 1f7254c5..312131b7 100644
--- a/javascript/extensions.js
+++ b/javascript/extensions.js
@@ -33,7 +33,7 @@ function extensions_check() {
var id = randomId();
- requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function() {
+ requestProgress(id, gradioApp().getElementById('extensions_installed_html'), null, function() {
});
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index 5582a6e5..f1ad19a6 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -1,20 +1,39 @@
+function toggleCss(key, css, enable) {
+ var style = document.getElementById(key);
+ if (enable && !style) {
+ style = document.createElement('style');
+ style.id = key;
+ style.type = 'text/css';
+ document.head.appendChild(style);
+ }
+ if (style && !enable) {
+ document.head.removeChild(style);
+ }
+ if (style) {
+ style.innerHTML = '';
+ style.appendChild(document.createTextNode(css));
+ }
+}
+
function setupExtraNetworksForTab(tabname) {
gradioApp().querySelector('#' + tabname + '_extra_tabs').classList.add('extra-networks');
var tabs = gradioApp().querySelector('#' + tabname + '_extra_tabs > div');
- var search = gradioApp().querySelector('#' + tabname + '_extra_search textarea');
+ var searchDiv = gradioApp().getElementById(tabname + '_extra_search');
+ var search = searchDiv.querySelector('textarea');
var sort = gradioApp().getElementById(tabname + '_extra_sort');
var sortOrder = gradioApp().getElementById(tabname + '_extra_sortorder');
var refresh = gradioApp().getElementById(tabname + '_extra_refresh');
+ var showDirsDiv = gradioApp().getElementById(tabname + '_extra_show_dirs');
+ var showDirs = gradioApp().querySelector('#' + tabname + '_extra_show_dirs input');
+ var promptContainer = gradioApp().querySelector('.prompt-container-compact#' + tabname + '_prompt_container');
+ var negativePrompt = gradioApp().querySelector('#' + tabname + '_neg_prompt');
- search.classList.add('search');
- sort.classList.add('sort');
- sortOrder.classList.add('sortorder');
- sort.dataset.sortkey = 'sortDefault';
- tabs.appendChild(search);
+ tabs.appendChild(searchDiv);
tabs.appendChild(sort);
tabs.appendChild(sortOrder);
tabs.appendChild(refresh);
+ tabs.appendChild(showDirsDiv);
var applyFilter = function() {
var searchTerm = search.value.toLowerCase();
@@ -31,20 +50,23 @@ function setupExtraNetworksForTab(tabname) {
elem.style.display = visible ? "" : "none";
});
+
+ applySort();
};
var applySort = function() {
+ var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card');
+
var reverse = sortOrder.classList.contains("sortReverse");
- var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim();
- sortKey = sortKey ? "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1) : "";
- var sortKeyStore = sortKey ? sortKey + (reverse ? "Reverse" : "") : "";
- if (!sortKey || sortKeyStore == sort.dataset.sortkey) {
+ var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim() || "name";
+ sortKey = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1);
+ var sortKeyStore = sortKey + "-" + (reverse ? "Descending" : "Ascending") + "-" + cards.length;
+
+ if (sortKeyStore == sort.dataset.sortkey) {
return;
}
-
sort.dataset.sortkey = sortKeyStore;
- var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card');
cards.forEach(function(card) {
card.originalParentElement = card.parentElement;
});
@@ -70,23 +92,70 @@ function setupExtraNetworksForTab(tabname) {
};
search.addEventListener("input", applyFilter);
- applyFilter();
- ["change", "blur", "click"].forEach(function(evt) {
- sort.querySelector("input").addEventListener(evt, applySort);
- });
sortOrder.addEventListener("click", function() {
sortOrder.classList.toggle("sortReverse");
applySort();
});
+ applyFilter();
+ extraNetworksApplySort[tabname] = applySort;
extraNetworksApplyFilter[tabname] = applyFilter;
+
+ var showDirsUpdate = function() {
+ var css = '#' + tabname + '_extra_tabs .extra-network-subdirs { display: none; }';
+ toggleCss(tabname + '_extra_show_dirs_style', css, !showDirs.checked);
+ localSet('extra-networks-show-dirs', showDirs.checked ? 1 : 0);
+ };
+ showDirs.checked = localGet('extra-networks-show-dirs', 1) == 1;
+ showDirs.addEventListener("change", showDirsUpdate);
+ showDirsUpdate();
+}
+
+function extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt) {
+ if (!gradioApp().querySelector('.toprow-compact-tools')) return; // only applicable for compact prompt layout
+
+ var promptContainer = gradioApp().getElementById(tabname + '_prompt_container');
+ var prompt = gradioApp().getElementById(tabname + '_prompt_row');
+ var negPrompt = gradioApp().getElementById(tabname + '_neg_prompt_row');
+ var elem = id ? gradioApp().getElementById(id) : null;
+
+ if (showNegativePrompt && elem) {
+ elem.insertBefore(negPrompt, elem.firstChild);
+ } else {
+ promptContainer.insertBefore(negPrompt, promptContainer.firstChild);
+ }
+
+ if (showPrompt && elem) {
+ elem.insertBefore(prompt, elem.firstChild);
+ } else {
+ promptContainer.insertBefore(prompt, promptContainer.firstChild);
+ }
+
+ if (elem) {
+ elem.classList.toggle('extra-page-prompts-active', showNegativePrompt || showPrompt);
+ }
+}
+
+
+function extraNetworksUrelatedTabSelected(tabname) { // called from python when user selects an unrelated tab (generate)
+ extraNetworksMovePromptToTab(tabname, '', false, false);
+}
+
+function extraNetworksTabSelected(tabname, id, showPrompt, showNegativePrompt) { // called from python when user selects an extra networks tab
+ extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt);
+
}
function applyExtraNetworkFilter(tabname) {
setTimeout(extraNetworksApplyFilter[tabname], 1);
}
+function applyExtraNetworkSort(tabname) {
+ setTimeout(extraNetworksApplySort[tabname], 1);
+}
+
var extraNetworksApplyFilter = {};
+var extraNetworksApplySort = {};
var activePromptTextarea = {};
function setupExtraNetworks() {
@@ -113,19 +182,22 @@ function setupExtraNetworks() {
onUiLoaded(setupExtraNetworks);
-var re_extranet = /<([^:]+:[^:]+):[\d.]+>(.*)/;
-var re_extranet_g = /\s+<([^:]+:[^:]+):[\d.]+>/g;
+var re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/;
+var re_extranet_g = /<([^:^>]+:[^:]+):[\d.]+>/g;
-function tryToRemoveExtraNetworkFromPrompt(textarea, text) {
- var m = text.match(re_extranet);
+var re_extranet_neg = /\(([^:^>]+:[\d.]+)\)/;
+var re_extranet_g_neg = /\(([^:^>]+:[\d.]+)\)/g;
+function tryToRemoveExtraNetworkFromPrompt(textarea, text, isNeg) {
+ var m = text.match(isNeg ? re_extranet_neg : re_extranet);
var replaced = false;
var newTextareaText;
if (m) {
+ var extraTextBeforeNet = opts.extra_networks_add_text_separator;
var extraTextAfterNet = m[2];
var partToSearch = m[1];
var foundAtPosition = -1;
- newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found, net, pos) {
- m = found.match(re_extranet);
+ newTextareaText = textarea.value.replaceAll(isNeg ? re_extranet_g_neg : re_extranet_g, function(found, net, pos) {
+ m = found.match(isNeg ? re_extranet_neg : re_extranet);
if (m[1] == partToSearch) {
replaced = true;
foundAtPosition = pos;
@@ -134,8 +206,13 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text) {
return found;
});
- if (foundAtPosition >= 0 && newTextareaText.substr(foundAtPosition, extraTextAfterNet.length) == extraTextAfterNet) {
- newTextareaText = newTextareaText.substr(0, foundAtPosition) + newTextareaText.substr(foundAtPosition + extraTextAfterNet.length);
+ if (foundAtPosition >= 0) {
+ if (extraTextAfterNet && newTextareaText.substr(foundAtPosition, extraTextAfterNet.length) == extraTextAfterNet) {
+ newTextareaText = newTextareaText.substr(0, foundAtPosition) + newTextareaText.substr(foundAtPosition + extraTextAfterNet.length);
+ }
+ if (newTextareaText.substr(foundAtPosition - extraTextBeforeNet.length, extraTextBeforeNet.length) == extraTextBeforeNet) {
+ newTextareaText = newTextareaText.substr(0, foundAtPosition - extraTextBeforeNet.length) + newTextareaText.substr(foundAtPosition);
+ }
}
} else {
newTextareaText = textarea.value.replaceAll(new RegExp(text, "g"), function(found) {
@@ -155,14 +232,23 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text) {
return false;
}
-function cardClicked(tabname, textToAdd, allowNegativePrompt) {
- var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea");
+function updatePromptArea(text, textArea, isNeg) {
- if (!tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)) {
- textarea.value = textarea.value + opts.extra_networks_add_text_separator + textToAdd;
+ if (!tryToRemoveExtraNetworkFromPrompt(textArea, text, isNeg)) {
+ textArea.value = textArea.value + opts.extra_networks_add_text_separator + text;
}
- updateInput(textarea);
+ updateInput(textArea);
+}
+
+function cardClicked(tabname, textToAdd, textToAddNegative, allowNegativePrompt) {
+ if (textToAddNegative.length > 0) {
+ updatePromptArea(textToAdd, gradioApp().querySelector("#" + tabname + "_prompt > label > textarea"));
+ updatePromptArea(textToAddNegative, gradioApp().querySelector("#" + tabname + "_neg_prompt > label > textarea"), true);
+ } else {
+ var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea");
+ updatePromptArea(textToAdd, textarea);
+ }
}
function saveCardPreview(event, tabname, filename) {
@@ -179,7 +265,7 @@ function saveCardPreview(event, tabname, filename) {
}
function extraNetworksSearchButton(tabs_id, event) {
- var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea');
+ var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > label > textarea');
var button = event.target;
var text = button.classList.contains("search-all") ? "" : button.textContent.trim();
@@ -189,27 +275,24 @@ function extraNetworksSearchButton(tabs_id, event) {
var globalPopup = null;
var globalPopupInner = null;
+
function closePopup() {
if (!globalPopup) return;
-
globalPopup.style.display = "none";
}
+
function popup(contents) {
if (!globalPopup) {
globalPopup = document.createElement('div');
- globalPopup.onclick = closePopup;
globalPopup.classList.add('global-popup');
var close = document.createElement('div');
close.classList.add('global-popup-close');
- close.onclick = closePopup;
+ close.addEventListener("click", closePopup);
close.title = "Close";
globalPopup.appendChild(close);
globalPopupInner = document.createElement('div');
- globalPopupInner.onclick = function(event) {
- event.stopPropagation(); return false;
- };
globalPopupInner.classList.add('global-popup-inner');
globalPopup.appendChild(globalPopupInner);
@@ -222,6 +305,15 @@ function popup(contents) {
globalPopup.style.display = "flex";
}
+var storedPopupIds = {};
+function popupId(id) {
+ if (!storedPopupIds[id]) {
+ storedPopupIds[id] = gradioApp().getElementById(id);
+ }
+
+ popup(storedPopupIds[id]);
+}
+
function extraNetworksShowMetadata(text) {
var elem = document.createElement('pre');
elem.classList.add('popup-metadata');
@@ -299,15 +391,21 @@ function extraNetworksEditUserMetadata(event, tabname, extraPage, cardName) {
function extraNetworksRefreshSingleCard(page, tabname, name) {
requestGet("./sd_extra_networks/get-single-card", {page: page, tabname: tabname, name: name}, function(data) {
if (data && data.html) {
- var card = gradioApp().querySelector('.card[data-name=' + JSON.stringify(name) + ']'); // likely using the wrong stringify function
+ var card = gradioApp().querySelector(`#${tabname}_${page.replace(" ", "_")}_cards > .card[data-name="${name}"]`);
var newDiv = document.createElement('DIV');
newDiv.innerHTML = data.html;
var newCard = newDiv.firstElementChild;
- newCard.style = '';
+ newCard.style.display = '';
card.parentElement.insertBefore(newCard, card);
card.parentElement.removeChild(card);
}
});
}
+
+window.addEventListener("keydown", function(event) {
+ if (event.key == "Escape") {
+ closePopup();
+ }
+});
diff --git a/javascript/hints.js b/javascript/hints.js
index 4167cb28..6de9372e 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -190,3 +190,14 @@ onUiUpdate(function(mutationRecords) {
tooltipCheckTimer = setTimeout(processTooltipCheckNodes, 1000);
}
});
+
+onUiLoaded(function() {
+ for (var comp of window.gradio_config.components) {
+ if (comp.props.webui_tooltip && comp.props.elem_id) {
+ var elem = gradioApp().getElementById(comp.props.elem_id);
+ if (elem) {
+ elem.title = comp.props.webui_tooltip;
+ }
+ }
+ }
+});
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 677e95c1..625c5d14 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -33,8 +33,11 @@ function updateOnBackgroundChange() {
const modalImage = gradioApp().getElementById("modalImage");
if (modalImage && modalImage.offsetParent) {
let currentButton = selected_gallery_button();
-
- if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) {
+ let preview = gradioApp().querySelectorAll('.livePreview > img');
+ if (opts.js_live_preview_in_modal_lightbox && preview.length > 0) {
+ // show preview image if available
+ modalImage.src = preview[preview.length - 1].src;
+ } else if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) {
modalImage.src = currentButton.children[0].src;
if (modalImage.style.display === 'none') {
const modal = gradioApp().getElementById("lightboxModal");
@@ -136,6 +139,11 @@ function setupImageForLightbox(e) {
var event = isFirefox ? 'mousedown' : 'click';
e.addEventListener(event, function(evt) {
+ if (evt.button == 1) {
+ open(evt.target.src);
+ evt.preventDefault();
+ return;
+ }
if (!opts.js_modal_lightbox || evt.button != 0) return;
modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed);
diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js
new file mode 100644
index 00000000..7570309a
--- /dev/null
+++ b/javascript/inputAccordion.js
@@ -0,0 +1,68 @@
+function inputAccordionChecked(id, checked) {
+ var accordion = gradioApp().getElementById(id);
+ accordion.visibleCheckbox.checked = checked;
+ accordion.onVisibleCheckboxChange();
+}
+
+function setupAccordion(accordion) {
+ var labelWrap = accordion.querySelector('.label-wrap');
+ var gradioCheckbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input");
+ var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
+ var span = labelWrap.querySelector('span');
+ var linked = true;
+
+ var isOpen = function() {
+ return labelWrap.classList.contains('open');
+ };
+
+ var observerAccordionOpen = new MutationObserver(function(mutations) {
+ mutations.forEach(function(mutationRecord) {
+ accordion.classList.toggle('input-accordion-open', isOpen());
+
+ if (linked) {
+ accordion.visibleCheckbox.checked = isOpen();
+ accordion.onVisibleCheckboxChange();
+ }
+ });
+ });
+ observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']});
+
+ if (extra) {
+ labelWrap.insertBefore(extra, labelWrap.lastElementChild);
+ }
+
+ accordion.onChecked = function(checked) {
+ if (isOpen() != checked) {
+ labelWrap.click();
+ }
+ };
+
+ var visibleCheckbox = document.createElement('INPUT');
+ visibleCheckbox.type = 'checkbox';
+ visibleCheckbox.checked = isOpen();
+ visibleCheckbox.id = accordion.id + "-visible-checkbox";
+ visibleCheckbox.className = gradioCheckbox.className + " input-accordion-checkbox";
+ span.insertBefore(visibleCheckbox, span.firstChild);
+
+ accordion.visibleCheckbox = visibleCheckbox;
+ accordion.onVisibleCheckboxChange = function() {
+ if (linked && isOpen() != visibleCheckbox.checked) {
+ labelWrap.click();
+ }
+
+ gradioCheckbox.checked = visibleCheckbox.checked;
+ updateInput(gradioCheckbox);
+ };
+
+ visibleCheckbox.addEventListener('click', function(event) {
+ linked = false;
+ event.stopPropagation();
+ });
+ visibleCheckbox.addEventListener('input', accordion.onVisibleCheckboxChange);
+}
+
+onUiLoaded(function() {
+ for (var accordion of gradioApp().querySelectorAll('.input-accordion')) {
+ setupAccordion(accordion);
+ }
+});
diff --git a/javascript/localStorage.js b/javascript/localStorage.js
new file mode 100644
index 00000000..dc1a36c3
--- /dev/null
+++ b/javascript/localStorage.js
@@ -0,0 +1,26 @@
+
+function localSet(k, v) {
+ try {
+ localStorage.setItem(k, v);
+ } catch (e) {
+ console.warn(`Failed to save ${k} to localStorage: ${e}`);
+ }
+}
+
+function localGet(k, def) {
+ try {
+ return localStorage.getItem(k);
+ } catch (e) {
+ console.warn(`Failed to load ${k} from localStorage: ${e}`);
+ }
+
+ return def;
+}
+
+function localRemove(k) {
+ try {
+ return localStorage.removeItem(k);
+ } catch (e) {
+ console.warn(`Failed to remove ${k} from localStorage: ${e}`);
+ }
+}
diff --git a/javascript/localization.js b/javascript/localization.js
index eb22b8a7..8f00c186 100644
--- a/javascript/localization.js
+++ b/javascript/localization.js
@@ -11,11 +11,11 @@ var ignore_ids_for_localization = {
train_hypernetwork: 'OPTION',
txt2img_styles: 'OPTION',
img2img_styles: 'OPTION',
- setting_random_artist_categories: 'SPAN',
- setting_face_restoration_model: 'SPAN',
- setting_realesrgan_enabled_models: 'SPAN',
- extras_upscaler_1: 'SPAN',
- extras_upscaler_2: 'SPAN',
+ setting_random_artist_categories: 'OPTION',
+ setting_face_restoration_model: 'OPTION',
+ setting_realesrgan_enabled_models: 'OPTION',
+ extras_upscaler_1: 'OPTION',
+ extras_upscaler_2: 'OPTION',
};
var re_num = /^[.\d]+$/;
@@ -107,12 +107,41 @@ function processNode(node) {
});
}
+function localizeWholePage() {
+ processNode(gradioApp());
+
+ function elem(comp) {
+ var elem_id = comp.props.elem_id ? comp.props.elem_id : "component-" + comp.id;
+ return gradioApp().getElementById(elem_id);
+ }
+
+ for (var comp of window.gradio_config.components) {
+ if (comp.props.webui_tooltip) {
+ let e = elem(comp);
+
+ let tl = e ? getTranslation(e.title) : undefined;
+ if (tl !== undefined) {
+ e.title = tl;
+ }
+ }
+ if (comp.props.placeholder) {
+ let e = elem(comp);
+ let textbox = e ? e.querySelector('[placeholder]') : null;
+
+ let tl = textbox ? getTranslation(textbox.placeholder) : undefined;
+ if (tl !== undefined) {
+ textbox.placeholder = tl;
+ }
+ }
+ }
+}
+
function dumpTranslations() {
if (!hasLocalization()) {
// If we don't have any localization,
// we will not have traversed the app to find
// original_lines, so do that now.
- processNode(gradioApp());
+ localizeWholePage();
}
var dumped = {};
if (localization.rtl) {
@@ -154,7 +183,7 @@ document.addEventListener("DOMContentLoaded", function() {
});
});
- processNode(gradioApp());
+ localizeWholePage();
if (localization.rtl) { // if the language is from right to left,
(new MutationObserver((mutations, observer) => { // wait for the style to load
diff --git a/javascript/notification.js b/javascript/notification.js
index 76c5715d..3ee972ae 100644
--- a/javascript/notification.js
+++ b/javascript/notification.js
@@ -15,7 +15,7 @@ onAfterUiUpdate(function() {
}
}
- const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"][style*="display: block"] div[id$="_results"] .thumbnail-item > img');
+ const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"] div[id$="_results"] .thumbnail-item > img');
if (galleryPreviews == null) return;
@@ -26,7 +26,11 @@ onAfterUiUpdate(function() {
lastHeadImg = headImg;
// play notification sound if available
- gradioApp().querySelector('#audio_notification audio')?.play();
+ const notificationAudio = gradioApp().querySelector('#audio_notification audio');
+ if (notificationAudio) {
+ notificationAudio.volume = opts.notification_volume / 100.0 || 1.0;
+ notificationAudio.play();
+ }
if (document.hasFocus()) return;
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index 29299787..77761495 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -69,7 +69,6 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
var dateStart = new Date();
var wasEverActive = false;
var parentProgressbar = progressbarContainer.parentNode;
- var parentGallery = gallery ? gallery.parentNode : null;
var divProgress = document.createElement('div');
divProgress.className = 'progressDiv';
@@ -80,32 +79,26 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
divProgress.appendChild(divInner);
parentProgressbar.insertBefore(divProgress, progressbarContainer);
- if (parentGallery) {
- var livePreview = document.createElement('div');
- livePreview.className = 'livePreview';
- parentGallery.insertBefore(livePreview, gallery);
- }
+ var livePreview = null;
var removeProgressBar = function() {
+ if (!divProgress) return;
+
setTitle("");
parentProgressbar.removeChild(divProgress);
- if (parentGallery) parentGallery.removeChild(livePreview);
+ if (gallery && livePreview) gallery.removeChild(livePreview);
atEnd();
+
+ divProgress = null;
};
- var fun = function(id_task, id_live_preview) {
- request("./internal/progress", {id_task: id_task, id_live_preview: id_live_preview}, function(res) {
+ var funProgress = function(id_task) {
+ request("./internal/progress", {id_task: id_task, live_preview: false}, function(res) {
if (res.completed) {
removeProgressBar();
return;
}
- var rect = progressbarContainer.getBoundingClientRect();
-
- if (rect.width) {
- divProgress.style.width = rect.width + "px";
- }
-
let progressText = "";
divInner.style.width = ((res.progress || 0) * 100.0) + '%';
@@ -119,7 +112,6 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
progressText += " ETA: " + formatTime(res.eta);
}
-
setTitle(progressText);
if (res.textinfo && res.textinfo.indexOf("\n") == -1) {
@@ -142,16 +134,33 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
return;
}
+ if (onProgress) {
+ onProgress(res);
+ }
- if (res.live_preview && gallery) {
- rect = gallery.getBoundingClientRect();
- if (rect.width) {
- livePreview.style.width = rect.width + "px";
- livePreview.style.height = rect.height + "px";
- }
+ setTimeout(() => {
+ funProgress(id_task, res.id_live_preview);
+ }, opts.live_preview_refresh_period || 500);
+ }, function() {
+ removeProgressBar();
+ });
+ };
+ var funLivePreview = function(id_task, id_live_preview) {
+ request("./internal/progress", {id_task: id_task, id_live_preview: id_live_preview}, function(res) {
+ if (!divProgress) {
+ return;
+ }
+
+ if (res.live_preview && gallery) {
var img = new Image();
img.onload = function() {
+ if (!livePreview) {
+ livePreview = document.createElement('div');
+ livePreview.className = 'livePreview';
+ gallery.insertBefore(livePreview, gallery.firstElementChild);
+ }
+
livePreview.appendChild(img);
if (livePreview.childElementCount > 2) {
livePreview.removeChild(livePreview.firstElementChild);
@@ -160,18 +169,18 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
img.src = res.live_preview;
}
-
- if (onProgress) {
- onProgress(res);
- }
-
setTimeout(() => {
- fun(id_task, res.id_live_preview);
+ funLivePreview(id_task, res.id_live_preview);
}, opts.live_preview_refresh_period || 500);
}, function() {
removeProgressBar();
});
};
- fun(id_task, 0);
+ funProgress(id_task, 0);
+
+ if (gallery) {
+ funLivePreview(id_task, 0);
+ }
+
}
diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js
new file mode 100644
index 00000000..8c5c5169
--- /dev/null
+++ b/javascript/resizeHandle.js
@@ -0,0 +1,141 @@
+(function() {
+ const GRADIO_MIN_WIDTH = 320;
+ const GRID_TEMPLATE_COLUMNS = '1fr 16px 1fr';
+ const PAD = 16;
+ const DEBOUNCE_TIME = 100;
+
+ const R = {
+ tracking: false,
+ parent: null,
+ parentWidth: null,
+ leftCol: null,
+ leftColStartWidth: null,
+ screenX: null,
+ };
+
+ let resizeTimer;
+ let parents = [];
+
+ function setLeftColGridTemplate(el, width) {
+ el.style.gridTemplateColumns = `${width}px 16px 1fr`;
+ }
+
+ function displayResizeHandle(parent) {
+ if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) {
+ parent.style.display = 'flex';
+ if (R.handle != null) {
+ R.handle.style.opacity = '0';
+ }
+ return false;
+ } else {
+ parent.style.display = 'grid';
+ if (R.handle != null) {
+ R.handle.style.opacity = '100';
+ }
+ return true;
+ }
+ }
+
+ function afterResize(parent) {
+ if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != GRID_TEMPLATE_COLUMNS) {
+ const oldParentWidth = R.parentWidth;
+ const newParentWidth = parent.offsetWidth;
+ const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]);
+
+ const ratio = newParentWidth / oldParentWidth;
+
+ const newWidthL = Math.max(Math.floor(ratio * widthL), GRADIO_MIN_WIDTH);
+ setLeftColGridTemplate(parent, newWidthL);
+
+ R.parentWidth = newParentWidth;
+ }
+ }
+
+ function setup(parent) {
+ const leftCol = parent.firstElementChild;
+ const rightCol = parent.lastElementChild;
+
+ parents.push(parent);
+
+ parent.style.display = 'grid';
+ parent.style.gap = '0';
+ parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS;
+
+ const resizeHandle = document.createElement('div');
+ resizeHandle.classList.add('resize-handle');
+ parent.insertBefore(resizeHandle, rightCol);
+
+ resizeHandle.addEventListener('mousedown', (evt) => {
+ if (evt.button !== 0) return;
+
+ evt.preventDefault();
+ evt.stopPropagation();
+
+ document.body.classList.add('resizing');
+
+ R.tracking = true;
+ R.parent = parent;
+ R.parentWidth = parent.offsetWidth;
+ R.handle = resizeHandle;
+ R.leftCol = leftCol;
+ R.leftColStartWidth = leftCol.offsetWidth;
+ R.screenX = evt.screenX;
+ });
+
+ resizeHandle.addEventListener('dblclick', (evt) => {
+ evt.preventDefault();
+ evt.stopPropagation();
+
+ parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS;
+ });
+
+ afterResize(parent);
+ }
+
+ window.addEventListener('mousemove', (evt) => {
+ if (evt.button !== 0) return;
+
+ if (R.tracking) {
+ evt.preventDefault();
+ evt.stopPropagation();
+
+ const delta = R.screenX - evt.screenX;
+ const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH);
+ setLeftColGridTemplate(R.parent, leftColWidth);
+ }
+ });
+
+ window.addEventListener('mouseup', (evt) => {
+ if (evt.button !== 0) return;
+
+ if (R.tracking) {
+ evt.preventDefault();
+ evt.stopPropagation();
+
+ R.tracking = false;
+
+ document.body.classList.remove('resizing');
+ }
+ });
+
+
+ window.addEventListener('resize', () => {
+ clearTimeout(resizeTimer);
+
+ resizeTimer = setTimeout(function() {
+ for (const parent of parents) {
+ afterResize(parent);
+ }
+ }, DEBOUNCE_TIME);
+ });
+
+ setupResizeHandle = setup;
+})();
+
+onUiLoaded(function() {
+ for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) {
+ if (!elem.querySelector('.resize-handle')) {
+ setupResizeHandle(elem);
+ }
+ }
+});
diff --git a/javascript/settings.js b/javascript/settings.js
new file mode 100644
index 00000000..e6009290
--- /dev/null
+++ b/javascript/settings.js
@@ -0,0 +1,71 @@
+let settingsExcludeTabsFromShowAll = {
+ settings_tab_defaults: 1,
+ settings_tab_sysinfo: 1,
+ settings_tab_actions: 1,
+ settings_tab_licenses: 1,
+};
+
+function settingsShowAllTabs() {
+ gradioApp().querySelectorAll('#settings > div').forEach(function(elem) {
+ if (settingsExcludeTabsFromShowAll[elem.id]) return;
+
+ elem.style.display = "block";
+ });
+}
+
+function settingsShowOneTab() {
+ gradioApp().querySelector('#settings_show_one_page').click();
+}
+
+onUiLoaded(function() {
+ var edit = gradioApp().querySelector('#settings_search');
+ var editTextarea = gradioApp().querySelector('#settings_search > label > input');
+ var buttonShowAllPages = gradioApp().getElementById('settings_show_all_pages');
+ var settings_tabs = gradioApp().querySelector('#settings div');
+
+ onEdit('settingsSearch', editTextarea, 250, function() {
+ var searchText = (editTextarea.value || "").trim().toLowerCase();
+
+ gradioApp().querySelectorAll('#settings > div[id^=settings_] div[id^=column_settings_] > *').forEach(function(elem) {
+ var visible = elem.textContent.trim().toLowerCase().indexOf(searchText) != -1;
+ elem.style.display = visible ? "" : "none";
+ });
+
+ if (searchText != "") {
+ settingsShowAllTabs();
+ } else {
+ settingsShowOneTab();
+ }
+ });
+
+ settings_tabs.insertBefore(edit, settings_tabs.firstChild);
+ settings_tabs.appendChild(buttonShowAllPages);
+
+
+ buttonShowAllPages.addEventListener("click", settingsShowAllTabs);
+});
+
+
+onOptionsChanged(function() {
+ if (gradioApp().querySelector('#settings .settings-category')) return;
+
+ var sectionMap = {};
+ gradioApp().querySelectorAll('#settings > div > button').forEach(function(x) {
+ sectionMap[x.textContent.trim()] = x;
+ });
+
+ opts._categories.forEach(function(x) {
+ var section = x[0];
+ var category = x[1];
+
+ var span = document.createElement('SPAN');
+ span.textContent = category;
+ span.className = 'settings-category';
+
+ var sectionElem = sectionMap[section];
+ if (!sectionElem) return;
+
+ sectionElem.parentElement.insertBefore(span, sectionElem);
+ });
+});
+
diff --git a/javascript/token-counters.js b/javascript/token-counters.js
index 9d81a723..2ecc7d91 100644
--- a/javascript/token-counters.js
+++ b/javascript/token-counters.js
@@ -1,10 +1,9 @@
-let promptTokenCountDebounceTime = 800;
-let promptTokenCountTimeouts = {};
-var promptTokenCountUpdateFunctions = {};
+let promptTokenCountUpdateFunctions = {};
function update_txt2img_tokens(...args) {
// Called from Gradio
update_token_counter("txt2img_token_button");
+ update_token_counter("txt2img_negative_token_button");
if (args.length == 2) {
return args[0];
}
@@ -14,6 +13,7 @@ function update_txt2img_tokens(...args) {
function update_img2img_tokens(...args) {
// Called from Gradio
update_token_counter("img2img_token_button");
+ update_token_counter("img2img_negative_token_button");
if (args.length == 2) {
return args[0];
}
@@ -21,16 +21,7 @@ function update_img2img_tokens(...args) {
}
function update_token_counter(button_id) {
- if (opts.disable_token_counters) {
- return;
- }
- if (promptTokenCountTimeouts[button_id]) {
- clearTimeout(promptTokenCountTimeouts[button_id]);
- }
- promptTokenCountTimeouts[button_id] = setTimeout(
- () => gradioApp().getElementById(button_id)?.click(),
- promptTokenCountDebounceTime,
- );
+ promptTokenCountUpdateFunctions[button_id]?.();
}
@@ -69,10 +60,11 @@ function setupTokenCounting(id, id_counter, id_button) {
prompt.parentElement.insertBefore(counter, prompt);
prompt.parentElement.style.position = "relative";
- promptTokenCountUpdateFunctions[id] = function() {
- update_token_counter(id_button);
- };
- textarea.addEventListener("input", promptTokenCountUpdateFunctions[id]);
+ var func = onEdit(id, textarea, 800, function() {
+ gradioApp().getElementById(id_button)?.click();
+ });
+ promptTokenCountUpdateFunctions[id] = func;
+ promptTokenCountUpdateFunctions[id_button] = func;
}
function setupTokenCounters() {
diff --git a/javascript/ui.js b/javascript/ui.js
index d70a681b..18c9f891 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -19,28 +19,11 @@ function all_gallery_buttons() {
}
function selected_gallery_button() {
- var allCurrentButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnail-item.thumbnail-small.selected');
- var visibleCurrentButton = null;
- allCurrentButtons.forEach(function(elem) {
- if (elem.parentElement.offsetParent) {
- visibleCurrentButton = elem;
- }
- });
- return visibleCurrentButton;
+ return all_gallery_buttons().find(elem => elem.classList.contains('selected')) ?? null;
}
function selected_gallery_index() {
- var buttons = all_gallery_buttons();
- var button = selected_gallery_button();
-
- var result = -1;
- buttons.forEach(function(v, i) {
- if (v == button) {
- result = i;
- }
- });
-
- return result;
+ return all_gallery_buttons().findIndex(elem => elem.classList.contains('selected'));
}
function extract_image_from_gallery(gallery) {
@@ -152,11 +135,11 @@ function submit() {
showSubmitButtons('txt2img', false);
var id = randomId();
- localStorage.setItem("txt2img_task_id", id);
+ localSet("txt2img_task_id", id);
requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
showSubmitButtons('txt2img', true);
- localStorage.removeItem("txt2img_task_id");
+ localRemove("txt2img_task_id");
showRestoreProgressButton('txt2img', false);
});
@@ -171,11 +154,11 @@ function submit_img2img() {
showSubmitButtons('img2img', false);
var id = randomId();
- localStorage.setItem("img2img_task_id", id);
+ localSet("img2img_task_id", id);
requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
showSubmitButtons('img2img', true);
- localStorage.removeItem("img2img_task_id");
+ localRemove("img2img_task_id");
showRestoreProgressButton('img2img', false);
});
@@ -187,11 +170,26 @@ function submit_img2img() {
return res;
}
+function submit_extras() {
+ showSubmitButtons('extras', false);
+
+ var id = randomId();
+
+ requestProgress(id, gradioApp().getElementById('extras_gallery_container'), gradioApp().getElementById('extras_gallery'), function() {
+ showSubmitButtons('extras', true);
+ });
+
+ var res = create_submit_args(arguments);
+
+ res[0] = id;
+
+ console.log(res);
+ return res;
+}
+
function restoreProgressTxt2img() {
showRestoreProgressButton("txt2img", false);
- var id = localStorage.getItem("txt2img_task_id");
-
- id = localStorage.getItem("txt2img_task_id");
+ var id = localGet("txt2img_task_id");
if (id) {
requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
@@ -205,7 +203,7 @@ function restoreProgressTxt2img() {
function restoreProgressImg2img() {
showRestoreProgressButton("img2img", false);
- var id = localStorage.getItem("img2img_task_id");
+ var id = localGet("img2img_task_id");
if (id) {
requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
@@ -217,9 +215,33 @@ function restoreProgressImg2img() {
}
+/**
+ * Configure the width and height elements on `tabname` to accept
+ * pasting of resolutions in the form of "width x height".
+ */
+function setupResolutionPasting(tabname) {
+ var width = gradioApp().querySelector(`#${tabname}_width input[type=number]`);
+ var height = gradioApp().querySelector(`#${tabname}_height input[type=number]`);
+ for (const el of [width, height]) {
+ el.addEventListener('paste', function(event) {
+ var pasteData = event.clipboardData.getData('text/plain');
+ var parsed = pasteData.match(/^\s*(\d+)\D+(\d+)\s*$/);
+ if (parsed) {
+ width.value = parsed[1];
+ height.value = parsed[2];
+ updateInput(width);
+ updateInput(height);
+ event.preventDefault();
+ }
+ });
+ }
+}
+
onUiLoaded(function() {
- showRestoreProgressButton('txt2img', localStorage.getItem("txt2img_task_id"));
- showRestoreProgressButton('img2img', localStorage.getItem("img2img_task_id"));
+ showRestoreProgressButton('txt2img', localGet("txt2img_task_id"));
+ showRestoreProgressButton('img2img', localGet("img2img_task_id"));
+ setupResolutionPasting('txt2img');
+ setupResolutionPasting('img2img');
});
@@ -282,21 +304,6 @@ onAfterUiUpdate(function() {
json_elem.parentElement.style.display = "none";
setupTokenCounters();
-
- var show_all_pages = gradioApp().getElementById('settings_show_all_pages');
- var settings_tabs = gradioApp().querySelector('#settings div');
- if (show_all_pages && settings_tabs) {
- settings_tabs.appendChild(show_all_pages);
- show_all_pages.onclick = function() {
- gradioApp().querySelectorAll('#settings > div').forEach(function(elem) {
- if (elem.id == "settings_tab_licenses") {
- return;
- }
-
- elem.style.display = "block";
- });
- };
- }
});
onOptionsChanged(function() {
@@ -385,3 +392,20 @@ function switchWidthHeight(tabname) {
updateInput(height);
return [];
}
+
+
+var onEditTimers = {};
+
+// calls func after afterMs milliseconds have passed since the input elem was last edited by the user
+function onEdit(editId, elem, afterMs, func) {
+ var edited = function() {
+ var existingTimer = onEditTimers[editId];
+ if (existingTimer) clearTimeout(existingTimer);
+
+ onEditTimers[editId] = setTimeout(func, afterMs);
+ };
+
+ elem.addEventListener("input", edited);
+
+ return edited;
+}
diff --git a/launch.py b/launch.py
index 1dbc4c6e..f83820d2 100644
--- a/launch.py
+++ b/launch.py
@@ -1,6 +1,5 @@
from modules import launch_utils
-
args = launch_utils.args
python = launch_utils.python
git = launch_utils.git
@@ -26,8 +25,18 @@ start = launch_utils.start
def main():
- if not args.skip_prepare_environment:
- prepare_environment()
+ if args.dump_sysinfo:
+ filename = launch_utils.dump_sysinfo()
+
+ print(f"Sysinfo saved as {filename}. Exiting...")
+
+ exit(0)
+
+ launch_utils.startup_timer.record("initial startup")
+
+ with launch_utils.startup_timer.subcategory("prepare environment"):
+ if not args.skip_prepare_environment:
+ prepare_environment()
if args.test_server:
configure_for_tests()
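
Note on the launch.py change above: environment preparation is now wrapped in launch_utils.startup_timer.subcategory(...), so individual steps appear grouped in the startup log. The snippet below is only a rough sketch of how such a nested timer could work; the class and attribute names are invented here for illustration and the real launch_utils / modules/timer.py implementation may differ.

    import time
    from contextlib import contextmanager

    class StartupTimer:
        """Hypothetical stand-in for launch_utils.startup_timer (illustration only)."""
        def __init__(self):
            self.start = time.time()
            self.records = {}
            self.prefix = ""      # current subcategory prefix, e.g. "prepare environment/"

        def record(self, label):
            now = time.time()
            self.records[self.prefix + label] = now - self.start
            self.start = now

        @contextmanager
        def subcategory(self, name):
            saved = self.prefix
            self.prefix = saved + name + "/"
            try:
                yield
            finally:
                self.prefix = saved

    timer = StartupTimer()
    timer.record("initial startup")
    with timer.subcategory("prepare environment"):
        timer.record("install requirements")   # recorded as "prepare environment/install requirements"
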
diff --git a/modules/api/api.py b/modules/api/api.py
index 606db179..0e2807de 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -4,6 +4,8 @@ import os
import time
import datetime
import uvicorn
+import ipaddress
+import requests
import gradio as gr
from threading import Lock
from io import BytesIO
@@ -15,24 +17,21 @@ from fastapi.encoders import jsonable_encoder
from secrets import compare_digest
import modules.shared as shared
-from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart
+from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, infotext, sd_models
from modules.api import models
from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
-from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
-from PIL import PngImagePlugin,Image
-from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights, checkpoint_aliases
-from modules.sd_vae import vae_dict
+from PIL import PngImagePlugin, Image
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
-from typing import Dict, List, Any
+from typing import Any
import piexif
import piexif.helper
from contextlib import closing
-
+from modules.progress import create_task_id, add_task_to_queue, start_task, finish_task, current_task
def script_name_to_index(name, scripts):
try:
@@ -56,7 +55,41 @@ def setUpscalers(req: dict):
return reqDict
+def verify_url(url):
+ """Returns True if the url refers to a global resource."""
+
+ import socket
+ from urllib.parse import urlparse
+ try:
+ parsed_url = urlparse(url)
+ domain_name = parsed_url.netloc
+ host = socket.gethostbyname_ex(domain_name)
+ for ip in host[2]:
+ ip_addr = ipaddress.ip_address(ip)
+ if not ip_addr.is_global:
+ return False
+ except Exception:
+ return False
+
+ return True
+
+
def decode_base64_to_image(encoding):
+ if encoding.startswith("http://") or encoding.startswith("https://"):
+ if not opts.api_enable_requests:
+ raise HTTPException(status_code=500, detail="Requests not allowed")
+
+ if opts.api_forbid_local_requests and not verify_url(encoding):
+ raise HTTPException(status_code=500, detail="Request to local resource not allowed")
+
+ headers = {'user-agent': opts.api_useragent} if opts.api_useragent else {}
+ response = requests.get(encoding, timeout=30, headers=headers)
+ try:
+ image = Image.open(BytesIO(response.content))
+ return image
+ except Exception as e:
+ raise HTTPException(status_code=500, detail="Invalid image url") from e
+
if encoding.startswith("data:image/"):
encoding = encoding.split(";")[1].split(",")[1]
try:
@@ -68,7 +101,8 @@ def decode_base64_to_image(encoding):
def encode_pil_to_base64(image):
with io.BytesIO() as output_bytes:
-
+ if isinstance(image, str):
+ return image
if opts.samples_format.lower() == 'png':
use_metadata = False
metadata = PngImagePlugin.PngInfo()
@@ -186,27 +220,28 @@ class Api:
self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel)
self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
- self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem])
- self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem])
- self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=List[models.LatentUpscalerModeItem])
- self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem])
- self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=List[models.SDVaeItem])
- self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem])
- self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem])
- self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem])
- self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem])
+ self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=list[models.SamplerItem])
+ self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=list[models.UpscalerItem])
+ self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=list[models.LatentUpscalerModeItem])
+ self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=list[models.SDModelItem])
+ self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=list[models.SDVaeItem])
+ self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=list[models.HypernetworkItem])
+ self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=list[models.FaceRestorerItem])
+ self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=list[models.RealesrganItem])
+ self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=list[models.PromptStyleItem])
self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
+ self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"])
self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
- self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList)
- self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo])
+ self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=list[models.ScriptInfo])
+ self.add_api_route("/sdapi/v1/extensions", self.get_extensions_list, methods=["GET"], response_model=list[models.ExtensionItem])
if shared.cmd_opts.api_server_stop:
self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"])
@@ -216,6 +251,24 @@ class Api:
self.default_script_arg_txt2img = []
self.default_script_arg_img2img = []
+ txt2img_script_runner = scripts.scripts_txt2img
+ img2img_script_runner = scripts.scripts_img2img
+
+ if not txt2img_script_runner.scripts or not img2img_script_runner.scripts:
+ ui.create_ui()
+
+ if not txt2img_script_runner.scripts:
+ txt2img_script_runner.initialize_scripts(False)
+ if not self.default_script_arg_txt2img:
+ self.default_script_arg_txt2img = self.init_default_script_args(txt2img_script_runner)
+
+ if not img2img_script_runner.scripts:
+ img2img_script_runner.initialize_scripts(True)
+ if not self.default_script_arg_img2img:
+ self.default_script_arg_img2img = self.init_default_script_args(img2img_script_runner)
+
+
+
def add_api_route(self, path: str, endpoint, **kwargs):
if shared.cmd_opts.api_auth:
return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs)
@@ -277,8 +330,13 @@ class Api:
script_args[script.args_from:script.args_to] = ui_default_values
return script_args
- def init_script_args(self, request, default_script_args, selectable_scripts, selectable_idx, script_runner):
+ def init_script_args(self, request, default_script_args, selectable_scripts, selectable_idx, script_runner, *, input_script_args=None):
script_args = default_script_args.copy()
+
+ if input_script_args is not None:
+ for index, value in input_script_args.items():
+ script_args[index] = value
+
# position 0 in script_arg is the idx+1 of the selectable script that is going to be run when using scripts.scripts_*2img.run()
if selectable_scripts:
script_args[selectable_scripts.args_from:selectable_scripts.args_to] = request.script_args
@@ -300,13 +358,83 @@ class Api:
script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx]
return script_args
+ def apply_infotext(self, request, tabname, *, script_runner=None, mentioned_script_args=None):
+ """Processes the `infotext` field from the `request`, and sets other fields of the `request` according to what's in the infotext.
+
+ If the request already has a field set, and that field is also found in the infotext, the value from the infotext is ignored.
+
+ Additionally, fills the `mentioned_script_args` dict with index: value pairs for script arguments read from the infotext.
+ """
+
+ if not request.infotext:
+ return {}
+
+ possible_fields = infotext.paste_fields[tabname]["fields"]
+ set_fields = request.model_dump(exclude_unset=True) if hasattr(request, "request") else request.dict(exclude_unset=True) # pydantic v1/v2 have different names for this
+ params = infotext.parse_generation_parameters(request.infotext)
+
+ def get_field_value(field, params):
+ value = field.function(params) if field.function else params.get(field.label)
+ if value is None:
+ return None
+
+ if field.api in request.__fields__:
+ target_type = request.__fields__[field.api].type_
+ else:
+ target_type = type(field.component.value)
+
+ if target_type == type(None):
+ return None
+
+ if isinstance(value, dict) and value.get('__type__') == 'generic_update': # this is a gradio.update rather than a value
+ value = value.get('value')
+
+ if value is not None and not isinstance(value, target_type):
+ value = target_type(value)
+
+ return value
+
+ for field in possible_fields:
+ if not field.api:
+ continue
+
+ if field.api in set_fields:
+ continue
+
+ value = get_field_value(field, params)
+ if value is not None:
+ setattr(request, field.api, value)
+
+ if request.override_settings is None:
+ request.override_settings = {}
+
+ overriden_settings = infotext.get_override_settings(params)
+ for _, setting_name, value in overriden_settings:
+ if setting_name not in request.override_settings:
+ request.override_settings[setting_name] = value
+
+ if script_runner is not None and mentioned_script_args is not None:
+ indexes = {v: i for i, v in enumerate(script_runner.inputs)}
+ script_fields = ((field, indexes[field.component]) for field in possible_fields if field.component in indexes)
+
+ for field, index in script_fields:
+ value = get_field_value(field, params)
+
+ if value is None:
+ continue
+
+ mentioned_script_args[index] = value
+
+ return params
+
def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
+ task_id = txt2imgreq.force_task_id or create_task_id("txt2img")
+
script_runner = scripts.scripts_txt2img
- if not script_runner.scripts:
- script_runner.initialize_scripts(False)
- ui.create_ui()
- if not self.default_script_arg_txt2img:
- self.default_script_arg_txt2img = self.init_default_script_args(script_runner)
+
+ infotext_script_args = {}
+ self.apply_infotext(txt2imgreq, "txt2img", script_runner=script_runner, mentioned_script_args=infotext_script_args)
+
selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner)
populate = txt2imgreq.copy(update={ # Override __init__ params
@@ -321,34 +449,43 @@ class Api:
args.pop('script_name', None)
args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
args.pop('alwayson_scripts', None)
+ args.pop('infotext', None)
- script_args = self.init_script_args(txt2imgreq, self.default_script_arg_txt2img, selectable_scripts, selectable_script_idx, script_runner)
+ script_args = self.init_script_args(txt2imgreq, self.default_script_arg_txt2img, selectable_scripts, selectable_script_idx, script_runner, input_script_args=infotext_script_args)
send_images = args.pop('send_images', True)
args.pop('save_images', None)
+ add_task_to_queue(task_id)
+
with self.queue_lock:
with closing(StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)) as p:
+ p.is_api = True
p.scripts = script_runner
p.outpath_grids = opts.outdir_txt2img_grids
p.outpath_samples = opts.outdir_txt2img_samples
try:
shared.state.begin(job="scripts_txt2img")
+ start_task(task_id)
if selectable_scripts is not None:
p.script_args = script_args
processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
else:
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p)
+ finish_task(task_id)
finally:
shared.state.end()
+ shared.total_tqdm.clear()
b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
+ task_id = img2imgreq.force_task_id or create_task_id("img2img")
+
init_images = img2imgreq.init_images
if init_images is None:
raise HTTPException(status_code=404, detail="Init image not found")
@@ -358,11 +495,10 @@ class Api:
mask = decode_base64_to_image(mask)
script_runner = scripts.scripts_img2img
- if not script_runner.scripts:
- script_runner.initialize_scripts(True)
- ui.create_ui()
- if not self.default_script_arg_img2img:
- self.default_script_arg_img2img = self.init_default_script_args(script_runner)
+
+ infotext_script_args = {}
+ self.apply_infotext(img2imgreq, "img2img", script_runner=script_runner, mentioned_script_args=infotext_script_args)
+
selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner)
populate = img2imgreq.copy(update={ # Override __init__ params
@@ -379,29 +515,36 @@ class Api:
args.pop('script_name', None)
args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
args.pop('alwayson_scripts', None)
+ args.pop('infotext', None)
- script_args = self.init_script_args(img2imgreq, self.default_script_arg_img2img, selectable_scripts, selectable_script_idx, script_runner)
+ script_args = self.init_script_args(img2imgreq, self.default_script_arg_img2img, selectable_scripts, selectable_script_idx, script_runner, input_script_args=infotext_script_args)
send_images = args.pop('send_images', True)
args.pop('save_images', None)
+ add_task_to_queue(task_id)
+
with self.queue_lock:
with closing(StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)) as p:
p.init_images = [decode_base64_to_image(x) for x in init_images]
+ p.is_api = True
p.scripts = script_runner
p.outpath_grids = opts.outdir_img2img_grids
p.outpath_samples = opts.outdir_img2img_samples
try:
shared.state.begin(job="scripts_img2img")
+ start_task(task_id)
if selectable_scripts is not None:
p.script_args = script_args
processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
else:
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p)
+ finish_task(task_id)
finally:
shared.state.end()
+ shared.total_tqdm.clear()
b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
@@ -433,9 +576,6 @@ class Api:
return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
def pnginfoapi(self, req: models.PNGInfoRequest):
- if(not req.image.strip()):
- return models.PNGInfoResponse(info="")
-
image = decode_base64_to_image(req.image.strip())
if image is None:
return models.PNGInfoResponse(info="")
@@ -444,9 +584,10 @@ class Api:
if geninfo is None:
geninfo = ""
- items = {**{'parameters': geninfo}, **items}
+ params = infotext.parse_generation_parameters(geninfo)
+ script_callbacks.infotext_pasted_callback(geninfo, params)
- return models.PNGInfoResponse(info=geninfo, items=items)
+ return models.PNGInfoResponse(info=geninfo, items=items, parameters=params)
def progressapi(self, req: models.ProgressRequest = Depends()):
# copy from check_progress_call of ui.py
@@ -474,7 +615,7 @@ class Api:
if shared.state.current_image and not req.skip_current_image:
current_image = encode_pil_to_base64(shared.state.current_image)
- return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)
+ return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo, current_task=current_task)
def interrogateapi(self, interrogatereq: models.InterrogateRequest):
image_b64 = interrogatereq.image
@@ -501,12 +642,12 @@ class Api:
return {}
def unloadapi(self):
- unload_model_weights()
+ sd_models.unload_model_weights()
return {}
def reloadapi(self):
- reload_model_weights()
+ sd_models.send_model_to_device(shared.sd_model)
return {}
@@ -524,13 +665,13 @@ class Api:
return options
- def set_config(self, req: Dict[str, Any]):
+ def set_config(self, req: dict[str, Any]):
checkpoint_name = req.get("sd_model_checkpoint", None)
- if checkpoint_name is not None and checkpoint_name not in checkpoint_aliases:
+ if checkpoint_name is not None and checkpoint_name not in sd_models.checkpoint_aliases:
raise RuntimeError(f"model {checkpoint_name!r} not found")
for k, v in req.items():
- shared.opts.set(k, v)
+ shared.opts.set(k, v, is_api=True)
shared.opts.save(shared.config_filename)
return
@@ -562,10 +703,12 @@ class Api:
]
def get_sd_models(self):
- return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
+ import modules.sd_models as sd_models
+ return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in sd_models.checkpoints_list.values()]
def get_sd_vaes(self):
- return [{"model_name": x, "filename": vae_dict[x]} for x in vae_dict.keys()]
+ import modules.sd_vae as sd_vae
+ return [{"model_name": x, "filename": sd_vae.vae_dict[x]} for x in sd_vae.vae_dict.keys()]
def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
@@ -608,6 +751,10 @@ class Api:
with self.queue_lock:
shared.refresh_checkpoints()
+ def refresh_vae(self):
+ with self.queue_lock:
+ shared_items.refresh_vae_list()
+
def create_embedding(self, args: dict):
try:
shared.state.begin(job="create_embedding")
@@ -630,19 +777,6 @@ class Api:
finally:
shared.state.end()
- def preprocess(self, args: dict):
- try:
- shared.state.begin(job="preprocess")
- preprocess(**args) # quick operation unless blip/booru interrogation is enabled
- shared.state.end()
- return models.PreprocessResponse(info='preprocess complete')
- except KeyError as e:
- return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
- except Exception as e:
- return models.PreprocessResponse(info=f"preprocess error: {e}")
- finally:
- shared.state.end()
-
def train_embedding(self, args: dict):
try:
shared.state.begin(job="train_embedding")
@@ -724,6 +858,25 @@ class Api:
cuda = {'error': f'{err}'}
return models.MemoryResponse(ram=ram, cuda=cuda)
+ def get_extensions_list(self):
+ from modules import extensions
+ extensions.list_extensions()
+ ext_list = []
+ for ext in extensions.extensions:
+ ext: extensions.Extension
+ ext.read_info_from_repo()
+ if ext.remote is not None:
+ ext_list.append({
+ "name": ext.name,
+ "remote": ext.remote,
+ "branch": ext.branch,
+ "commit_hash": ext.commit_hash,
+ "commit_date": ext.commit_date,
+ "version": ext.version,
+ "enabled": ext.enabled
+ })
+ return ext_list
+
def launch(self, server_name, port, root_path):
self.app.include_router(self.router)
uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive, root_path=root_path)
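
The api.py changes above add per-request task ids (force_task_id), an infotext request field that back-fills any unset parameters from a pasted generation-parameters string, optional fetching of init images from URLs (gated by the api_enable_requests / api_forbid_local_requests options), and new routes /sdapi/v1/refresh-vae and /sdapi/v1/extensions. Below is a minimal client sketch using the requests library, assuming a local instance started with --api on the default port; the field names come from the diff, but exact defaults and response layouts can vary between versions.

    import requests

    base = "http://127.0.0.1:7860"

    payload = {
        "prompt": "a red bicycle",   # fields set explicitly always win over infotext
        "steps": 20,
        # everything not set above (sampler, seed, size, ...) is filled from this string
        "infotext": "a red bicycle\nNegative prompt: blurry\nSteps: 30, Sampler: Euler a, Seed: 12345",
        "force_task_id": "my-batch-0001",  # lets a caller poll progress for this exact task
    }
    r = requests.post(f"{base}/sdapi/v1/txt2img", json=payload, timeout=300)
    r.raise_for_status()
    images_b64 = r.json()["images"]   # list of base64-encoded images

    # newly added endpoints
    for ext in requests.get(f"{base}/sdapi/v1/extensions", timeout=30).json():
        print(ext["name"], ext["version"], "enabled" if ext["enabled"] else "disabled")

    requests.post(f"{base}/sdapi/v1/refresh-vae", timeout=30)
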
diff --git a/modules/api/models.py b/modules/api/models.py
index 800c9b93..16edf11c 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,12 +1,10 @@
import inspect
from pydantic import BaseModel, Field, create_model
-from typing import Any, Optional
-from typing_extensions import Literal
+from typing import Any, Optional, Literal
from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
from modules.shared import sd_upscalers, opts, parser
-from typing import Dict, List
API_NOT_ALLOWED = [
"self",
@@ -50,10 +48,12 @@ class PydanticModelGenerator:
additional_fields = None,
):
def field_type_generator(k, v):
- # field_type = str if not overrides.get(k) else overrides[k]["type"]
- # print(k, v.annotation, v.default)
field_type = v.annotation
+ if field_type == 'Image':
+ # images are sent as base64 strings via API
+ field_type = 'str'
+
return Optional[field_type]
def merge_class_params(class_):
@@ -63,7 +63,6 @@ class PydanticModelGenerator:
parameters = {**parameters, **inspect.signature(classes.__init__).parameters}
return parameters
-
self._model_name = model_name
self._class_data = merge_class_params(class_instance)
@@ -72,7 +71,7 @@ class PydanticModelGenerator:
field=underscore(k),
field_alias=k,
field_type=field_type_generator(k, v),
- field_value=v.default
+ field_value=None if isinstance(v.default, property) else v.default
)
for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED
]
@@ -108,6 +107,8 @@ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
{"key": "send_images", "type": bool, "default": True},
{"key": "save_images", "type": bool, "default": False},
{"key": "alwayson_scripts", "type": dict, "default": {}},
+ {"key": "force_task_id", "type": str, "default": None},
+ {"key": "infotext", "type": str, "default": None},
]
).generate_model()
@@ -125,16 +126,18 @@ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
{"key": "send_images", "type": bool, "default": True},
{"key": "save_images", "type": bool, "default": False},
{"key": "alwayson_scripts", "type": dict, "default": {}},
+ {"key": "force_task_id", "type": str, "default": None},
+ {"key": "infotext", "type": str, "default": None},
]
).generate_model()
class TextToImageResponse(BaseModel):
- images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+ images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str
class ImageToImageResponse(BaseModel):
- images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+ images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str
@@ -167,17 +170,18 @@ class FileData(BaseModel):
name: str = Field(title="File name")
class ExtrasBatchImagesRequest(ExtrasBaseRequest):
- imageList: List[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
+ imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
class ExtrasBatchImagesResponse(ExtraBaseResponse):
- images: List[str] = Field(title="Images", description="The generated images in base64 format.")
+ images: list[str] = Field(title="Images", description="The generated images in base64 format.")
class PNGInfoRequest(BaseModel):
image: str = Field(title="Image", description="The base64 encoded PNG image")
class PNGInfoResponse(BaseModel):
info: str = Field(title="Image info", description="A string with the parameters used to generate the image")
- items: dict = Field(title="Items", description="An object containing all the info the image had")
+ items: dict = Field(title="Items", description="A dictionary containing all the other fields the image had")
+ parameters: dict = Field(title="Parameters", description="A dictionary with parsed generation info fields")
class ProgressRequest(BaseModel):
skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
@@ -202,9 +206,6 @@ class TrainResponse(BaseModel):
class CreateResponse(BaseModel):
info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.")
-class PreprocessResponse(BaseModel):
- info: str = Field(title="Preprocess info", description="Response string from preprocessing task.")
-
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
@@ -231,8 +232,8 @@ FlagsModel = create_model("Flags", **flags)
class SamplerItem(BaseModel):
name: str = Field(title="Name")
- aliases: List[str] = Field(title="Aliases")
- options: Dict[str, str] = Field(title="Options")
+ aliases: list[str] = Field(title="Aliases")
+ options: dict[str, str] = Field(title="Options")
class UpscalerItem(BaseModel):
name: str = Field(title="Name")
@@ -283,8 +284,8 @@ class EmbeddingItem(BaseModel):
vectors: int = Field(title="Vectors", description="The number of vectors in the embedding")
class EmbeddingsResponse(BaseModel):
- loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
- skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
+ loaded: dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
+ skipped: dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
class MemoryResponse(BaseModel):
ram: dict = Field(title="RAM", description="System memory stats")
@@ -302,11 +303,20 @@ class ScriptArg(BaseModel):
minimum: Optional[Any] = Field(default=None, title="Minimum", description="Minimum allowed value for the argumentin UI")
maximum: Optional[Any] = Field(default=None, title="Minimum", description="Maximum allowed value for the argumentin UI")
step: Optional[Any] = Field(default=None, title="Minimum", description="Step for changing value of the argumentin UI")
- choices: Optional[List[str]] = Field(default=None, title="Choices", description="Possible values for the argument")
+ choices: Optional[list[str]] = Field(default=None, title="Choices", description="Possible values for the argument")
class ScriptInfo(BaseModel):
name: str = Field(default=None, title="Name", description="Script name")
is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script")
is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script")
- args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments")
+ args: list[ScriptArg] = Field(title="Arguments", description="List of script's arguments")
+
+class ExtensionItem(BaseModel):
+ name: str = Field(title="Name", description="Extension name")
+ remote: str = Field(title="Remote", description="Extension Repository URL")
+ branch: str = Field(title="Branch", description="Extension Repository Branch")
+ commit_hash: str = Field(title="Commit Hash", description="Extension Repository Commit Hash")
+ version: str = Field(title="Version", description="Extension Version")
+ commit_date: str = Field(title="Commit Date", description="Extension Repository Commit Date")
+ enabled: bool = Field(title="Enabled", description="Flag specifying whether this extension is enabled")
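
The models.py changes above migrate typing to builtin generics, add the force_task_id and infotext keys to both processing request models, describe extensions through the new ExtensionItem model, and extend PNGInfoResponse with a parsed parameters dict alongside the raw info string. A short sketch of reading that new field back, assuming the long-standing /sdapi/v1/png-info route (the route itself is not part of this diff):

    import base64
    import requests

    base = "http://127.0.0.1:7860"

    with open("output.png", "rb") as f:
        image_b64 = base64.b64encode(f.read()).decode("ascii")

    r = requests.post(f"{base}/sdapi/v1/png-info", json={"image": image_b64}, timeout=30)
    r.raise_for_status()
    data = r.json()
    print(data["info"])                    # raw generation-parameters string
    print(data["parameters"].get("Seed"))  # same data, parsed into a dict (key names may vary)
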
diff --git a/modules/cache.py b/modules/cache.py
index 71fe6302..2d37e7b9 100644
--- a/modules/cache.py
+++ b/modules/cache.py
@@ -1,11 +1,12 @@
import json
+import os
import os.path
import threading
import time
from modules.paths import data_path, script_path
-cache_filename = os.path.join(data_path, "cache.json")
+cache_filename = os.environ.get('SD_WEBUI_CACHE_FILE', os.path.join(data_path, "cache.json"))
cache_data = None
cache_lock = threading.Lock()
@@ -29,8 +30,11 @@ def dump_cache():
time.sleep(1)
with cache_lock:
- with open(cache_filename, "w", encoding="utf8") as file:
- json.dump(cache_data, file, indent=4)
+ cache_filename_tmp = cache_filename + "-"
+ with open(cache_filename_tmp, "w", encoding="utf8") as file:
+ json.dump(cache_data, file, indent=4, ensure_ascii=False)
+
+ os.replace(cache_filename_tmp, cache_filename)
dump_cache_after = None
dump_cache_thread = None
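
The cache.py change above makes the cache location overridable through the SD_WEBUI_CACHE_FILE environment variable and switches the dump to an atomic write (temporary file plus os.replace), so a crash mid-write can no longer truncate cache.json. The same pattern in isolation, with illustrative file names:

    import json
    import os

    cache_file = os.environ.get("SD_WEBUI_CACHE_FILE", "cache.json")

    def dump_cache_atomically(data: dict) -> None:
        tmp = cache_file + "-"
        with open(tmp, "w", encoding="utf8") as f:
            json.dump(data, f, indent=4, ensure_ascii=False)
        os.replace(tmp, cache_file)  # atomic swap on the same volume; readers never see a partial file

    dump_cache_atomically({"hashes": {}})
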
diff --git a/modules/call_queue.py b/modules/call_queue.py
index 61aa240f..ddf0d573 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -1,11 +1,10 @@
from functools import wraps
import html
-import threading
import time
-from modules import shared, progress, errors
+from modules import shared, progress, errors, devices, fifo_lock
-queue_lock = threading.Lock()
+queue_lock = fifo_lock.FIFOLock()
def wrap_queued_call(func):
@@ -75,6 +74,8 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
error_message = f'{type(e).__name__}: {e}'
res = extra_outputs_array + [f"<div class='error'>{html.escape(error_message)}</div>"]
+ devices.torch_gc()
+
shared.state.skipped = False
shared.state.interrupted = False
shared.state.job_count = 0
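
call_queue.py now takes queue_lock from the new modules/fifo_lock.py instead of threading.Lock, so queued generations acquire the lock in the order they arrived rather than in whatever order the OS happens to wake waiting threads. The real module is not shown in this section; the sketch below is a generic ticket-based FIFO lock that conveys the idea and may differ from the actual implementation.

    import threading

    class FIFOLock:
        def __init__(self):
            self._cond = threading.Condition()
            self._next_ticket = 0    # next ticket number to hand out
            self._now_serving = 0    # ticket currently allowed to hold the lock

        def acquire(self):
            with self._cond:
                ticket = self._next_ticket
                self._next_ticket += 1
                while ticket != self._now_serving:
                    self._cond.wait()

        def release(self):
            with self._cond:
                self._now_serving += 1
                self._cond.notify_all()

        # context-manager support, so it drops in where threading.Lock was used
        def __enter__(self):
            self.acquire()
            return self

        def __exit__(self, *exc):
            self.release()
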
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index e401f641..da93eb26 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -13,8 +13,11 @@ parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py
parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
parser.add_argument("--update-check", action='store_true', help="launch.py argument: check for updates at startup")
parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
+parser.add_argument("--log-startup", action='store_true', help="launch.py argument: print a detailed log of what's happening at startup")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
+parser.add_argument("--dump-sysinfo", action='store_true', help="launch.py argument: dump a limited sysinfo file (without information about extensions or options) to disk and quit")
+parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None)
parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
@@ -33,9 +36,10 @@ parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
+parser.add_argument("--medvram-sdxl", action='store_true', help="enable --medvram optimization just for SDXL models")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
-parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
+parser.add_argument("--always-batch-cond-uncond", action='store_true', help="does not do anything")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
@@ -66,6 +70,8 @@ parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="pre
parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
+parser.add_argument("--use-ipex", action="store_true", help="use Intel XPU as torch device")
+parser.add_argument("--disable-model-loading-ram-optimization", action='store_true', help="disable an optimization that reduces RAM use when loading a model")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
@@ -78,14 +84,14 @@ parser.add_argument("--gradio-auth", type=str, help='set gradio authentication l
parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
-parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it")
+parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it", default=[data_path])
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
-parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
+parser.add_argument("--enable-console-prompts", action='store_true', help="does not do anything", default=False)  # legacy compatibility; used as the default value for shared.opts.enable_console_prompts
parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)")
@@ -102,11 +108,14 @@ parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, req
parser.add_argument("--disable-tls-verify", action="store_false", help="When passed, enables the use of self-signed certificates.", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True)
-parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions")
+parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the default in earlier versions")
parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers")
parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False)
parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False)
parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy')
-parser.add_argument('--add-stop-route', action='store_true', help='add /_stop route to stop server')
+parser.add_argument('--add-stop-route', action='store_true', help='does not do anything')
parser.add_argument('--api-server-stop', action='store_true', help='enable server stop/restart/kill via api')
parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn')
+parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False)
+parser.add_argument("--disable-extra-extensions", action='store_true', help="prevent all extensions except built-in from running regardless of any other settings", default=False)
+parser.add_argument("--skip-load-model-at-start", action='store_true', help="if load a model at web start, only take effect when --nowebui", )
diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py
deleted file mode 100644
index 12db6814..00000000
--- a/modules/codeformer/codeformer_arch.py
+++ /dev/null
@@ -1,276 +0,0 @@
-# this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py
-
-import math
-import torch
-from torch import nn, Tensor
-import torch.nn.functional as F
-from typing import Optional
-
-from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock
-from basicsr.utils.registry import ARCH_REGISTRY
-
-def calc_mean_std(feat, eps=1e-5):
- """Calculate mean and std for adaptive_instance_normalization.
-
- Args:
- feat (Tensor): 4D tensor.
- eps (float): A small value added to the variance to avoid
- divide-by-zero. Default: 1e-5.
- """
- size = feat.size()
- assert len(size) == 4, 'The input feature should be 4D tensor.'
- b, c = size[:2]
- feat_var = feat.view(b, c, -1).var(dim=2) + eps
- feat_std = feat_var.sqrt().view(b, c, 1, 1)
- feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)
- return feat_mean, feat_std
-
-
-def adaptive_instance_normalization(content_feat, style_feat):
- """Adaptive instance normalization.
-
- Adjust the reference features to have the similar color and illuminations
- as those in the degradate features.
-
- Args:
- content_feat (Tensor): The reference feature.
- style_feat (Tensor): The degradate features.
- """
- size = content_feat.size()
- style_mean, style_std = calc_mean_std(style_feat)
- content_mean, content_std = calc_mean_std(content_feat)
- normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size)
- return normalized_feat * style_std.expand(size) + style_mean.expand(size)
-
-
-class PositionEmbeddingSine(nn.Module):
- """
- This is a more standard version of the position embedding, very similar to the one
- used by the Attention is all you need paper, generalized to work on images.
- """
-
- def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
- super().__init__()
- self.num_pos_feats = num_pos_feats
- self.temperature = temperature
- self.normalize = normalize
- if scale is not None and normalize is False:
- raise ValueError("normalize should be True if scale is passed")
- if scale is None:
- scale = 2 * math.pi
- self.scale = scale
-
- def forward(self, x, mask=None):
- if mask is None:
- mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
- not_mask = ~mask
- y_embed = not_mask.cumsum(1, dtype=torch.float32)
- x_embed = not_mask.cumsum(2, dtype=torch.float32)
- if self.normalize:
- eps = 1e-6
- y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
- x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
-
- dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
- dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
-
- pos_x = x_embed[:, :, :, None] / dim_t
- pos_y = y_embed[:, :, :, None] / dim_t
- pos_x = torch.stack(
- (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
- ).flatten(3)
- pos_y = torch.stack(
- (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
- ).flatten(3)
- pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
- return pos
-
-def _get_activation_fn(activation):
- """Return an activation function given a string"""
- if activation == "relu":
- return F.relu
- if activation == "gelu":
- return F.gelu
- if activation == "glu":
- return F.glu
- raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
-
-
-class TransformerSALayer(nn.Module):
- def __init__(self, embed_dim, nhead=8, dim_mlp=2048, dropout=0.0, activation="gelu"):
- super().__init__()
- self.self_attn = nn.MultiheadAttention(embed_dim, nhead, dropout=dropout)
- # Implementation of Feedforward model - MLP
- self.linear1 = nn.Linear(embed_dim, dim_mlp)
- self.dropout = nn.Dropout(dropout)
- self.linear2 = nn.Linear(dim_mlp, embed_dim)
-
- self.norm1 = nn.LayerNorm(embed_dim)
- self.norm2 = nn.LayerNorm(embed_dim)
- self.dropout1 = nn.Dropout(dropout)
- self.dropout2 = nn.Dropout(dropout)
-
- self.activation = _get_activation_fn(activation)
-
- def with_pos_embed(self, tensor, pos: Optional[Tensor]):
- return tensor if pos is None else tensor + pos
-
- def forward(self, tgt,
- tgt_mask: Optional[Tensor] = None,
- tgt_key_padding_mask: Optional[Tensor] = None,
- query_pos: Optional[Tensor] = None):
-
- # self attention
- tgt2 = self.norm1(tgt)
- q = k = self.with_pos_embed(tgt2, query_pos)
- tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
- key_padding_mask=tgt_key_padding_mask)[0]
- tgt = tgt + self.dropout1(tgt2)
-
- # ffn
- tgt2 = self.norm2(tgt)
- tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
- tgt = tgt + self.dropout2(tgt2)
- return tgt
-
-class Fuse_sft_block(nn.Module):
- def __init__(self, in_ch, out_ch):
- super().__init__()
- self.encode_enc = ResBlock(2*in_ch, out_ch)
-
- self.scale = nn.Sequential(
- nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1))
-
- self.shift = nn.Sequential(
- nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1))
-
- def forward(self, enc_feat, dec_feat, w=1):
- enc_feat = self.encode_enc(torch.cat([enc_feat, dec_feat], dim=1))
- scale = self.scale(enc_feat)
- shift = self.shift(enc_feat)
- residual = w * (dec_feat * scale + shift)
- out = dec_feat + residual
- return out
-
-
-@ARCH_REGISTRY.register()
-class CodeFormer(VQAutoEncoder):
- def __init__(self, dim_embd=512, n_head=8, n_layers=9,
- codebook_size=1024, latent_size=256,
- connect_list=('32', '64', '128', '256'),
- fix_modules=('quantize', 'generator')):
- super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size)
-
- if fix_modules is not None:
- for module in fix_modules:
- for param in getattr(self, module).parameters():
- param.requires_grad = False
-
- self.connect_list = connect_list
- self.n_layers = n_layers
- self.dim_embd = dim_embd
- self.dim_mlp = dim_embd*2
-
- self.position_emb = nn.Parameter(torch.zeros(latent_size, self.dim_embd))
- self.feat_emb = nn.Linear(256, self.dim_embd)
-
- # transformer
- self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0)
- for _ in range(self.n_layers)])
-
- # logits_predict head
- self.idx_pred_layer = nn.Sequential(
- nn.LayerNorm(dim_embd),
- nn.Linear(dim_embd, codebook_size, bias=False))
-
- self.channels = {
- '16': 512,
- '32': 256,
- '64': 256,
- '128': 128,
- '256': 128,
- '512': 64,
- }
-
- # after second residual block for > 16, before attn layer for ==16
- self.fuse_encoder_block = {'512':2, '256':5, '128':8, '64':11, '32':14, '16':18}
- # after first residual block for > 16, before attn layer for ==16
- self.fuse_generator_block = {'16':6, '32': 9, '64':12, '128':15, '256':18, '512':21}
-
- # fuse_convs_dict
- self.fuse_convs_dict = nn.ModuleDict()
- for f_size in self.connect_list:
- in_ch = self.channels[f_size]
- self.fuse_convs_dict[f_size] = Fuse_sft_block(in_ch, in_ch)
-
- def _init_weights(self, module):
- if isinstance(module, (nn.Linear, nn.Embedding)):
- module.weight.data.normal_(mean=0.0, std=0.02)
- if isinstance(module, nn.Linear) and module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
-
- def forward(self, x, w=0, detach_16=True, code_only=False, adain=False):
- # ################### Encoder #####################
- enc_feat_dict = {}
- out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list]
- for i, block in enumerate(self.encoder.blocks):
- x = block(x)
- if i in out_list:
- enc_feat_dict[str(x.shape[-1])] = x.clone()
-
- lq_feat = x
- # ################# Transformer ###################
- # quant_feat, codebook_loss, quant_stats = self.quantize(lq_feat)
- pos_emb = self.position_emb.unsqueeze(1).repeat(1,x.shape[0],1)
- # BCHW -> BC(HW) -> (HW)BC
- feat_emb = self.feat_emb(lq_feat.flatten(2).permute(2,0,1))
- query_emb = feat_emb
- # Transformer encoder
- for layer in self.ft_layers:
- query_emb = layer(query_emb, query_pos=pos_emb)
-
- # output logits
- logits = self.idx_pred_layer(query_emb) # (hw)bn
- logits = logits.permute(1,0,2) # (hw)bn -> b(hw)n
-
- if code_only: # for training stage II
- # logits doesn't need softmax before cross_entropy loss
- return logits, lq_feat
-
- # ################# Quantization ###################
- # if self.training:
- # quant_feat = torch.einsum('btn,nc->btc', [soft_one_hot, self.quantize.embedding.weight])
- # # b(hw)c -> bc(hw) -> bchw
- # quant_feat = quant_feat.permute(0,2,1).view(lq_feat.shape)
- # ------------
- soft_one_hot = F.softmax(logits, dim=2)
- _, top_idx = torch.topk(soft_one_hot, 1, dim=2)
- quant_feat = self.quantize.get_codebook_feat(top_idx, shape=[x.shape[0],16,16,256])
- # preserve gradients
- # quant_feat = lq_feat + (quant_feat - lq_feat).detach()
-
- if detach_16:
- quant_feat = quant_feat.detach() # for training stage III
- if adain:
- quant_feat = adaptive_instance_normalization(quant_feat, lq_feat)
-
- # ################## Generator ####################
- x = quant_feat
- fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list]
-
- for i, block in enumerate(self.generator.blocks):
- x = block(x)
- if i in fuse_list: # fuse after i-th block
- f_size = str(x.shape[-1])
- if w>0:
- x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w)
- out = x
- # logits doesn't need softmax before cross_entropy loss
- return out, logits, lq_feat
diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py
deleted file mode 100644
index 09ee6660..00000000
--- a/modules/codeformer/vqgan_arch.py
+++ /dev/null
@@ -1,435 +0,0 @@
-# this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py
-
-'''
-VQGAN code, adapted from the original created by the Unleashing Transformers authors:
-https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
-
-'''
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from basicsr.utils import get_root_logger
-from basicsr.utils.registry import ARCH_REGISTRY
-
-def normalize(in_channels):
- return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
-
-
-@torch.jit.script
-def swish(x):
- return x*torch.sigmoid(x)
-
-
-# Define VQVAE classes
-class VectorQuantizer(nn.Module):
- def __init__(self, codebook_size, emb_dim, beta):
- super(VectorQuantizer, self).__init__()
- self.codebook_size = codebook_size # number of embeddings
- self.emb_dim = emb_dim # dimension of embedding
- self.beta = beta # commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
- self.embedding = nn.Embedding(self.codebook_size, self.emb_dim)
- self.embedding.weight.data.uniform_(-1.0 / self.codebook_size, 1.0 / self.codebook_size)
-
- def forward(self, z):
- # reshape z -> (batch, height, width, channel) and flatten
- z = z.permute(0, 2, 3, 1).contiguous()
- z_flattened = z.view(-1, self.emb_dim)
-
- # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
- d = (z_flattened ** 2).sum(dim=1, keepdim=True) + (self.embedding.weight**2).sum(1) - \
- 2 * torch.matmul(z_flattened, self.embedding.weight.t())
-
- mean_distance = torch.mean(d)
- # find closest encodings
- # min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
- min_encoding_scores, min_encoding_indices = torch.topk(d, 1, dim=1, largest=False)
- # [0-1], higher score, higher confidence
- min_encoding_scores = torch.exp(-min_encoding_scores/10)
-
- min_encodings = torch.zeros(min_encoding_indices.shape[0], self.codebook_size).to(z)
- min_encodings.scatter_(1, min_encoding_indices, 1)
-
- # get quantized latent vectors
- z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
- # compute loss for embedding
- loss = torch.mean((z_q.detach()-z)**2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
- # preserve gradients
- z_q = z + (z_q - z).detach()
-
- # perplexity
- e_mean = torch.mean(min_encodings, dim=0)
- perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
- # reshape back to match original input shape
- z_q = z_q.permute(0, 3, 1, 2).contiguous()
-
- return z_q, loss, {
- "perplexity": perplexity,
- "min_encodings": min_encodings,
- "min_encoding_indices": min_encoding_indices,
- "min_encoding_scores": min_encoding_scores,
- "mean_distance": mean_distance
- }
-
- def get_codebook_feat(self, indices, shape):
- # input indices: batch*token_num -> (batch*token_num)*1
- # shape: batch, height, width, channel
- indices = indices.view(-1,1)
- min_encodings = torch.zeros(indices.shape[0], self.codebook_size).to(indices)
- min_encodings.scatter_(1, indices, 1)
- # get quantized latent vectors
- z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
-
- if shape is not None: # reshape back to match original input shape
- z_q = z_q.view(shape).permute(0, 3, 1, 2).contiguous()
-
- return z_q
-
-
-class GumbelQuantizer(nn.Module):
- def __init__(self, codebook_size, emb_dim, num_hiddens, straight_through=False, kl_weight=5e-4, temp_init=1.0):
- super().__init__()
- self.codebook_size = codebook_size # number of embeddings
- self.emb_dim = emb_dim # dimension of embedding
- self.straight_through = straight_through
- self.temperature = temp_init
- self.kl_weight = kl_weight
- self.proj = nn.Conv2d(num_hiddens, codebook_size, 1) # projects last encoder layer to quantized logits
- self.embed = nn.Embedding(codebook_size, emb_dim)
-
- def forward(self, z):
- hard = self.straight_through if self.training else True
-
- logits = self.proj(z)
-
- soft_one_hot = F.gumbel_softmax(logits, tau=self.temperature, dim=1, hard=hard)
-
- z_q = torch.einsum("b n h w, n d -> b d h w", soft_one_hot, self.embed.weight)
-
- # + kl divergence to the prior loss
- qy = F.softmax(logits, dim=1)
- diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean()
- min_encoding_indices = soft_one_hot.argmax(dim=1)
-
- return z_q, diff, {
- "min_encoding_indices": min_encoding_indices
- }
-
-
-class Downsample(nn.Module):
- def __init__(self, in_channels):
- super().__init__()
- self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
-
- def forward(self, x):
- pad = (0, 1, 0, 1)
- x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
- x = self.conv(x)
- return x
-
-
-class Upsample(nn.Module):
- def __init__(self, in_channels):
- super().__init__()
- self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
-
- def forward(self, x):
- x = F.interpolate(x, scale_factor=2.0, mode="nearest")
- x = self.conv(x)
-
- return x
-
-
-class ResBlock(nn.Module):
- def __init__(self, in_channels, out_channels=None):
- super(ResBlock, self).__init__()
- self.in_channels = in_channels
- self.out_channels = in_channels if out_channels is None else out_channels
- self.norm1 = normalize(in_channels)
- self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
- self.norm2 = normalize(out_channels)
- self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
- if self.in_channels != self.out_channels:
- self.conv_out = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
-
- def forward(self, x_in):
- x = x_in
- x = self.norm1(x)
- x = swish(x)
- x = self.conv1(x)
- x = self.norm2(x)
- x = swish(x)
- x = self.conv2(x)
- if self.in_channels != self.out_channels:
- x_in = self.conv_out(x_in)
-
- return x + x_in
-
-
-class AttnBlock(nn.Module):
- def __init__(self, in_channels):
- super().__init__()
- self.in_channels = in_channels
-
- self.norm = normalize(in_channels)
- self.q = torch.nn.Conv2d(
- in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0
- )
- self.k = torch.nn.Conv2d(
- in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0
- )
- self.v = torch.nn.Conv2d(
- in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0
- )
- self.proj_out = torch.nn.Conv2d(
- in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0
- )
-
- def forward(self, x):
- h_ = x
- h_ = self.norm(h_)
- q = self.q(h_)
- k = self.k(h_)
- v = self.v(h_)
-
- # compute attention
- b, c, h, w = q.shape
- q = q.reshape(b, c, h*w)
- q = q.permute(0, 2, 1)
- k = k.reshape(b, c, h*w)
- w_ = torch.bmm(q, k)
- w_ = w_ * (int(c)**(-0.5))
- w_ = F.softmax(w_, dim=2)
-
- # attend to values
- v = v.reshape(b, c, h*w)
- w_ = w_.permute(0, 2, 1)
- h_ = torch.bmm(v, w_)
- h_ = h_.reshape(b, c, h, w)
-
- h_ = self.proj_out(h_)
-
- return x+h_
-
-
-class Encoder(nn.Module):
- def __init__(self, in_channels, nf, emb_dim, ch_mult, num_res_blocks, resolution, attn_resolutions):
- super().__init__()
- self.nf = nf
- self.num_resolutions = len(ch_mult)
- self.num_res_blocks = num_res_blocks
- self.resolution = resolution
- self.attn_resolutions = attn_resolutions
-
- curr_res = self.resolution
- in_ch_mult = (1,)+tuple(ch_mult)
-
- blocks = []
- # initial convultion
- blocks.append(nn.Conv2d(in_channels, nf, kernel_size=3, stride=1, padding=1))
-
- # residual and downsampling blocks, with attention on smaller res (16x16)
- for i in range(self.num_resolutions):
- block_in_ch = nf * in_ch_mult[i]
- block_out_ch = nf * ch_mult[i]
- for _ in range(self.num_res_blocks):
- blocks.append(ResBlock(block_in_ch, block_out_ch))
- block_in_ch = block_out_ch
- if curr_res in attn_resolutions:
- blocks.append(AttnBlock(block_in_ch))
-
- if i != self.num_resolutions - 1:
- blocks.append(Downsample(block_in_ch))
- curr_res = curr_res // 2
-
- # non-local attention block
- blocks.append(ResBlock(block_in_ch, block_in_ch))
- blocks.append(AttnBlock(block_in_ch))
- blocks.append(ResBlock(block_in_ch, block_in_ch))
-
- # normalise and convert to latent size
- blocks.append(normalize(block_in_ch))
- blocks.append(nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1))
- self.blocks = nn.ModuleList(blocks)
-
- def forward(self, x):
- for block in self.blocks:
- x = block(x)
-
- return x
-
-
-class Generator(nn.Module):
- def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions):
- super().__init__()
- self.nf = nf
- self.ch_mult = ch_mult
- self.num_resolutions = len(self.ch_mult)
- self.num_res_blocks = res_blocks
- self.resolution = img_size
- self.attn_resolutions = attn_resolutions
- self.in_channels = emb_dim
- self.out_channels = 3
- block_in_ch = self.nf * self.ch_mult[-1]
- curr_res = self.resolution // 2 ** (self.num_resolutions-1)
-
- blocks = []
- # initial conv
- blocks.append(nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1))
-
- # non-local attention block
- blocks.append(ResBlock(block_in_ch, block_in_ch))
- blocks.append(AttnBlock(block_in_ch))
- blocks.append(ResBlock(block_in_ch, block_in_ch))
-
- for i in reversed(range(self.num_resolutions)):
- block_out_ch = self.nf * self.ch_mult[i]
-
- for _ in range(self.num_res_blocks):
- blocks.append(ResBlock(block_in_ch, block_out_ch))
- block_in_ch = block_out_ch
-
- if curr_res in self.attn_resolutions:
- blocks.append(AttnBlock(block_in_ch))
-
- if i != 0:
- blocks.append(Upsample(block_in_ch))
- curr_res = curr_res * 2
-
- blocks.append(normalize(block_in_ch))
- blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1))
-
- self.blocks = nn.ModuleList(blocks)
-
-
- def forward(self, x):
- for block in self.blocks:
- x = block(x)
-
- return x
-
-
-@ARCH_REGISTRY.register()
-class VQAutoEncoder(nn.Module):
- def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256,
- beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None):
- super().__init__()
- logger = get_root_logger()
- self.in_channels = 3
- self.nf = nf
- self.n_blocks = res_blocks
- self.codebook_size = codebook_size
- self.embed_dim = emb_dim
- self.ch_mult = ch_mult
- self.resolution = img_size
- self.attn_resolutions = attn_resolutions or [16]
- self.quantizer_type = quantizer
- self.encoder = Encoder(
- self.in_channels,
- self.nf,
- self.embed_dim,
- self.ch_mult,
- self.n_blocks,
- self.resolution,
- self.attn_resolutions
- )
- if self.quantizer_type == "nearest":
- self.beta = beta #0.25
- self.quantize = VectorQuantizer(self.codebook_size, self.embed_dim, self.beta)
- elif self.quantizer_type == "gumbel":
- self.gumbel_num_hiddens = emb_dim
- self.straight_through = gumbel_straight_through
- self.kl_weight = gumbel_kl_weight
- self.quantize = GumbelQuantizer(
- self.codebook_size,
- self.embed_dim,
- self.gumbel_num_hiddens,
- self.straight_through,
- self.kl_weight
- )
- self.generator = Generator(
- self.nf,
- self.embed_dim,
- self.ch_mult,
- self.n_blocks,
- self.resolution,
- self.attn_resolutions
- )
-
- if model_path is not None:
- chkpt = torch.load(model_path, map_location='cpu')
- if 'params_ema' in chkpt:
- self.load_state_dict(torch.load(model_path, map_location='cpu')['params_ema'])
- logger.info(f'vqgan is loaded from: {model_path} [params_ema]')
- elif 'params' in chkpt:
- self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
- logger.info(f'vqgan is loaded from: {model_path} [params]')
- else:
- raise ValueError('Wrong params!')
-
-
- def forward(self, x):
- x = self.encoder(x)
- quant, codebook_loss, quant_stats = self.quantize(x)
- x = self.generator(quant)
- return x, codebook_loss, quant_stats
-
-
-
-# patch based discriminator
-@ARCH_REGISTRY.register()
-class VQGANDiscriminator(nn.Module):
- def __init__(self, nc=3, ndf=64, n_layers=4, model_path=None):
- super().__init__()
-
- layers = [nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True)]
- ndf_mult = 1
- ndf_mult_prev = 1
- for n in range(1, n_layers): # gradually increase the number of filters
- ndf_mult_prev = ndf_mult
- ndf_mult = min(2 ** n, 8)
- layers += [
- nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=2, padding=1, bias=False),
- nn.BatchNorm2d(ndf * ndf_mult),
- nn.LeakyReLU(0.2, True)
- ]
-
- ndf_mult_prev = ndf_mult
- ndf_mult = min(2 ** n_layers, 8)
-
- layers += [
- nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=1, padding=1, bias=False),
- nn.BatchNorm2d(ndf * ndf_mult),
- nn.LeakyReLU(0.2, True)
- ]
-
- layers += [
- nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1)] # output 1 channel prediction map
- self.main = nn.Sequential(*layers)
-
- if model_path is not None:
- chkpt = torch.load(model_path, map_location='cpu')
- if 'params_d' in chkpt:
- self.load_state_dict(torch.load(model_path, map_location='cpu')['params_d'])
- elif 'params' in chkpt:
- self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
- else:
- raise ValueError('Wrong params!')
-
- def forward(self, x):
- return self.main(x)
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index da42b5e9..44b84618 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -1,132 +1,64 @@
-import os
+from __future__ import annotations
-import cv2
-import torch
-
-import modules.face_restoration
-import modules.shared
-from modules import shared, devices, modelloader, errors
-from modules.paths import models_path
-
-# codeformer people made a choice to include modified basicsr library to their project which makes
-# it utterly impossible to use it alongside with other libraries that also use basicsr, like GFPGAN.
-# I am making a choice to include some files from codeformer to work around this issue.
-model_dir = "Codeformer"
-model_path = os.path.join(models_path, model_dir)
-model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
-
-codeformer = None
-
-
-def setup_model(dirname):
- os.makedirs(model_path, exist_ok=True)
-
- path = modules.paths.paths.get("CodeFormer", None)
- if path is None:
- return
-
- try:
- from torchvision.transforms.functional import normalize
- from modules.codeformer.codeformer_arch import CodeFormer
- from basicsr.utils import img2tensor, tensor2img
- from facelib.utils.face_restoration_helper import FaceRestoreHelper
- from facelib.detection.retinaface import retinaface
-
- net_class = CodeFormer
-
- class FaceRestorerCodeFormer(modules.face_restoration.FaceRestoration):
- def name(self):
- return "CodeFormer"
-
- def __init__(self, dirname):
- self.net = None
- self.face_helper = None
- self.cmd_dir = dirname
+import logging
- def create_models(self):
-
- if self.net is not None and self.face_helper is not None:
- self.net.to(devices.device_codeformer)
- return self.net, self.face_helper
- model_paths = modelloader.load_models(model_path, model_url, self.cmd_dir, download_name='codeformer-v0.1.0.pth', ext_filter=['.pth'])
- if len(model_paths) != 0:
- ckpt_path = model_paths[0]
- else:
- print("Unable to load codeformer model.")
- return None, None
- net = net_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(devices.device_codeformer)
- checkpoint = torch.load(ckpt_path)['params_ema']
- net.load_state_dict(checkpoint)
- net.eval()
-
- if hasattr(retinaface, 'device'):
- retinaface.device = devices.device_codeformer
- face_helper = FaceRestoreHelper(1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=devices.device_codeformer)
-
- self.net = net
- self.face_helper = face_helper
-
- return net, face_helper
-
- def send_model_to(self, device):
- self.net.to(device)
- self.face_helper.face_det.to(device)
- self.face_helper.face_parse.to(device)
-
- def restore(self, np_image, w=None):
- np_image = np_image[:, :, ::-1]
-
- original_resolution = np_image.shape[0:2]
+import torch
- self.create_models()
- if self.net is None or self.face_helper is None:
- return np_image
+from modules import (
+ devices,
+ errors,
+ face_restoration,
+ face_restoration_utils,
+ modelloader,
+ shared,
+)
- self.send_model_to(devices.device_codeformer)
+logger = logging.getLogger(__name__)
- self.face_helper.clean_all()
- self.face_helper.read_image(np_image)
- self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
- self.face_helper.align_warp_face()
+model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
+model_download_name = 'codeformer-v0.1.0.pth'
- for cropped_face in self.face_helper.cropped_faces:
- cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
- normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
- cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer)
+# used by e.g. postprocessing_codeformer.py
+codeformer: face_restoration.FaceRestoration | None = None
- try:
- with torch.no_grad():
- output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0]
- restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
- del output
- devices.torch_gc()
- except Exception:
- errors.report('Failed inference for CodeFormer', exc_info=True)
- restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
- restored_face = restored_face.astype('uint8')
- self.face_helper.add_restored_face(restored_face)
+class FaceRestorerCodeFormer(face_restoration_utils.CommonFaceRestoration):
+ def name(self):
+ return "CodeFormer"
- self.face_helper.get_inverse_affine(None)
+ def load_net(self) -> torch.nn.Module:
+ for model_path in modelloader.load_models(
+ model_path=self.model_path,
+ model_url=model_url,
+ command_path=self.model_path,
+ download_name=model_download_name,
+ ext_filter=['.pth'],
+ ):
+ return modelloader.load_spandrel_model(
+ model_path,
+ device=devices.device_codeformer,
+ expected_architecture='CodeFormer',
+ ).model
+ raise ValueError("No codeformer model found")
- restored_img = self.face_helper.paste_faces_to_input_image()
- restored_img = restored_img[:, :, ::-1]
+ def get_device(self):
+ return devices.device_codeformer
- if original_resolution != restored_img.shape[0:2]:
- restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR)
+ def restore(self, np_image, w: float | None = None):
+ if w is None:
+ w = getattr(shared.opts, "code_former_weight", 0.5)
- self.face_helper.clean_all()
+ def restore_face(cropped_face_t):
+ assert self.net is not None
+ return self.net(cropped_face_t, w=w, adain=True)[0]
- if shared.opts.face_restoration_unload:
- self.send_model_to(devices.cpu)
+ return self.restore_with_helper(np_image, restore_face)
- return restored_img
- global codeformer
+def setup_model(dirname: str) -> None:
+ global codeformer
+ try:
codeformer = FaceRestorerCodeFormer(dirname)
shared.face_restorers.append(codeformer)
-
except Exception:
errors.report("Error setting up CodeFormer", exc_info=True)
-
- # sys.path = stored_sys_path
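For orientation, a rough sketch of how the reworked CodeFormer module might be driven by a caller; the dirname and the input array are placeholders, and the real call sites (face restoration during processing and the extras tab) are outside this diff:

import numpy as np
from modules import codeformer_model

# register the restorer once at startup; setup_model appends it to shared.face_restorers
codeformer_model.setup_model("models/Codeformer")  # placeholder dirname

# later, restore faces in a uint8 HxWx3 image; w blends restored faces against the originals
image = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder image
if codeformer_model.codeformer is not None:
    restored = codeformer_model.codeformer.restore(image, w=0.5)
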
diff --git a/modules/config_states.py b/modules/config_states.py
index 6f1ab53f..651793c7 100644
--- a/modules/config_states.py
+++ b/modules/config_states.py
@@ -4,18 +4,15 @@ Supports saving and restoring webui and extensions from a known working set of c
import os
import json
-import time
import tqdm
from datetime import datetime
-from collections import OrderedDict
import git
from modules import shared, extensions, errors
from modules.paths_internal import script_path, config_states_dir
-
-all_config_states = OrderedDict()
+all_config_states = {}
def list_config_states():
@@ -28,15 +25,19 @@ def list_config_states():
for filename in os.listdir(config_states_dir):
if filename.endswith(".json"):
path = os.path.join(config_states_dir, filename)
- with open(path, "r", encoding="utf-8") as f:
- j = json.load(f)
- j["filepath"] = path
- config_states.append(j)
+ try:
+ with open(path, "r", encoding="utf-8") as f:
+ j = json.load(f)
+ assert "created_at" in j, '"created_at" does not exist'
+ j["filepath"] = path
+ config_states.append(j)
+ except Exception as e:
+ print(f'[ERROR]: Config states {path}, {e}')
config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)
for cs in config_states:
- timestamp = time.asctime(time.gmtime(cs["created_at"]))
+ timestamp = datetime.fromtimestamp(cs["created_at"]).strftime('%Y-%m-%d %H:%M:%S')
name = cs.get("name", "Config")
full_name = f"{name}: {timestamp}"
all_config_states[full_name] = cs
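The hunk above also switches the displayed timestamp from a UTC asctime string to a local-time '%Y-%m-%d %H:%M:%S' format, and the new try/except turns a malformed or pre-"created_at" state file into a printed error instead of a crash. A small illustration of the formatting difference (the epoch value is an arbitrary example):

import time
from datetime import datetime

created_at = 1704067200  # 2024-01-01 00:00:00 UTC

old_style = time.asctime(time.gmtime(created_at))                             # 'Mon Jan  1 00:00:00 2024', always UTC
new_style = datetime.fromtimestamp(created_at).strftime('%Y-%m-%d %H:%M:%S')  # local time, e.g. '2024-01-01 01:00:00' in UTC+1
print(old_style, new_style)
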
diff --git a/modules/devices.py b/modules/devices.py
index 57e51da3..ff279ac5 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -3,11 +3,19 @@ import contextlib
from functools import lru_cache
import torch
-from modules import errors
+from modules import errors, shared
+from modules import torch_utils
if sys.platform == "darwin":
from modules import mac_specific
+if shared.cmd_opts.use_ipex:
+ from modules import xpu_specific
+
+
+def has_xpu() -> bool:
+ return shared.cmd_opts.use_ipex and xpu_specific.has_xpu
+
def has_mps() -> bool:
if sys.platform != "darwin":
@@ -16,9 +24,24 @@ def has_mps() -> bool:
return mac_specific.has_mps
-def get_cuda_device_string():
- from modules import shared
+def cuda_no_autocast(device_id=None) -> bool:
+ if device_id is None:
+ device_id = get_cuda_device_id()
+ return (
+ torch.cuda.get_device_capability(device_id) == (7, 5)
+ and torch.cuda.get_device_name(device_id).startswith("NVIDIA GeForce GTX 16")
+ )
+
+def get_cuda_device_id():
+ return (
+ int(shared.cmd_opts.device_id)
+ if shared.cmd_opts.device_id is not None and shared.cmd_opts.device_id.isdigit()
+ else 0
+ ) or torch.cuda.current_device()
+
+
+def get_cuda_device_string():
if shared.cmd_opts.device_id is not None:
return f"cuda:{shared.cmd_opts.device_id}"
@@ -32,6 +55,9 @@ def get_optimal_device_name():
if has_mps():
return "mps"
+ if has_xpu():
+ return xpu_specific.get_xpu_device_string()
+
return "cpu"
@@ -40,9 +66,7 @@ def get_optimal_device():
def get_device_for(task):
- from modules import shared
-
- if task in shared.cmd_opts.use_cpu:
+ if task in shared.cmd_opts.use_cpu or "all" in shared.cmd_opts.use_cpu:
return cpu
return get_optimal_device()
@@ -58,27 +82,34 @@ def torch_gc():
if has_mps():
mac_specific.torch_mps_gc()
+ if has_xpu():
+ xpu_specific.torch_xpu_gc()
+
def enable_tf32():
if torch.cuda.is_available():
# enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
# see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
- if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
+ if cuda_no_autocast():
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
-
errors.run(enable_tf32, "Enabling TF32")
-cpu = torch.device("cpu")
-device = device_interrogate = device_gfpgan = device_esrgan = device_codeformer = None
-dtype = torch.float16
-dtype_vae = torch.float16
-dtype_unet = torch.float16
+cpu: torch.device = torch.device("cpu")
+fp8: bool = False
+device: torch.device = None
+device_interrogate: torch.device = None
+device_gfpgan: torch.device = None
+device_esrgan: torch.device = None
+device_codeformer: torch.device = None
+dtype: torch.dtype = torch.float16
+dtype_vae: torch.dtype = torch.float16
+dtype_unet: torch.dtype = torch.float16
unet_needs_upcast = False
@@ -90,29 +121,52 @@ def cond_cast_float(input):
return input.float() if unet_needs_upcast else input
-def randn(seed, shape):
- from modules.shared import opts
+nv_rng = None
+patch_module_list = [
+ torch.nn.Linear,
+ torch.nn.Conv2d,
+ torch.nn.MultiheadAttention,
+ torch.nn.GroupNorm,
+ torch.nn.LayerNorm,
+]
- torch.manual_seed(seed)
- if opts.randn_source == "CPU" or device.type == 'mps':
- return torch.randn(shape, device=cpu).to(device)
- return torch.randn(shape, device=device)
+def manual_cast_forward(self, *args, **kwargs):
+ org_dtype = torch_utils.get_param(self).dtype
+ self.to(dtype)
+ args = [arg.to(dtype) if isinstance(arg, torch.Tensor) else arg for arg in args]
+ kwargs = {k: v.to(dtype) if isinstance(v, torch.Tensor) else v for k, v in kwargs.items()}
+ result = self.org_forward(*args, **kwargs)
+ self.to(org_dtype)
+ return result
-def randn_without_seed(shape):
- from modules.shared import opts
- if opts.randn_source == "CPU" or device.type == 'mps':
- return torch.randn(shape, device=cpu).to(device)
- return torch.randn(shape, device=device)
+@contextlib.contextmanager
+def manual_cast():
+ for module_type in patch_module_list:
+ org_forward = module_type.forward
+ module_type.forward = manual_cast_forward
+ module_type.org_forward = org_forward
+ try:
+ yield None
+ finally:
+ for module_type in patch_module_list:
+ module_type.forward = module_type.org_forward
def autocast(disable=False):
- from modules import shared
-
if disable:
return contextlib.nullcontext()
+ if fp8 and device==cpu:
+ return torch.autocast("cpu", dtype=torch.bfloat16, enabled=True)
+
+ if fp8 and (dtype == torch.float32 or shared.cmd_opts.precision == "full" or cuda_no_autocast()):
+ return manual_cast()
+
+ if has_mps() and shared.cmd_opts.precision != "full":
+ return manual_cast()
+
if dtype == torch.float32 or shared.cmd_opts.precision == "full":
return contextlib.nullcontext()
@@ -128,8 +182,6 @@ class NansException(Exception):
def test_for_nans(x, where):
- from modules import shared
-
if shared.cmd_opts.disable_nan_check:
return
@@ -169,3 +221,4 @@ def first_time_calculation():
x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
conv2d(x)
+
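To make the new manual_cast path concrete: inside the context manager, forward() on the patched layer types temporarily moves the module and its tensor arguments to devices.dtype, runs the saved org_forward, then restores the module's original dtype. The snippet below is a self-contained re-creation of that patching idea (bfloat16 stands in for devices.dtype, and only two layer types are patched), not a call into the webui modules:

import contextlib
import torch

target_dtype = torch.bfloat16  # stands in for devices.dtype

def manual_cast_forward(self, *args, **kwargs):
    # cast the module and tensor arguments, run the saved forward, then cast the module back
    org_dtype = next(self.parameters()).dtype
    self.to(target_dtype)
    args = [a.to(target_dtype) if isinstance(a, torch.Tensor) else a for a in args]
    kwargs = {k: v.to(target_dtype) if isinstance(v, torch.Tensor) else v for k, v in kwargs.items()}
    result = self.org_forward(*args, **kwargs)
    self.to(org_dtype)
    return result

@contextlib.contextmanager
def manual_cast(module_types=(torch.nn.Linear, torch.nn.Conv2d)):
    for module_type in module_types:
        module_type.org_forward = module_type.forward
        module_type.forward = manual_cast_forward
    try:
        yield
    finally:
        for module_type in module_types:
            module_type.forward = module_type.org_forward

layer = torch.nn.Linear(4, 4)   # parameters stored in float32
x = torch.randn(1, 4)           # float32 input
with manual_cast():
    y = layer(x)                # computed in bfloat16 inside the context
print(y.dtype)                  # torch.bfloat16
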
diff --git a/modules/errors.py b/modules/errors.py
index 5271a9fe..48aa13a1 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -6,6 +6,21 @@ import traceback
exception_records = []
+def format_traceback(tb):
+ return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]
+
+
+def format_exception(e, tb):
+ return {"exception": str(e), "traceback": format_traceback(tb)}
+
+
+def get_exceptions():
+ try:
+ return list(reversed(exception_records))
+ except Exception as e:
+ return str(e)
+
+
def record_exception():
_, e, tb = sys.exc_info()
if e is None:
@@ -14,7 +29,7 @@ def record_exception():
if exception_records and exception_records[-1] == e:
return
- exception_records.append((e, tb))
+ exception_records.append(format_exception(e, tb))
if len(exception_records) > 5:
exception_records.pop(0)
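With this change each recorded entry becomes a plain dict instead of a raw (exception, traceback) tuple, which keeps exception_records JSON-serializable. A small standalone illustration of the resulting shape, restating the two helpers from the hunk above (the failing function is just an example):

import sys
import traceback

def format_traceback(tb):
    return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]

def format_exception(e, tb):
    return {"exception": str(e), "traceback": format_traceback(tb)}

def boom():
    raise ValueError("example failure")

try:
    boom()
except Exception:
    _, e, tb = sys.exc_info()
    record = format_exception(e, tb)

print(record["exception"])      # example failure
print(record["traceback"][-1])  # ['<script path>, line <N>, boom', 'raise ValueError("example failure")']
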
@@ -83,3 +98,53 @@ def run(code, task):
code()
except Exception as e:
display(task, e)
+
+
+def check_versions():
+ from packaging import version
+ from modules import shared
+
+ import torch
+ import gradio
+
+ expected_torch_version = "2.1.2"
+ expected_xformers_version = "0.0.23.post1"
+ expected_gradio_version = "3.41.2"
+
+ if version.parse(torch.__version__) < version.parse(expected_torch_version):
+ print_error_explanation(f"""
+You are running torch {torch.__version__}.
+The program is tested to work with torch {expected_torch_version}.
+To reinstall the desired version, run with commandline flag --reinstall-torch.
+Beware that this will cause a lot of large files to be downloaded. Also note that
+there are reports of issues with the training tab on the latest version.
+
+Use --skip-version-check commandline argument to disable this check.
+ """.strip())
+
+ if shared.xformers_available:
+ import xformers
+
+ if version.parse(xformers.__version__) < version.parse(expected_xformers_version):
+ print_error_explanation(f"""
+You are running xformers {xformers.__version__}.
+The program is tested to work with xformers {expected_xformers_version}.
+To reinstall the desired version, run with commandline flag --reinstall-xformers.
+
+Use --skip-version-check commandline argument to disable this check.
+ """.strip())
+
+ if gradio.__version__ != expected_gradio_version:
+ print_error_explanation(f"""
+You are running gradio {gradio.__version__}.
+The program is designed to work with gradio {expected_gradio_version}.
+Using a different version of gradio is extremely likely to break the program.
+
+Possible reasons for the mismatched gradio version:
+ - you use the --skip-install flag.
+ - you use webui.py to start the program instead of launch.py.
+ - an extension installs an incompatible gradio version.
+
+Use --skip-version-check commandline argument to disable this check.
+ """.strip())
+
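check_versions leans on packaging.version for the torch and xformers comparisons (gradio is checked for exact equality instead). For reference, version.parse orders post-release and local build tags the way the checks expect:

from packaging import version

print(version.parse("2.0.1") < version.parse("2.1.2"))                # True  -> warning shown
print(version.parse("0.0.23.post1") < version.parse("0.0.23.post1"))  # False -> matching xformers, no warning
print(version.parse("2.1.2+cu121") < version.parse("2.1.2"))          # False -> CUDA-suffixed torch builds don't trigger it
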
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 02a1727d..70041ab0 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -1,121 +1,7 @@
-import sys
-
-import numpy as np
-import torch
-from PIL import Image
-
-import modules.esrgan_model_arch as arch
-from modules import modelloader, images, devices
+from modules import modelloader, devices, errors
from modules.shared import opts
from modules.upscaler import Upscaler, UpscalerData
-
-
-def mod2normal(state_dict):
- # this code is copied from https://github.com/victorca25/iNNfer
- if 'conv_first.weight' in state_dict:
- crt_net = {}
- items = list(state_dict)
-
- crt_net['model.0.weight'] = state_dict['conv_first.weight']
- crt_net['model.0.bias'] = state_dict['conv_first.bias']
-
- for k in items.copy():
- if 'RDB' in k:
- ori_k = k.replace('RRDB_trunk.', 'model.1.sub.')
- if '.weight' in k:
- ori_k = ori_k.replace('.weight', '.0.weight')
- elif '.bias' in k:
- ori_k = ori_k.replace('.bias', '.0.bias')
- crt_net[ori_k] = state_dict[k]
- items.remove(k)
-
- crt_net['model.1.sub.23.weight'] = state_dict['trunk_conv.weight']
- crt_net['model.1.sub.23.bias'] = state_dict['trunk_conv.bias']
- crt_net['model.3.weight'] = state_dict['upconv1.weight']
- crt_net['model.3.bias'] = state_dict['upconv1.bias']
- crt_net['model.6.weight'] = state_dict['upconv2.weight']
- crt_net['model.6.bias'] = state_dict['upconv2.bias']
- crt_net['model.8.weight'] = state_dict['HRconv.weight']
- crt_net['model.8.bias'] = state_dict['HRconv.bias']
- crt_net['model.10.weight'] = state_dict['conv_last.weight']
- crt_net['model.10.bias'] = state_dict['conv_last.bias']
- state_dict = crt_net
- return state_dict
-
-
-def resrgan2normal(state_dict, nb=23):
- # this code is copied from https://github.com/victorca25/iNNfer
- if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
- re8x = 0
- crt_net = {}
- items = list(state_dict)
-
- crt_net['model.0.weight'] = state_dict['conv_first.weight']
- crt_net['model.0.bias'] = state_dict['conv_first.bias']
-
- for k in items.copy():
- if "rdb" in k:
- ori_k = k.replace('body.', 'model.1.sub.')
- ori_k = ori_k.replace('.rdb', '.RDB')
- if '.weight' in k:
- ori_k = ori_k.replace('.weight', '.0.weight')
- elif '.bias' in k:
- ori_k = ori_k.replace('.bias', '.0.bias')
- crt_net[ori_k] = state_dict[k]
- items.remove(k)
-
- crt_net[f'model.1.sub.{nb}.weight'] = state_dict['conv_body.weight']
- crt_net[f'model.1.sub.{nb}.bias'] = state_dict['conv_body.bias']
- crt_net['model.3.weight'] = state_dict['conv_up1.weight']
- crt_net['model.3.bias'] = state_dict['conv_up1.bias']
- crt_net['model.6.weight'] = state_dict['conv_up2.weight']
- crt_net['model.6.bias'] = state_dict['conv_up2.bias']
-
- if 'conv_up3.weight' in state_dict:
- # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py
- re8x = 3
- crt_net['model.9.weight'] = state_dict['conv_up3.weight']
- crt_net['model.9.bias'] = state_dict['conv_up3.bias']
-
- crt_net[f'model.{8+re8x}.weight'] = state_dict['conv_hr.weight']
- crt_net[f'model.{8+re8x}.bias'] = state_dict['conv_hr.bias']
- crt_net[f'model.{10+re8x}.weight'] = state_dict['conv_last.weight']
- crt_net[f'model.{10+re8x}.bias'] = state_dict['conv_last.bias']
-
- state_dict = crt_net
- return state_dict
-
-
-def infer_params(state_dict):
- # this code is copied from https://github.com/victorca25/iNNfer
- scale2x = 0
- scalemin = 6
- n_uplayer = 0
- plus = False
-
- for block in list(state_dict):
- parts = block.split(".")
- n_parts = len(parts)
- if n_parts == 5 and parts[2] == "sub":
- nb = int(parts[3])
- elif n_parts == 3:
- part_num = int(parts[1])
- if (part_num > scalemin
- and parts[0] == "model"
- and parts[2] == "weight"):
- scale2x += 1
- if part_num > n_uplayer:
- n_uplayer = part_num
- out_nc = state_dict[block].shape[0]
- if not plus and "conv1x1" in block:
- plus = True
-
- nf = state_dict["model.0.weight"].shape[0]
- in_nc = state_dict["model.0.weight"].shape[1]
- out_nc = out_nc
- scale = 2 ** scale2x
-
- return in_nc, out_nc, nf, nb, plus, scale
+from modules.upscaler_utils import upscale_with_model
class UpscalerESRGAN(Upscaler):
@@ -143,12 +29,11 @@ class UpscalerESRGAN(Upscaler):
def do_upscale(self, img, selected_model):
try:
model = self.load_model(selected_model)
- except Exception as e:
- print(f"Unable to load ESRGAN model {selected_model}: {e}", file=sys.stderr)
+ except Exception:
+ errors.report(f"Unable to load ESRGAN model {selected_model}", exc_info=True)
return img
model.to(devices.device_esrgan)
- img = esrgan_upscale(model, img)
- return img
+ return esrgan_upscale(model, img)
def load_model(self, path: str):
if path.startswith("http"):
@@ -161,69 +46,17 @@ class UpscalerESRGAN(Upscaler):
else:
filename = path
- state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None)
-
- if "params_ema" in state_dict:
- state_dict = state_dict["params_ema"]
- elif "params" in state_dict:
- state_dict = state_dict["params"]
- num_conv = 16 if "realesr-animevideov3" in filename else 32
- model = arch.SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=num_conv, upscale=4, act_type='prelu')
- model.load_state_dict(state_dict)
- model.eval()
- return model
-
- if "body.0.rdb1.conv1.weight" in state_dict and "conv_first.weight" in state_dict:
- nb = 6 if "RealESRGAN_x4plus_anime_6B" in filename else 23
- state_dict = resrgan2normal(state_dict, nb)
- elif "conv_first.weight" in state_dict:
- state_dict = mod2normal(state_dict)
- elif "model.0.weight" not in state_dict:
- raise Exception("The file is not a recognized ESRGAN model.")
-
- in_nc, out_nc, nf, nb, plus, mscale = infer_params(state_dict)
-
- model = arch.RRDBNet(in_nc=in_nc, out_nc=out_nc, nf=nf, nb=nb, upscale=mscale, plus=plus)
- model.load_state_dict(state_dict)
- model.eval()
-
- return model
-
-
-def upscale_without_tiling(model, img):
- img = np.array(img)
- img = img[:, :, ::-1]
- img = np.ascontiguousarray(np.transpose(img, (2, 0, 1))) / 255
- img = torch.from_numpy(img).float()
- img = img.unsqueeze(0).to(devices.device_esrgan)
- with torch.no_grad():
- output = model(img)
- output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
- output = 255. * np.moveaxis(output, 0, 2)
- output = output.astype(np.uint8)
- output = output[:, :, ::-1]
- return Image.fromarray(output, 'RGB')
+ return modelloader.load_spandrel_model(
+ filename,
+ device=('cpu' if devices.device_esrgan.type == 'mps' else None),
+ expected_architecture='ESRGAN',
+ )
def esrgan_upscale(model, img):
- if opts.ESRGAN_tile == 0:
- return upscale_without_tiling(model, img)
-
- grid = images.split_grid(img, opts.ESRGAN_tile, opts.ESRGAN_tile, opts.ESRGAN_tile_overlap)
- newtiles = []
- scale_factor = 1
-
- for y, h, row in grid.tiles:
- newrow = []
- for tiledata in row:
- x, w, tile = tiledata
-
- output = upscale_without_tiling(model, tile)
- scale_factor = output.width // tile.width
-
- newrow.append([x * scale_factor, w * scale_factor, output])
- newtiles.append([y * scale_factor, h * scale_factor, newrow])
-
- newgrid = images.Grid(newtiles, grid.tile_w * scale_factor, grid.tile_h * scale_factor, grid.image_w * scale_factor, grid.image_h * scale_factor, grid.overlap * scale_factor)
- output = images.combine_grid(newgrid)
- return output
+ return upscale_with_model(
+ model,
+ img,
+ tile_size=opts.ESRGAN_tile,
+ tile_overlap=opts.ESRGAN_tile_overlap,
+ )
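upscale_with_model itself lives in modules/upscaler_utils.py, which is outside this diff, so the snippet below is only a rough stand-in for the tiling idea that replaced the deleted per-tile loop: split the image into overlapping tiles, upscale each, and paste the results back at scaled coordinates. Overlap blending, batching and device handling are omitted, and the function and parameter names here are illustrative, not the real helper's API:

from PIL import Image

def upscale_tiled(img: Image.Image, upscale_tile, tile_size=192, tile_overlap=8) -> Image.Image:
    # upscale_tile is any callable that maps a PIL tile to an upscaled PIL tile
    if tile_size <= 0:
        return upscale_tile(img)

    # probe one tile to learn the model's scale factor
    probe_w = min(tile_size, img.width)
    probe = upscale_tile(img.crop((0, 0, probe_w, min(tile_size, img.height))))
    scale = probe.width // probe_w

    out = Image.new("RGB", (img.width * scale, img.height * scale))
    step = tile_size - tile_overlap
    for y in range(0, img.height, step):
        for x in range(0, img.width, step):
            tile = img.crop((x, y, min(x + tile_size, img.width), min(y + tile_size, img.height)))
            out.paste(upscale_tile(tile), (x * scale, y * scale))
    return out
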
diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py
deleted file mode 100644
index 2b9888ba..00000000
--- a/modules/esrgan_model_arch.py
+++ /dev/null
@@ -1,465 +0,0 @@
-# this file is adapted from https://github.com/victorca25/iNNfer
-
-from collections import OrderedDict
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-####################
-# RRDBNet Generator
-####################
-
-class RRDBNet(nn.Module):
- def __init__(self, in_nc, out_nc, nf, nb, nr=3, gc=32, upscale=4, norm_type=None,
- act_type='leakyrelu', mode='CNA', upsample_mode='upconv', convtype='Conv2D',
- finalact=None, gaussian_noise=False, plus=False):
- super(RRDBNet, self).__init__()
- n_upscale = int(math.log(upscale, 2))
- if upscale == 3:
- n_upscale = 1
-
- self.resrgan_scale = 0
- if in_nc % 16 == 0:
- self.resrgan_scale = 1
- elif in_nc != 4 and in_nc % 4 == 0:
- self.resrgan_scale = 2
-
- fea_conv = conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None, convtype=convtype)
- rb_blocks = [RRDB(nf, nr, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero',
- norm_type=norm_type, act_type=act_type, mode='CNA', convtype=convtype,
- gaussian_noise=gaussian_noise, plus=plus) for _ in range(nb)]
- LR_conv = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode, convtype=convtype)
-
- if upsample_mode == 'upconv':
- upsample_block = upconv_block
- elif upsample_mode == 'pixelshuffle':
- upsample_block = pixelshuffle_block
- else:
- raise NotImplementedError(f'upsample mode [{upsample_mode}] is not found')
- if upscale == 3:
- upsampler = upsample_block(nf, nf, 3, act_type=act_type, convtype=convtype)
- else:
- upsampler = [upsample_block(nf, nf, act_type=act_type, convtype=convtype) for _ in range(n_upscale)]
- HR_conv0 = conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type, convtype=convtype)
- HR_conv1 = conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None, convtype=convtype)
-
- outact = act(finalact) if finalact else None
-
- self.model = sequential(fea_conv, ShortcutBlock(sequential(*rb_blocks, LR_conv)),
- *upsampler, HR_conv0, HR_conv1, outact)
-
- def forward(self, x, outm=None):
- if self.resrgan_scale == 1:
- feat = pixel_unshuffle(x, scale=4)
- elif self.resrgan_scale == 2:
- feat = pixel_unshuffle(x, scale=2)
- else:
- feat = x
-
- return self.model(feat)
-
-
-class RRDB(nn.Module):
- """
- Residual in Residual Dense Block
- (ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks)
- """
-
- def __init__(self, nf, nr=3, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero',
- norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D',
- spectral_norm=False, gaussian_noise=False, plus=False):
- super(RRDB, self).__init__()
- # This is for backwards compatibility with existing models
- if nr == 3:
- self.RDB1 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
- norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
- gaussian_noise=gaussian_noise, plus=plus)
- self.RDB2 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
- norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
- gaussian_noise=gaussian_noise, plus=plus)
- self.RDB3 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
- norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
- gaussian_noise=gaussian_noise, plus=plus)
- else:
- RDB_list = [ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
- norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
- gaussian_noise=gaussian_noise, plus=plus) for _ in range(nr)]
- self.RDBs = nn.Sequential(*RDB_list)
-
- def forward(self, x):
- if hasattr(self, 'RDB1'):
- out = self.RDB1(x)
- out = self.RDB2(out)
- out = self.RDB3(out)
- else:
- out = self.RDBs(x)
- return out * 0.2 + x
-
-
-class ResidualDenseBlock_5C(nn.Module):
- """
- Residual Dense Block
- The core module of paper: (Residual Dense Network for Image Super-Resolution, CVPR 18)
- Modified options that can be used:
- - "Partial Convolution based Padding" arXiv:1811.11718
- - "Spectral normalization" arXiv:1802.05957
- - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C.
- {Rakotonirina} and A. {Rasoanaivo}
- """
-
- def __init__(self, nf=64, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero',
- norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D',
- spectral_norm=False, gaussian_noise=False, plus=False):
- super(ResidualDenseBlock_5C, self).__init__()
-
- self.noise = GaussianNoise() if gaussian_noise else None
- self.conv1x1 = conv1x1(nf, gc) if plus else None
-
- self.conv1 = conv_block(nf, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
- norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
- spectral_norm=spectral_norm)
- self.conv2 = conv_block(nf+gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
- norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
- spectral_norm=spectral_norm)
- self.conv3 = conv_block(nf+2*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
- norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
- spectral_norm=spectral_norm)
- self.conv4 = conv_block(nf+3*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
- norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
- spectral_norm=spectral_norm)
- if mode == 'CNA':
- last_act = None
- else:
- last_act = act_type
- self.conv5 = conv_block(nf+4*gc, nf, 3, stride, bias=bias, pad_type=pad_type,
- norm_type=norm_type, act_type=last_act, mode=mode, convtype=convtype,
- spectral_norm=spectral_norm)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv2(torch.cat((x, x1), 1))
- if self.conv1x1:
- x2 = x2 + self.conv1x1(x)
- x3 = self.conv3(torch.cat((x, x1, x2), 1))
- x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
- if self.conv1x1:
- x4 = x4 + x2
- x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
- if self.noise:
- return self.noise(x5.mul(0.2) + x)
- else:
- return x5 * 0.2 + x
-
-
-####################
-# ESRGANplus
-####################
-
-class GaussianNoise(nn.Module):
- def __init__(self, sigma=0.1, is_relative_detach=False):
- super().__init__()
- self.sigma = sigma
- self.is_relative_detach = is_relative_detach
- self.noise = torch.tensor(0, dtype=torch.float)
-
- def forward(self, x):
- if self.training and self.sigma != 0:
- self.noise = self.noise.to(x.device)
- scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x
- sampled_noise = self.noise.repeat(*x.size()).normal_() * scale
- x = x + sampled_noise
- return x
-
-def conv1x1(in_planes, out_planes, stride=1):
- return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
-
-
-####################
-# SRVGGNetCompact
-####################
-
-class SRVGGNetCompact(nn.Module):
- """A compact VGG-style network structure for super-resolution.
- This class is copied from https://github.com/xinntao/Real-ESRGAN
- """
-
- def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
- super(SRVGGNetCompact, self).__init__()
- self.num_in_ch = num_in_ch
- self.num_out_ch = num_out_ch
- self.num_feat = num_feat
- self.num_conv = num_conv
- self.upscale = upscale
- self.act_type = act_type
-
- self.body = nn.ModuleList()
- # the first conv
- self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1))
- # the first activation
- if act_type == 'relu':
- activation = nn.ReLU(inplace=True)
- elif act_type == 'prelu':
- activation = nn.PReLU(num_parameters=num_feat)
- elif act_type == 'leakyrelu':
- activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
- self.body.append(activation)
-
- # the body structure
- for _ in range(num_conv):
- self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1))
- # activation
- if act_type == 'relu':
- activation = nn.ReLU(inplace=True)
- elif act_type == 'prelu':
- activation = nn.PReLU(num_parameters=num_feat)
- elif act_type == 'leakyrelu':
- activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
- self.body.append(activation)
-
- # the last conv
- self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1))
- # upsample
- self.upsampler = nn.PixelShuffle(upscale)
-
- def forward(self, x):
- out = x
- for i in range(0, len(self.body)):
- out = self.body[i](out)
-
- out = self.upsampler(out)
- # add the nearest upsampled image, so that the network learns the residual
- base = F.interpolate(x, scale_factor=self.upscale, mode='nearest')
- out += base
- return out
-
-
-####################
-# Upsampler
-####################
-
-class Upsample(nn.Module):
- r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.
- The input data is assumed to be of the form
- `minibatch x channels x [optional depth] x [optional height] x width`.
- """
-
- def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None):
- super(Upsample, self).__init__()
- if isinstance(scale_factor, tuple):
- self.scale_factor = tuple(float(factor) for factor in scale_factor)
- else:
- self.scale_factor = float(scale_factor) if scale_factor else None
- self.mode = mode
- self.size = size
- self.align_corners = align_corners
-
- def forward(self, x):
- return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners)
-
- def extra_repr(self):
- if self.scale_factor is not None:
- info = f'scale_factor={self.scale_factor}'
- else:
- info = f'size={self.size}'
- info += f', mode={self.mode}'
- return info
-
-
-def pixel_unshuffle(x, scale):
- """ Pixel unshuffle.
- Args:
- x (Tensor): Input feature with shape (b, c, hh, hw).
- scale (int): Downsample ratio.
- Returns:
- Tensor: the pixel unshuffled feature.
- """
- b, c, hh, hw = x.size()
- out_channel = c * (scale**2)
- assert hh % scale == 0 and hw % scale == 0
- h = hh // scale
- w = hw // scale
- x_view = x.view(b, c, h, scale, w, scale)
- return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
-
-
-def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
- pad_type='zero', norm_type=None, act_type='relu', convtype='Conv2D'):
- """
- Pixel shuffle layer
- (Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional
- Neural Network, CVPR17)
- """
- conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias,
- pad_type=pad_type, norm_type=None, act_type=None, convtype=convtype)
- pixel_shuffle = nn.PixelShuffle(upscale_factor)
-
- n = norm(norm_type, out_nc) if norm_type else None
- a = act(act_type) if act_type else None
- return sequential(conv, pixel_shuffle, n, a)
-
-
-def upconv_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
- pad_type='zero', norm_type=None, act_type='relu', mode='nearest', convtype='Conv2D'):
- """ Upconv layer """
- upscale_factor = (1, upscale_factor, upscale_factor) if convtype == 'Conv3D' else upscale_factor
- upsample = Upsample(scale_factor=upscale_factor, mode=mode)
- conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias,
- pad_type=pad_type, norm_type=norm_type, act_type=act_type, convtype=convtype)
- return sequential(upsample, conv)
-
-
-
-
-
-
-
-
-####################
-# Basic blocks
-####################
-
-
-def make_layer(basic_block, num_basic_block, **kwarg):
- """Make layers by stacking the same blocks.
- Args:
- basic_block (nn.module): nn.module class for basic block. (block)
- num_basic_block (int): number of blocks. (n_layers)
- Returns:
- nn.Sequential: Stacked blocks in nn.Sequential.
- """
- layers = []
- for _ in range(num_basic_block):
- layers.append(basic_block(**kwarg))
- return nn.Sequential(*layers)
-
-
-def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1, beta=1.0):
- """ activation helper """
- act_type = act_type.lower()
- if act_type == 'relu':
- layer = nn.ReLU(inplace)
- elif act_type in ('leakyrelu', 'lrelu'):
- layer = nn.LeakyReLU(neg_slope, inplace)
- elif act_type == 'prelu':
- layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
- elif act_type == 'tanh': # [-1, 1] range output
- layer = nn.Tanh()
- elif act_type == 'sigmoid': # [0, 1] range output
- layer = nn.Sigmoid()
- else:
- raise NotImplementedError(f'activation layer [{act_type}] is not found')
- return layer
-
-
-class Identity(nn.Module):
- def __init__(self, *kwargs):
- super(Identity, self).__init__()
-
- def forward(self, x, *kwargs):
- return x
-
-
-def norm(norm_type, nc):
- """ Return a normalization layer """
- norm_type = norm_type.lower()
- if norm_type == 'batch':
- layer = nn.BatchNorm2d(nc, affine=True)
- elif norm_type == 'instance':
- layer = nn.InstanceNorm2d(nc, affine=False)
- elif norm_type == 'none':
- def norm_layer(x): return Identity()
- else:
- raise NotImplementedError(f'normalization layer [{norm_type}] is not found')
- return layer
-
-
-def pad(pad_type, padding):
- """ padding layer helper """
- pad_type = pad_type.lower()
- if padding == 0:
- return None
- if pad_type == 'reflect':
- layer = nn.ReflectionPad2d(padding)
- elif pad_type == 'replicate':
- layer = nn.ReplicationPad2d(padding)
- elif pad_type == 'zero':
- layer = nn.ZeroPad2d(padding)
- else:
- raise NotImplementedError(f'padding layer [{pad_type}] is not implemented')
- return layer
-
-
-def get_valid_padding(kernel_size, dilation):
- kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
- padding = (kernel_size - 1) // 2
- return padding
-
-
-class ShortcutBlock(nn.Module):
- """ Elementwise sum the output of a submodule to its input """
- def __init__(self, submodule):
- super(ShortcutBlock, self).__init__()
- self.sub = submodule
-
- def forward(self, x):
- output = x + self.sub(x)
- return output
-
- def __repr__(self):
- return 'Identity + \n|' + self.sub.__repr__().replace('\n', '\n|')
-
-
-def sequential(*args):
- """ Flatten Sequential. It unwraps nn.Sequential. """
- if len(args) == 1:
- if isinstance(args[0], OrderedDict):
- raise NotImplementedError('sequential does not support OrderedDict input.')
- return args[0] # No sequential is needed.
- modules = []
- for module in args:
- if isinstance(module, nn.Sequential):
- for submodule in module.children():
- modules.append(submodule)
- elif isinstance(module, nn.Module):
- modules.append(module)
- return nn.Sequential(*modules)
-
-
-def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True,
- pad_type='zero', norm_type=None, act_type='relu', mode='CNA', convtype='Conv2D',
- spectral_norm=False):
- """ Conv layer with padding, normalization, activation """
- assert mode in ['CNA', 'NAC', 'CNAC'], f'Wrong conv mode [{mode}]'
- padding = get_valid_padding(kernel_size, dilation)
- p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
- padding = padding if pad_type == 'zero' else 0
-
- if convtype=='PartialConv2D':
- from torchvision.ops import PartialConv2d # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer
- c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
- dilation=dilation, bias=bias, groups=groups)
- elif convtype=='DeformConv2D':
- from torchvision.ops import DeformConv2d # not tested
- c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
- dilation=dilation, bias=bias, groups=groups)
- elif convtype=='Conv3D':
- c = nn.Conv3d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
- dilation=dilation, bias=bias, groups=groups)
- else:
- c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
- dilation=dilation, bias=bias, groups=groups)
-
- if spectral_norm:
- c = nn.utils.spectral_norm(c)
-
- a = act(act_type) if act_type else None
- if 'CNA' in mode:
- n = norm(norm_type, out_nc) if norm_type else None
- return sequential(p, c, n, a)
- elif mode == 'NAC':
- if norm_type is None and act_type is not None:
- a = act(act_type, inplace=False)
- n = norm(norm_type, in_nc) if norm_type else None
- return sequential(n, a, p, c)
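
Note: the pixel_unshuffle helper removed above folds each scale x scale spatial block into the channel dimension; a minimal standalone sketch of that reshape (independent of the webui code, shapes chosen for illustration):

import torch

def pixel_unshuffle(x: torch.Tensor, scale: int) -> torch.Tensor:
    # (b, c, H, W) -> (b, c * scale**2, H // scale, W // scale)
    b, c, hh, hw = x.size()
    assert hh % scale == 0 and hw % scale == 0
    h, w = hh // scale, hw // scale
    x_view = x.view(b, c, h, scale, w, scale)
    return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, c * scale ** 2, h, w)

x = torch.arange(16.0).view(1, 1, 4, 4)
print(pixel_unshuffle(x, 2).shape)  # torch.Size([1, 4, 2, 2])
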
diff --git a/modules/extensions.py b/modules/extensions.py
index 3ad5ed53..1899cd52 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -1,29 +1,76 @@
+from __future__ import annotations
+
+import configparser
import os
import threading
+import re
-from modules import shared, errors, cache
+from modules import shared, errors, cache, scripts
from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
-extensions = []
os.makedirs(extensions_dir, exist_ok=True)
def active():
- if shared.opts.disable_all_extensions == "all":
+ if shared.cmd_opts.disable_all_extensions or shared.opts.disable_all_extensions == "all":
return []
- elif shared.opts.disable_all_extensions == "extra":
+ elif shared.cmd_opts.disable_extra_extensions or shared.opts.disable_all_extensions == "extra":
return [x for x in extensions if x.enabled and x.is_builtin]
else:
return [x for x in extensions if x.enabled]
+class ExtensionMetadata:
+ filename = "metadata.ini"
+ config: configparser.ConfigParser
+ canonical_name: str
+ requires: list
+
+ def __init__(self, path, canonical_name):
+ self.config = configparser.ConfigParser()
+
+ filepath = os.path.join(path, self.filename)
+ if os.path.isfile(filepath):
+ try:
+ self.config.read(filepath)
+ except Exception:
+ errors.report(f"Error reading {self.filename} for extension {canonical_name}.", exc_info=True)
+
+ self.canonical_name = self.config.get("Extension", "Name", fallback=canonical_name)
+ self.canonical_name = self.canonical_name.lower().strip()
+
+ self.requires = self.get_script_requirements("Requires", "Extension")
+
+ def get_script_requirements(self, field, section, extra_section=None):
+ """reads a list of requirements from the config; field is the name of the field in the ini file,
+ like Requires or Before, and section is the name of the [section] in the ini file; additionally,
+ reads more requirements from [extra_section] if specified."""
+
+ x = self.config.get(section, field, fallback='')
+
+ if extra_section:
+ x = x + ', ' + self.config.get(extra_section, field, fallback='')
+
+ return self.parse_list(x.lower())
+
+ def parse_list(self, text):
+ """converts a line from config ("ext1 ext2, ext3 ") into a python list (["ext1", "ext2", "ext3"])"""
+
+ if not text:
+ return []
+
+ # both "," and " " are accepted as separator
+ return [x for x in re.split(r"[,\s]+", text.strip()) if x]
+
+
class Extension:
lock = threading.Lock()
cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version']
+ metadata: ExtensionMetadata
- def __init__(self, name, path, enabled=True, is_builtin=False):
+ def __init__(self, name, path, enabled=True, is_builtin=False, metadata=None):
self.name = name
self.path = path
self.enabled = enabled
@@ -36,6 +83,8 @@ class Extension:
self.branch = None
self.remote = None
self.have_info_from_repo = False
+ self.metadata = metadata if metadata else ExtensionMetadata(self.path, name.lower())
+ self.canonical_name = self.metadata.canonical_name
def to_dict(self):
return {x: getattr(self, x) for x in self.cached_fields}
@@ -56,6 +105,7 @@ class Extension:
self.do_read_info_from_repo()
return self.to_dict()
+
try:
d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
self.from_dict(d)
@@ -90,8 +140,6 @@ class Extension:
self.have_info_from_repo = True
def list_files(self, subdir, extension):
- from modules import scripts
-
dirpath = os.path.join(self.path, subdir)
if not os.path.isdir(dirpath):
return []
@@ -138,26 +186,52 @@ class Extension:
def list_extensions():
extensions.clear()
- if not os.path.isdir(extensions_dir):
- return
-
- if shared.opts.disable_all_extensions == "all":
+ if shared.cmd_opts.disable_all_extensions:
+ print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***")
+ elif shared.opts.disable_all_extensions == "all":
print("*** \"Disable all extensions\" option was set, will not load any extensions ***")
+ elif shared.cmd_opts.disable_extra_extensions:
+ print("*** \"--disable-extra-extensions\" arg was used, will only load built-in extensions ***")
elif shared.opts.disable_all_extensions == "extra":
print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***")
- extension_paths = []
- for dirname in [extensions_dir, extensions_builtin_dir]:
+ loaded_extensions = {}
+
+ # scan through extensions directory and load metadata
+ for dirname in [extensions_builtin_dir, extensions_dir]:
if not os.path.isdir(dirname):
- return
+ continue
for extension_dirname in sorted(os.listdir(dirname)):
path = os.path.join(dirname, extension_dirname)
if not os.path.isdir(path):
continue
- extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir))
+ canonical_name = extension_dirname
+ metadata = ExtensionMetadata(path, canonical_name)
+
+ # check for duplicated canonical names
+ already_loaded_extension = loaded_extensions.get(metadata.canonical_name)
+ if already_loaded_extension is not None:
+ errors.report(f'Duplicate canonical name "{canonical_name}" found in extensions "{extension_dirname}" and "{already_loaded_extension.name}". The former will be discarded.', exc_info=False)
+ continue
+
+ is_builtin = dirname == extensions_builtin_dir
+ extension = Extension(name=extension_dirname, path=path, enabled=extension_dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin, metadata=metadata)
+ extensions.append(extension)
+ loaded_extensions[canonical_name] = extension
+
+ # check for requirements
+ for extension in extensions:
+ for req in extension.metadata.requires:
+ required_extension = loaded_extensions.get(req)
+ if required_extension is None:
+ errors.report(f'Extension "{extension.name}" requires "{req}" which is not installed.', exc_info=False)
+ continue
+
+ if not extension.enabled:
+ errors.report(f'Extension "{extension.name}" requires "{required_extension.name}" which is disabled.', exc_info=False)
+ continue
+
- for dirname, path, is_builtin in extension_paths:
- extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin)
- extensions.append(extension)
+extensions: list[Extension] = []
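
Note: the new ExtensionMetadata class reads an optional metadata.ini from each extension directory; a rough sketch of the parsing it performs (the ini contents and extension names below are made up for illustration):

import configparser
import re

ini_text = """
[Extension]
Name = my-extension
Requires = some-required-extension, another-one
"""

config = configparser.ConfigParser()
config.read_string(ini_text)

# same splitting rule as ExtensionMetadata.parse_list: "," and whitespace both separate items
requires = config.get("Extension", "Requires", fallback="")
print([x for x in re.split(r"[,\s]+", requires.strip().lower()) if x])
# ['some-required-extension', 'another-one']
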
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
index 6ae07e91..b9533677 100644
--- a/modules/extra_networks.py
+++ b/modules/extra_networks.py
@@ -1,4 +1,7 @@
+import json
+import os
import re
+import logging
from collections import defaultdict
from modules import errors
@@ -84,27 +87,55 @@ class ExtraNetwork:
raise NotImplementedError
-def activate(p, extra_network_data):
- """call activate for extra networks in extra_network_data in specified order, then call
- activate for all remaining registered networks with an empty argument list"""
+def lookup_extra_networks(extra_network_data):
+ """returns a dict mapping ExtraNetwork objects to lists of arguments for those extra networks.
- activated = []
+ Example input:
+ {
+ 'lora': [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D58310>],
+ 'lyco': [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D58F70>],
+ 'hypernet': [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D5A800>]
+ }
+
+ Example output:
+
+ {
+ <extra_networks_lora.ExtraNetworkLora object at 0x0000020581BEECE0>: [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D58310>, <modules.extra_networks.ExtraNetworkParams object at 0x0000020690D58F70>],
+ <modules.extra_networks_hypernet.ExtraNetworkHypernet object at 0x0000020581BEEE60>: [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D5A800>]
+ }
+ """
+
+ res = {}
- for extra_network_name, extra_network_args in extra_network_data.items():
+ for extra_network_name, extra_network_args in list(extra_network_data.items()):
extra_network = extra_network_registry.get(extra_network_name, None)
+ alias = extra_network_aliases.get(extra_network_name, None)
- if extra_network is None:
- extra_network = extra_network_aliases.get(extra_network_name, None)
+ if alias is not None and extra_network is None:
+ extra_network = alias
if extra_network is None:
- print(f"Skipping unknown extra network: {extra_network_name}")
+ logging.info(f"Skipping unknown extra network: {extra_network_name}")
continue
+ res.setdefault(extra_network, []).extend(extra_network_args)
+
+ return res
+
+
+def activate(p, extra_network_data):
+ """call activate for extra networks in extra_network_data in specified order, then call
+ activate for all remaining registered networks with an empty argument list"""
+
+ activated = []
+
+ for extra_network, extra_network_args in lookup_extra_networks(extra_network_data).items():
+
try:
extra_network.activate(p, extra_network_args)
activated.append(extra_network)
except Exception as e:
- errors.display(e, f"activating extra network {extra_network_name} with arguments {extra_network_args}")
+ errors.display(e, f"activating extra network {extra_network.name} with arguments {extra_network_args}")
for extra_network_name, extra_network in extra_network_registry.items():
if extra_network in activated:
@@ -123,19 +154,16 @@ def deactivate(p, extra_network_data):
"""call deactivate for extra networks in extra_network_data in specified order, then call
deactivate for all remaining registered networks"""
- for extra_network_name in extra_network_data:
- extra_network = extra_network_registry.get(extra_network_name, None)
- if extra_network is None:
- continue
+ data = lookup_extra_networks(extra_network_data)
+ for extra_network in data:
try:
extra_network.deactivate(p)
except Exception as e:
- errors.display(e, f"deactivating extra network {extra_network_name}")
+ errors.display(e, f"deactivating extra network {extra_network.name}")
for extra_network_name, extra_network in extra_network_registry.items():
- args = extra_network_data.get(extra_network_name, None)
- if args is not None:
+ if extra_network in data:
continue
try:
@@ -177,3 +205,20 @@ def parse_prompts(prompts):
return res, extra_data
+
+def get_user_metadata(filename):
+ if filename is None:
+ return {}
+
+ basename, ext = os.path.splitext(filename)
+ metadata_filename = basename + '.json'
+
+ metadata = {}
+ try:
+ if os.path.isfile(metadata_filename):
+ with open(metadata_filename, "r", encoding="utf8") as file:
+ metadata = json.load(file)
+ except Exception as e:
+ errors.display(e, f"reading extra network user metadata from {metadata_filename}")
+
+ return metadata
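
Note: get_user_metadata pairs a network file with a .json sidecar of the same basename; a hedged usage sketch (the file path below is an assumption, not something from this diff):

from modules import extra_networks

# For "models/Lora/example.safetensors" this reads "models/Lora/example.json" if it
# exists and returns its contents as a dict; otherwise it returns {} (errors are
# reported via modules.errors, not raised).
metadata = extra_networks.get_user_metadata("models/Lora/example.safetensors")
print(metadata.get("description", "<no user metadata>"))
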
diff --git a/modules/extras.py b/modules/extras.py
index e9c0263e..2a310ae3 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -7,7 +7,7 @@ import json
import torch
import tqdm
-from modules import shared, images, sd_models, sd_vae, sd_models_config
+from modules import shared, images, sd_models, sd_vae, sd_models_config, errors
from modules.ui_common import plaintext_to_html
import gradio as gr
import safetensors.torch
@@ -72,7 +72,20 @@ def to_half(tensor, enable):
return tensor
-def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata):
+def read_metadata(primary_model_name, secondary_model_name, tertiary_model_name):
+ metadata = {}
+
+ for checkpoint_name in [primary_model_name, secondary_model_name, tertiary_model_name]:
+ checkpoint_info = sd_models.checkpoints_list.get(checkpoint_name, None)
+ if checkpoint_info is None:
+ continue
+
+ metadata.update(checkpoint_info.metadata)
+
+ return json.dumps(metadata, indent=4, ensure_ascii=False)
+
+
+def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata, add_merge_recipe, copy_metadata_fields, metadata_json):
shared.state.begin(job="model-merge")
def fail(message):
@@ -241,11 +254,25 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
shared.state.textinfo = "Saving"
print(f"Saving to {output_modelname}...")
- metadata = None
+ metadata = {}
+
+ if save_metadata and copy_metadata_fields:
+ if primary_model_info:
+ metadata.update(primary_model_info.metadata)
+ if secondary_model_info:
+ metadata.update(secondary_model_info.metadata)
+ if tertiary_model_info:
+ metadata.update(tertiary_model_info.metadata)
if save_metadata:
- metadata = {"format": "pt"}
+ try:
+ metadata.update(json.loads(metadata_json))
+ except Exception as e:
+ errors.display(e, "reading metadata from json")
+
+ metadata["format"] = "pt"
+ if save_metadata and add_merge_recipe:
merge_recipe = {
"type": "webui", # indicate this model was merged with webui's built-in merger
"primary_model_hash": primary_model_info.sha256,
@@ -261,7 +288,6 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
"is_inpainting": result_is_inpainting_model,
"is_instruct_pix2pix": result_is_instruct_pix2pix_model
}
- metadata["sd_merge_recipe"] = json.dumps(merge_recipe)
sd_merge_models = {}
@@ -281,11 +307,12 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
if tertiary_model_info:
add_model_metadata(tertiary_model_info)
+ metadata["sd_merge_recipe"] = json.dumps(merge_recipe)
metadata["sd_merge_models"] = json.dumps(sd_merge_models)
_, extension = os.path.splitext(output_modelname)
if extension.lower() == ".safetensors":
- safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata)
+ safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata if len(metadata)>0 else None)
else:
torch.save(theta_0, output_modelname)
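
Note: to make the metadata precedence in run_modelmerger explicit: copied checkpoint fields come first, the user-supplied JSON overrides them, "format" is always forced to "pt", and the merge recipe is appended last. A toy sketch of that order (all values are placeholders):

import json

metadata = {}
metadata_json = '{"title": "my merged model"}'  # what the user typed into the metadata box

# 1. copy_metadata_fields: fields from primary/secondary/tertiary checkpoints
metadata.update({"title": "primary checkpoint", "author": "someone"})
# 2. user JSON wins over copied fields
metadata.update(json.loads(metadata_json))
# 3. format is always set
metadata["format"] = "pt"
# 4. add_merge_recipe: recipe and source-model info are stored as JSON strings
metadata["sd_merge_recipe"] = json.dumps({"type": "webui"})

print(metadata["title"])  # 'my merged model'
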
diff --git a/modules/face_restoration_utils.py b/modules/face_restoration_utils.py
new file mode 100644
index 00000000..1cbac236
--- /dev/null
+++ b/modules/face_restoration_utils.py
@@ -0,0 +1,180 @@
+from __future__ import annotations
+
+import logging
+import os
+from functools import cached_property
+from typing import TYPE_CHECKING, Callable
+
+import cv2
+import numpy as np
+import torch
+
+from modules import devices, errors, face_restoration, shared
+
+if TYPE_CHECKING:
+ from facexlib.utils.face_restoration_helper import FaceRestoreHelper
+
+logger = logging.getLogger(__name__)
+
+
+def bgr_image_to_rgb_tensor(img: np.ndarray) -> torch.Tensor:
+ """Convert a BGR NumPy image in [0..1] range to a PyTorch RGB float32 tensor."""
+ assert img.shape[2] == 3, "image must be RGB"
+ if img.dtype == "float64":
+ img = img.astype("float32")
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ return torch.from_numpy(img.transpose(2, 0, 1)).float()
+
+
+def rgb_tensor_to_bgr_image(tensor: torch.Tensor, *, min_max=(0.0, 1.0)) -> np.ndarray:
+ """
+ Convert a PyTorch RGB tensor in range `min_max` to a BGR NumPy image in [0..1] range.
+ """
+ tensor = tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
+ tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])
+ assert tensor.dim() == 3, "tensor must be RGB"
+ img_np = tensor.numpy().transpose(1, 2, 0)
+ if img_np.shape[2] == 1: # gray image, no RGB/BGR required
+ return np.squeeze(img_np, axis=2)
+ return cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
+
+
+def create_face_helper(device) -> FaceRestoreHelper:
+ from facexlib.detection import retinaface
+ from facexlib.utils.face_restoration_helper import FaceRestoreHelper
+ if hasattr(retinaface, 'device'):
+ retinaface.device = device
+ return FaceRestoreHelper(
+ upscale_factor=1,
+ face_size=512,
+ crop_ratio=(1, 1),
+ det_model='retinaface_resnet50',
+ save_ext='png',
+ use_parse=True,
+ device=device,
+ )
+
+
+def restore_with_face_helper(
+ np_image: np.ndarray,
+ face_helper: FaceRestoreHelper,
+ restore_face: Callable[[torch.Tensor], torch.Tensor],
+) -> np.ndarray:
+ """
+ Find faces in the image using face_helper, restore them using restore_face, and paste them back into the image.
+
+ `restore_face` should take a cropped face image and return a restored face image.
+ """
+ from torchvision.transforms.functional import normalize
+ np_image = np_image[:, :, ::-1]
+ original_resolution = np_image.shape[0:2]
+
+ try:
+ logger.debug("Detecting faces...")
+ face_helper.clean_all()
+ face_helper.read_image(np_image)
+ face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
+ face_helper.align_warp_face()
+ logger.debug("Found %d faces, restoring", len(face_helper.cropped_faces))
+ for cropped_face in face_helper.cropped_faces:
+ cropped_face_t = bgr_image_to_rgb_tensor(cropped_face / 255.0)
+ normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
+ cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer)
+
+ try:
+ with torch.no_grad():
+ cropped_face_t = restore_face(cropped_face_t)
+ devices.torch_gc()
+ except Exception:
+ errors.report('Failed face-restoration inference', exc_info=True)
+
+ restored_face = rgb_tensor_to_bgr_image(cropped_face_t, min_max=(-1, 1))
+ restored_face = (restored_face * 255.0).astype('uint8')
+ face_helper.add_restored_face(restored_face)
+
+ logger.debug("Merging restored faces into image")
+ face_helper.get_inverse_affine(None)
+ img = face_helper.paste_faces_to_input_image()
+ img = img[:, :, ::-1]
+ if original_resolution != img.shape[0:2]:
+ img = cv2.resize(
+ img,
+ (0, 0),
+ fx=original_resolution[1] / img.shape[1],
+ fy=original_resolution[0] / img.shape[0],
+ interpolation=cv2.INTER_LINEAR,
+ )
+ logger.debug("Face restoration complete")
+ finally:
+ face_helper.clean_all()
+ return img
+
+
+class CommonFaceRestoration(face_restoration.FaceRestoration):
+ net: torch.nn.Module | None
+ model_url: str
+ model_download_name: str
+
+ def __init__(self, model_path: str):
+ super().__init__()
+ self.net = None
+ self.model_path = model_path
+ os.makedirs(model_path, exist_ok=True)
+
+ @cached_property
+ def face_helper(self) -> FaceRestoreHelper:
+ return create_face_helper(self.get_device())
+
+ def send_model_to(self, device):
+ if self.net:
+ logger.debug("Sending %s to %s", self.net, device)
+ self.net.to(device)
+ if self.face_helper:
+ logger.debug("Sending face helper to %s", device)
+ self.face_helper.face_det.to(device)
+ self.face_helper.face_parse.to(device)
+
+ def get_device(self):
+ raise NotImplementedError("get_device must be implemented by subclasses")
+
+ def load_net(self) -> torch.nn.Module:
+ raise NotImplementedError("load_net must be implemented by subclasses")
+
+ def restore_with_helper(
+ self,
+ np_image: np.ndarray,
+ restore_face: Callable[[torch.Tensor], torch.Tensor],
+ ) -> np.ndarray:
+ try:
+ if self.net is None:
+ self.net = self.load_net()
+ except Exception:
+ logger.warning("Unable to load face-restoration model", exc_info=True)
+ return np_image
+
+ try:
+ self.send_model_to(self.get_device())
+ return restore_with_face_helper(np_image, self.face_helper, restore_face)
+ finally:
+ if shared.opts.face_restoration_unload:
+ self.send_model_to(devices.cpu)
+
+
+def patch_facexlib(dirname: str) -> None:
+ import facexlib.detection
+ import facexlib.parsing
+
+ det_facex_load_file_from_url = facexlib.detection.load_file_from_url
+ par_facex_load_file_from_url = facexlib.parsing.load_file_from_url
+
+ def update_kwargs(kwargs):
+ return dict(kwargs, save_dir=dirname, model_dir=None)
+
+ def facex_load_file_from_url(**kwargs):
+ return det_facex_load_file_from_url(**update_kwargs(kwargs))
+
+ def facex_load_file_from_url2(**kwargs):
+ return par_facex_load_file_from_url(**update_kwargs(kwargs))
+
+ facexlib.detection.load_file_from_url = facex_load_file_from_url
+ facexlib.parsing.load_file_from_url = facex_load_file_from_url2
diff --git a/modules/fifo_lock.py b/modules/fifo_lock.py
new file mode 100644
index 00000000..c35b3ae2
--- /dev/null
+++ b/modules/fifo_lock.py
@@ -0,0 +1,37 @@
+import threading
+import collections
+
+
+# reference: https://gist.github.com/vitaliyp/6d54dd76ca2c3cdfc1149d33007dc34a
+class FIFOLock(object):
+ def __init__(self):
+ self._lock = threading.Lock()
+ self._inner_lock = threading.Lock()
+ self._pending_threads = collections.deque()
+
+ def acquire(self, blocking=True):
+ with self._inner_lock:
+ lock_acquired = self._lock.acquire(False)
+ if lock_acquired:
+ return True
+ elif not blocking:
+ return False
+
+ release_event = threading.Event()
+ self._pending_threads.append(release_event)
+
+ release_event.wait()
+ return self._lock.acquire()
+
+ def release(self):
+ with self._inner_lock:
+ if self._pending_threads:
+ release_event = self._pending_threads.popleft()
+ release_event.set()
+
+ self._lock.release()
+
+ __enter__ = acquire
+
+ def __exit__(self, t, v, tb):
+ self.release()
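
Note: FIFOLock hands the lock to blocked waiters in the order they arrived instead of leaving the choice to the scheduler; a small usage sketch:

import threading
from modules.fifo_lock import FIFOLock

lock = FIFOLock()
order = []

def worker(i):
    # __enter__ is acquire(); threads that block here are granted the lock FIFO
    with lock:
        order.append(i)

threads = [threading.Thread(target=worker, args=(i,)) for i in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(order)  # all five workers ran; contended acquisitions were served in arrival order
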
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index 8e0f13bd..445b0409 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -1,110 +1,71 @@
+from __future__ import annotations
+
+import logging
import os
-import facexlib
-import gfpgan
+import torch
-import modules.face_restoration
-from modules import paths, shared, devices, modelloader, errors
+from modules import (
+ devices,
+ errors,
+ face_restoration,
+ face_restoration_utils,
+ modelloader,
+ shared,
+)
-model_dir = "GFPGAN"
-user_path = None
-model_path = os.path.join(paths.models_path, model_dir)
+logger = logging.getLogger(__name__)
model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
-have_gfpgan = False
-loaded_gfpgan_model = None
-
-
-def gfpgann():
- global loaded_gfpgan_model
- global model_path
- if loaded_gfpgan_model is not None:
- loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan)
- return loaded_gfpgan_model
-
- if gfpgan_constructor is None:
- return None
-
- models = modelloader.load_models(model_path, model_url, user_path, ext_filter="GFPGAN")
- if len(models) == 1 and models[0].startswith("http"):
- model_file = models[0]
- elif len(models) != 0:
- latest_file = max(models, key=os.path.getctime)
- model_file = latest_file
- else:
- print("Unable to load gfpgan model!")
- return None
- if hasattr(facexlib.detection.retinaface, 'device'):
- facexlib.detection.retinaface.device = devices.device_gfpgan
- model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan)
- loaded_gfpgan_model = model
-
- return model
-
-
-def send_model_to(model, device):
- model.gfpgan.to(device)
- model.face_helper.face_det.to(device)
- model.face_helper.face_parse.to(device)
+model_download_name = "GFPGANv1.4.pth"
+gfpgan_face_restorer: face_restoration.FaceRestoration | None = None
+
+
+class FaceRestorerGFPGAN(face_restoration_utils.CommonFaceRestoration):
+ def name(self):
+ return "GFPGAN"
+
+ def get_device(self):
+ return devices.device_gfpgan
+
+ def load_net(self) -> torch.nn.Module:
+ for model_path in modelloader.load_models(
+ model_path=self.model_path,
+ model_url=model_url,
+ command_path=self.model_path,
+ download_name=model_download_name,
+ ext_filter=['.pth'],
+ ):
+ if 'GFPGAN' in os.path.basename(model_path):
+ model = modelloader.load_spandrel_model(
+ model_path,
+ device=self.get_device(),
+ expected_architecture='GFPGAN',
+ ).model
+ model.different_w = True # see https://github.com/chaiNNer-org/spandrel/pull/81
+ return model
+ raise ValueError("No GFPGAN model found")
+
+ def restore(self, np_image):
+ def restore_face(cropped_face_t):
+ assert self.net is not None
+ return self.net(cropped_face_t, return_rgb=False)[0]
+
+ return self.restore_with_helper(np_image, restore_face)
def gfpgan_fix_faces(np_image):
- model = gfpgann()
- if model is None:
- return np_image
-
- send_model_to(model, devices.device_gfpgan)
-
- np_image_bgr = np_image[:, :, ::-1]
- cropped_faces, restored_faces, gfpgan_output_bgr = model.enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True)
- np_image = gfpgan_output_bgr[:, :, ::-1]
-
- model.face_helper.clean_all()
-
- if shared.opts.face_restoration_unload:
- send_model_to(model, devices.cpu)
-
+ if gfpgan_face_restorer:
+ return gfpgan_face_restorer.restore(np_image)
+ logger.warning("GFPGAN face restorer not set up")
return np_image
-gfpgan_constructor = None
-
+def setup_model(dirname: str) -> None:
+ global gfpgan_face_restorer
-def setup_model(dirname):
try:
- os.makedirs(model_path, exist_ok=True)
- from gfpgan import GFPGANer
- from facexlib import detection, parsing # noqa: F401
- global user_path
- global have_gfpgan
- global gfpgan_constructor
-
- load_file_from_url_orig = gfpgan.utils.load_file_from_url
- facex_load_file_from_url_orig = facexlib.detection.load_file_from_url
- facex_load_file_from_url_orig2 = facexlib.parsing.load_file_from_url
-
- def my_load_file_from_url(**kwargs):
- return load_file_from_url_orig(**dict(kwargs, model_dir=model_path))
-
- def facex_load_file_from_url(**kwargs):
- return facex_load_file_from_url_orig(**dict(kwargs, save_dir=model_path, model_dir=None))
-
- def facex_load_file_from_url2(**kwargs):
- return facex_load_file_from_url_orig2(**dict(kwargs, save_dir=model_path, model_dir=None))
-
- gfpgan.utils.load_file_from_url = my_load_file_from_url
- facexlib.detection.load_file_from_url = facex_load_file_from_url
- facexlib.parsing.load_file_from_url = facex_load_file_from_url2
- user_path = dirname
- have_gfpgan = True
- gfpgan_constructor = GFPGANer
-
- class FaceRestorerGFPGAN(modules.face_restoration.FaceRestoration):
- def name(self):
- return "GFPGAN"
-
- def restore(self, np_image):
- return gfpgan_fix_faces(np_image)
-
- shared.face_restorers.append(FaceRestorerGFPGAN())
+ face_restoration_utils.patch_facexlib(dirname)
+ gfpgan_face_restorer = FaceRestorerGFPGAN(model_path=dirname)
+ shared.face_restorers.append(gfpgan_face_restorer)
except Exception:
errors.report("Error setting up GFPGAN", exc_info=True)
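
Note: after this rewrite GFPGAN is reached through a module-level restorer object rather than the old gfpgann()/send_model_to helpers; a hedged usage sketch (the models directory is an assumption):

import numpy as np
from modules import gfpgan_model

gfpgan_model.setup_model("models/GFPGAN")        # registers FaceRestorerGFPGAN in shared.face_restorers
image = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder RGB image
restored = gfpgan_model.gfpgan_fix_faces(image)  # falls back to returning the input if setup failed
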
diff --git a/modules/gitpython_hack.py b/modules/gitpython_hack.py
index e537c1df..b55f0640 100644
--- a/modules/gitpython_hack.py
+++ b/modules/gitpython_hack.py
@@ -23,7 +23,7 @@ class Git(git.Git):
)
return self._parse_object_header(ret)
- def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]:
+ def stream_object_data(self, ref: str) -> tuple[str, str, int, Git.CatFileContentStream]:
# Not really streaming, per se; this buffers the entire object in memory.
# Shouldn't be a problem for our use case, since we're only using this for
# object headers (commit objects).
diff --git a/modules/gradio_extensons.py b/modules/gradio_extensons.py
new file mode 100644
index 00000000..7d88dc98
--- /dev/null
+++ b/modules/gradio_extensons.py
@@ -0,0 +1,83 @@
+import gradio as gr
+
+from modules import scripts, ui_tempdir, patches
+
+
+def add_classes_to_gradio_component(comp):
+ """
+ this adds gradio-* classes to the component for CSS styling (e.g. gradio-button for gr.Button), as well as some others
+ """
+
+ comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
+
+ if getattr(comp, 'multiselect', False):
+ comp.elem_classes.append('multiselect')
+
+
+def IOComponent_init(self, *args, **kwargs):
+ self.webui_tooltip = kwargs.pop('tooltip', None)
+
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.before_component(self, **kwargs)
+
+ scripts.script_callbacks.before_component_callback(self, **kwargs)
+
+ res = original_IOComponent_init(self, *args, **kwargs)
+
+ add_classes_to_gradio_component(self)
+
+ scripts.script_callbacks.after_component_callback(self, **kwargs)
+
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.after_component(self, **kwargs)
+
+ return res
+
+
+def Block_get_config(self):
+ config = original_Block_get_config(self)
+
+ webui_tooltip = getattr(self, 'webui_tooltip', None)
+ if webui_tooltip:
+ config["webui_tooltip"] = webui_tooltip
+
+ config.pop('example_inputs', None)
+
+ return config
+
+
+def BlockContext_init(self, *args, **kwargs):
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.before_component(self, **kwargs)
+
+ scripts.script_callbacks.before_component_callback(self, **kwargs)
+
+ res = original_BlockContext_init(self, *args, **kwargs)
+
+ add_classes_to_gradio_component(self)
+
+ scripts.script_callbacks.after_component_callback(self, **kwargs)
+
+ if scripts.scripts_current is not None:
+ scripts.scripts_current.after_component(self, **kwargs)
+
+ return res
+
+
+def Blocks_get_config_file(self, *args, **kwargs):
+ config = original_Blocks_get_config_file(self, *args, **kwargs)
+
+ for comp_config in config["components"]:
+ if "example_inputs" in comp_config:
+ comp_config["example_inputs"] = {"serialized": []}
+
+ return config
+
+
+original_IOComponent_init = patches.patch(__name__, obj=gr.components.IOComponent, field="__init__", replacement=IOComponent_init)
+original_Block_get_config = patches.patch(__name__, obj=gr.blocks.Block, field="get_config", replacement=Block_get_config)
+original_BlockContext_init = patches.patch(__name__, obj=gr.blocks.BlockContext, field="__init__", replacement=BlockContext_init)
+original_Blocks_get_config_file = patches.patch(__name__, obj=gr.blocks.Blocks, field="get_config_file", replacement=Blocks_get_config_file)
+
+
+ui_tempdir.install_ui_tempdir_override()
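
Note: the patched IOComponent.__init__ pops a webui-specific tooltip kwarg and Block_get_config re-emits it as webui_tooltip for the frontend; a sketch of what that enables (importing the module installs the patches; the button label is arbitrary):

import gradio as gr
from modules import gradio_extensons  # noqa: F401  (importing applies the patches)

with gr.Blocks() as demo:
    # "tooltip" is not a stock Gradio argument; the patched __init__ strips it and
    # get_config() later exposes it to the page as "webui_tooltip".
    btn = gr.Button("Generate", tooltip="Start generating images")
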
diff --git a/modules/hat_model.py b/modules/hat_model.py
new file mode 100644
index 00000000..7f2abb41
--- /dev/null
+++ b/modules/hat_model.py
@@ -0,0 +1,43 @@
+import os
+import sys
+
+from modules import modelloader, devices
+from modules.shared import opts
+from modules.upscaler import Upscaler, UpscalerData
+from modules.upscaler_utils import upscale_with_model
+
+
+class UpscalerHAT(Upscaler):
+ def __init__(self, dirname):
+ self.name = "HAT"
+ self.scalers = []
+ self.user_path = dirname
+ super().__init__()
+ for file in self.find_models(ext_filter=[".pt", ".pth"]):
+ name = modelloader.friendly_name(file)
+ scale = 4 # TODO: scale might not be 4, but we can't know without loading the model
+ scaler_data = UpscalerData(name, file, upscaler=self, scale=scale)
+ self.scalers.append(scaler_data)
+
+ def do_upscale(self, img, selected_model):
+ try:
+ model = self.load_model(selected_model)
+ except Exception as e:
+ print(f"Unable to load HAT model {selected_model}: {e}", file=sys.stderr)
+ return img
+ model.to(devices.device_esrgan) # TODO: should probably be device_hat
+ return upscale_with_model(
+ model,
+ img,
+ tile_size=opts.ESRGAN_tile, # TODO: should probably be HAT_tile
+ tile_overlap=opts.ESRGAN_tile_overlap, # TODO: should probably be HAT_tile_overlap
+ )
+
+ def load_model(self, path: str):
+ if not os.path.isfile(path):
+ raise FileNotFoundError(f"Model file {path} not found")
+ return modelloader.load_spandrel_model(
+ path,
+ device=devices.device_esrgan, # TODO: should probably be device_hat
+ expected_architecture='HAT',
+ )
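
Note: UpscalerHAT follows the same contract as the other upscalers; a hedged usage sketch (the models directory and the presence of a HAT checkpoint are assumptions):

from PIL import Image
from modules.hat_model import UpscalerHAT

upscaler = UpscalerHAT("models/HAT")  # scans the directory for *.pt / *.pth files
img = Image.new("RGB", (128, 128))
if upscaler.scalers:                  # only attempt upscaling if a model was found
    result = upscaler.do_upscale(img, upscaler.scalers[0].data_path)
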
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index c4821d21..be3e4648 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -10,7 +10,7 @@ import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
-from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors
+from modules import devices, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors
from modules.textual_inversion import textual_inversion, logging
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
@@ -468,9 +468,8 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
shared.reload_hypernetworks()
-def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
- # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
- from modules import images
+def train_hypernetwork(id_task, hypernetwork_name: str, learn_rate: float, batch_size: int, gradient_step: int, data_root: str, log_directory: str, training_width: int, training_height: int, varsize: bool, steps: int, clip_grad_mode: str, clip_grad_value: float, shuffle_tags: bool, tag_drop_out: bool, latent_sampling_method: str, use_weight: bool, create_image_every: int, save_hypernetwork_every: int, template_filename: str, preview_from_txt2img: bool, preview_prompt: str, preview_negative_prompt: str, preview_steps: int, preview_sampler_name: str, preview_cfg_scale: float, preview_seed: int, preview_width: int, preview_height: int):
+ from modules import images, processing
save_hypernetwork_every = save_hypernetwork_every or 0
create_image_every = create_image_every or 0
@@ -699,7 +698,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
- p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
+ p.sampler_name = sd_samplers.samplers_map[preview_sampler_name.lower()]
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
diff --git a/modules/images.py b/modules/images.py
index 38aa933d..87a7bf22 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -21,8 +21,6 @@ from modules import sd_samplers, shared, script_callbacks, errors
from modules.paths_internal import roboto_ttf_file
from modules.shared import opts
-import modules.sd_vae as sd_vae
-
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
@@ -63,12 +61,17 @@ def image_grid(imgs, batch_size=1, rows=None):
return grid
-Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
+class Grid(namedtuple("_Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])):
+ @property
+ def tile_count(self) -> int:
+ """
+ The total number of tiles in the grid.
+ """
+ return sum(len(row[2]) for row in self.tiles)
-def split_grid(image, tile_w=512, tile_h=512, overlap=64):
- w = image.width
- h = image.height
+def split_grid(image: Image.Image, tile_w: int = 512, tile_h: int = 512, overlap: int = 64) -> Grid:
+ w, h = image.size
non_overlap_width = tile_w - overlap
non_overlap_height = tile_h - overlap
@@ -318,7 +321,7 @@ def resize_image(resize_mode, im, width, height, upscaler_name=None):
return res
-invalid_filename_chars = '<>:"/\\|?*\n'
+invalid_filename_chars = '<>:"/\\|?*\n\r\t'
invalid_filename_prefix = ' '
invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
@@ -342,16 +345,6 @@ def sanitize_filename_part(text, replace_spaces=True):
class FilenameGenerator:
- def get_vae_filename(self): #get the name of the VAE file.
- if sd_vae.loaded_vae_file is None:
- return "NoneType"
- file_name = os.path.basename(sd_vae.loaded_vae_file)
- split_file_name = file_name.split('.')
- if len(split_file_name) > 1 and split_file_name[0] == '':
- return split_file_name[1] # if the first character of the filename is "." then [1] is obtained.
- else:
- return split_file_name[0]
-
replacements = {
'seed': lambda self: self.seed if self.seed is not None else '',
'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],
@@ -367,7 +360,9 @@ class FilenameGenerator:
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
- 'prompt_hash': lambda self: hashlib.sha256(self.prompt.encode()).hexdigest()[0:8],
+ 'prompt_hash': lambda self, *args: self.string_hash(self.prompt, *args),
+ 'negative_prompt_hash': lambda self, *args: self.string_hash(self.p.negative_prompt, *args),
+ 'full_prompt_hash': lambda self, *args: self.string_hash(f"{self.p.prompt} {self.p.negative_prompt}", *args), # a space in between to create a unique string
'prompt': lambda self: sanitize_filename_part(self.prompt),
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
@@ -380,7 +375,8 @@ class FilenameGenerator:
'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
'user': lambda self: self.p.user,
'vae_filename': lambda self: self.get_vae_filename(),
- 'none': lambda self: '', # Overrides the default so you can get just the sequence number
+ 'none': lambda self: '', # Overrides the default, so you can get just the sequence number
+ 'image_hash': lambda self, *args: self.image_hash(*args) # accepts formats: [image_hash<length>] default full hash
}
default_time_format = '%Y%m%d%H%M%S'
@@ -391,6 +387,22 @@ class FilenameGenerator:
self.image = image
self.zip = zip
+ def get_vae_filename(self):
+ """Get the name of the VAE file."""
+
+ import modules.sd_vae as sd_vae
+
+ if sd_vae.loaded_vae_file is None:
+ return "NoneType"
+
+ file_name = os.path.basename(sd_vae.loaded_vae_file)
+ split_file_name = file_name.split('.')
+ if len(split_file_name) > 1 and split_file_name[0] == '':
+ return split_file_name[1] # if the filename starts with ".", use the part after the leading dot
+ else:
+ return split_file_name[0]
+
+
def hasprompt(self, *args):
lower = self.prompt.lower()
if self.p is None or self.prompt is None:
@@ -444,6 +456,14 @@ class FilenameGenerator:
return sanitize_filename_part(formatted_time, replace_spaces=False)
+ def image_hash(self, *args):
+ length = int(args[0]) if (args and args[0] != "") else None
+ return hashlib.sha256(self.image.tobytes()).hexdigest()[0:length]
+
+ def string_hash(self, text, *args):
+ length = int(args[0]) if (args and args[0] != "") else 8
+ return hashlib.sha256(text.encode()).hexdigest()[0:length]
+
def apply(self, x):
res = ''
@@ -546,6 +566,8 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p
})
piexif.insert(exif_bytes, filename)
+ elif extension.lower() == ".gif":
+ image.save(filename, format=image_format, comment=geninfo)
else:
image.save(filename, format=image_format, quality=opts.jpeg_quality)
@@ -585,6 +607,11 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
"""
namegen = FilenameGenerator(p, seed, prompt, image)
+ # WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively. switch to PNG which has a much higher limit
+ if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp":
+ print('Image dimensions too large; saving as PNG')
+ extension = ".png"
+
if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
@@ -641,7 +668,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
save_image_with_geninfo(image_to_save, info, temp_file_path, extension, existing_pnginfo=params.pnginfo, pnginfo_section_name=pnginfo_section_name)
- os.replace(temp_file_path, filename_without_extension + extension)
+ filename = filename_without_extension + extension
+ if shared.opts.save_images_replace_action != "Replace":
+ n = 0
+ while os.path.exists(filename):
+ n += 1
+ filename = f"{filename_without_extension}-{n}{extension}"
+ os.replace(temp_file_path, filename)
fullfn_without_extension, extension = os.path.splitext(params.filename)
if hasattr(os, 'statvfs'):
@@ -698,7 +731,12 @@ def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]:
geninfo = items.pop('parameters', None)
if "exif" in items:
- exif = piexif.load(items["exif"])
+ exif_data = items["exif"]
+ try:
+ exif = piexif.load(exif_data)
+ except OSError:
+ # the in-memory EXIF data was not valid, so piexif tried to read it from a file path instead
+ exif = None
exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'')
try:
exif_comment = piexif.helper.UserComment.load(exif_comment)
@@ -708,6 +746,8 @@ def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]:
if exif_comment:
items['exif comment'] = exif_comment
geninfo = exif_comment
+ elif "comment" in items: # for gif
+ geninfo = items["comment"].decode('utf8', errors="ignore")
for field in IGNORED_INFO_KEYS:
items.pop(field, None)
@@ -756,3 +796,4 @@ def flatten(img, bgcolor):
img = background
return img.convert('RGB')
+
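
Note: the new prompt_hash/negative_prompt_hash/full_prompt_hash/image_hash replacements accept an optional length argument in the filename pattern (e.g. [prompt_hash<12>]); a sketch of the truncation string_hash performs (the prompt text is arbitrary):

import hashlib

def string_hash(text, length=8):
    # mirrors FilenameGenerator.string_hash: sha256 hex digest truncated to `length` characters
    return hashlib.sha256(text.encode()).hexdigest()[0:length]

print(string_hash("a photo of a cat"))      # 8 characters, as used by [prompt_hash]
print(string_hash("a photo of a cat", 12))  # 12 characters, as used by [prompt_hash<12>]
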
diff --git a/modules/img2img.py b/modules/img2img.py
index a811e7a4..75b3d346 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -3,14 +3,14 @@ from contextlib import closing
from pathlib import Path
import numpy as np
-from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError
+from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError
import gradio as gr
-from modules import sd_samplers, images as imgutil
-from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters
+from modules import images as imgutil
+from modules.infotext import create_override_settings_dict, parse_generation_parameters
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
-from modules.images import save_image
+from modules.sd_models import get_closet_checkpoint_match
import modules.shared as shared
import modules.processing as processing
from modules.ui import plaintext_to_html
@@ -18,9 +18,10 @@ import modules.scripts
def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
+ output_dir = output_dir.strip()
processing.fix_seed(p)
- images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp")))
+ images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
is_inpaint_batch = False
if inpaint_mask_dir:
@@ -32,11 +33,6 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
- save_normally = output_dir == ''
-
- p.do_not_save_grid = True
- p.do_not_save_samples = not save_normally
-
state.job_count = len(images) * p.n_iter
# extract "default" params to use in case getting png info fails
@@ -46,7 +42,10 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
cfg_scale = p.cfg_scale
sampler_name = p.sampler_name
steps = p.steps
-
+ override_settings = p.override_settings
+ sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None))
+ batch_results = None
+ discard_further_results = False
for i, image in enumerate(images):
state.job = f"{i+1} out of {len(images)}"
if state.skipped:
@@ -109,42 +108,58 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
p.sampler_name = parsed_parameters.get("Sampler", sampler_name)
p.steps = int(parsed_parameters.get("Steps", steps))
+ model_info = get_closet_checkpoint_match(parsed_parameters.get("Model hash", None))
+ if model_info is not None:
+ p.override_settings['sd_model_checkpoint'] = model_info.name
+ elif sd_model_checkpoint_override:
+ p.override_settings['sd_model_checkpoint'] = sd_model_checkpoint_override
+ else:
+ p.override_settings.pop("sd_model_checkpoint", None)
+
+ if output_dir:
+ p.outpath_samples = output_dir
+ p.override_settings['save_to_dirs'] = False
+ p.override_settings['save_images_replace_action'] = "Add number suffix"
+ if p.n_iter > 1 or p.batch_size > 1:
+ p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
+ else:
+ p.override_settings['samples_filename_pattern'] = f'{image_path.stem}'
+
proc = modules.scripts.scripts_img2img.run(p, *args)
+
if proc is None:
+ p.override_settings.pop('save_images_replace_action', None)
proc = process_images(p)
- for n, processed_image in enumerate(proc.images):
- filename = image_path.stem
- infotext = proc.infotext(p, n)
- relpath = os.path.dirname(os.path.relpath(image, input_dir))
+ if not discard_further_results and proc:
+ if batch_results:
+ batch_results.images.extend(proc.images)
+ batch_results.infotexts.extend(proc.infotexts)
+ else:
+ batch_results = proc
- if n > 0:
- filename += f"-{n}"
+ if 0 <= shared.opts.img2img_batch_show_results_limit < len(batch_results.images):
+ discard_further_results = True
+ batch_results.images = batch_results.images[:int(shared.opts.img2img_batch_show_results_limit)]
+ batch_results.infotexts = batch_results.infotexts[:int(shared.opts.img2img_batch_show_results_limit)]
- if not save_normally:
- os.makedirs(os.path.join(output_dir, relpath), exist_ok=True)
- if processed_image.mode == 'RGBA':
- processed_image = processed_image.convert("RGB")
- save_image(processed_image, os.path.join(output_dir, relpath), None, extension=opts.samples_format, info=infotext, forced_filename=filename, save_to_dirs=False)
+ return batch_results
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5
if mode == 0: # img2img
- image = init_img.convert("RGB")
+ image = init_img
mask = None
elif mode == 1: # img2img sketch
- image = sketch.convert("RGB")
+ image = sketch
mask = None
elif mode == 2: # inpaint
image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
- alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
- mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1')
- mask = ImageChops.lighter(alpha_mask, mask).convert('L')
- image = image.convert("RGB")
+ mask = processing.create_binary_mask(mask)
elif mode == 3: # inpaint sketch
image = inpaint_color_sketch
orig = inpaint_color_sketch_orig or inpaint_color_sketch
@@ -153,7 +168,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
blur = ImageFilter.GaussianBlur(mask_blur)
image = Image.composite(image.filter(blur), orig, mask.filter(blur))
- image = image.convert("RGB")
elif mode == 4: # inpaint upload mask
image = init_img_inpaint
mask = init_mask_inpaint
@@ -180,21 +194,13 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
prompt=prompt,
negative_prompt=negative_prompt,
styles=prompt_styles,
- seed=seed,
- subseed=subseed,
- subseed_strength=subseed_strength,
- seed_resize_from_h=seed_resize_from_h,
- seed_resize_from_w=seed_resize_from_w,
- seed_enable_extras=seed_enable_extras,
- sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name,
+ sampler_name=sampler_name,
batch_size=batch_size,
n_iter=n_iter,
steps=steps,
cfg_scale=cfg_scale,
width=width,
height=height,
- restore_faces=restore_faces,
- tiling=tiling,
init_images=[image],
mask=mask,
mask_blur=mask_blur,
@@ -213,7 +219,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
p.user = request.username
- if shared.cmd_opts.enable_console_prompts:
+ if shared.opts.enable_console_prompts:
print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
if mask:
@@ -222,10 +228,10 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
with closing(p):
if is_batch:
assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
+ processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
- process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
-
- processed = Processed(p, [], p.seed, "")
+ if processed is None:
+ processed = Processed(p, [], p.seed, "")
else:
processed = modules.scripts.scripts_img2img.run(p, *args)
if processed is None:
diff --git a/modules/import_hook.py b/modules/import_hook.py
index 28c67dfa..eba9a372 100644
--- a/modules/import_hook.py
+++ b/modules/import_hook.py
@@ -3,3 +3,14 @@ import sys
# this will break any attempt to import xformers, which will prevent the stable diffusion repo from trying to use it
if "--xformers" not in "".join(sys.argv):
sys.modules["xformers"] = None
+
+# Hack to fix a changed import in torchvision 0.17+, which otherwise breaks
+# basicsr; see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13985
+try:
+ import torchvision.transforms.functional_tensor # noqa: F401
+except ImportError:
+ try:
+ import torchvision.transforms.functional as functional
+ sys.modules["torchvision.transforms.functional_tensor"] = functional
+ except ImportError:
+ pass # shrug...
diff --git a/modules/generation_parameters_copypaste.py b/modules/infotext.py
index a3448be9..26e9b949 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/infotext.py
@@ -1,23 +1,24 @@
+from __future__ import annotations
import base64
import io
import json
import os
import re
+import sys
import gradio as gr
from modules.paths import data_path
-from modules import shared, ui_tempdir, script_callbacks
+from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions
from PIL import Image
-re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'
+sys.modules['modules.generation_parameters_copypaste'] = sys.modules[__name__] # alias for old name
+
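+# matches a single "Label: value" pair in an infotext line, e.g. `Steps: 20,` or a quoted
+# value such as `Hires prompt: "a, b",` (quoting allows commas and escaped characters inside the value)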
+re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
re_param = re.compile(re_param_code)
re_imagesize = re.compile(r"^(\d+)x(\d+)$")
re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$")
type_of_gr_update = type(gr.update())
-paste_fields = {}
-registered_param_bindings = []
-
class ParamBinding:
def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None):
@@ -30,8 +31,26 @@ class ParamBinding:
self.paste_field_names = paste_field_names or []
+class PasteField(tuple):
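+    # behaves like the old (component, target) tuple when unpacked; `target` may be an infotext
+    # label (str) or a callable applied to the parsed parameters, and `api` is an optional extra
+    # attribute stored on the field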
+ def __new__(cls, component, target, *, api=None):
+ return super().__new__(cls, (component, target))
+
+ def __init__(self, component, target, *, api=None):
+ super().__init__()
+
+ self.api = api
+ self.component = component
+ self.label = target if isinstance(target, str) else None
+ self.function = target if callable(target) else None
+
+
+paste_fields: dict[str, dict] = {}
+registered_param_bindings: list[ParamBinding] = []
+
+
def reset():
paste_fields.clear()
+ registered_param_bindings.clear()
def quote(text):
@@ -81,6 +100,12 @@ def image_from_url_text(filedata):
def add_paste_fields(tabname, init_img, fields, override_settings_component=None):
+
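+    # wrap plain (component, target) tuples into PasteField so every field exposes the same attributes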
+ if fields:
+ for i in range(len(fields)):
+ if not isinstance(fields[i], PasteField):
+ fields[i] = PasteField(*fields[i])
+
paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component}
# backwards compatibility for existing extensions
@@ -112,7 +137,6 @@ def register_paste_params_button(binding: ParamBinding):
def connect_paste_params_buttons():
- binding: ParamBinding
for binding in registered_param_bindings:
destination_image_component = paste_fields[binding.tabname]["init_img"]
fields = paste_fields[binding.tabname]["fields"]
@@ -198,7 +222,6 @@ def restore_old_hires_fix_params(res):
height = int(res.get("Size-2", 512))
if firstpass_width == 0 or firstpass_height == 0:
- from modules import processing
firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height)
res['Size-1'] = firstpass_width
@@ -280,6 +303,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "Hires sampler" not in res:
res["Hires sampler"] = "Use same sampler"
+ if "Hires checkpoint" not in res:
+ res["Hires checkpoint"] = "Use same checkpoint"
+
if "Hires prompt" not in res:
res["Hires prompt"] = ""
@@ -304,32 +330,39 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "Schedule rho" not in res:
res["Schedule rho"] = 0
+ if "VAE Encoder" not in res:
+ res["VAE Encoder"] = "Full"
+
+ if "VAE Decoder" not in res:
+ res["VAE Decoder"] = "Full"
+
+ if "FP8 weight" not in res:
+ res["FP8 weight"] = "Disable"
+
+ if "Cache FP16 weight for LoRA" not in res and res["FP8 weight"] != "Disable":
+ res["Cache FP16 weight for LoRA"] = False
+
+ infotext_versions.backcompat(res)
+
+ skip = set(shared.opts.infotext_skip_pasting)
+ res = {k: v for k, v in res.items() if k not in skip}
+
return res
infotext_to_setting_name_mapping = [
- ('Clip skip', 'CLIP_stop_at_last_layers', ),
+
+]
+"""Mapping of infotext labels to setting names. Only left for backwards compatibility - use OptionInfo(..., infotext='...') instead.
+Example content:
+
+infotext_to_setting_name_mapping = [
('Conditional mask weight', 'inpainting_mask_weight'),
('Model hash', 'sd_model_checkpoint'),
('ENSD', 'eta_noise_seed_delta'),
('Schedule type', 'k_sched_type'),
- ('Schedule max sigma', 'sigma_max'),
- ('Schedule min sigma', 'sigma_min'),
- ('Schedule rho', 'rho'),
- ('Noise multiplier', 'initial_noise_multiplier'),
- ('Eta', 'eta_ancestral'),
- ('Eta DDIM', 'eta_ddim'),
- ('Discard penultimate sigma', 'always_discard_next_to_last_sigma'),
- ('UniPC variant', 'uni_pc_variant'),
- ('UniPC skip type', 'uni_pc_skip_type'),
- ('UniPC order', 'uni_pc_order'),
- ('UniPC lower order final', 'uni_pc_lower_order_final'),
- ('Token merging ratio', 'token_merging_ratio'),
- ('Token merging ratio hr', 'token_merging_ratio_hr'),
- ('RNG', 'randn_source'),
- ('NGMS', 's_min_uncond'),
- ('Pad conds', 'pad_cond_uncond'),
]
+"""
def create_override_settings_dict(text_pairs):
@@ -350,7 +383,8 @@ def create_override_settings_dict(text_pairs):
params[k] = v.strip()
- for param_name, setting_name in infotext_to_setting_name_mapping:
+ mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
+ for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
value = params.get(param_name, None)
if value is None:
@@ -361,6 +395,48 @@ def create_override_settings_dict(text_pairs):
return res
+def get_override_settings(params, *, skip_fields=None):
+ """Returns a list of settings overrides from the infotext parameters dictionary.
+
+ This function checks the `params` dictionary for any keys that correspond to settings in `shared.opts` and returns
+ a list of tuples containing the parameter name, setting name, and new value cast to correct type.
+
+    Conditions checked before adding an override:
+    - settings whose incoming value matches the current value are ignored
+    - parameter keys present in the skip_fields argument are ignored.
+
+ Example input:
+ {"Clip skip": "2"}
+
+ Example output:
+ [("Clip skip", "CLIP_stop_at_last_layers", 2)]
+ """
+
+ res = []
+
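+    # mapping of infotext labels to setting names, built from options declared with OptionInfo(..., infotext=...)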
+ mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
+ for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
+ if param_name in (skip_fields or {}):
+ continue
+
+ v = params.get(param_name, None)
+ if v is None:
+ continue
+
+ if setting_name == "sd_model_checkpoint" and shared.opts.disable_weights_auto_swap:
+ continue
+
+ v = shared.opts.cast_value(setting_name, v)
+ current_value = getattr(shared.opts, setting_name, None)
+
+ if v == current_value:
+ continue
+
+ res.append((param_name, setting_name, v))
+
+ return res
+
+
def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname):
def paste_func(prompt):
if not prompt and not shared.cmd_opts.hide_ui_dir_config:
@@ -399,26 +475,12 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
return res
if override_settings_component is not None:
- def paste_settings(params):
- vals = {}
+ already_handled_fields = {key: 1 for _, key in paste_fields}
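+            # parameters that already have a dedicated paste field are skipped so they don't also become settings overrides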
- for param_name, setting_name in infotext_to_setting_name_mapping:
- v = params.get(param_name, None)
- if v is None:
- continue
-
- if setting_name == "sd_model_checkpoint" and shared.opts.disable_weights_auto_swap:
- continue
-
- v = shared.opts.cast_value(setting_name, v)
- current_value = getattr(shared.opts, setting_name, None)
-
- if v == current_value:
- continue
-
- vals[param_name] = v
+ def paste_settings(params):
+ vals = get_override_settings(params, skip_fields=already_handled_fields)
- vals_pairs = [f"{k}: {v}" for k, v in vals.items()]
+ vals_pairs = [f"{infotext_text}: {value}" for infotext_text, setting_name, value in vals]
return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=bool(vals_pairs))
@@ -437,3 +499,4 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
outputs=[],
show_progress=False,
)
+
diff --git a/modules/infotext_versions.py b/modules/infotext_versions.py
new file mode 100644
index 00000000..a5afeebf
--- /dev/null
+++ b/modules/infotext_versions.py
@@ -0,0 +1,39 @@
+from modules import shared
+from packaging import version
+import re
+
+
+v160 = version.parse("1.6.0")
+v170_tsnr = version.parse("v1.7.0-225")
+
+
+def parse_version(text):
+ if text is None:
+ return None
+
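+    # trim git-describe style versions: "v1.7.0-225-gabcdef" becomes "v1.7.0-225"; plain tags are parsed as-is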
+ m = re.match(r'([^-]+-[^-]+)-.*', text)
+ if m:
+ text = m.group(1)
+
+ try:
+ return version.parse(text)
+ except Exception:
+ return None
+
+
+def backcompat(d):
+ """Checks infotext Version field, and enables backwards compatibility options according to it."""
+
+ if not shared.opts.auto_backcompat:
+ return
+
+ ver = parse_version(d.get("Version"))
+ if ver is None:
+ return
+
+ if ver < v160:
+ d["Old prompt editing timelines"] = True
+
+ if ver < v170_tsnr:
+ d["Downcast alphas_cumprod"] = True
+
diff --git a/modules/initialize.py b/modules/initialize.py
new file mode 100644
index 00000000..4a3cd98c
--- /dev/null
+++ b/modules/initialize.py
@@ -0,0 +1,165 @@
+import importlib
+import logging
+import sys
+import warnings
+from threading import Thread
+
+from modules.timer import startup_timer
+
+
+def imports():
+ logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
+ logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
+
+ import torch # noqa: F401
+ startup_timer.record("import torch")
+ import pytorch_lightning # noqa: F401
+    startup_timer.record("import pytorch_lightning")
+ warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
+
+ import gradio # noqa: F401
+ startup_timer.record("import gradio")
+
+ from modules import paths, timer, import_hook, errors # noqa: F401
+ startup_timer.record("setup paths")
+
+ import ldm.modules.encoders.modules # noqa: F401
+ startup_timer.record("import ldm")
+
+ import sgm.modules.encoders.modules # noqa: F401
+ startup_timer.record("import sgm")
+
+ from modules import shared_init
+ shared_init.initialize()
+ startup_timer.record("initialize shared")
+
+ from modules import processing, gradio_extensons, ui # noqa: F401
+ startup_timer.record("other imports")
+
+
+def check_versions():
+ from modules.shared_cmd_options import cmd_opts
+
+ if not cmd_opts.skip_version_check:
+ from modules import errors
+ errors.check_versions()
+
+
+def initialize():
+ from modules import initialize_util
+ initialize_util.fix_torch_version()
+ initialize_util.fix_asyncio_event_loop_policy()
+ initialize_util.validate_tls_options()
+ initialize_util.configure_sigint_handler()
+ initialize_util.configure_opts_onchange()
+
+ from modules import sd_models
+ sd_models.setup_model()
+ startup_timer.record("setup SD model")
+
+ from modules.shared_cmd_options import cmd_opts
+
+ from modules import codeformer_model
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision.transforms.functional_tensor")
+ codeformer_model.setup_model(cmd_opts.codeformer_models_path)
+ startup_timer.record("setup codeformer")
+
+ from modules import gfpgan_model
+ gfpgan_model.setup_model(cmd_opts.gfpgan_models_path)
+ startup_timer.record("setup gfpgan")
+
+ initialize_rest(reload_script_modules=False)
+
+
+def initialize_rest(*, reload_script_modules=False):
+ """
+ Called both from initialize() and when reloading the webui.
+ """
+ from modules.shared_cmd_options import cmd_opts
+
+ from modules import sd_samplers
+ sd_samplers.set_samplers()
+ startup_timer.record("set samplers")
+
+ from modules import extensions
+ extensions.list_extensions()
+ startup_timer.record("list extensions")
+
+ from modules import initialize_util
+ initialize_util.restore_config_state_file()
+ startup_timer.record("restore config state file")
+
+ from modules import shared, upscaler, scripts
+ if cmd_opts.ui_debug_mode:
+ shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
+ scripts.load_scripts()
+ return
+
+ from modules import sd_models
+ sd_models.list_models()
+ startup_timer.record("list SD models")
+
+ from modules import localization
+ localization.list_localizations(cmd_opts.localizations_dir)
+ startup_timer.record("list localizations")
+
+ with startup_timer.subcategory("load scripts"):
+ scripts.load_scripts()
+
+ if reload_script_modules:
+ for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
+ importlib.reload(module)
+ startup_timer.record("reload script modules")
+
+ from modules import modelloader
+ modelloader.load_upscalers()
+ startup_timer.record("load upscalers")
+
+ from modules import sd_vae
+ sd_vae.refresh_vae_list()
+ startup_timer.record("refresh VAE")
+
+ from modules import textual_inversion
+ textual_inversion.textual_inversion.list_textual_inversion_templates()
+ startup_timer.record("refresh textual inversion templates")
+
+ from modules import script_callbacks, sd_hijack_optimizations, sd_hijack
+ script_callbacks.on_list_optimizers(sd_hijack_optimizations.list_optimizers)
+ sd_hijack.list_optimizers()
+ startup_timer.record("scripts list_optimizers")
+
+ from modules import sd_unet
+ sd_unet.list_unets()
+ startup_timer.record("scripts list_unets")
+
+ def load_model():
+ """
+ Accesses shared.sd_model property to load model.
+ After it's available, if it has been loaded before this access by some extension,
+        its optimization may be None because the list of optimizers has not been filled
+ by that time, so we apply optimization again.
+ """
+
+ shared.sd_model # noqa: B018
+
+ if sd_hijack.current_optimizer is None:
+ sd_hijack.apply_optimizations()
+
+ from modules import devices
+ devices.first_time_calculation()
+ if not shared.cmd_opts.skip_load_model_at_start:
+ Thread(target=load_model).start()
+
+ from modules import shared_items
+ shared_items.reload_hypernetworks()
+ startup_timer.record("reload hypernetworks")
+
+ from modules import ui_extra_networks
+ ui_extra_networks.initialize()
+ ui_extra_networks.register_default_pages()
+
+ from modules import extra_networks
+ extra_networks.initialize()
+ extra_networks.register_default_extra_networks()
+ startup_timer.record("initialize extra networks")
diff --git a/modules/initialize_util.py b/modules/initialize_util.py
new file mode 100644
index 00000000..b6767138
--- /dev/null
+++ b/modules/initialize_util.py
@@ -0,0 +1,208 @@
+import json
+import os
+import signal
+import sys
+import re
+
+from modules.timer import startup_timer
+
+
+def gradio_server_name():
+ from modules.shared_cmd_options import cmd_opts
+
+ if cmd_opts.server_name:
+ return cmd_opts.server_name
+ else:
+ return "0.0.0.0" if cmd_opts.listen else None
+
+
+def fix_torch_version():
+ import torch
+
+ # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
+ if ".dev" in torch.__version__ or "+git" in torch.__version__:
+ torch.__long_version__ = torch.__version__
+ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
+
+
+def fix_asyncio_event_loop_policy():
+ """
+ The default `asyncio` event loop policy only automatically creates
+ event loops in the main threads. Other threads must create event
+ loops explicitly or `asyncio.get_event_loop` (and therefore
+ `.IOLoop.current`) will fail. Installing this policy allows event
+ loops to be created automatically on any thread, matching the
+ behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
+ """
+
+ import asyncio
+
+ if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
+ # "Any thread" and "selector" should be orthogonal, but there's not a clean
+ # interface for composing policies so pick the right base.
+ _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore
+ else:
+ _BasePolicy = asyncio.DefaultEventLoopPolicy
+
+ class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore
+ """Event loop policy that allows loop creation on any thread.
+ Usage::
+
+ asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
+ """
+
+ def get_event_loop(self) -> asyncio.AbstractEventLoop:
+ try:
+ return super().get_event_loop()
+ except (RuntimeError, AssertionError):
+ # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
+ # and changed to a RuntimeError in 3.4.3.
+ # "There is no current event loop in thread %r"
+ loop = self.new_event_loop()
+ self.set_event_loop(loop)
+ return loop
+
+ asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
+
+
+def restore_config_state_file():
+ from modules import shared, config_states
+
+ config_state_file = shared.opts.restore_config_state_file
+ if config_state_file == "":
+ return
+
+ shared.opts.restore_config_state_file = ""
+ shared.opts.save(shared.config_filename)
+
+ if os.path.isfile(config_state_file):
+ print(f"*** About to restore extension state from file: {config_state_file}")
+ with open(config_state_file, "r", encoding="utf-8") as f:
+ config_state = json.load(f)
+ config_states.restore_extension_config(config_state)
+ startup_timer.record("restore extension config")
+ elif config_state_file:
+ print(f"!!! Config state backup not found: {config_state_file}")
+
+
+def validate_tls_options():
+ from modules.shared_cmd_options import cmd_opts
+
+ if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):
+ return
+
+ try:
+ if not os.path.exists(cmd_opts.tls_keyfile):
+ print("Invalid path to TLS keyfile given")
+ if not os.path.exists(cmd_opts.tls_certfile):
+ print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
+ except TypeError:
+ cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
+ print("TLS setup invalid, running webui without TLS")
+ else:
+ print("Running with TLS")
+ startup_timer.record("TLS")
+
+
+def get_gradio_auth_creds():
+ """
+ Convert the gradio_auth and gradio_auth_path commandline arguments into
+ an iterable of (username, password) tuples.
+ """
+ from modules.shared_cmd_options import cmd_opts
+
+ def process_credential_line(s):
+ s = s.strip()
+ if not s:
+ return None
+ return tuple(s.split(':', 1))
+
+ if cmd_opts.gradio_auth:
+ for cred in cmd_opts.gradio_auth.split(','):
+ cred = process_credential_line(cred)
+ if cred:
+ yield cred
+
+ if cmd_opts.gradio_auth_path:
+ with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
+ for line in file.readlines():
+ for cred in line.strip().split(','):
+ cred = process_credential_line(cred)
+ if cred:
+ yield cred
+
+
+def dumpstacks():
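+    # print a stack trace for every live thread; used by the SIGINT handler when dump_stacks_on_signal is enabled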
+ import threading
+ import traceback
+
+ id2name = {th.ident: th.name for th in threading.enumerate()}
+ code = []
+ for threadId, stack in sys._current_frames().items():
+ code.append(f"\n# Thread: {id2name.get(threadId, '')}({threadId})")
+ for filename, lineno, name, line in traceback.extract_stack(stack):
+ code.append(f"""File: "{filename}", line {lineno}, in {name}""")
+ if line:
+ code.append(" " + line.strip())
+
+ print("\n".join(code))
+
+
+def configure_sigint_handler():
+ # make the program just exit at ctrl+c without waiting for anything
+
+ from modules import shared
+
+ def sigint_handler(sig, frame):
+ print(f'Interrupted with signal {sig} in {frame}')
+
+ if shared.opts.dump_stacks_on_signal:
+ dumpstacks()
+
+ os._exit(0)
+
+ if not os.environ.get("COVERAGE_RUN"):
+ # Don't install the immediate-quit handler when running under coverage,
+ # as then the coverage report won't be generated.
+ signal.signal(signal.SIGINT, sigint_handler)
+
+
+def configure_opts_onchange():
+ from modules import shared, sd_models, sd_vae, ui_tempdir, sd_hijack
+ from modules.call_queue import wrap_queued_call
+
+ shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
+ shared.opts.onchange("sd_vae", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
+ shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
+ shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
+ shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
+ shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
+ shared.opts.onchange("fp8_storage", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
+ shared.opts.onchange("cache_fp16_weight", wrap_queued_call(lambda: sd_models.reload_model_weights(forced_reload=True)), call=False)
+ startup_timer.record("opts onchange")
+
+
+def setup_middleware(app):
+ from starlette.middleware.gzip import GZipMiddleware
+
+ app.middleware_stack = None # reset current middleware to allow modifying user provided list
+ app.add_middleware(GZipMiddleware, minimum_size=1000)
+ configure_cors_middleware(app)
+ app.build_middleware_stack() # rebuild middleware stack on-the-fly
+
+
+def configure_cors_middleware(app):
+ from starlette.middleware.cors import CORSMiddleware
+ from modules.shared_cmd_options import cmd_opts
+
+ cors_options = {
+ "allow_methods": ["*"],
+ "allow_headers": ["*"],
+ "allow_credentials": True,
+ }
+ if cmd_opts.cors_allow_origins:
+ cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',')
+ if cmd_opts.cors_allow_origins_regex:
+ cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex
+ app.add_middleware(CORSMiddleware, **cors_options)
+
diff --git a/modules/interrogate.py b/modules/interrogate.py
index a3ae1dd5..35a627ca 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -10,7 +10,7 @@ import torch.hub
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
-from modules import devices, paths, shared, lowvram, modelloader, errors
+from modules import devices, paths, shared, lowvram, modelloader, errors, torch_utils
blip_image_eval_size = 384
clip_model_name = 'ViT-L/14'
@@ -131,7 +131,7 @@ class InterrogateModels:
self.clip_model = self.clip_model.to(devices.device_interrogate)
- self.dtype = next(self.clip_model.parameters()).dtype
+ self.dtype = torch_utils.get_param(self.clip_model).dtype
def send_clip_to_ram(self):
if not shared.opts.interrogate_keep_models_in_memory:
@@ -186,9 +186,8 @@ class InterrogateModels:
res = ""
shared.state.begin(job="interrogate")
try:
- if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
- lowvram.send_everything_to_cpu()
- devices.torch_gc()
+ lowvram.send_everything_to_cpu()
+ devices.torch_gc()
self.load()
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index e1c9cfbe..c2cbd8ce 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -1,20 +1,23 @@
# this script installs necessary requirements and launches the main program in webui.py
+import logging
import re
import subprocess
import os
+import shutil
import sys
import importlib.util
+import importlib.metadata
import platform
import json
from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
-from modules import timer
-
-timer.startup_timer.record("start")
+from modules.timer import startup_timer
+from modules import logging_config
args, _ = cmd_args.parser.parse_known_args()
+logging_config.setup_logging(args.loglevel)
python = sys.executable
git = os.environ.get('GIT', "git")
@@ -62,7 +65,7 @@ Use --skip-python-version-check to suppress this warning.
@lru_cache()
def commit_hash():
try:
- return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip()
+ return subprocess.check_output([git, "-C", script_path, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip()
except Exception:
return "<none>"
@@ -70,7 +73,7 @@ def commit_hash():
@lru_cache()
def git_tag():
try:
- return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip()
+ return subprocess.check_output([git, "-C", script_path, "describe", "--tags"], shell=False, encoding='utf8').strip()
except Exception:
try:
@@ -117,11 +120,16 @@ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_
def is_installed(package):
try:
- spec = importlib.util.find_spec(package)
- except ModuleNotFoundError:
- return False
+ dist = importlib.metadata.distribution(package)
+ except importlib.metadata.PackageNotFoundError:
+ try:
+ spec = importlib.util.find_spec(package)
+ except ModuleNotFoundError:
+ return False
- return spec is not None
+ return spec is not None
+
+ return dist is not None
def repo_dir(name):
@@ -141,6 +149,25 @@ def check_run_python(code: str) -> bool:
return result.returncode == 0
+def git_fix_workspace(dir, name):
+ run(f'"{git}" -C "{dir}" fetch --refetch --no-auto-gc', f"Fetching all contents for {name}", f"Couldn't fetch {name}", live=True)
+ run(f'"{git}" -C "{dir}" gc --aggressive --prune=now', f"Pruning {name}", f"Couldn't prune {name}", live=True)
+ return
+
+
+def run_git(dir, name, command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live, autofix=True):
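+    # run a git command in `dir`; if it fails and autofix is enabled, try to repair the checkout once and retry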
+ try:
+ return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live)
+ except RuntimeError:
+ if not autofix:
+ raise
+
+ print(f"{errdesc}, attempting autofix...")
+ git_fix_workspace(dir, name)
+
+ return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live)
+
+
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
@@ -148,15 +175,24 @@ def git_clone(url, dir, name, commithash=None):
if commithash is None:
return
- current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}", live=False).strip()
+ current_hash = run_git(dir, name, 'rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}", live=False).strip()
if current_hash == commithash:
return
- run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
- run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True)
+ if run_git(dir, name, 'config --get remote.origin.url', None, f"Couldn't determine {name}'s origin URL", live=False).strip() != url:
+ run_git(dir, name, f'remote set-url origin "{url}"', None, f"Failed to set {name}'s origin URL", live=False)
+
+ run_git(dir, name, 'fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}", autofix=False)
+
+ run_git(dir, name, f'checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True)
+
return
- run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True)
+ try:
+ run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True)
+ except RuntimeError:
+ shutil.rmtree(dir, ignore_errors=True)
+ raise
if commithash is not None:
        run(f'"{git}" -C "{dir}" checkout {commithash}', None, f"Couldn't checkout {name}'s hash: {commithash}")
@@ -198,7 +234,9 @@ def run_extension_installer(extension_dir):
env = os.environ.copy()
env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"
- print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
+ stdout = run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env).strip()
+ if stdout:
+ print(stdout)
except Exception as e:
errors.report(str(e))
@@ -216,7 +254,7 @@ def list_extensions(settings_file):
disabled_extensions = set(settings.get('disabled_extensions', []))
disable_all_extensions = settings.get('disable_all_extensions', 'none')
- if disable_all_extensions != 'none':
+ if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions or not os.path.isdir(extensions_dir):
return []
return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]
@@ -226,8 +264,15 @@ def run_extensions_installers(settings_file):
if not os.path.isdir(extensions_dir):
return
- for dirname_extension in list_extensions(settings_file):
- run_extension_installer(os.path.join(extensions_dir, dirname_extension))
+ with startup_timer.subcategory("run extensions installers"):
+ for dirname_extension in list_extensions(settings_file):
+ logging.debug(f"Installing {dirname_extension}")
+
+ path = os.path.join(extensions_dir, dirname_extension)
+
+ if os.path.isdir(path):
+ run_extension_installer(path)
+ startup_timer.record(dirname_extension)
re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
@@ -269,29 +314,46 @@ def requirements_met(requirements_file):
def prepare_environment():
- torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
- torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
+ torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu121")
+ torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.1.2 torchvision==0.16.2 --extra-index-url {torch_index_url}")
+ if args.use_ipex:
+ if platform.system() == "Windows":
+ # The "Nuullll/intel-extension-for-pytorch" wheels were built from IPEX source for Intel Arc GPU: https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main
+ # This is NOT an Intel official release so please use it at your own risk!!
+ # See https://github.com/Nuullll/intel-extension-for-pytorch/releases/tag/v2.0.110%2Bxpu-master%2Bdll-bundle for details.
+ #
+ # Strengths (over official IPEX 2.0.110 windows release):
+ # - AOT build (for Arc GPU only) to eliminate JIT compilation overhead: https://github.com/intel/intel-extension-for-pytorch/issues/399
+ # - Bundles minimal oneAPI 2023.2 dependencies into the python wheels, so users don't need to install oneAPI for the whole system.
+ # - Provides a compatible torchvision wheel: https://github.com/intel/intel-extension-for-pytorch/issues/465
+ # Limitation:
+ # - Only works for python 3.10
+ url_prefix = "https://github.com/Nuullll/intel-extension-for-pytorch/releases/download/v2.0.110%2Bxpu-master%2Bdll-bundle"
+ torch_command = os.environ.get('TORCH_COMMAND', f"pip install {url_prefix}/torch-2.0.0a0+gite9ebda2-cp310-cp310-win_amd64.whl {url_prefix}/torchvision-0.15.2a0+fa99a53-cp310-cp310-win_amd64.whl {url_prefix}/intel_extension_for_pytorch-2.0.110+gitc6ea20b-cp310-cp310-win_amd64.whl")
+ else:
+ # Using official IPEX release for linux since it's already an AOT build.
+ # However, users still have to install oneAPI toolkit and activate oneAPI environment manually.
+ # See https://intel.github.io/intel-extension-for-pytorch/index.html#installation for details.
+ torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://pytorch-extension.intel.com/release-whl/stable/xpu/us/")
+ torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.0a0 intel-extension-for-pytorch==2.0.110+gitba7f6c1 --extra-index-url {torch_index_url}")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
- xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
- gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip")
+ xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23.post1')
clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
stable_diffusion_xl_repo = os.environ.get('STABLE_DIFFUSION_XL_REPO', "https://github.com/Stability-AI/generative-models.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
- codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
- stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "5c10deee76adad0032b412294130090932317a87")
- k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "c9fe758757e022f05ca5a53fa8fac28889e4f1cf")
- codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
+ stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "45c443b316737a4ab6e40413d7794a7f5657c19f")
+ k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "ab527a9a6d347f364e3d185ba6d714e22d80cb3c")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
try:
- # the existance of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
+ # the existence of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
os.remove(os.path.join(script_path, "tmp", "restart"))
os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
except OSError:
@@ -300,8 +362,11 @@ def prepare_environment():
if not args.skip_python_version_check:
check_python_version()
+ startup_timer.record("checks")
+
commit = commit_hash()
tag = git_tag()
+ startup_timer.record("git version info")
print(f"Python {sys.version}")
print(f"Version: {tag}")
@@ -309,61 +374,59 @@ def prepare_environment():
if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
+ startup_timer.record("install torch")
+ if args.use_ipex:
+ args.skip_torch_cuda_test = True
if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"):
raise RuntimeError(
'Torch is not able to use GPU; '
'add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'
)
-
- if not is_installed("gfpgan"):
- run_pip(f"install {gfpgan_package}", "gfpgan")
+ startup_timer.record("torch GPU test")
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
+ startup_timer.record("install clip")
if not is_installed("open_clip"):
run_pip(f"install {openclip_package}", "open_clip")
+ startup_timer.record("install open_clip")
if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers:
- if platform.system() == "Windows":
- if platform.python_version().startswith("3.10"):
- run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
- else:
- print("Installation of xformers is not supported in this version of Python.")
- print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
- if not is_installed("xformers"):
- exit(0)
- elif platform.system() == "Linux":
- run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
+ run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
+ startup_timer.record("install xformers")
if not is_installed("ngrok") and args.ngrok:
run_pip("install ngrok", "ngrok")
+ startup_timer.record("install ngrok")
os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)
git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone(stable_diffusion_xl_repo, repo_dir('generative-models'), "Stable Diffusion XL", stable_diffusion_xl_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
- git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
- if not is_installed("lpips"):
- run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer")
+ startup_timer.record("clone repositores")
if not os.path.isfile(requirements_file):
requirements_file = os.path.join(script_path, requirements_file)
if not requirements_met(requirements_file):
run_pip(f"install -r \"{requirements_file}\"", "requirements")
+ startup_timer.record("install requirements")
- run_extensions_installers(settings_file=args.ui_settings_file)
+ if not args.skip_install:
+ run_extensions_installers(settings_file=args.ui_settings_file)
if args.update_check:
version_check(commit)
+ startup_timer.record("check version")
if args.update_all_extensions:
git_pull_recursive(extensions_dir)
+ startup_timer.record("update extensions")
if "--exit" in sys.argv:
print("Exiting because of --exit argument")
@@ -392,3 +455,16 @@ def start():
webui.api_only()
else:
webui.webui()
+
+
+def dump_sysinfo():
+ from modules import sysinfo
+ import datetime
+
+ text = sysinfo.get()
+ filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json"
+
+ with open(filename, "w", encoding="utf8") as file:
+ file.write(text)
+
+ return filename
diff --git a/modules/localization.py b/modules/localization.py
index e8f585da..108f792e 100644
--- a/modules/localization.py
+++ b/modules/localization.py
@@ -1,7 +1,7 @@
import json
import os
-from modules import errors
+from modules import errors, scripts
localizations = {}
@@ -14,22 +14,24 @@ def list_localizations(dirname):
if ext.lower() != ".json":
continue
- localizations[fn] = os.path.join(dirname, file)
+ localizations[fn] = [os.path.join(dirname, file)]
- from modules import scripts
for file in scripts.list_scripts("localizations", ".json"):
fn, ext = os.path.splitext(file.filename)
- localizations[fn] = file.path
+ if fn not in localizations:
+ localizations[fn] = []
+ localizations[fn].append(file.path)
def localization_js(current_localization_name: str) -> str:
- fn = localizations.get(current_localization_name, None)
+ fns = localizations.get(current_localization_name, None)
data = {}
- if fn is not None:
- try:
- with open(fn, "r", encoding="utf8") as file:
- data = json.load(file)
- except Exception:
- errors.report(f"Error loading localization from {fn}", exc_info=True)
+ if fns is not None:
+ for fn in fns:
+ try:
+ with open(fn, "r", encoding="utf8") as file:
+ data.update(json.load(file))
+ except Exception:
+ errors.report(f"Error loading localization from {fn}", exc_info=True)
return f"window.localization = {json.dumps(data)}"
diff --git a/modules/logging_config.py b/modules/logging_config.py
new file mode 100644
index 00000000..79269875
--- /dev/null
+++ b/modules/logging_config.py
@@ -0,0 +1,41 @@
+import os
+import logging
+
+try:
+ from tqdm.auto import tqdm
+
+ class TqdmLoggingHandler(logging.Handler):
+ def __init__(self, level=logging.INFO):
+ super().__init__(level)
+
+ def emit(self, record):
+ try:
+ msg = self.format(record)
+ tqdm.write(msg)
+ self.flush()
+ except Exception:
+ self.handleError(record)
+
+ TQDM_IMPORTED = True
+except ImportError:
+ # tqdm does not exist before first launch
+    # it will be imported once the UI finishes setting up the environment and reloads.
+ TQDM_IMPORTED = False
+
+def setup_logging(loglevel):
+ if loglevel is None:
+ loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL")
+
+ loghandlers = []
+
+ if TQDM_IMPORTED:
+ loghandlers.append(TqdmLoggingHandler())
+
+ if loglevel:
+ log_level = getattr(logging, loglevel.upper(), None) or logging.INFO
+ logging.basicConfig(
+ level=log_level,
+ format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S',
+ handlers=loghandlers
+ )
diff --git a/modules/lowvram.py b/modules/lowvram.py
index 3f830664..45701046 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -1,5 +1,5 @@
import torch
-from modules import devices
+from modules import devices, shared
module_in_gpu = None
cpu = torch.device("cpu")
@@ -14,7 +14,24 @@ def send_everything_to_cpu():
module_in_gpu = None
+def is_needed(sd_model):
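+    # note: `and` binds tighter than `or`, so --medvram-sdxl only applies to models with a 'conditioner' attribute (i.e. SDXL)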
+ return shared.cmd_opts.lowvram or shared.cmd_opts.medvram or shared.cmd_opts.medvram_sdxl and hasattr(sd_model, 'conditioner')
+
+
+def apply(sd_model):
+ enable = is_needed(sd_model)
+ shared.parallel_processing_allowed = not enable
+
+ if enable:
+ setup_for_low_vram(sd_model, not shared.cmd_opts.lowvram)
+ else:
+ sd_model.lowvram = False
+
+
def setup_for_low_vram(sd_model, use_medvram):
+ if getattr(sd_model, 'lowvram', False):
+ return
+
sd_model.lowvram = True
parents = {}
@@ -127,4 +144,4 @@ def setup_for_low_vram(sd_model, use_medvram):
def is_enabled(sd_model):
- return getattr(sd_model, 'lowvram', False)
+ return sd_model.lowvram
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 9ceb43ba..d96d86d7 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -1,9 +1,11 @@
import logging
import torch
+from torch import Tensor
import platform
from modules.sd_hijack_utils import CondFunc
from packaging import version
+from modules import shared
log = logging.getLogger(__name__)
@@ -30,8 +32,7 @@ has_mps = check_for_mps()
def torch_mps_gc() -> None:
try:
- from modules.shared import state
- if state.current_latent is not None:
+ if shared.state.current_latent is not None:
log.debug("`current_latent` is set, skipping MPS garbage collection")
return
from torch.mps import empty_cache
@@ -51,10 +52,18 @@ def cumsum_fix(input, cumsum_func, *args, **kwargs):
return cumsum_func(input, *args, **kwargs)
-if has_mps:
- # MPS fix for randn in torchsde
- CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps')
+# MPS workaround for https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046
+def interpolate_with_fp32_fallback(orig_func, *args, **kwargs) -> Tensor:
+ try:
+ return orig_func(*args, **kwargs)
+ except RuntimeError as e:
+ if "not implemented for" in str(e) and "Half" in str(e):
+ input_tensor = args[0]
+ return orig_func(input_tensor.to(torch.float32), *args[1:], **kwargs).to(input_tensor.dtype)
+        else:
+            print(f"An unexpected RuntimeError occurred: {str(e)}")
+            raise
+if has_mps:
if platform.mac_ver()[0].startswith("13.2."):
# MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)
@@ -80,6 +89,9 @@ if has_mps:
# MPS workaround for https://github.com/pytorch/pytorch/issues/96113
CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps')
+ # MPS workaround for https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046
+ CondFunc('torch.nn.functional.interpolate', interpolate_with_fp32_fallback, None)
+
# MPS workaround for https://github.com/pytorch/pytorch/issues/92311
if platform.processor() == 'i386':
for funcName in ['torch.argmax', 'torch.Tensor.argmax']:
diff --git a/modules/modelloader.py b/modules/modelloader.py
index 098bcb79..a7194137 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -1,13 +1,20 @@
from __future__ import annotations
-import os
-import shutil
import importlib
+import logging
+import os
+from typing import TYPE_CHECKING
from urllib.parse import urlparse
+import torch
+
from modules import shared
from modules.upscaler import Upscaler, UpscalerLanczos, UpscalerNearest, UpscalerNone
-from modules.paths import script_path, models_path
+
+if TYPE_CHECKING:
+ import spandrel
+
+logger = logging.getLogger(__name__)
def load_file_from_url(
@@ -90,54 +97,6 @@ def friendly_name(file: str):
return model_name
-def cleanup_models():
- # This code could probably be more efficient if we used a tuple list or something to store the src/destinations
- # and then enumerate that, but this works for now. In the future, it'd be nice to just have every "model" scaler
- # somehow auto-register and just do these things...
- root_path = script_path
- src_path = models_path
- dest_path = os.path.join(models_path, "Stable-diffusion")
- move_files(src_path, dest_path, ".ckpt")
- move_files(src_path, dest_path, ".safetensors")
- src_path = os.path.join(root_path, "ESRGAN")
- dest_path = os.path.join(models_path, "ESRGAN")
- move_files(src_path, dest_path)
- src_path = os.path.join(models_path, "BSRGAN")
- dest_path = os.path.join(models_path, "ESRGAN")
- move_files(src_path, dest_path, ".pth")
- src_path = os.path.join(root_path, "gfpgan")
- dest_path = os.path.join(models_path, "GFPGAN")
- move_files(src_path, dest_path)
- src_path = os.path.join(root_path, "SwinIR")
- dest_path = os.path.join(models_path, "SwinIR")
- move_files(src_path, dest_path)
- src_path = os.path.join(root_path, "repositories/latent-diffusion/experiments/pretrained_models/")
- dest_path = os.path.join(models_path, "LDSR")
- move_files(src_path, dest_path)
-
-
-def move_files(src_path: str, dest_path: str, ext_filter: str = None):
- try:
- os.makedirs(dest_path, exist_ok=True)
- if os.path.exists(src_path):
- for file in os.listdir(src_path):
- fullpath = os.path.join(src_path, file)
- if os.path.isfile(fullpath):
- if ext_filter is not None:
- if ext_filter not in file:
- continue
- print(f"Moving {file} from {src_path} to {dest_path}.")
- try:
- shutil.move(fullpath, dest_path)
- except Exception:
- pass
- if len(os.listdir(src_path)) == 0:
- print(f"Removing empty folder: {src_path}")
- shutil.rmtree(src_path, True)
- except Exception:
- pass
-
-
def load_upscalers():
# We can only do this 'magic' method to dynamically load upscalers if they are referenced,
# so we'll try to import any _model.py files before looking in __subclasses__
@@ -177,3 +136,26 @@ def load_upscalers():
# Special case for UpscalerNone keeps it at the beginning of the list.
key=lambda x: x.name.lower() if not isinstance(x.scaler, (UpscalerNone, UpscalerLanczos, UpscalerNearest)) else ""
)
+
+
+def load_spandrel_model(
+ path: str,
+ *,
+ device: str | torch.device | None,
+ half: bool = False,
+ dtype: str | torch.dtype | None = None,
+ expected_architecture: str | None = None,
+) -> spandrel.ModelDescriptor:
+ import spandrel
+ model_descriptor = spandrel.ModelLoader(device=device).load_from_file(path)
+ if expected_architecture and model_descriptor.architecture != expected_architecture:
+ logger.warning(
+ f"Model {path!r} is not a {expected_architecture!r} model (got {model_descriptor.architecture!r})",
+ )
+ if half:
+ model_descriptor.model.half()
+ if dtype:
+ model_descriptor.model.to(dtype=dtype)
+ model_descriptor.model.eval()
+ logger.debug("Loaded %s from %s (device=%s, half=%s, dtype=%s)", model_descriptor, path, device, half, dtype)
+ return model_descriptor
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py
index b892d5fc..6db340da 100644
--- a/modules/models/diffusion/ddpm_edit.py
+++ b/modules/models/diffusion/ddpm_edit.py
@@ -24,10 +24,15 @@ from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
-from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
+from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
+try:
+ from ldm.models.autoencoder import VQModelInterface
+except Exception:
+ class VQModelInterface:
+ pass
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
diff --git a/modules/options.py b/modules/options.py
new file mode 100644
index 00000000..4fead690
--- /dev/null
+++ b/modules/options.py
@@ -0,0 +1,308 @@
+import json
+import sys
+from dataclasses import dataclass
+
+import gradio as gr
+
+from modules import errors
+from modules.shared_cmd_options import cmd_opts
+
+
+class OptionInfo:
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False, category_id=None):
+ self.default = default
+ self.label = label
+ self.component = component
+ self.component_args = component_args
+ self.onchange = onchange
+ self.section = section
+ self.category_id = category_id
+ self.refresh = refresh
+ self.do_not_save = False
+
+ self.comment_before = comment_before
+ """HTML text that will be added after label in UI"""
+
+ self.comment_after = comment_after
+ """HTML text that will be added before label in UI"""
+
+ self.infotext = infotext
+
+ self.restrict_api = restrict_api
+ """If True, the setting will not be accessible via API"""
+
+ def link(self, label, url):
+ self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]"
+ return self
+
+ def js(self, label, js_func):
+ self.comment_before += f"[<a onclick='{js_func}(); return false'>{label}</a>]"
+ return self
+
+ def info(self, info):
+ self.comment_after += f"<span class='info'>({info})</span>"
+ return self
+
+ def html(self, html):
+ self.comment_after += html
+ return self
+
+ def needs_restart(self):
+ self.comment_after += " <span class='info'>(requires restart)</span>"
+ return self
+
+ def needs_reload_ui(self):
+ self.comment_after += " <span class='info'>(requires Reload UI)</span>"
+ return self
+
+
+class OptionHTML(OptionInfo):
+ def __init__(self, text):
+ super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs))
+
+ self.do_not_save = True
+
+
+def options_section(section_identifier, options_dict):
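+    # section_identifier is either (section id, section label) or (section id, section label, category id)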
+ for v in options_dict.values():
+ if len(section_identifier) == 2:
+ v.section = section_identifier
+ elif len(section_identifier) == 3:
+ v.section = section_identifier[0:2]
+ v.category_id = section_identifier[2]
+
+ return options_dict
+
+
+options_builtin_fields = {"data_labels", "data", "restricted_opts", "typemap"}
+
+
+class Options:
+ typemap = {int: float}
+
+ def __init__(self, data_labels: dict[str, OptionInfo], restricted_opts):
+ self.data_labels = data_labels
+ self.data = {k: v.default for k, v in self.data_labels.items() if not v.do_not_save}
+ self.restricted_opts = restricted_opts
+
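+    # option values are exposed as attributes: reads fall back to the OptionInfo default,
+    # writes are validated against freeze/restriction flags before being stored in self.data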
+ def __setattr__(self, key, value):
+ if key in options_builtin_fields:
+ return super(Options, self).__setattr__(key, value)
+
+ if self.data is not None:
+ if key in self.data or key in self.data_labels:
+ assert not cmd_opts.freeze_settings, "changing settings is disabled"
+
+ info = self.data_labels.get(key, None)
+            if info and info.do_not_save:
+ return
+
+ comp_args = info.component_args if info else None
+ if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+ if cmd_opts.hide_ui_dir_config and key in self.restricted_opts:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+ self.data[key] = value
+ return
+
+ return super(Options, self).__setattr__(key, value)
+
+ def __getattr__(self, item):
+ if item in options_builtin_fields:
+ return super(Options, self).__getattribute__(item)
+
+ if self.data is not None:
+ if item in self.data:
+ return self.data[item]
+
+ if item in self.data_labels:
+ return self.data_labels[item].default
+
+ return super(Options, self).__getattribute__(item)
+
+ def set(self, key, value, is_api=False, run_callbacks=True):
+ """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
+
+ oldval = self.data.get(key, None)
+ if oldval == value:
+ return False
+
+ option = self.data_labels[key]
+ if option.do_not_save:
+ return False
+
+ if is_api and option.restrict_api:
+ return False
+
+ try:
+ setattr(self, key, value)
+ except RuntimeError:
+ return False
+
+ if run_callbacks and option.onchange is not None:
+ try:
+ option.onchange()
+ except Exception as e:
+ errors.display(e, f"changing setting {key} to {value}")
+ setattr(self, key, oldval)
+ return False
+
+ return True
+
+ def get_default(self, key):
+ """returns the default value for the key"""
+
+ data_label = self.data_labels.get(key)
+ if data_label is None:
+ return None
+
+ return data_label.default
+
+ def save(self, filename):
+ assert not cmd_opts.freeze_settings, "saving settings is disabled"
+
+ with open(filename, "w", encoding="utf8") as file:
+ json.dump(self.data, file, indent=4, ensure_ascii=False)
+
+ def same_type(self, x, y):
+ if x is None or y is None:
+ return True
+
+ type_x = self.typemap.get(type(x), type(x))
+ type_y = self.typemap.get(type(y), type(y))
+
+ return type_x == type_y
+
+ def load(self, filename):
+ with open(filename, "r", encoding="utf8") as file:
+ self.data = json.load(file)
+
+ # 1.6.0 VAE defaults
+ if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None:
+ self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default')
+
+ # 1.1.1 quicksettings list migration
+ if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
+ self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
+
+ # 1.4.0 ui_reorder
+ if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
+ self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
+
+ bad_settings = 0
+ for k, v in self.data.items():
+ info = self.data_labels.get(k, None)
+ if info is not None and not self.same_type(info.default, v):
+ print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
+ bad_settings += 1
+
+ if bad_settings > 0:
+ print(f"The program may not work correctly with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
+
+ def onchange(self, key, func, call=True):
+ item = self.data_labels.get(key)
+ item.onchange = func
+
+ if call:
+ func()
+
+ def dumpjson(self):
+ d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
+ d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
+ d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
+
+ item_categories = {}
+ for item in self.data_labels.values():
+ category = categories.mapping.get(item.category_id)
+ category = "Uncategorized" if category is None else category.label
+ if category not in item_categories:
+ item_categories[category] = item.section[1]
+
+ # _categories is a list of pairs: [section, category]. Each section (a setting page) will get a special heading above it with the category as text.
+ d["_categories"] = [[v, k] for k, v in item_categories.items()] + [["Defaults", "Other"]]
+
+ return json.dumps(d)
+
+ def add_option(self, key, info):
+ self.data_labels[key] = info
+ if key not in self.data and not info.do_not_save:
+ self.data[key] = info.default
+
+ def reorder(self):
+ """Reorder settings so that:
+ - all items related to section always go together
+ - all sections belonging to a category go together
+ - sections inside a category are ordered alphabetically
+ - categories are ordered by creation order
+
+ A category can contain multiple sections: for example, the category "postprocessing" could include the sections "face restoration" and "upscaling".
+
+ This function also changes items' category_id so that all items belonging to a section have the same category_id.
+ """
+
+ category_ids = {}
+ section_categories = {}
+
+ settings_items = self.data_labels.items()
+ for _, item in settings_items:
+ if item.section not in section_categories:
+ section_categories[item.section] = item.category_id
+
+ for _, item in settings_items:
+ item.category_id = section_categories.get(item.section)
+
+ for category_id in categories.mapping:
+ if category_id not in category_ids:
+ category_ids[category_id] = len(category_ids)
+
+ def sort_key(x):
+ item: OptionInfo = x[1]
+ category_order = category_ids.get(item.category_id, len(category_ids))
+ section_order = item.section[1]
+
+ return category_order, section_order
+
+ self.data_labels = dict(sorted(settings_items, key=sort_key))
+
+ def cast_value(self, key, value):
+ """casts an arbitrary value to the same type as the value of the setting identified by key
+ Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
+ """
+
+ if value is None:
+ return None
+
+ default_value = self.data_labels[key].default
+ if default_value is None:
+ default_value = getattr(self, key, None)
+ if default_value is None:
+ return None
+
+ expected_type = type(default_value)
+ if expected_type == bool and value == "False":
+ value = False
+ else:
+ value = expected_type(value)
+
+ return value
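+ # Note: bool settings are special-cased because bool("False") would be True;
+ # every other type is cast with expected_type(value), e.g. int("12") -> 12.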
+
+
+@dataclass
+class OptionsCategory:
+ id: str
+ label: str
+
+class OptionsCategories:
+ def __init__(self):
+ self.mapping = {}
+
+ def register_category(self, category_id, label):
+ if category_id in self.mapping:
+ return category_id
+
+ self.mapping[category_id] = OptionsCategory(category_id, label)
+
+
+categories = OptionsCategories()
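+# Illustrative usage (hypothetical label): categories.register_category("postprocessing", "Postprocessing")
+# registers a category id that OptionInfo items can reference and that reorder()/dumpjson() group by.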
diff --git a/modules/patches.py b/modules/patches.py
new file mode 100644
index 00000000..348235e7
--- /dev/null
+++ b/modules/patches.py
@@ -0,0 +1,64 @@
+from collections import defaultdict
+
+
+def patch(key, obj, field, replacement):
+ """Replaces a function in a module or a class.
+
+ Also stores the original function in this module, so it can later be retrieved via original(key, obj, field).
+ If the function has already been replaced by this caller (key), an exception is raised -- call undo() first.
+
+ Arguments:
+ key: identifying information for who is doing the replacement. You can use __name__.
+ obj: the module or the class
+ field: name of the function as a string
+ replacement: the new function
+
+ Returns:
+ the original function
+ """
+
+ patch_key = (obj, field)
+ if patch_key in originals[key]:
+ raise RuntimeError(f"patch for {field} is already applied")
+
+ original_func = getattr(obj, field)
+ originals[key][patch_key] = original_func
+
+ setattr(obj, field, replacement)
+
+ return original_func
+
+
+def undo(key, obj, field):
+ """Undoes the replacement made by patch().
+
+ If the function is not replaced, raises an exception.
+
+ Arguments:
+ key: identifying information for who is doing the replacement. You can use __name__.
+ obj: the module or the class
+ field: name of the function as a string
+
+ Returns:
+ Always None
+ """
+
+ patch_key = (obj, field)
+
+ if patch_key not in originals[key]:
+ raise RuntimeError(f"there is no patch for {field} to undo")
+
+ original_func = originals[key].pop(patch_key)
+ setattr(obj, field, original_func)
+
+ return None
+
+
+def original(key, obj, field):
+ """Returns the original function for the patch created by the patch() function"""
+ patch_key = (obj, field)
+
+ return originals[key].get(patch_key, None)
+
+
+originals = defaultdict(dict)
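+# Illustrative usage sketch (hypothetical target object and function):
+#   stored = patch(__name__, SomeClass, "forward", new_forward)   # replace and keep the original
+#   assert original(__name__, SomeClass, "forward") is stored      # look up the stored original
+#   undo(__name__, SomeClass, "forward")                           # restore it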
diff --git a/modules/paths.py b/modules/paths.py
index 25052339..03064651 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -1,6 +1,6 @@
import os
import sys
-from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir # noqa: F401
+from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, cwd # noqa: F401
import modules.safe # noqa: F401
@@ -38,7 +38,6 @@ mute_sdxl_imports()
path_dirs = [
(sd_path, 'ldm', 'Stable Diffusion', []),
(os.path.join(sd_path, '../generative-models'), 'sgm', 'Stable Diffusion XL', ["sgm"]),
- (os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer', []),
(os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []),
(os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]),
]
diff --git a/modules/paths_internal.py b/modules/paths_internal.py
index 005a9b0a..b86ecd7f 100644
--- a/modules/paths_internal.py
+++ b/modules/paths_internal.py
@@ -8,6 +8,7 @@ import shlex
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
sys.argv += shlex.split(commandline_args)
+cwd = os.getcwd()
modules_path = os.path.dirname(os.path.realpath(__file__))
script_path = os.path.dirname(modules_path)
@@ -27,5 +28,6 @@ models_path = os.path.join(data_path, "models")
extensions_dir = os.path.join(data_path, "extensions")
extensions_builtin_dir = os.path.join(script_path, "extensions-builtin")
config_states_dir = os.path.join(script_path, "config_states")
+default_output_dir = os.path.join(data_path, "output")
roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf')
diff --git a/modules/postprocessing.py b/modules/postprocessing.py
index 136e9c88..facea899 100644
--- a/modules/postprocessing.py
+++ b/modules/postprocessing.py
@@ -2,7 +2,7 @@ import os
from PIL import Image
-from modules import shared, images, devices, scripts, scripts_postprocessing, ui_common, generation_parameters_copypaste
+from modules import shared, images, devices, scripts, scripts_postprocessing, ui_common
from modules.shared import opts
@@ -11,37 +11,28 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
shared.state.begin(job="extras")
- image_data = []
- image_names = []
outputs = []
- if extras_mode == 1:
- for img in image_folder:
- if isinstance(img, Image.Image):
- image = img
- fn = ''
- else:
- image = Image.open(os.path.abspath(img.name))
- fn = os.path.splitext(img.orig_name)[0]
- image_data.append(image)
- image_names.append(fn)
- elif extras_mode == 2:
- assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled'
- assert input_dir, 'input directory not selected'
-
- image_list = shared.listfiles(input_dir)
- for filename in image_list:
- try:
- image = Image.open(filename)
- except Exception:
- continue
- image_data.append(image)
- image_names.append(filename)
- else:
- assert image, 'image not selected'
-
- image_data.append(image)
- image_names.append(None)
+ def get_images(extras_mode, image, image_folder, input_dir):
+ if extras_mode == 1:
+ for img in image_folder:
+ if isinstance(img, Image.Image):
+ image = img
+ fn = ''
+ else:
+ image = Image.open(os.path.abspath(img.name))
+ fn = os.path.splitext(img.orig_name)[0]
+ yield image, fn
+ elif extras_mode == 2:
+ assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled'
+ assert input_dir, 'input directory not selected'
+
+ image_list = shared.listfiles(input_dir)
+ for filename in image_list:
+ yield filename, filename
+ else:
+ assert image, 'image not selected'
+ yield image, None
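+ # Mode 2 yields (filename, filename) so files are opened lazily in the loop below;
+ # the other modes yield an already-loaded PIL.Image with a display name ('' or None).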
if extras_mode == 2 and output_dir != '':
outpath = output_dir
@@ -50,39 +41,97 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
infotext = ''
- for image, name in zip(image_data, image_names):
- shared.state.textinfo = name
+ data_to_process = list(get_images(extras_mode, image, image_folder, input_dir))
+ shared.state.job_count = len(data_to_process)
- parameters, existing_pnginfo = images.read_info_from_image(image)
- if parameters:
- existing_pnginfo["parameters"] = parameters
+ for image_placeholder, name in data_to_process:
+ image_data: Image.Image
- pp = scripts_postprocessing.PostprocessedImage(image.convert("RGB"))
+ shared.state.nextjob()
+ shared.state.textinfo = name
+ shared.state.skipped = False
- scripts.scripts_postproc.run(pp, args)
+ if shared.state.interrupted:
+ break
- if opts.use_original_name_batch and name is not None:
- basename = os.path.splitext(os.path.basename(name))[0]
+ if isinstance(image_placeholder, str):
+ try:
+ image_data = Image.open(image_placeholder)
+ except Exception:
+ continue
else:
- basename = ''
+ image_data = image_placeholder
+
+ shared.state.assign_current_image(image_data)
- infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None])
+ parameters, existing_pnginfo = images.read_info_from_image(image_data)
+ if parameters:
+ existing_pnginfo["parameters"] = parameters
- if opts.enable_pnginfo:
- pp.image.info = existing_pnginfo
- pp.image.info["postprocessing"] = infotext
+ initial_pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB"))
- if save_output:
- images.save_image(pp.image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None)
+ scripts.scripts_postproc.run(initial_pp, args)
- if extras_mode != 2 or show_extras_results:
- outputs.append(pp.image)
+ if shared.state.skipped:
+ continue
- devices.torch_gc()
+ used_suffixes = {}
+ for pp in [initial_pp, *initial_pp.extra_images]:
+ suffix = pp.get_suffix(used_suffixes)
+ if opts.use_original_name_batch and name is not None:
+ basename = os.path.splitext(os.path.basename(name))[0]
+ forced_filename = basename + suffix
+ else:
+ basename = ''
+ forced_filename = None
+
+ infotext = ", ".join([k if k == v else f'{k}: {infotext.quote(v)}' for k, v in pp.info.items() if v is not None])
+
+ if opts.enable_pnginfo:
+ pp.image.info = existing_pnginfo
+ pp.image.info["postprocessing"] = infotext
+
+ if save_output:
+ fullfn, _ = images.save_image(pp.image, path=outpath, basename=basename, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=forced_filename, suffix=suffix)
+
+ if pp.caption:
+ caption_filename = os.path.splitext(fullfn)[0] + ".txt"
+ if os.path.isfile(caption_filename):
+ with open(caption_filename, encoding="utf8") as file:
+ existing_caption = file.read().strip()
+ else:
+ existing_caption = ""
+
+ action = shared.opts.postprocessing_existing_caption_action
+ if action == 'Prepend' and existing_caption:
+ caption = f"{existing_caption} {pp.caption}"
+ elif action == 'Append' and existing_caption:
+ caption = f"{pp.caption} {existing_caption}"
+ elif action == 'Keep' and existing_caption:
+ caption = existing_caption
+ else:
+ caption = pp.caption
+
+ caption = caption.strip()
+ if caption:
+ with open(caption_filename, "w", encoding="utf8") as file:
+ file.write(caption)
+
+ if extras_mode != 2 or show_extras_results:
+ outputs.append(pp.image)
+
+ image_data.close()
+
+ devices.torch_gc()
+ shared.state.end()
return outputs, ui_common.plaintext_to_html(infotext), ''
+def run_postprocessing_webui(id_task, *args, **kwargs):
+ return run_postprocessing(*args, **kwargs)
+
+
def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True):
"""old handler for API"""
@@ -98,9 +147,11 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
"upscaler_2_visibility": extras_upscaler_2_visibility,
},
"GFPGAN": {
+ "enable": True,
"gfpgan_visibility": gfpgan_visibility,
},
"CodeFormer": {
+ "enable": True,
"codeformer_visibility": codeformer_visibility,
"codeformer_weight": codeformer_weight,
},
diff --git a/modules/processing.py b/modules/processing.py
index b0992ee1..846e4796 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1,9 +1,11 @@
+from __future__ import annotations
import json
import logging
import math
import os
import sys
import hashlib
+from dataclasses import dataclass, field
import torch
import numpy as np
@@ -11,11 +13,13 @@ from PIL import Image, ImageOps
import random
import cv2
from skimage import exposure
-from typing import Any, Dict, List
+from typing import Any
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, infotext, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
+from modules.rng import slerp # noqa: F401
from modules.sd_hijack import model_hijack
+from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.paths as paths
@@ -55,21 +59,25 @@ def apply_color_correction(correction, original_image):
image = blendLayers(image, original_image, BlendType.LUMINOSITY)
+ return image.convert('RGB')
+
+
+def uncrop(image, dest_size, paste_loc):
+ x, y, w, h = paste_loc
+ base_image = Image.new('RGBA', dest_size)
+ image = images.resize_image(1, image, w, h)
+ base_image.paste(image, (x, y))
+ image = base_image
+
return image
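+# uncrop() pastes the processed region back onto a transparent full-size canvas: it resizes
+# image to (w, h) and places it at (x, y), so apply_overlay() below and the mask-composite
+# step later in process_images_inner can operate on a full-size image.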
-def apply_overlay(image, paste_loc, index, overlays):
- if overlays is None or index >= len(overlays):
+def apply_overlay(image, paste_loc, overlay):
+ if overlay is None:
return image
- overlay = overlays[index]
-
if paste_loc is not None:
- x, y, w, h = paste_loc
- base_image = Image.new('RGBA', (overlay.width, overlay.height))
- image = images.resize_image(1, image, w, h)
- base_image.paste(image, (x, y))
- image = base_image
+ image = uncrop(image, (overlay.width, overlay.height), paste_loc)
image = image.convert('RGBA')
image.alpha_composite(overlay)
@@ -77,13 +85,22 @@ def apply_overlay(image, paste_loc, index, overlays):
return image
+def create_binary_mask(image, round=True):
+ if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255):
+ if round:
+ image = image.split()[-1].convert("L").point(lambda x: 255 if x > 128 else 0)
+ else:
+ image = image.split()[-1].convert("L")
+ else:
+ image = image.convert('L')
+ return image
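+# e.g. an RGBA image whose alpha channel is not fully opaque is reduced to that alpha channel;
+# with round=True, pixels above 128 become 255 and the rest 0, giving a hard binary mask.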
def txt2img_image_conditioning(sd_model, x, width, height):
if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models
- # The "masked-image" in this case will just be all zeros since the entire image is masked.
- image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
- image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))
+ # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+ image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+ image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))
# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
@@ -96,100 +113,186 @@ def txt2img_image_conditioning(sd_model, x, width, height):
return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
else:
+ sd = sd_model.model.state_dict()
+ diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+ if diffusion_model_input is not None:
+ if diffusion_model_input.shape[1] == 9:
+ # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+ image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+ image_conditioning = images_tensor_to_samples(image_conditioning,
+ approximation_indexes.get(opts.sd_vae_encode_method))
+
+ # Add the fake full 1s mask to the first dimension.
+ image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+ image_conditioning = image_conditioning.to(x.dtype)
+
+ return image_conditioning
+
# Dummy zero conditioning if we're not using inpainting or unclip models.
# Still takes up a bit of memory, but no encoder call.
# Pretty sure we can just make this a 1x1 image since it's not going to be used besides its batch size.
return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
+@dataclass(repr=False)
class StableDiffusionProcessing:
- """
- The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
- """
+ sd_model: object = None
+ outpath_samples: str = None
+ outpath_grids: str = None
+ prompt: str = ""
+ prompt_for_display: str = None
+ negative_prompt: str = ""
+ styles: list[str] = None
+ seed: int = -1
+ subseed: int = -1
+ subseed_strength: float = 0
+ seed_resize_from_h: int = -1
+ seed_resize_from_w: int = -1
+ seed_enable_extras: bool = True
+ sampler_name: str = None
+ batch_size: int = 1
+ n_iter: int = 1
+ steps: int = 50
+ cfg_scale: float = 7.0
+ width: int = 512
+ height: int = 512
+ restore_faces: bool = None
+ tiling: bool = None
+ do_not_save_samples: bool = False
+ do_not_save_grid: bool = False
+ extra_generation_params: dict[str, Any] = None
+ overlay_images: list = None
+ eta: float = None
+ do_not_reload_embeddings: bool = False
+ denoising_strength: float = None
+ ddim_discretize: str = None
+ s_min_uncond: float = None
+ s_churn: float = None
+ s_tmax: float = None
+ s_tmin: float = None
+ s_noise: float = None
+ override_settings: dict[str, Any] = None
+ override_settings_restore_afterwards: bool = True
+ sampler_index: int = None
+ refiner_checkpoint: str = None
+ refiner_switch_at: float = None
+ token_merging_ratio = 0
+ token_merging_ratio_hr = 0
+ disable_extra_networks: bool = False
+
+ scripts_value: scripts.ScriptRunner = field(default=None, init=False)
+ script_args_value: list = field(default=None, init=False)
+ scripts_setup_complete: bool = field(default=False, init=False)
+
cached_uc = [None, None]
cached_c = [None, None]
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
- if sampler_index is not None:
+ comments: dict = None
+ sampler: sd_samplers_common.Sampler | None = field(default=None, init=False)
+ is_using_inpainting_conditioning: bool = field(default=False, init=False)
+ paste_to: tuple | None = field(default=None, init=False)
+
+ is_hr_pass: bool = field(default=False, init=False)
+
+ c: tuple = field(default=None, init=False)
+ uc: tuple = field(default=None, init=False)
+
+ rng: rng.ImageRNG | None = field(default=None, init=False)
+ step_multiplier: int = field(default=1, init=False)
+ color_corrections: list = field(default=None, init=False)
+
+ all_prompts: list = field(default=None, init=False)
+ all_negative_prompts: list = field(default=None, init=False)
+ all_seeds: list = field(default=None, init=False)
+ all_subseeds: list = field(default=None, init=False)
+ iteration: int = field(default=0, init=False)
+ main_prompt: str = field(default=None, init=False)
+ main_negative_prompt: str = field(default=None, init=False)
+
+ prompts: list = field(default=None, init=False)
+ negative_prompts: list = field(default=None, init=False)
+ seeds: list = field(default=None, init=False)
+ subseeds: list = field(default=None, init=False)
+ extra_network_data: dict = field(default=None, init=False)
+
+ user: str = field(default=None, init=False)
+
+ sd_model_name: str = field(default=None, init=False)
+ sd_model_hash: str = field(default=None, init=False)
+ sd_vae_name: str = field(default=None, init=False)
+ sd_vae_hash: str = field(default=None, init=False)
+
+ is_api: bool = field(default=False, init=False)
+
+ def __post_init__(self):
+ if self.sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
- self.outpath_samples: str = outpath_samples
- self.outpath_grids: str = outpath_grids
- self.prompt: str = prompt
- self.prompt_for_display: str = None
- self.negative_prompt: str = (negative_prompt or "")
- self.styles: list = styles or []
- self.seed: int = seed
- self.subseed: int = subseed
- self.subseed_strength: float = subseed_strength
- self.seed_resize_from_h: int = seed_resize_from_h
- self.seed_resize_from_w: int = seed_resize_from_w
- self.sampler_name: str = sampler_name
- self.batch_size: int = batch_size
- self.n_iter: int = n_iter
- self.steps: int = steps
- self.cfg_scale: float = cfg_scale
- self.width: int = width
- self.height: int = height
- self.restore_faces: bool = restore_faces
- self.tiling: bool = tiling
- self.do_not_save_samples: bool = do_not_save_samples
- self.do_not_save_grid: bool = do_not_save_grid
- self.extra_generation_params: dict = extra_generation_params or {}
- self.overlay_images = overlay_images
- self.eta = eta
- self.do_not_reload_embeddings = do_not_reload_embeddings
- self.paste_to = None
- self.color_corrections = None
- self.denoising_strength: float = denoising_strength
+ self.comments = {}
+
+ if self.styles is None:
+ self.styles = []
+
self.sampler_noise_scheduler_override = None
- self.ddim_discretize = ddim_discretize or opts.ddim_discretize
- self.s_min_uncond = s_min_uncond or opts.s_min_uncond
- self.s_churn = s_churn or opts.s_churn
- self.s_tmin = s_tmin or opts.s_tmin
- self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
- self.s_noise = s_noise or opts.s_noise
- self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
- self.override_settings_restore_afterwards = override_settings_restore_afterwards
- self.is_using_inpainting_conditioning = False
- self.disable_extra_networks = False
- self.token_merging_ratio = 0
- self.token_merging_ratio_hr = 0
-
- if not seed_enable_extras:
+ self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
+ self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
+ self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin
+ self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf')
+ self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise
+
+ self.extra_generation_params = self.extra_generation_params or {}
+ self.override_settings = self.override_settings or {}
+ self.script_args = self.script_args or {}
+
+ self.refiner_checkpoint_info = None
+
+ if not self.seed_enable_extras:
self.subseed = -1
self.subseed_strength = 0
self.seed_resize_from_h = 0
self.seed_resize_from_w = 0
- self.scripts = None
- self.script_args = script_args
- self.all_prompts = None
- self.all_negative_prompts = None
- self.all_seeds = None
- self.all_subseeds = None
- self.iteration = 0
- self.is_hr_pass = False
- self.sampler = None
-
- self.prompts = None
- self.negative_prompts = None
- self.extra_network_data = None
- self.seeds = None
- self.subseeds = None
-
- self.step_multiplier = 1
self.cached_uc = StableDiffusionProcessing.cached_uc
self.cached_c = StableDiffusionProcessing.cached_c
- self.uc = None
- self.c = None
-
- self.user = None
@property
def sd_model(self):
return shared.sd_model
+ @sd_model.setter
+ def sd_model(self, value):
+ pass
+
+ @property
+ def scripts(self):
+ return self.scripts_value
+
+ @scripts.setter
+ def scripts(self, value):
+ self.scripts_value = value
+
+ if self.scripts_value and self.script_args_value and not self.scripts_setup_complete:
+ self.setup_scripts()
+
+ @property
+ def script_args(self):
+ return self.script_args_value
+
+ @script_args.setter
+ def script_args(self, value):
+ self.script_args_value = value
+
+ if self.scripts_value and self.script_args_value and not self.scripts_setup_complete:
+ self.setup_scripts()
+
+ def setup_scripts(self):
+ self.scripts_setup_complete = True
+
+ self.scripts.setup_scrips(self, is_ui=not self.is_api)
+
+ def comment(self, text):
+ self.comments[text] = 1
+
def txt2img_image_conditioning(self, x, width=None, height=None):
self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}
@@ -202,7 +305,7 @@ class StableDiffusionProcessing:
midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)
- conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
+ conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
conditioning = torch.nn.functional.interpolate(
self.sd_model.depth_model(midas_in),
size=conditioning_image.shape[2:],
@@ -215,7 +318,7 @@ class StableDiffusionProcessing:
return conditioning
def edit_image_conditioning(self, source_image):
- conditioning_image = self.sd_model.encode_first_stage(source_image).mode()
+ conditioning_image = shared.sd_model.encode_first_stage(source_image).mode()
return conditioning_image
@@ -227,7 +330,7 @@ class StableDiffusionProcessing:
c_adm = torch.cat((c_adm, noise_level_emb), 1)
return c_adm
- def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
+ def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None, round_image_mask=True):
self.is_using_inpainting_conditioning = True
# Handle the different mask inputs
@@ -239,8 +342,10 @@ class StableDiffusionProcessing:
conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
conditioning_mask = torch.from_numpy(conditioning_mask[None, None])
- # Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
- conditioning_mask = torch.round(conditioning_mask)
+ if round_image_mask:
+ # Caller is requesting a discretized mask as input, so we round to either 1.0 or 0.0
+ conditioning_mask = torch.round(conditioning_mask)
+
else:
conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])
@@ -264,7 +369,7 @@ class StableDiffusionProcessing:
return image_conditioning
- def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
+ def img2img_image_conditioning(self, source_image, latent_image, image_mask=None, round_image_mask=True):
source_image = devices.cond_cast_float(source_image)
# HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
@@ -276,11 +381,17 @@ class StableDiffusionProcessing:
return self.edit_image_conditioning(source_image)
if self.sampler.conditioning_key in {'hybrid', 'concat'}:
- return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+ return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask, round_image_mask=round_image_mask)
if self.sampler.conditioning_key == "crossattn-adm":
return self.unclip_image_conditioning(source_image)
+ sd = self.sampler.model_wrap.inner_model.model.state_dict()
+ diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+ if diffusion_model_input is not None:
+ if diffusion_model_input.shape[1] == 9:
+ return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+
# Dummy zero conditioning if we're not using inpainting or depth model.
return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
@@ -294,7 +405,7 @@ class StableDiffusionProcessing:
self.sampler = None
self.c = None
self.uc = None
- if not opts.experimental_persistent_cond_cache:
+ if not opts.persistent_cond_cache:
StableDiffusionProcessing.cached_c = [None, None]
StableDiffusionProcessing.cached_uc = [None, None]
@@ -305,35 +416,35 @@ class StableDiffusionProcessing:
return self.token_merging_ratio or opts.token_merging_ratio
def setup_prompts(self):
- if type(self.prompt) == list:
+ if isinstance(self.prompt, list):
self.all_prompts = self.prompt
+ elif isinstance(self.negative_prompt, list):
+ self.all_prompts = [self.prompt] * len(self.negative_prompt)
else:
self.all_prompts = self.batch_size * self.n_iter * [self.prompt]
- if type(self.negative_prompt) == list:
+ if isinstance(self.negative_prompt, list):
self.all_negative_prompts = self.negative_prompt
else:
- self.all_negative_prompts = self.batch_size * self.n_iter * [self.negative_prompt]
+ self.all_negative_prompts = [self.negative_prompt] * len(self.all_prompts)
+
+ if len(self.all_prompts) != len(self.all_negative_prompts):
+ raise RuntimeError(f"Received a different number of prompts ({len(self.all_prompts)}) and negative prompts ({len(self.all_negative_prompts)})")
self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]
- def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data):
- """
- Returns the result of calling function(shared.sd_model, required_prompts, steps)
- using a cache to store the result if the same arguments have been used before.
-
- cache is an array containing two elements. The first element is a tuple
- representing the previously used arguments, or None if no arguments
- have been used before. The second element is where the previously
- computed result is stored.
+ self.main_prompt = self.all_prompts[0]
+ self.main_negative_prompt = self.all_negative_prompts[0]
- caches is a list with items described above.
- """
+ def cached_params(self, required_prompts, steps, extra_network_data, hires_steps=None, use_old_scheduling=False):
+ """Returns parameters that invalidate the cond cache if changed"""
- cached_params = (
+ return (
required_prompts,
steps,
+ hires_steps,
+ use_old_scheduling,
opts.CLIP_stop_at_last_layers,
shared.sd_model.sd_checkpoint_info,
extra_network_data,
@@ -341,8 +452,31 @@ class StableDiffusionProcessing:
opts.sdxl_crop_top,
self.width,
self.height,
+ opts.fp8_storage,
+ opts.cache_fp16_weight,
)
+ def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data, hires_steps=None):
+ """
+ Returns the result of calling function(shared.sd_model, required_prompts, steps)
+ using a cache to store the result if the same arguments have been used before.
+
+ cache is an array containing two elements. The first element is a tuple
+ representing the previously used arguments, or None if no arguments
+ have been used before. The second element is where the previously
+ computed result is stored.
+
+ caches is a list with items described above.
+ """
+
+ if shared.opts.use_old_scheduling:
+ old_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(required_prompts, steps, hires_steps, False)
+ new_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(required_prompts, steps, hires_steps, True)
+ if old_schedules != new_schedules:
+ self.extra_generation_params["Old prompt editing timelines"] = True
+
+ cached_params = self.cached_params(required_prompts, steps, extra_network_data, hires_steps, shared.opts.use_old_scheduling)
+
for cache in caches:
if cache[0] is not None and cached_params == cache[0]:
return cache[1]
@@ -350,7 +484,7 @@ class StableDiffusionProcessing:
cache = caches[0]
with devices.autocast():
- cache[1] = function(shared.sd_model, required_prompts, steps)
+ cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
cache[0] = cached_params
return cache[1]
@@ -360,13 +494,23 @@ class StableDiffusionProcessing:
negative_prompts = prompt_parser.SdConditioning(self.negative_prompts, width=self.width, height=self.height, is_negative_prompt=True)
sampler_config = sd_samplers.find_sampler_config(self.sampler_name)
- self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1
- self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data)
- self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, self.steps * self.step_multiplier, [self.cached_c], self.extra_network_data)
+ total_steps = sampler_config.total_steps(self.steps) if sampler_config else self.steps
+ self.step_multiplier = total_steps // self.steps
+ self.firstpass_steps = total_steps
+
+ self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data)
+ self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data)
+
+ def get_conds(self):
+ return self.c, self.uc
def parse_extra_network_prompts(self):
self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts)
+ def save_samples(self) -> bool:
+ """Returns whether generated images need to be written to disk"""
+ return opts.samples_save and not self.do_not_save_samples and (opts.save_incomplete_images or not state.interrupted and not state.skipped)
+
class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
@@ -377,7 +521,7 @@ class Processed:
self.subseed = subseed
self.subseed_strength = p.subseed_strength
self.info = info
- self.comments = comments
+ self.comments = "".join(f"{comment}\n" for comment in p.comments)
self.width = p.width
self.height = p.height
self.sampler_name = p.sampler_name
@@ -387,7 +531,10 @@ class Processed:
self.batch_size = p.batch_size
self.restore_faces = p.restore_faces
self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
- self.sd_model_hash = shared.sd_model.sd_model_hash
+ self.sd_model_name = p.sd_model_name
+ self.sd_model_hash = p.sd_model_hash
+ self.sd_vae_name = p.sd_vae_name
+ self.sd_vae_hash = p.sd_vae_hash
self.seed_resize_from_w = p.seed_resize_from_w
self.seed_resize_from_h = p.seed_resize_from_h
self.denoising_strength = getattr(p, 'denoising_strength', None)
@@ -407,10 +554,10 @@ class Processed:
self.s_noise = p.s_noise
self.s_min_uncond = p.s_min_uncond
self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
- self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
- self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
- self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
- self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
+ self.prompt = self.prompt if not isinstance(self.prompt, list) else self.prompt[0]
+ self.negative_prompt = self.negative_prompt if not isinstance(self.negative_prompt, list) else self.negative_prompt[0]
+ self.seed = int(self.seed if not isinstance(self.seed, list) else self.seed[0]) if self.seed is not None else -1
+ self.subseed = int(self.subseed if not isinstance(self.subseed, list) else self.subseed[0]) if self.subseed is not None else -1
self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning
self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
@@ -418,6 +565,7 @@ class Processed:
self.all_seeds = all_seeds or p.all_seeds or [self.seed]
self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
self.infotexts = infotexts or [info]
+ self.version = program_version()
def js(self):
obj = {
@@ -438,7 +586,10 @@ class Processed:
"batch_size": self.batch_size,
"restore_faces": self.restore_faces,
"face_restoration_model": self.face_restoration_model,
+ "sd_model_name": self.sd_model_name,
"sd_model_hash": self.sd_model_hash,
+ "sd_vae_name": self.sd_vae_name,
+ "sd_vae_hash": self.sd_vae_hash,
"seed_resize_from_w": self.seed_resize_from_w,
"seed_resize_from_h": self.seed_resize_from_h,
"denoising_strength": self.denoising_strength,
@@ -449,6 +600,7 @@ class Processed:
"job_timestamp": self.job_timestamp,
"clip_skip": self.clip_skip,
"is_using_inpainting_conditioning": self.is_using_inpainting_conditioning,
+ "version": self.version,
}
return json.dumps(obj)
@@ -460,86 +612,17 @@ class Processed:
return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio
-# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
-def slerp(val, low, high):
- low_norm = low/torch.norm(low, dim=1, keepdim=True)
- high_norm = high/torch.norm(high, dim=1, keepdim=True)
- dot = (low_norm*high_norm).sum(1)
-
- if dot.mean() > 0.9995:
- return low * val + high * (1 - val)
-
- omega = torch.acos(dot)
- so = torch.sin(omega)
- res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
- return res
-
-
def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
- eta_noise_seed_delta = opts.eta_noise_seed_delta or 0
- xs = []
-
- # if we have multiple seeds, this means we are working with batch size>1; this then
- # enables the generation of additional tensors with noise that the sampler will use during its processing.
- # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
- # produce the same images as with two batches [100], [101].
- if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0):
- sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
- else:
- sampler_noises = None
-
- for i, seed in enumerate(seeds):
- noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)
-
- subnoise = None
- if subseeds is not None:
- subseed = 0 if i >= len(subseeds) else subseeds[i]
-
- subnoise = devices.randn(subseed, noise_shape)
-
- # randn results depend on device; gpu and cpu get different results for same seed;
- # the way I see it, it's better to do this on CPU, so that everyone gets same result;
- # but the original script had it like this, so I do not dare change it for now because
- # it will break everyone's seeds.
- noise = devices.randn(seed, noise_shape)
-
- if subnoise is not None:
- noise = slerp(subseed_strength, noise, subnoise)
-
- if noise_shape != shape:
- x = devices.randn(seed, shape)
- dx = (shape[2] - noise_shape[2]) // 2
- dy = (shape[1] - noise_shape[1]) // 2
- w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
- h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
- tx = 0 if dx < 0 else dx
- ty = 0 if dy < 0 else dy
- dx = max(-dx, 0)
- dy = max(-dy, 0)
-
- x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
- noise = x
+ g = rng.ImageRNG(shape, seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w)
+ return g.next()
- if sampler_noises is not None:
- cnt = p.sampler.number_of_needed_noises(p)
- if eta_noise_seed_delta > 0:
- torch.manual_seed(seed + eta_noise_seed_delta)
-
- for j in range(cnt):
- sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))
-
- xs.append(noise)
-
- if sampler_noises is not None:
- p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]
-
- x = torch.stack(xs).to(shared.device)
- return x
+class DecodedSamples(list):
+ already_decoded = True
def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
- samples = []
+ samples = DecodedSamples()
for i in range(batch.shape[0]):
sample = decode_first_stage(model, batch[i:i + 1])[0]
@@ -554,7 +637,7 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
errors.print_error_explanation(
"A tensor with all NaNs was produced in VAE.\n"
"Web UI will now convert VAE into 32-bit float and retry.\n"
- "To disable this behavior, disable the 'Automaticlly revert VAE to 32-bit floats' setting.\n"
+ "To disable this behavior, disable the 'Automatically revert VAE to 32-bit floats' setting.\n"
"To always start with 32-bit VAE, use --no-half-vae commandline flag."
)
@@ -572,14 +655,16 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
return samples
-def decode_first_stage(model, x):
- x = model.decode_first_stage(x.to(devices.dtype_vae))
-
- return x
-
-
def get_fixed_seed(seed):
- if seed is None or seed == '' or seed == -1:
+ if seed == '' or seed is None:
+ seed = -1
+ elif isinstance(seed, str):
+ try:
+ seed = int(seed)
+ except Exception:
+ seed = -1
+
+ if seed == -1:
return int(random.randrange(4294967294))
return seed
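+# e.g. get_fixed_seed(None), get_fixed_seed("") and get_fixed_seed(-1) all pick a random seed,
+# get_fixed_seed("42") returns 42, and an unparseable string also falls back to a random seed.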
@@ -622,10 +707,14 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
- "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
+ "Face restoration": opts.face_restoration_model if p.restore_faces else None,
"Size": f"{p.width}x{p.height}",
- "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
- "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
+ "Model hash": p.sd_model_hash if opts.add_model_hash_to_info else None,
+ "Model": p.sd_model_name if opts.add_model_name_to_info else None,
+ "FP8 weight": opts.fp8_storage if devices.fp8 else None,
+ "Cache FP16 weight for LoRA": opts.cache_fp16_weight if devices.fp8 else None,
+ "VAE hash": p.sd_vae_hash if opts.add_vae_hash_to_info else None,
+ "VAE": p.sd_vae_name if opts.add_vae_name_to_info else None,
"Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
@@ -638,15 +727,16 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Init image hash": getattr(p, 'init_img_hash', None),
"RNG": opts.randn_source if opts.randn_source != "GPU" else None,
"NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
+ "Tiling": "True" if p.tiling else None,
**p.extra_generation_params,
"Version": program_version() if opts.add_version_to_infotext else None,
"User": p.user if opts.add_user_name_to_info else None,
}
- generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
+ generation_params_text = ", ".join([k if k == v else f'{k}: {infotext.quote(v)}' for k, v in generation_params.items() if v is not None])
- prompt_text = p.prompt if use_main_prompt else all_prompts[index]
- negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""
+ prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]
+ negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else ""
return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
@@ -655,16 +745,17 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None:
p.scripts.before_process(p)
- stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}
+ stored_opts = {k: opts.data[k] if k in opts.data else opts.get_default(k) for k in p.override_settings.keys() if k in opts.data}
try:
# if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint
+ # and if after running refiner, the refiner model is not unloaded - webui swaps back to the main model here; if a model override is present it will be reloaded afterwards
if sd_models.checkpoint_aliases.get(p.override_settings.get('sd_model_checkpoint')) is None:
p.override_settings.pop('sd_model_checkpoint', None)
sd_models.reload_model_weights()
for k, v in p.override_settings.items():
- setattr(opts, k, v)
+ opts.set(k, v, is_api=True, run_callbacks=False)
if k == 'sd_model_checkpoint':
sd_models.reload_model_weights()
@@ -693,7 +784,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
def process_images_inner(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
- if type(p.prompt) == list:
+ if isinstance(p.prompt, list):
assert(len(p.prompt) > 0)
else:
assert p.prompt is not None
@@ -703,19 +794,33 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
seed = get_fixed_seed(p.seed)
subseed = get_fixed_seed(p.subseed)
+ if p.restore_faces is None:
+ p.restore_faces = opts.face_restoration
+
+ if p.tiling is None:
+ p.tiling = opts.tiling
+
+ if p.refiner_checkpoint not in (None, "", "None", "none"):
+ p.refiner_checkpoint_info = sd_models.get_closet_checkpoint_match(p.refiner_checkpoint)
+ if p.refiner_checkpoint_info is None:
+ raise Exception(f'Could not find checkpoint with name {p.refiner_checkpoint}')
+
+ p.sd_model_name = shared.sd_model.sd_checkpoint_info.name_for_extra
+ p.sd_model_hash = shared.sd_model.sd_model_hash
+ p.sd_vae_name = sd_vae.get_loaded_vae_name()
+ p.sd_vae_hash = sd_vae.get_loaded_vae_hash()
+
modules.sd_hijack.model_hijack.apply_circular(p.tiling)
modules.sd_hijack.model_hijack.clear_comments()
- comments = {}
-
p.setup_prompts()
- if type(seed) == list:
+ if isinstance(seed, list):
p.all_seeds = seed
else:
p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]
- if type(subseed) == list:
+ if isinstance(subseed, list):
p.all_subseeds = subseed
else:
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
@@ -728,7 +833,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
infotexts = []
output_images = []
-
with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
@@ -751,11 +855,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if state.interrupted:
break
+ sd_models.reload_model_weights() # model can be changed for example by refiner
+
p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
+ p.rng = rng.ImageRNG((opt_C, p.height // opt_f, p.width // opt_f), p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w)
+
if p.scripts is not None:
p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
@@ -777,23 +885,62 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
# strength, which is saved as "Model Strength: 1.0" in the infotext
if n == 0:
with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
- processed = Processed(p, [], p.seed, "")
+ processed = Processed(p, [])
file.write(processed.infotext(p, 0))
p.setup_conds()
for comment in model_hijack.comments:
- comments[comment] = 1
+ p.comment(comment)
p.extra_generation_params.update(model_hijack.extra_generation_params)
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
+ def rescale_zero_terminal_snr_abar(alphas_cumprod):
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
+
+ # Store old values.
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
+
+ # Shift so the last timestep is zero.
+ alphas_bar_sqrt -= (alphas_bar_sqrt_T)
+
+ # Scale so the first timestep is back to the old value.
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
+
+ # Convert alphas_bar_sqrt to betas
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
+ alphas_bar[-1] = 4.8973451890853435e-08
+ return alphas_bar
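+ # With the terminal value pinned near zero, alpha_bar at the last timestep is ~0,
+ # i.e. the schedule ends at (approximately) zero SNR, which is what the
+ # "Zero Terminal SNR" noise-schedule option handled just below selects.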
+
+ if hasattr(p.sd_model, 'alphas_cumprod') and hasattr(p.sd_model, 'alphas_cumprod_original'):
+ p.sd_model.alphas_cumprod = p.sd_model.alphas_cumprod_original.to(shared.device)
+
+ if opts.use_downcasted_alpha_bar:
+ p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar
+ p.sd_model.alphas_cumprod = p.sd_model.alphas_cumprod.half().to(shared.device)
+ if opts.sd_noise_schedule == "Zero Terminal SNR":
+ p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule
+ p.sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(p.sd_model.alphas_cumprod).to(shared.device)
+
with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
- x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
+ if p.scripts is not None:
+ ps = scripts.PostSampleArgs(samples_ddim)
+ p.scripts.post_sample(p, ps)
+ samples_ddim = ps.samples
+
+ if getattr(samples_ddim, 'already_decoded', False):
+ x_samples_ddim = samples_ddim
+ else:
+ if opts.sd_vae_decode_method != 'Full':
+ p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
+ x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
+
x_samples_ddim = torch.stack(x_samples_ddim).float()
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
@@ -804,6 +951,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
devices.torch_gc()
+ state.nextjob()
+
if p.scripts is not None:
p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)
@@ -817,6 +966,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
def infotext(index=0, use_main_prompt=False):
return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)
+ save_samples = p.save_samples()
+
for i, x_sample in enumerate(x_samples_ddim):
p.batch_index = i
@@ -824,7 +975,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
x_sample = x_sample.astype(np.uint8)
if p.restore_faces:
- if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
+ if save_samples and opts.save_images_before_face_restoration:
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")
devices.torch_gc()
@@ -839,15 +990,32 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
p.scripts.postprocess_image(p, pp)
image = pp.image
+ mask_for_overlay = getattr(p, "mask_for_overlay", None)
+ overlay_image = p.overlay_images[i] if getattr(p, "overlay_images", None) is not None and i < len(p.overlay_images) else None
+
+ if p.scripts is not None:
+ ppmo = scripts.PostProcessMaskOverlayArgs(i, mask_for_overlay, overlay_image)
+ p.scripts.postprocess_maskoverlay(p, ppmo)
+ mask_for_overlay, overlay_image = ppmo.mask_for_overlay, ppmo.overlay_image
+
if p.color_corrections is not None and i < len(p.color_corrections):
- if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
- image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
+ if save_samples and opts.save_images_before_color_correction:
+ image_without_cc = apply_overlay(image, p.paste_to, overlay_image)
images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
image = apply_color_correction(p.color_corrections[i], image)
- image = apply_overlay(image, p.paste_to, i, p.overlay_images)
+ # If the intention is to show the output from the model
+ # that is being composited over the original image,
+ # we need to keep the original image around
+ # and use it in the composite step.
+ original_denoised_image = image.copy()
+
+ if p.paste_to is not None:
+ original_denoised_image = uncrop(original_denoised_image, (overlay_image.width, overlay_image.height), p.paste_to)
+
+ image = apply_overlay(image, p.paste_to, overlay_image)
- if opts.samples_save and not p.do_not_save_samples:
+ if save_samples:
images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)
text = infotext(i)
@@ -856,27 +1024,27 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
image.info["parameters"] = text
output_images.append(image)
- if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]):
- image_mask = p.mask_for_overlay.convert('RGB')
- image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
-
- if opts.save_mask:
- images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
-
- if opts.save_mask_composite:
- images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
-
- if opts.return_mask:
- output_images.append(image_mask)
-
- if opts.return_mask_composite:
- output_images.append(image_mask_composite)
+ if mask_for_overlay is not None:
+ if opts.return_mask or opts.save_mask:
+ image_mask = mask_for_overlay.convert('RGB')
+ if save_samples and opts.save_mask:
+ images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
+ if opts.return_mask:
+ output_images.append(image_mask)
+
+ if opts.return_mask_composite or opts.save_mask_composite:
+ image_mask_composite = Image.composite(original_denoised_image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
+ if save_samples and opts.save_mask_composite:
+ images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
+ if opts.return_mask_composite:
+ output_images.append(image_mask_composite)
del x_samples_ddim
devices.torch_gc()
- state.nextjob()
+ if not infotexts:
+ infotexts.append(Processed(p, []).infotext(p, 0))
p.color_corrections = None
@@ -892,7 +1060,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
grid.info["parameters"] = text
output_images.insert(0, grid)
index_of_first_image = 1
-
if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True)
@@ -906,7 +1073,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
images_list=output_images,
seed=p.all_seeds[0],
info=infotexts[0],
- comments="".join(f"{comment}\n" for comment in comments),
subseed=p.all_subseeds[0],
index_of_first_image=index_of_first_image,
infotexts=infotexts,
@@ -930,49 +1096,102 @@ def old_hires_fix_first_pass_dimensions(width, height):
return width, height
+@dataclass(repr=False)
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
- sampler = None
+ enable_hr: bool = False
+ denoising_strength: float = 0.75
+ firstphase_width: int = 0
+ firstphase_height: int = 0
+ hr_scale: float = 2.0
+ hr_upscaler: str = None
+ hr_second_pass_steps: int = 0
+ hr_resize_x: int = 0
+ hr_resize_y: int = 0
+ hr_checkpoint_name: str = None
+ hr_sampler_name: str = None
+ hr_prompt: str = ''
+ hr_negative_prompt: str = ''
+ force_task_id: str = None
+
cached_hr_uc = [None, None]
cached_hr_c = [None, None]
- def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs):
- super().__init__(**kwargs)
- self.enable_hr = enable_hr
- self.denoising_strength = denoising_strength
- self.hr_scale = hr_scale
- self.hr_upscaler = hr_upscaler
- self.hr_second_pass_steps = hr_second_pass_steps
- self.hr_resize_x = hr_resize_x
- self.hr_resize_y = hr_resize_y
- self.hr_upscale_to_x = hr_resize_x
- self.hr_upscale_to_y = hr_resize_y
- self.hr_sampler_name = hr_sampler_name
- self.hr_prompt = hr_prompt
- self.hr_negative_prompt = hr_negative_prompt
- self.all_hr_prompts = None
- self.all_hr_negative_prompts = None
-
- if firstphase_width != 0 or firstphase_height != 0:
+ hr_checkpoint_info: dict = field(default=None, init=False)
+ hr_upscale_to_x: int = field(default=0, init=False)
+ hr_upscale_to_y: int = field(default=0, init=False)
+ truncate_x: int = field(default=0, init=False)
+ truncate_y: int = field(default=0, init=False)
+ applied_old_hires_behavior_to: tuple = field(default=None, init=False)
+ latent_scale_mode: dict = field(default=None, init=False)
+ hr_c: tuple | None = field(default=None, init=False)
+ hr_uc: tuple | None = field(default=None, init=False)
+ all_hr_prompts: list = field(default=None, init=False)
+ all_hr_negative_prompts: list = field(default=None, init=False)
+ hr_prompts: list = field(default=None, init=False)
+ hr_negative_prompts: list = field(default=None, init=False)
+ hr_extra_network_data: list = field(default=None, init=False)
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ if self.firstphase_width != 0 or self.firstphase_height != 0:
self.hr_upscale_to_x = self.width
self.hr_upscale_to_y = self.height
- self.width = firstphase_width
- self.height = firstphase_height
-
- self.truncate_x = 0
- self.truncate_y = 0
- self.applied_old_hires_behavior_to = None
-
- self.hr_prompts = None
- self.hr_negative_prompts = None
- self.hr_extra_network_data = None
+ self.width = self.firstphase_width
+ self.height = self.firstphase_height
self.cached_hr_uc = StableDiffusionProcessingTxt2Img.cached_hr_uc
self.cached_hr_c = StableDiffusionProcessingTxt2Img.cached_hr_c
- self.hr_c = None
- self.hr_uc = None
+
+ def calculate_target_resolution(self):
+ if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
+ self.hr_resize_x = self.width
+ self.hr_resize_y = self.height
+ self.hr_upscale_to_x = self.width
+ self.hr_upscale_to_y = self.height
+
+ self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
+ self.applied_old_hires_behavior_to = (self.width, self.height)
+
+ if self.hr_resize_x == 0 and self.hr_resize_y == 0:
+ self.extra_generation_params["Hires upscale"] = self.hr_scale
+ self.hr_upscale_to_x = int(self.width * self.hr_scale)
+ self.hr_upscale_to_y = int(self.height * self.hr_scale)
+ else:
+ self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"
+
+ if self.hr_resize_y == 0:
+ self.hr_upscale_to_x = self.hr_resize_x
+ self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+ elif self.hr_resize_x == 0:
+ self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+ self.hr_upscale_to_y = self.hr_resize_y
+ else:
+ target_w = self.hr_resize_x
+ target_h = self.hr_resize_y
+ src_ratio = self.width / self.height
+ dst_ratio = self.hr_resize_x / self.hr_resize_y
+
+ if src_ratio < dst_ratio:
+ self.hr_upscale_to_x = self.hr_resize_x
+ self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+ else:
+ self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+ self.hr_upscale_to_y = self.hr_resize_y
+
+ self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
+ self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
+ if self.hr_checkpoint_name and self.hr_checkpoint_name != 'Use same checkpoint':
+ self.hr_checkpoint_info = sd_models.get_closet_checkpoint_match(self.hr_checkpoint_name)
+
+ if self.hr_checkpoint_info is None:
+ raise Exception(f'Could not find checkpoint with name {self.hr_checkpoint_name}')
+
+ self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title
+
if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
@@ -982,51 +1201,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt
- if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
- self.hr_resize_x = self.width
- self.hr_resize_y = self.height
- self.hr_upscale_to_x = self.width
- self.hr_upscale_to_y = self.height
+ self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
+ if self.enable_hr and self.latent_scale_mode is None:
+ if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
+ raise Exception(f"could not find upscaler named {self.hr_upscaler}")
- self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
- self.applied_old_hires_behavior_to = (self.width, self.height)
-
- if self.hr_resize_x == 0 and self.hr_resize_y == 0:
- self.extra_generation_params["Hires upscale"] = self.hr_scale
- self.hr_upscale_to_x = int(self.width * self.hr_scale)
- self.hr_upscale_to_y = int(self.height * self.hr_scale)
- else:
- self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"
-
- if self.hr_resize_y == 0:
- self.hr_upscale_to_x = self.hr_resize_x
- self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
- elif self.hr_resize_x == 0:
- self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
- self.hr_upscale_to_y = self.hr_resize_y
- else:
- target_w = self.hr_resize_x
- target_h = self.hr_resize_y
- src_ratio = self.width / self.height
- dst_ratio = self.hr_resize_x / self.hr_resize_y
-
- if src_ratio < dst_ratio:
- self.hr_upscale_to_x = self.hr_resize_x
- self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
- else:
- self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
- self.hr_upscale_to_y = self.hr_resize_y
-
- self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
- self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
-
- # special case: the user has chosen to do nothing
- if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height:
- self.enable_hr = False
- self.denoising_strength = None
- self.extra_generation_params.pop("Hires upscale", None)
- self.extra_generation_params.pop("Hires resize", None)
- return
+ self.calculate_target_resolution()
if not state.processing_has_refined_job_count:
if state.job_count == -1:
@@ -1045,26 +1225,36 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
- latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
- if self.enable_hr and latent_scale_mode is None:
- if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
- raise Exception(f"could not find upscaler named {self.hr_upscaler}")
-
- x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+ x = self.rng.next()
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
+ del x
if not self.enable_hr:
return samples
+ devices.torch_gc()
- self.is_hr_pass = True
+ if self.latent_scale_mode is None:
+ decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32)
+ else:
+ decoded_samples = None
+
+ with sd_models.SkipWritingToConfig():
+ sd_models.reload_model_weights(info=self.hr_checkpoint_info)
+
+ return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
+ def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
+ if shared.state.interrupted:
+ return samples
+
+ self.is_hr_pass = True
target_width = self.hr_upscale_to_x
target_height = self.hr_upscale_to_y
def save_intermediate(image, index):
"""saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""
- if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
+ if not self.save_samples() or not opts.save_images_before_highres_fix:
return
if not isinstance(image, Image.Image):
@@ -1073,11 +1263,15 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")
- if latent_scale_mode is not None:
+ img2img_sampler_name = self.hr_sampler_name or self.sampler_name
+
+ self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
+
+ if self.latent_scale_mode is not None:
for i in range(samples.shape[0]):
save_intermediate(samples, i)
- samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
+ samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=self.latent_scale_mode["mode"], antialias=self.latent_scale_mode["antialias"])
# Avoid making the inpainting conditioning unless necessary as
# this does need some extra compute to decode / encode the image again.
@@ -1086,7 +1280,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
else:
image_conditioning = self.txt2img_image_conditioning(samples)
else:
- decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
batch_images = []
@@ -1103,28 +1296,22 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
batch_images.append(image)
decoded_samples = torch.from_numpy(np.array(batch_images))
- decoded_samples = decoded_samples.to(shared.device)
- decoded_samples = 2. * decoded_samples - 1.
+ decoded_samples = decoded_samples.to(shared.device, dtype=devices.dtype_vae)
- samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+ if opts.sd_vae_encode_method != 'Full':
+ self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+ samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method))
image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)
shared.state.nextjob()
- img2img_sampler_name = self.hr_sampler_name or self.sampler_name
-
- if self.sampler_name in ['PLMS', 'UniPC']: # PLMS/UniPC do not support img2img so we just silently switch to DDIM
- img2img_sampler_name = 'DDIM'
-
- self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
-
samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
- noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)
+ self.rng = rng.ImageRNG(samples.shape[1:], self.seeds, subseeds=self.subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w)
+ noise = self.rng.next()
# GC now before running the next img2img to prevent running out of memory
- x = None
devices.torch_gc()
if not self.disable_extra_networks:
@@ -1143,15 +1330,19 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())
- self.is_hr_pass = False
+ self.sampler = None
+ devices.torch_gc()
- return samples
+ decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)
+
+ self.is_hr_pass = False
+ return decoded_samples
def close(self):
super().close()
self.hr_c = None
self.hr_uc = None
- if not opts.experimental_persistent_cond_cache:
+ if not opts.persistent_cond_cache:
StableDiffusionProcessingTxt2Img.cached_hr_uc = [None, None]
StableDiffusionProcessingTxt2Img.cached_hr_c = [None, None]
@@ -1167,12 +1358,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if self.hr_negative_prompt == '':
self.hr_negative_prompt = self.negative_prompt
- if type(self.hr_prompt) == list:
+ if isinstance(self.hr_prompt, list):
self.all_hr_prompts = self.hr_prompt
else:
self.all_hr_prompts = self.batch_size * self.n_iter * [self.hr_prompt]
- if type(self.hr_negative_prompt) == list:
+ if isinstance(self.hr_negative_prompt, list):
self.all_hr_negative_prompts = self.hr_negative_prompt
else:
self.all_hr_negative_prompts = self.batch_size * self.n_iter * [self.hr_negative_prompt]
@@ -1184,20 +1375,33 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if self.hr_c is not None:
return
- self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data)
- self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)
+ hr_prompts = prompt_parser.SdConditioning(self.hr_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y)
+ hr_negative_prompts = prompt_parser.SdConditioning(self.hr_negative_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y, is_negative_prompt=True)
+
+ sampler_config = sd_samplers.find_sampler_config(self.hr_sampler_name or self.sampler_name)
+ steps = self.hr_second_pass_steps or self.steps
+ total_steps = sampler_config.total_steps(steps) if sampler_config else steps
+
+ self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.firstpass_steps, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data, total_steps)
+ self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.firstpass_steps, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data, total_steps)
def setup_conds(self):
+ if self.is_hr_pass:
+ # if we are in the hr pass right now, the call is being made from the refiner, and we don't need to set up firstpass conds or switch the model
+ self.hr_c = None
+ self.calculate_hr_conds()
+ return
+
super().setup_conds()
self.hr_uc = None
self.hr_c = None
- if self.enable_hr:
+ if self.enable_hr and self.hr_checkpoint_info is None:
if shared.opts.hires_fix_use_firstpass_conds:
self.calculate_hr_conds()
- elif lowvram.is_enabled(shared.sd_model): # if in lowvram mode, we need to calculate conds right away, before the cond NN is unloaded
+ elif lowvram.is_enabled(shared.sd_model) and shared.sd_model.sd_checkpoint_info == sd_models.select_checkpoint(): # if in lowvram mode, we need to calculate conds right away, before the cond NN is unloaded
with devices.autocast():
extra_networks.activate(self, self.hr_extra_network_data)
@@ -1206,6 +1410,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
with devices.autocast():
extra_networks.activate(self, self.extra_network_data)
+ def get_conds(self):
+ if self.is_hr_pass:
+ return self.hr_c, self.hr_uc
+
+ return super().get_conds()
+
def parse_extra_network_prompts(self):
res = super().parse_extra_network_prompts()
@@ -1218,55 +1428,77 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
return res
+@dataclass(repr=False)
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
- sampler = None
-
- def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = None, mask_blur_x: int = 4, mask_blur_y: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
- super().__init__(**kwargs)
-
- self.init_images = init_images
- self.resize_mode: int = resize_mode
- self.denoising_strength: float = denoising_strength
- self.image_cfg_scale: float = image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
- self.init_latent = None
- self.image_mask = mask
- self.latent_mask = None
- self.mask_for_overlay = None
- if mask_blur is not None:
- mask_blur_x = mask_blur
- mask_blur_y = mask_blur
- self.mask_blur_x = mask_blur_x
- self.mask_blur_y = mask_blur_y
- self.inpainting_fill = inpainting_fill
- self.inpaint_full_res = inpaint_full_res
- self.inpaint_full_res_padding = inpaint_full_res_padding
- self.inpainting_mask_invert = inpainting_mask_invert
- self.initial_noise_multiplier = opts.initial_noise_multiplier if initial_noise_multiplier is None else initial_noise_multiplier
+ init_images: list = None
+ resize_mode: int = 0
+ denoising_strength: float = 0.75
+ image_cfg_scale: float = None
+ mask: Any = None
+ mask_blur_x: int = 4
+ mask_blur_y: int = 4
+ mask_blur: int = None
+ mask_round: bool = True
+ inpainting_fill: int = 0
+ inpaint_full_res: bool = True
+ inpaint_full_res_padding: int = 0
+ inpainting_mask_invert: int = 0
+ initial_noise_multiplier: float = None
+ latent_mask: Image = None
+ force_task_id: str = None
+
+ image_mask: Any = field(default=None, init=False)
+
+ nmask: torch.Tensor = field(default=None, init=False)
+ image_conditioning: torch.Tensor = field(default=None, init=False)
+ init_img_hash: str = field(default=None, init=False)
+ mask_for_overlay: Image = field(default=None, init=False)
+ init_latent: torch.Tensor = field(default=None, init=False)
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ self.image_mask = self.mask
self.mask = None
- self.nmask = None
- self.image_conditioning = None
+ self.initial_noise_multiplier = opts.initial_noise_multiplier if self.initial_noise_multiplier is None else self.initial_noise_multiplier
+
+ @property
+ def mask_blur(self):
+ if self.mask_blur_x == self.mask_blur_y:
+ return self.mask_blur_x
+ return None
+
+ @mask_blur.setter
+ def mask_blur(self, value):
+ if isinstance(value, int):
+ self.mask_blur_x = value
+ self.mask_blur_y = value
def init(self, all_prompts, all_seeds, all_subseeds):
+ self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
+
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
crop_region = None
image_mask = self.image_mask
if image_mask is not None:
- image_mask = image_mask.convert('L')
+ # image_mask is passed in as RGBA by Gradio to support alpha masks,
+ # but we still want to support binary masks.
+ image_mask = create_binary_mask(image_mask, round=self.mask_round)
if self.inpainting_mask_invert:
image_mask = ImageOps.invert(image_mask)
if self.mask_blur_x > 0:
np_mask = np.array(image_mask)
- kernel_size = 2 * int(4 * self.mask_blur_x + 0.5) + 1
+ kernel_size = 2 * int(2.5 * self.mask_blur_x + 0.5) + 1
np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), self.mask_blur_x)
image_mask = Image.fromarray(np_mask)
if self.mask_blur_y > 0:
np_mask = np.array(image_mask)
- kernel_size = 2 * int(4 * self.mask_blur_y + 0.5) + 1
+ kernel_size = 2 * int(2.5 * self.mask_blur_y + 0.5) + 1
np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y)
image_mask = Image.fromarray(np_mask)
@@ -1299,7 +1531,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
# Save init image
if opts.save_init_img:
self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest()
- images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False)
+ images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False, existing_info=img.info)
image = images.flatten(img, opts.img2img_background_color)
@@ -1344,10 +1576,13 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
image = torch.from_numpy(batch_images)
- image = 2. * image - 1.
image = image.to(shared.device, dtype=devices.dtype_vae)
- self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
+ if opts.sd_vae_encode_method != 'Full':
+ self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+
+ self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
+ devices.torch_gc()
if self.resize_mode == 3:
self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
@@ -1357,7 +1592,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
latmask = latmask[0]
- latmask = np.around(latmask)
+ if self.mask_round:
+ latmask = np.around(latmask)
latmask = np.tile(latmask[None], (4, 1, 1))
self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
@@ -1369,10 +1605,10 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
elif self.inpainting_fill == 3:
self.init_latent = self.init_latent * self.mask
- self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)
+ self.image_conditioning = self.img2img_image_conditioning(image * 2 - 1, self.init_latent, image_mask, self.mask_round)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
- x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+ x = self.rng.next()
if self.initial_noise_multiplier != 1.0:
self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
@@ -1381,7 +1617,14 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
if self.mask is not None:
- samples = samples * self.nmask + self.init_latent * self.mask
+ blended_samples = samples * self.nmask + self.init_latent * self.mask
+
+ if self.scripts is not None:
+ mba = scripts.MaskBlendArgs(samples, self.nmask, self.init_latent, self.mask, blended_samples)
+ self.scripts.on_mask_blend(self, mba)
+ blended_samples = mba.blended_latent
+
+ samples = blended_samples
del x
devices.torch_gc()
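
As a quick sanity check on the `calculate_target_resolution()` hunk earlier in this file, here is a worked example of the upscale/truncate arithmetic; the concrete sizes are illustrative only, and `opt_f` is the usual 8x latent downscale factor.

```python
# Illustrative numbers only; mirrors the branches of calculate_target_resolution().
width, height, hr_scale = 512, 512, 2.0
opt_f = 8  # latent downscale factor

# No explicit resize: the first-pass size is simply multiplied by hr_scale.
hr_upscale_to_x = int(width * hr_scale)    # 1024
hr_upscale_to_y = int(height * hr_scale)   # 1024

# Explicit 1024x768 resize from a 512x512 first pass: the wider aspect wins,
# the image is upscaled to 1024x1024 and the excess height is cut in latent space.
hr_resize_x, hr_resize_y = 1024, 768
src_ratio, dst_ratio = width / height, hr_resize_x / hr_resize_y
if src_ratio < dst_ratio:
    hr_upscale_to_x = hr_resize_x                      # 1024
    hr_upscale_to_y = hr_resize_x * height // width    # 1024
truncate_y = (hr_upscale_to_y - hr_resize_y) // opt_f  # (1024 - 768) // 8 = 32 latent rows
```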
diff --git a/modules/processing_scripts/refiner.py b/modules/processing_scripts/refiner.py
new file mode 100644
index 00000000..e9941413
--- /dev/null
+++ b/modules/processing_scripts/refiner.py
@@ -0,0 +1,50 @@
+import gradio as gr
+
+from modules import scripts, sd_models
+from modules.infotext import PasteField
+from modules.ui_common import create_refresh_button
+from modules.ui_components import InputAccordion
+
+
+class ScriptRefiner(scripts.ScriptBuiltinUI):
+ section = "accordions"
+ create_group = False
+
+ def __init__(self):
+ pass
+
+ def title(self):
+ return "Refiner"
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def ui(self, is_img2img):
+ with InputAccordion(False, label="Refiner", elem_id=self.elem_id("enable")) as enable_refiner:
+ with gr.Row():
+ refiner_checkpoint = gr.Dropdown(label='Checkpoint', elem_id=self.elem_id("checkpoint"), choices=sd_models.checkpoint_tiles(), value='', tooltip="switch to another model in the middle of generation")
+ create_refresh_button(refiner_checkpoint, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, self.elem_id("checkpoint_refresh"))
+
+ refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation")
+
+ def lookup_checkpoint(title):
+ info = sd_models.get_closet_checkpoint_match(title)
+ return None if info is None else info.title
+
+ self.infotext_fields = [
+ PasteField(enable_refiner, lambda d: 'Refiner' in d),
+ PasteField(refiner_checkpoint, lambda d: lookup_checkpoint(d.get('Refiner')), api="refiner_checkpoint"),
+ PasteField(refiner_switch_at, 'Refiner switch at', api="refiner_switch_at"),
+ ]
+
+ return enable_refiner, refiner_checkpoint, refiner_switch_at
+
+ def setup(self, p, enable_refiner, refiner_checkpoint, refiner_switch_at):
+ # the actual implementation is in sd_samplers_common.py, apply_refiner
+
+ if not enable_refiner or refiner_checkpoint in (None, "", "None"):
+ p.refiner_checkpoint = None
+ p.refiner_switch_at = None
+ else:
+ p.refiner_checkpoint = refiner_checkpoint
+ p.refiner_switch_at = refiner_switch_at
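
For orientation, a hedged sketch of how these fields might be driven by a caller: the `refiner_checkpoint` and `refiner_switch_at` names come from the `PasteField(..., api=...)` entries above, but the endpoint URL, checkpoint title and other payload fields below are assumptions, not part of this diff.

```python
import requests

# Hypothetical API request exercising the refiner fields (names from the api= entries above).
payload = {
    "prompt": "a photo of a castle",            # assumed
    "steps": 30,                                 # assumed
    "refiner_checkpoint": "sd_xl_refiner_1.0",   # assumed checkpoint title
    "refiner_switch_at": 0.8,                    # switch to the refiner at 80% of the steps
}
requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)  # assumed endpoint
```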
diff --git a/modules/processing_scripts/seed.py b/modules/processing_scripts/seed.py
new file mode 100644
index 00000000..60293278
--- /dev/null
+++ b/modules/processing_scripts/seed.py
@@ -0,0 +1,112 @@
+import json
+
+import gradio as gr
+
+from modules import scripts, ui, errors
+from modules.infotext import PasteField
+from modules.shared import cmd_opts
+from modules.ui_components import ToolButton
+
+
+class ScriptSeed(scripts.ScriptBuiltinUI):
+ section = "seed"
+ create_group = False
+
+ def __init__(self):
+ self.seed = None
+ self.reuse_seed = None
+ self.reuse_subseed = None
+
+ def title(self):
+ return "Seed"
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def ui(self, is_img2img):
+ with gr.Row(elem_id=self.elem_id("seed_row")):
+ if cmd_opts.use_textbox_seed:
+ self.seed = gr.Textbox(label='Seed', value="", elem_id=self.elem_id("seed"), min_width=100)
+ else:
+ self.seed = gr.Number(label='Seed', value=-1, elem_id=self.elem_id("seed"), min_width=100, precision=0)
+
+ random_seed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_seed"), tooltip="Set seed to -1, which will cause a new random number to be used every time")
+ reuse_seed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_seed"), tooltip="Reuse seed from last generation, mostly useful if it was randomized")
+
+ seed_checkbox = gr.Checkbox(label='Extra', elem_id=self.elem_id("subseed_show"), value=False)
+
+ with gr.Group(visible=False, elem_id=self.elem_id("seed_extras")) as seed_extras:
+ with gr.Row(elem_id=self.elem_id("subseed_row")):
+ subseed = gr.Number(label='Variation seed', value=-1, elem_id=self.elem_id("subseed"), precision=0)
+ random_subseed = ToolButton(ui.random_symbol, elem_id=self.elem_id("random_subseed"))
+ reuse_subseed = ToolButton(ui.reuse_symbol, elem_id=self.elem_id("reuse_subseed"))
+ subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=self.elem_id("subseed_strength"))
+
+ with gr.Row(elem_id=self.elem_id("seed_resize_from_row")):
+ seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=self.elem_id("seed_resize_from_w"))
+ seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=self.elem_id("seed_resize_from_h"))
+
+ random_seed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("seed") + "')}", show_progress=False, inputs=[], outputs=[])
+ random_subseed.click(fn=None, _js="function(){setRandomSeed('" + self.elem_id("subseed") + "')}", show_progress=False, inputs=[], outputs=[])
+
+ seed_checkbox.change(lambda x: gr.update(visible=x), show_progress=False, inputs=[seed_checkbox], outputs=[seed_extras])
+
+ self.infotext_fields = [
+ PasteField(self.seed, "Seed", api="seed"),
+ PasteField(seed_checkbox, lambda d: "Variation seed" in d or "Seed resize from-1" in d),
+ PasteField(subseed, "Variation seed", api="subseed"),
+ PasteField(subseed_strength, "Variation seed strength", api="subseed_strength"),
+ PasteField(seed_resize_from_w, "Seed resize from-1", api="seed_resize_from_h"),
+ PasteField(seed_resize_from_h, "Seed resize from-2", api="seed_resize_from_w"),
+ ]
+
+ self.on_after_component(lambda x: connect_reuse_seed(self.seed, reuse_seed, x.component, False), elem_id=f'generation_info_{self.tabname}')
+ self.on_after_component(lambda x: connect_reuse_seed(subseed, reuse_subseed, x.component, True), elem_id=f'generation_info_{self.tabname}')
+
+ return self.seed, seed_checkbox, subseed, subseed_strength, seed_resize_from_w, seed_resize_from_h
+
+ def setup(self, p, seed, seed_checkbox, subseed, subseed_strength, seed_resize_from_w, seed_resize_from_h):
+ p.seed = seed
+
+ if seed_checkbox and subseed_strength > 0:
+ p.subseed = subseed
+ p.subseed_strength = subseed_strength
+
+ if seed_checkbox and seed_resize_from_w > 0 and seed_resize_from_h > 0:
+ p.seed_resize_from_w = seed_resize_from_w
+ p.seed_resize_from_h = seed_resize_from_h
+
+
+
+def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, is_subseed):
+ """ Connects a 'reuse (sub)seed' button's click event so that it copies last used
+ (sub)seed value from generation info to the seed field. If copying subseed and subseed strength
+ was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
+
+ def copy_seed(gen_info_string: str, index):
+ res = -1
+
+ try:
+ gen_info = json.loads(gen_info_string)
+ index -= gen_info.get('index_of_first_image', 0)
+
+ if is_subseed and gen_info.get('subseed_strength', 0) > 0:
+ all_subseeds = gen_info.get('all_subseeds', [-1])
+ res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
+ else:
+ all_seeds = gen_info.get('all_seeds', [-1])
+ res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
+
+ except json.decoder.JSONDecodeError:
+ if gen_info_string:
+ errors.report(f"Error parsing JSON generation info: {gen_info_string}")
+
+ return [res, gr.update()]
+
+ reuse_seed.click(
+ fn=copy_seed,
+ _js="(x, y) => [x, selected_gallery_index()]",
+ show_progress=False,
+ inputs=[generation_info, seed],
+ outputs=[seed, seed]
+ )
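
For reference, a small sketch of the generation-info JSON that `copy_seed()` above consumes; the field names are taken from the code, the values are made up.

```python
import json

# Made-up example of the structure copy_seed() reads from the generation-info textbox.
gen_info = json.dumps({
    "index_of_first_image": 1,     # a grid occupies slot 0 in the gallery
    "all_seeds": [111, 222],
    "all_subseeds": [333, 444],
    "subseed_strength": 0.0,       # 0 -> the plain seed is copied even for the subseed button
})
```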
diff --git a/modules/progress.py b/modules/progress.py
index f405f07f..85255e82 100644
--- a/modules/progress.py
+++ b/modules/progress.py
@@ -8,10 +8,13 @@ from pydantic import BaseModel, Field
from modules.shared import opts
import modules.shared as shared
-
+from collections import OrderedDict
+import string
+import random
+from typing import List
current_task = None
-pending_tasks = {}
+pending_tasks = OrderedDict()
finished_tasks = []
recorded_results = []
recorded_results_limit = 2
@@ -34,6 +37,11 @@ def finish_task(id_task):
if len(finished_tasks) > 16:
finished_tasks.pop(0)
+def create_task_id(task_type):
+ N = 7
+ res = ''.join(random.choices(string.ascii_uppercase +
+ string.digits, k=N))
+ return f"task({task_type}-{res})"
def record_results(id_task, res):
recorded_results.append((id_task, res))
@@ -44,10 +52,14 @@ def record_results(id_task, res):
def add_task_to_queue(id_job):
pending_tasks[id_job] = time.time()
+class PendingTasksResponse(BaseModel):
+ size: int = Field(title="Pending task size")
+ tasks: List[str] = Field(title="Pending task ids")
class ProgressRequest(BaseModel):
id_task: str = Field(default=None, title="Task ID", description="id of the task to get progress for")
id_live_preview: int = Field(default=-1, title="Live preview image ID", description="id of last received last preview image")
+ live_preview: bool = Field(default=True, title="Include live preview", description="boolean flag indicating whether to include the live preview image")
class ProgressResponse(BaseModel):
@@ -62,16 +74,28 @@ class ProgressResponse(BaseModel):
def setup_progress_api(app):
+ app.add_api_route("/internal/pending-tasks", get_pending_tasks, methods=["GET"])
return app.add_api_route("/internal/progress", progressapi, methods=["POST"], response_model=ProgressResponse)
+def get_pending_tasks():
+ pending_tasks_ids = list(pending_tasks)
+ pending_len = len(pending_tasks_ids)
+ return PendingTasksResponse(size=pending_len, tasks=pending_tasks_ids)
+
+
def progressapi(req: ProgressRequest):
active = req.id_task == current_task
queued = req.id_task in pending_tasks
completed = req.id_task in finished_tasks
if not active:
- return ProgressResponse(active=active, queued=queued, completed=completed, id_live_preview=-1, textinfo="In queue..." if queued else "Waiting...")
+ textinfo = "Waiting..."
+ if queued:
+ sorted_queued = sorted(pending_tasks.keys(), key=lambda x: pending_tasks[x])
+ queue_index = sorted_queued.index(req.id_task)
+ textinfo = "In queue: {}/{}".format(queue_index + 1, len(sorted_queued))
+ return ProgressResponse(active=active, queued=queued, completed=completed, id_live_preview=-1, textinfo=textinfo)
progress = 0
@@ -89,31 +113,30 @@ def progressapi(req: ProgressRequest):
predicted_duration = elapsed_since_start / progress if progress > 0 else None
eta = predicted_duration - elapsed_since_start if predicted_duration is not None else None
+ live_preview = None
id_live_preview = req.id_live_preview
- shared.state.set_current_image()
- if opts.live_previews_enable and shared.state.id_live_preview != req.id_live_preview:
- image = shared.state.current_image
- if image is not None:
- buffered = io.BytesIO()
-
- if opts.live_previews_image_format == "png":
- # using optimize for large images takes an enormous amount of time
- if max(*image.size) <= 256:
- save_kwargs = {"optimize": True}
+
+ if opts.live_previews_enable and req.live_preview:
+ shared.state.set_current_image()
+ if shared.state.id_live_preview != req.id_live_preview:
+ image = shared.state.current_image
+ if image is not None:
+ buffered = io.BytesIO()
+
+ if opts.live_previews_image_format == "png":
+ # using optimize for large images takes an enormous amount of time
+ if max(*image.size) <= 256:
+ save_kwargs = {"optimize": True}
+ else:
+ save_kwargs = {"optimize": False, "compress_level": 1}
+
else:
- save_kwargs = {"optimize": False, "compress_level": 1}
-
- else:
- save_kwargs = {}
-
- image.save(buffered, format=opts.live_previews_image_format, **save_kwargs)
- base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
- live_preview = f"data:image/{opts.live_previews_image_format};base64,{base64_image}"
- id_live_preview = shared.state.id_live_preview
- else:
- live_preview = None
- else:
- live_preview = None
+ save_kwargs = {}
+
+ image.save(buffered, format=opts.live_previews_image_format, **save_kwargs)
+ base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
+ live_preview = f"data:image/{opts.live_previews_image_format};base64,{base64_image}"
+ id_live_preview = shared.state.id_live_preview
return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo)
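
A short sketch of how a client might use the two endpoints touched here; the base URL is assumed and the task id is a hypothetical value in the `create_task_id()` format.

```python
import requests

BASE = "http://127.0.0.1:7860"  # assumed local address

# New endpoint: queued task ids in the order they were added (OrderedDict above).
queue = requests.get(f"{BASE}/internal/pending-tasks").json()
print(queue["size"], queue["tasks"])

# live_preview=False uses the new ProgressRequest flag to skip the base64 preview image.
progress = requests.post(f"{BASE}/internal/progress", json={
    "id_task": "task(txt2img-AB12CD3)",  # hypothetical id in the create_task_id() format
    "id_live_preview": -1,
    "live_preview": False,
}).json()
print(progress["textinfo"])  # e.g. "In queue: 1/3" while the task is still queued
```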
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 203ae1ac..cba13455 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -2,10 +2,9 @@ from __future__ import annotations
import re
from collections import namedtuple
-from typing import List
import lark
-# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]"
+# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][: in background:0.25] [shoddy:masterful:0.5]"
# will be represented with prompt_schedule like this (assuming steps=100):
# [25, 'fantasy landscape with a mountain and an oak in foreground shoddy']
# [50, 'fantasy landscape with a lake and an oak in foreground in background shoddy']
@@ -19,14 +18,14 @@ prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)*
!emphasized: "(" prompt ")"
| "(" prompt ":" prompt ")"
| "[" prompt "]"
-scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER "]"
-alternate: "[" prompt ("|" prompt)+ "]"
+scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER [WHITESPACE] "]"
+alternate: "[" prompt ("|" [prompt])+ "]"
WHITESPACE: /\s+/
plain: /([^\\\[\]():|]|\\.)+/
%import common.SIGNED_NUMBER -> NUMBER
""")
-def get_learned_conditioning_prompt_schedules(prompts, steps):
+def get_learned_conditioning_prompt_schedules(prompts, base_steps, hires_steps=None, use_old_scheduling=False):
"""
>>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10)[0]
>>> g("test")
@@ -53,18 +52,43 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
[[3, '((a][:b:c '], [10, '((a][:b:c d']]
>>> g("[a|(b:1.1)]")
[[1, 'a'], [2, '(b:1.1)'], [3, 'a'], [4, '(b:1.1)'], [5, 'a'], [6, '(b:1.1)'], [7, 'a'], [8, '(b:1.1)'], [9, 'a'], [10, '(b:1.1)']]
+ >>> g("[fe|]male")
+ [[1, 'female'], [2, 'male'], [3, 'female'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'female'], [8, 'male'], [9, 'female'], [10, 'male']]
+ >>> g("[fe|||]male")
+ [[1, 'female'], [2, 'male'], [3, 'male'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'male'], [8, 'male'], [9, 'female'], [10, 'male']]
+ >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10, 10)[0]
+ >>> g("a [b:.5] c")
+ [[10, 'a b c']]
+ >>> g("a [b:1.5] c")
+ [[5, 'a c'], [10, 'a b c']]
"""
+ if hires_steps is None or use_old_scheduling:
+ int_offset = 0
+ flt_offset = 0
+ steps = base_steps
+ else:
+ int_offset = base_steps
+ flt_offset = 1.0
+ steps = hires_steps
+
def collect_steps(steps, tree):
res = [steps]
class CollectSteps(lark.Visitor):
def scheduled(self, tree):
- tree.children[-1] = float(tree.children[-1])
- if tree.children[-1] < 1:
- tree.children[-1] *= steps
- tree.children[-1] = min(steps, int(tree.children[-1]))
- res.append(tree.children[-1])
+ s = tree.children[-2]
+ v = float(s)
+ if use_old_scheduling:
+ v = v*steps if v<1 else v
+ else:
+ if "." in s:
+ v = (v - flt_offset) * steps
+ else:
+ v = (v - int_offset)
+ tree.children[-2] = min(steps, int(v))
+ if tree.children[-2] >= 1:
+ res.append(tree.children[-2])
def alternate(self, tree):
res.extend(range(1, steps+1))
@@ -75,13 +99,14 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
def at_step(step, tree):
class AtStep(lark.Transformer):
def scheduled(self, args):
- before, after, _, when = args
+ before, after, _, when, _ = args
yield before or () if step <= when else after
def alternate(self, args):
- yield next(args[(step - 1)%len(args)])
+ args = ["" if not arg else arg for arg in args]
+ yield args[(step - 1) % len(args)]
def start(self, args):
def flatten(x):
- if type(x) == str:
+ if isinstance(x, str):
yield x
else:
for gen in x:
@@ -129,7 +154,7 @@ class SdConditioning(list):
-def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps):
+def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps, hires_steps=None, use_old_scheduling=False):
"""converts a list of prompts into a list of prompt schedules - each schedule is a list of ScheduledPromptConditioning, specifying the comdition (cond),
and the sampling step at which this condition is to be replaced by the next one.
@@ -149,7 +174,7 @@ def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps):
"""
res = []
- prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps)
+ prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps, hires_steps, use_old_scheduling)
cache = {}
for prompt, prompt_schedule in zip(prompts, prompt_schedules):
@@ -214,17 +239,17 @@ def get_multicond_prompt_list(prompts: SdConditioning | list[str]):
class ComposableScheduledPromptConditioning:
def __init__(self, schedules, weight=1.0):
- self.schedules: List[ScheduledPromptConditioning] = schedules
+ self.schedules: list[ScheduledPromptConditioning] = schedules
self.weight: float = weight
class MulticondLearnedConditioning:
def __init__(self, shape, batch):
self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS
- self.batch: List[List[ComposableScheduledPromptConditioning]] = batch
+ self.batch: list[list[ComposableScheduledPromptConditioning]] = batch
-def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning:
+def get_multicond_learned_conditioning(model, prompts, steps, hires_steps=None, use_old_scheduling=False) -> MulticondLearnedConditioning:
"""same as get_learned_conditioning, but returns a list of ScheduledPromptConditioning along with the weight objects for each prompt.
For each prompt, the list is obtained by splitting the prompt using the AND separator.
@@ -233,7 +258,7 @@ def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearne
res_indexes, prompt_flat_list, prompt_indexes = get_multicond_prompt_list(prompts)
- learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps)
+ learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps, hires_steps, use_old_scheduling)
res = []
for indexes in res_indexes:
@@ -252,7 +277,7 @@ class DictWithShape(dict):
return self["crossattn"].shape
-def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step):
+def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step):
param = c[0][0].cond
is_dict = isinstance(param, dict)
@@ -333,7 +358,7 @@ re_attention = re.compile(r"""
\\|
\(|
\[|
-:([+-]?[.\d]+)\)|
+:\s*([+-]?[.\d]+)\s*\)|
\)|
]|
[^\\()\[\]:]+|
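
To make the new offsets concrete, a small worked example of the `collect_steps()` arithmetic for the hires pass (new scheduling; the step counts are illustrative):

```python
# New scheduling with a hires pass: int_offset = base_steps, flt_offset = 1.0, steps = hires_steps.
base_steps, hires_steps = 20, 10

# A fractional spec such as [b:1.5] contains ".", so it is offset by 1.0 and scaled:
when_fractional = (1.5 - 1.0) * hires_steps   # = 5 -> switches halfway through the hires pass
# An integer spec such as [b:25] is offset by the first-pass step count:
when_integer = 25 - base_steps                # = 5 -> also step 5 of the hires pass
```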
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index 0700b853..4d35b695 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -1,12 +1,9 @@
import os
-import numpy as np
-from PIL import Image
-from realesrgan import RealESRGANer
-
-from modules.upscaler import Upscaler, UpscalerData
-from modules.shared import cmd_opts, opts
from modules import modelloader, errors
+from modules.shared import cmd_opts, opts
+from modules.upscaler import Upscaler, UpscalerData
+from modules.upscaler_utils import upscale_with_model
class UpscalerRealESRGAN(Upscaler):
@@ -14,29 +11,20 @@ class UpscalerRealESRGAN(Upscaler):
self.name = "RealESRGAN"
self.user_path = path
super().__init__()
- try:
- from basicsr.archs.rrdbnet_arch import RRDBNet # noqa: F401
- from realesrgan import RealESRGANer # noqa: F401
- from realesrgan.archs.srvgg_arch import SRVGGNetCompact # noqa: F401
- self.enable = True
- self.scalers = []
- scalers = self.load_models(path)
+ self.enable = True
+ self.scalers = []
+ scalers = get_realesrgan_models(self)
- local_model_paths = self.find_models(ext_filter=[".pth"])
- for scaler in scalers:
- if scaler.local_data_path.startswith("http"):
- filename = modelloader.friendly_name(scaler.local_data_path)
- local_model_candidates = [local_model for local_model in local_model_paths if local_model.endswith(f"{filename}.pth")]
- if local_model_candidates:
- scaler.local_data_path = local_model_candidates[0]
+ local_model_paths = self.find_models(ext_filter=[".pth"])
+ for scaler in scalers:
+ if scaler.local_data_path.startswith("http"):
+ filename = modelloader.friendly_name(scaler.local_data_path)
+ local_model_candidates = [local_model for local_model in local_model_paths if local_model.endswith(f"{filename}.pth")]
+ if local_model_candidates:
+ scaler.local_data_path = local_model_candidates[0]
- if scaler.name in opts.realesrgan_enabled_models:
- self.scalers.append(scaler)
-
- except Exception:
- errors.report("Error importing Real-ESRGAN", exc_info=True)
- self.enable = False
- self.scalers = []
+ if scaler.name in opts.realesrgan_enabled_models:
+ self.scalers.append(scaler)
def do_upscale(self, img, path):
if not self.enable:
@@ -48,19 +36,19 @@ class UpscalerRealESRGAN(Upscaler):
errors.report(f"Unable to load RealESRGAN model {path}", exc_info=True)
return img
- upsampler = RealESRGANer(
- scale=info.scale,
- model_path=info.local_data_path,
- model=info.model(),
- half=not cmd_opts.no_half and not cmd_opts.upcast_sampling,
- tile=opts.ESRGAN_tile,
- tile_pad=opts.ESRGAN_tile_overlap,
+ model_descriptor = modelloader.load_spandrel_model(
+ info.local_data_path,
+ device=self.device,
+ half=(not cmd_opts.no_half and not cmd_opts.upcast_sampling),
+ expected_architecture="ESRGAN", # "RealESRGAN" isn't a specific thing for Spandrel
+ )
+ return upscale_with_model(
+ model_descriptor,
+ img,
+ tile_size=opts.ESRGAN_tile,
+ tile_overlap=opts.ESRGAN_tile_overlap,
+ # TODO: `outscale`?
)
-
- upsampled = upsampler.enhance(np.array(img), outscale=info.scale)[0]
-
- image = Image.fromarray(upsampled)
- return image
def load_model(self, path):
for scaler in self.scalers:
@@ -75,58 +63,43 @@ class UpscalerRealESRGAN(Upscaler):
return scaler
raise ValueError(f"Unable to find model info: {path}")
- def load_models(self, _):
- return get_realesrgan_models(self)
-
-def get_realesrgan_models(scaler):
- try:
- from basicsr.archs.rrdbnet_arch import RRDBNet
- from realesrgan.archs.srvgg_arch import SRVGGNetCompact
- models = [
- UpscalerData(
- name="R-ESRGAN General 4xV3",
- path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
- scale=4,
- upscaler=scaler,
- model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
- ),
- UpscalerData(
- name="R-ESRGAN General WDN 4xV3",
- path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
- scale=4,
- upscaler=scaler,
- model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
- ),
- UpscalerData(
- name="R-ESRGAN AnimeVideo",
- path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
- scale=4,
- upscaler=scaler,
- model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
- ),
- UpscalerData(
- name="R-ESRGAN 4x+",
- path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
- scale=4,
- upscaler=scaler,
- model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
- ),
- UpscalerData(
- name="R-ESRGAN 4x+ Anime6B",
- path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
- scale=4,
- upscaler=scaler,
- model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
- ),
- UpscalerData(
- name="R-ESRGAN 2x+",
- path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
- scale=2,
- upscaler=scaler,
- model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
- ),
- ]
- return models
- except Exception:
- errors.report("Error making Real-ESRGAN models list", exc_info=True)
+def get_realesrgan_models(scaler: UpscalerRealESRGAN):
+ return [
+ UpscalerData(
+ name="R-ESRGAN General 4xV3",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
+ scale=4,
+ upscaler=scaler,
+ ),
+ UpscalerData(
+ name="R-ESRGAN General WDN 4xV3",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
+ scale=4,
+ upscaler=scaler,
+ ),
+ UpscalerData(
+ name="R-ESRGAN AnimeVideo",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
+ scale=4,
+ upscaler=scaler,
+ ),
+ UpscalerData(
+ name="R-ESRGAN 4x+",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
+ scale=4,
+ upscaler=scaler,
+ ),
+ UpscalerData(
+ name="R-ESRGAN 4x+ Anime6B",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
+ scale=4,
+ upscaler=scaler,
+ ),
+ UpscalerData(
+ name="R-ESRGAN 2x+",
+ path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
+ scale=2,
+ upscaler=scaler,
+ ),
+ ]
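
Roughly how the new Spandrel-based path could be exercised on its own; apart from the `load_spandrel_model` / `upscale_with_model` calls visible in the diff, everything here (model path, device, tile numbers) is an assumption, and `do_upscale()` above normally supplies these values.

```python
from PIL import Image
from modules import modelloader
from modules.upscaler_utils import upscale_with_model

# Assumed model path and settings for illustration only.
model = modelloader.load_spandrel_model(
    "models/RealESRGAN/RealESRGAN_x4plus.pth",
    device="cuda",
    half=False,
    expected_architecture="ESRGAN",
)
result = upscale_with_model(model, Image.open("input.png"), tile_size=192, tile_overlap=8)
result.save("output.png")
```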
diff --git a/modules/restart.py b/modules/restart.py
index 18eacaf3..2dd6493b 100644
--- a/modules/restart.py
+++ b/modules/restart.py
@@ -14,7 +14,9 @@ def is_restartable() -> bool:
def restart_program() -> None:
"""creates file tmp/restart and immediately stops the process, which webui.bat/webui.sh interpret as a command to start webui again"""
- (Path(script_path) / "tmp" / "restart").touch()
+ tmpdir = Path(script_path) / "tmp"
+ tmpdir.mkdir(parents=True, exist_ok=True)
+ (tmpdir / "restart").touch()
stop_program()
diff --git a/modules/rng.py b/modules/rng.py
new file mode 100644
index 00000000..8934d39b
--- /dev/null
+++ b/modules/rng.py
@@ -0,0 +1,170 @@
+import torch
+
+from modules import devices, rng_philox, shared
+
+
+def randn(seed, shape, generator=None):
+ """Generate a tensor with random numbers from a normal distribution using seed.
+
+ Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed."""
+
+ manual_seed(seed)
+
+ if shared.opts.randn_source == "NV":
+ return torch.asarray((generator or nv_rng).randn(shape), device=devices.device)
+
+ if shared.opts.randn_source == "CPU" or devices.device.type == 'mps':
+ return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device)
+
+ return torch.randn(shape, device=devices.device, generator=generator)
+
+
+def randn_local(seed, shape):
+ """Generate a tensor with random numbers from a normal distribution using seed.
+
+ Does not change the global random number generator. You can only generate the seed's first tensor using this function."""
+
+ if shared.opts.randn_source == "NV":
+ rng = rng_philox.Generator(seed)
+ return torch.asarray(rng.randn(shape), device=devices.device)
+
+ local_device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device
+ local_generator = torch.Generator(local_device).manual_seed(int(seed))
+ return torch.randn(shape, device=local_device, generator=local_generator).to(devices.device)
+
+
+def randn_like(x):
+ """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
+
+ Use either randn() or manual_seed() to initialize the generator."""
+
+ if shared.opts.randn_source == "NV":
+ return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype)
+
+ if shared.opts.randn_source == "CPU" or x.device.type == 'mps':
+ return torch.randn_like(x, device=devices.cpu).to(x.device)
+
+ return torch.randn_like(x)
+
+
+def randn_without_seed(shape, generator=None):
+ """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
+
+ Use either randn() or manual_seed() to initialize the generator."""
+
+ if shared.opts.randn_source == "NV":
+ return torch.asarray((generator or nv_rng).randn(shape), device=devices.device)
+
+ if shared.opts.randn_source == "CPU" or devices.device.type == 'mps':
+ return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device)
+
+ return torch.randn(shape, device=devices.device, generator=generator)
+
+
+def manual_seed(seed):
+ """Set up a global random number generator using the specified seed."""
+
+ if shared.opts.randn_source == "NV":
+ global nv_rng
+ nv_rng = rng_philox.Generator(seed)
+ return
+
+ torch.manual_seed(seed)
+
+
+def create_generator(seed):
+ if shared.opts.randn_source == "NV":
+ return rng_philox.Generator(seed)
+
+ device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device
+ generator = torch.Generator(device).manual_seed(int(seed))
+ return generator
+
+
+# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
+def slerp(val, low, high):
+ low_norm = low/torch.norm(low, dim=1, keepdim=True)
+ high_norm = high/torch.norm(high, dim=1, keepdim=True)
+ dot = (low_norm*high_norm).sum(1)
+
+ if dot.mean() > 0.9995:
+ return low * val + high * (1 - val)
+
+ omega = torch.acos(dot)
+ so = torch.sin(omega)
+ res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
+ return res
+
+
+class ImageRNG:
+ def __init__(self, shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0):
+ self.shape = tuple(map(int, shape))
+ self.seeds = seeds
+ self.subseeds = subseeds
+ self.subseed_strength = subseed_strength
+ self.seed_resize_from_h = seed_resize_from_h
+ self.seed_resize_from_w = seed_resize_from_w
+
+ self.generators = [create_generator(seed) for seed in seeds]
+
+ self.is_first = True
+
+ def first(self):
+ noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], int(self.seed_resize_from_h) // 8, int(self.seed_resize_from_w // 8))
+
+ xs = []
+
+ for i, (seed, generator) in enumerate(zip(self.seeds, self.generators)):
+ subnoise = None
+ if self.subseeds is not None and self.subseed_strength != 0:
+ subseed = 0 if i >= len(self.subseeds) else self.subseeds[i]
+ subnoise = randn(subseed, noise_shape)
+
+ if noise_shape != self.shape:
+ noise = randn(seed, noise_shape)
+ else:
+ noise = randn(seed, self.shape, generator=generator)
+
+ if subnoise is not None:
+ noise = slerp(self.subseed_strength, noise, subnoise)
+
+ if noise_shape != self.shape:
+ x = randn(seed, self.shape, generator=generator)
+ dx = (self.shape[2] - noise_shape[2]) // 2
+ dy = (self.shape[1] - noise_shape[1]) // 2
+ w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
+ h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
+ tx = 0 if dx < 0 else dx
+ ty = 0 if dy < 0 else dy
+ dx = max(-dx, 0)
+ dy = max(-dy, 0)
+
+ x[:, ty:ty + h, tx:tx + w] = noise[:, dy:dy + h, dx:dx + w]
+ noise = x
+
+ xs.append(noise)
+
+ eta_noise_seed_delta = shared.opts.eta_noise_seed_delta or 0
+ if eta_noise_seed_delta:
+ self.generators = [create_generator(seed + eta_noise_seed_delta) for seed in self.seeds]
+
+ return torch.stack(xs).to(shared.device)
+
+ def next(self):
+ if self.is_first:
+ self.is_first = False
+ return self.first()
+
+ xs = []
+ for generator in self.generators:
+ x = randn_without_seed(self.shape, generator=generator)
+ xs.append(x)
+
+ return torch.stack(xs).to(shared.device)
+
+
+devices.randn = randn
+devices.randn_local = randn_local
+devices.randn_like = randn_like
+devices.randn_without_seed = randn_without_seed
+devices.manual_seed = manual_seed
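A minimal usage sketch of the ImageRNG class added above. The module path (modules.rng) and the latent shape below are assumptions for illustration; the shape is per-image, and the batch dimension comes from the number of seeds.

```
from modules.rng import ImageRNG

rng = ImageRNG(shape=(4, 64, 64), seeds=[1234, 1235], subseeds=[42, 43], subseed_strength=0.5)

initial = rng.next()  # first call returns the seed-determined noise (equivalent to first())
more = rng.next()     # later calls continue the per-seed generators, e.g. for extra noise
```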
diff --git a/modules/rng_philox.py b/modules/rng_philox.py
new file mode 100644
index 00000000..5532cf9d
--- /dev/null
+++ b/modules/rng_philox.py
@@ -0,0 +1,102 @@
+"""RNG imitiating torch cuda randn on CPU. You are welcome.
+
+Usage:
+
+```
+g = Generator(seed=0)
+print(g.randn(shape=(3, 4)))
+```
+
+Expected output:
+```
+[[-0.92466259 -0.42534415 -2.6438457 0.14518388]
+ [-0.12086647 -0.57972564 -0.62285122 -0.32838709]
+ [-1.07454231 -0.36314407 -1.67105067 2.26550497]]
+```
+"""
+
+import numpy as np
+
+philox_m = [0xD2511F53, 0xCD9E8D57]
+philox_w = [0x9E3779B9, 0xBB67AE85]
+
+two_pow32_inv = np.array([2.3283064e-10], dtype=np.float32)
+two_pow32_inv_2pi = np.array([2.3283064e-10 * 6.2831855], dtype=np.float32)
+
+
+def uint32(x):
+ """Converts (N,) np.uint64 array into (2, N) np.unit32 array."""
+ return x.view(np.uint32).reshape(-1, 2).transpose(1, 0)
+
+
+def philox4_round(counter, key):
+ """A single round of the Philox 4x32 random number generator."""
+
+ v1 = uint32(counter[0].astype(np.uint64) * philox_m[0])
+ v2 = uint32(counter[2].astype(np.uint64) * philox_m[1])
+
+ counter[0] = v2[1] ^ counter[1] ^ key[0]
+ counter[1] = v2[0]
+ counter[2] = v1[1] ^ counter[3] ^ key[1]
+ counter[3] = v1[0]
+
+
+def philox4_32(counter, key, rounds=10):
+ """Generates 32-bit random numbers using the Philox 4x32 random number generator.
+
+ Parameters:
+ counter (numpy.ndarray): A 4xN array of 32-bit integers representing the counter values (offset into generation).
+ key (numpy.ndarray): A 2xN array of 32-bit integers representing the key values (seed).
+ rounds (int): The number of rounds to perform.
+
+ Returns:
+ numpy.ndarray: A 4xN array of 32-bit integers containing the generated random numbers.
+ """
+
+ for _ in range(rounds - 1):
+ philox4_round(counter, key)
+
+ key[0] = key[0] + philox_w[0]
+ key[1] = key[1] + philox_w[1]
+
+ philox4_round(counter, key)
+ return counter
+
+
+def box_muller(x, y):
+ """Returns just the first out of two numbers generated by Box–Muller transform algorithm."""
+ u = x * two_pow32_inv + two_pow32_inv / 2
+ v = y * two_pow32_inv_2pi + two_pow32_inv_2pi / 2
+
+ s = np.sqrt(-2.0 * np.log(u))
+
+ r1 = s * np.sin(v)
+ return r1.astype(np.float32)
+
+
+class Generator:
+ """RNG that produces same outputs as torch.randn(..., device='cuda') on CPU"""
+
+ def __init__(self, seed):
+ self.seed = seed
+ self.offset = 0
+
+ def randn(self, shape):
+ """Generate a sequence of n standard normal random variables using the Philox 4x32 random number generator and the Box-Muller transform."""
+
+ n = 1
+ for x in shape:
+ n *= x
+
+ counter = np.zeros((4, n), dtype=np.uint32)
+ counter[0] = self.offset
+ counter[2] = np.arange(n, dtype=np.uint32) # up to 2^32 numbers can be generated - if you want more you'd need to spill into counter[3]
+ self.offset += 1
+
+ key = np.empty(n, dtype=np.uint64)
+ key.fill(self.seed)
+ key = uint32(key)
+
+ g = philox4_32(counter, key)
+
+ return box_muller(g[0], g[1]).reshape(shape) # discard g[2] and g[3]
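Beyond the module docstring, the per-call offset is what makes successive batches differ while staying reproducible. A small sketch, assuming the module is importable as modules.rng_philox:

```
import numpy as np

from modules.rng_philox import Generator

g1 = Generator(seed=0)
a = g1.randn((2, 2))  # generated at offset 0
b = g1.randn((2, 2))  # the offset has advanced to 1, so this batch differs from the first

g2 = Generator(seed=0)
assert np.allclose(a, g2.randn((2, 2)))  # same seed and offset reproduce the same numbers
assert not np.allclose(a, b)
```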
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 77ee55ee..9ed7ad21 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -1,7 +1,7 @@
import inspect
import os
from collections import namedtuple
-from typing import Optional, Dict, Any
+from typing import Optional, Any
from fastapi import FastAPI
from gradio import Blocks
@@ -28,6 +28,18 @@ class ImageSaveParams:
"""dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'"""
+class ExtraNoiseParams:
+ def __init__(self, noise, x, xi):
+ self.noise = noise
+ """Random noise generated by the seed"""
+
+ self.x = x
+ """Latent representation of the image"""
+
+ self.xi = xi
+ """Noisy latent representation of the image"""
+
+
class CFGDenoiserParams:
def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond):
self.x = x
@@ -100,6 +112,7 @@ callback_map = dict(
callbacks_ui_settings=[],
callbacks_before_image_saved=[],
callbacks_image_saved=[],
+ callbacks_extra_noise=[],
callbacks_cfg_denoiser=[],
callbacks_cfg_denoised=[],
callbacks_cfg_after_cfg=[],
@@ -189,6 +202,14 @@ def image_saved_callback(params: ImageSaveParams):
report_exception(c, 'image_saved_callback')
+def extra_noise_callback(params: ExtraNoiseParams):
+ for c in callback_map['callbacks_extra_noise']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'callbacks_extra_noise')
+
+
def cfg_denoiser_callback(params: CFGDenoiserParams):
for c in callback_map['callbacks_cfg_denoiser']:
try:
@@ -237,7 +258,7 @@ def image_grid_callback(params: ImageGridLoopParams):
report_exception(c, 'image_grid')
-def infotext_pasted_callback(infotext: str, params: Dict[str, Any]):
+def infotext_pasted_callback(infotext: str, params: dict[str, Any]):
for c in callback_map['callbacks_infotext_pasted']:
try:
c.callback(infotext, params)
@@ -367,6 +388,14 @@ def on_image_saved(callback):
add_callback(callback_map['callbacks_image_saved'], callback)
+def on_extra_noise(callback):
+ """register a function to be called before adding extra noise in img2img or hires fix;
+ The callback is called with one argument:
+ - params: ExtraNoiseParams - contains noise determined by seed and latent representation of image
+ """
+ add_callback(callback_map['callbacks_extra_noise'], callback)
+
+
def on_cfg_denoiser(callback):
"""register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
The callback is called with one argument:
@@ -420,7 +449,7 @@ def on_infotext_pasted(callback):
"""register a function to be called before applying an infotext.
The callback is called with two arguments:
- infotext: str - raw infotext.
- - result: Dict[str, any] - parsed infotext parameters.
+ - result: dict[str, any] - parsed infotext parameters.
"""
add_callback(callback_map['callbacks_infotext_pasted'], callback)
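A minimal sketch of registering the new extra-noise hook from an extension's script file. Whether changes to params.noise are picked up by the caller is not shown in this diff, so the callback body is illustrative; the registration API itself is the one added above.

```
from modules import script_callbacks

def tame_extra_noise(params: script_callbacks.ExtraNoiseParams):
    # params.noise - random noise generated from the seed
    # params.x     - latent representation of the image
    # params.xi    - noisy latent representation of the image
    params.noise = params.noise * 0.9  # illustrative: dampen the extra noise slightly

script_callbacks.on_extra_noise(tame_extra_noise)
```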
diff --git a/modules/scripts.py b/modules/scripts.py
index 5b4edcac..017aed5a 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -3,6 +3,7 @@ import re
import sys
import inspect
from collections import namedtuple
+from dataclasses import dataclass
import gradio as gr
@@ -10,17 +11,42 @@ from modules import shared, paths, script_callbacks, extensions, script_loading,
AlwaysVisible = object()
+class MaskBlendArgs:
+ def __init__(self, current_latent, nmask, init_latent, mask, blended_latent, denoiser=None, sigma=None):
+ self.current_latent = current_latent
+ self.nmask = nmask
+ self.init_latent = init_latent
+ self.mask = mask
+ self.blended_latent = blended_latent
+
+ self.denoiser = denoiser
+ self.is_final_blend = denoiser is None
+ self.sigma = sigma
+
+class PostSampleArgs:
+ def __init__(self, samples):
+ self.samples = samples
class PostprocessImageArgs:
def __init__(self, image):
self.image = image
+class PostProcessMaskOverlayArgs:
+ def __init__(self, index, mask_for_overlay, overlay_image):
+ self.index = index
+ self.mask_for_overlay = mask_for_overlay
+ self.overlay_image = overlay_image
class PostprocessBatchListArgs:
def __init__(self, images):
self.images = images
+@dataclass
+class OnComponent:
+ component: gr.blocks.Block
+
+
class Script:
name = None
"""script's internal name derived from title"""
@@ -35,9 +61,13 @@ class Script:
is_txt2img = False
is_img2img = False
+ tabname = None
group = None
- """A gr.Group component that has all script's UI inside it"""
+ """A gr.Group component that has all script's UI inside it."""
+
+ create_group = True
+ """If False, for alwayson scripts, a group component will not be created."""
infotext_fields = None
"""if set in ui(), this is a list of pairs of gradio component + text; the text will be used when
@@ -52,6 +82,15 @@ class Script:
api_info = None
"""Generated value of type modules.api.models.ScriptInfo with information about the script for API"""
+ on_before_component_elem_id = None
+ """list of callbacks to be called before a component with an elem_id is created"""
+
+ on_after_component_elem_id = None
+ """list of callbacks to be called after a component with an elem_id is created"""
+
+ setup_for_ui_only = False
+ """If true, the script setup will only be run in Gradio UI, not in API"""
+
def title(self):
"""this function should return the title of the script. This is what will be displayed in the dropdown menu."""
@@ -90,9 +129,16 @@ class Script:
pass
+ def setup(self, p, *args):
+ """For AlwaysVisible scripts, this function is called when the processing object is set up, before any processing starts.
+ args contains all values returned by components from ui().
+ """
+ pass
+
+
def before_process(self, p, *args):
"""
- This function is called very early before processing begins for AlwaysVisible scripts.
+ This function is called very early, as processing begins, for AlwaysVisible scripts.
You can modify the processing object (p) here, inject hooks, etc.
args contains all values returned by components from ui()
"""
@@ -180,6 +226,25 @@ class Script:
pass
+ def on_mask_blend(self, p, mba: MaskBlendArgs, *args):
+ """
+ Called in inpainting mode when the original content is blended with the inpainted content.
+ This is called at every step in the denoising process and once at the end.
+ If is_final_blend is true, this is called for the final blending stage.
+ Otherwise, denoiser and sigma are defined and may be used to inform the procedure.
+ """
+
+ pass
+
+ def post_sample(self, p, ps: PostSampleArgs, *args):
+ """
+ Called after the samples have been generated,
+ but before they have been decoded by the VAE, if applicable.
+ Check getattr(samples, 'already_decoded', False) to test if the images are decoded.
+ """
+
+ pass
+
def postprocess_image(self, p, pp: PostprocessImageArgs, *args):
"""
Called for every image after it has been generated.
@@ -187,6 +252,13 @@ class Script:
pass
+ def postprocess_maskoverlay(self, p, ppmo: PostProcessMaskOverlayArgs, *args):
+ """
+ Called for every image after it has been generated.
+ """
+
+ pass
+
def postprocess(self, p, processed, *args):
"""
This function is called after processing ends for AlwaysVisible scripts.
@@ -212,6 +284,29 @@ class Script:
pass
+ def on_before_component(self, callback, *, elem_id):
+ """
+ Calls callback before a component is created. The callback function is called with a single argument of type OnComponent.
+
+ May be called in show() or ui() - but in the latter it may be too late, as some components may already be created.
+
+ This function is an alternative to before_component in that it also allows running code before a component is created, but
+ it does not need to be called for every created component - just for the one you need.
+ """
+ if self.on_before_component_elem_id is None:
+ self.on_before_component_elem_id = []
+
+ self.on_before_component_elem_id.append((elem_id, callback))
+
+ def on_after_component(self, callback, *, elem_id):
+ """
+ Calls callback after a component is created. The callback function is called with a single argument of type OnComponent.
+ """
+ if self.on_after_component_elem_id is None:
+ self.on_after_component_elem_id = []
+
+ self.on_after_component_elem_id.append((elem_id, callback))
+
def describe(self):
"""unused"""
return ""
@@ -220,7 +315,7 @@ class Script:
"""helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id"""
need_tabname = self.show(True) == self.show(False)
- tabkind = 'img2img' if self.is_img2img else 'txt2txt'
+ tabkind = 'img2img' if self.is_img2img else 'txt2img'
tabname = f"{tabkind}_" if need_tabname else ""
title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower()))
@@ -232,6 +327,19 @@ class Script:
"""
pass
+
+class ScriptBuiltinUI(Script):
+ setup_for_ui_only = True
+
+ def elem_id(self, item_id):
+ """helper function to generate id for a HTML element, constructs final id out of tab and user-supplied item_id"""
+
+ need_tabname = self.show(True) == self.show(False)
+ tabname = ('img2img' if self.is_img2img else 'txt2img') + "_" if need_tabname else ""
+
+ return f'{tabname}{item_id}'
+
+
current_basedir = paths.script_path
@@ -249,19 +357,113 @@ scripts_data = []
postprocessing_scripts_data = []
ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"])
+def topological_sort(dependencies):
+ """Accepts a dictionary mapping name to its dependencies, returns a list of names ordered according to dependencies.
+ Ignores errors relating to missing or circular dependencies.
+ """
+
+ visited = {}
+ result = []
+
+ def inner(name):
+ visited[name] = True
+
+ for dep in dependencies.get(name, []):
+ if dep in dependencies and dep not in visited:
+ inner(dep)
+
+ result.append(name)
+
+ for depname in dependencies:
+ if depname not in visited:
+ inner(depname)
+
+ return result
+
-def list_scripts(scriptdirname, extension):
- scripts_list = []
+@dataclass
+class ScriptWithDependencies:
+ script_canonical_name: str
+ file: ScriptFile
+ requires: list
+ load_before: list
+ load_after: list
- basedir = os.path.join(paths.script_path, scriptdirname)
- if os.path.exists(basedir):
- for filename in sorted(os.listdir(basedir)):
- scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename)))
- for ext in extensions.active():
- scripts_list += ext.list_files(scriptdirname, extension)
+def list_scripts(scriptdirname, extension, *, include_extensions=True):
+ scripts = {}
- scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
+ loaded_extensions = {ext.canonical_name: ext for ext in extensions.active()}
+ loaded_extensions_scripts = {ext.canonical_name: [] for ext in extensions.active()}
+
+ # build script dependency map
+ root_script_basedir = os.path.join(paths.script_path, scriptdirname)
+ if os.path.exists(root_script_basedir):
+ for filename in sorted(os.listdir(root_script_basedir)):
+ if not os.path.isfile(os.path.join(root_script_basedir, filename)):
+ continue
+
+ if os.path.splitext(filename)[1].lower() != extension:
+ continue
+
+ script_file = ScriptFile(paths.script_path, filename, os.path.join(root_script_basedir, filename))
+ scripts[filename] = ScriptWithDependencies(filename, script_file, [], [], [])
+
+ if include_extensions:
+ for ext in extensions.active():
+ extension_scripts_list = ext.list_files(scriptdirname, extension)
+ for extension_script in extension_scripts_list:
+ if not os.path.isfile(extension_script.path):
+ continue
+
+ script_canonical_name = ("builtin/" if ext.is_builtin else "") + ext.canonical_name + "/" + extension_script.filename
+ relative_path = scriptdirname + "/" + extension_script.filename
+
+ script = ScriptWithDependencies(
+ script_canonical_name=script_canonical_name,
+ file=extension_script,
+ requires=ext.metadata.get_script_requirements("Requires", relative_path, scriptdirname),
+ load_before=ext.metadata.get_script_requirements("Before", relative_path, scriptdirname),
+ load_after=ext.metadata.get_script_requirements("After", relative_path, scriptdirname),
+ )
+
+ scripts[script_canonical_name] = script
+ loaded_extensions_scripts[ext.canonical_name].append(script)
+
+ for script_canonical_name, script in scripts.items():
+ # load before requires inverse dependency
+ # in this case, append the script name into the load_after list of the specified script
+ for load_before in script.load_before:
+ # if this requires an individual script to be loaded before
+ other_script = scripts.get(load_before)
+ if other_script:
+ other_script.load_after.append(script_canonical_name)
+
+ # if this requires an extension
+ other_extension_scripts = loaded_extensions_scripts.get(load_before)
+ if other_extension_scripts:
+ for other_script in other_extension_scripts:
+ other_script.load_after.append(script_canonical_name)
+
+ # if After mentions an extension, remove it and instead add all of its scripts
+ for load_after in list(script.load_after):
+ if load_after not in scripts and load_after in loaded_extensions_scripts:
+ script.load_after.remove(load_after)
+
+ for other_script in loaded_extensions_scripts.get(load_after, []):
+ script.load_after.append(other_script.script_canonical_name)
+
+ dependencies = {}
+
+ for script_canonical_name, script in scripts.items():
+ for required_script in script.requires:
+ if required_script not in scripts and required_script not in loaded_extensions:
+ errors.report(f'Script "{script_canonical_name}" requires "{required_script}" to be loaded, but it is not.', exc_info=False)
+
+ dependencies[script_canonical_name] = script.load_after
+
+ ordered_scripts = topological_sort(dependencies)
+ scripts_list = [scripts[script_canonical_name].file for script_canonical_name in ordered_scripts]
return scripts_list
@@ -288,7 +490,7 @@ def load_scripts():
postprocessing_scripts_data.clear()
script_callbacks.clear_callbacks()
- scripts_list = list_scripts("scripts", ".py")
+ scripts_list = list_scripts("scripts", ".py") + list_scripts("modules/processing_scripts", ".py", include_extensions=False)
syspath = sys.path
@@ -302,15 +504,9 @@ def load_scripts():
elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing):
postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))
- def orderby(basedir):
- # 1st webui, 2nd extensions-builtin, 3rd extensions
- priority = {os.path.join(paths.script_path, "extensions-builtin"):1, paths.script_path:0}
- for key in priority:
- if basedir.startswith(key):
- return priority[key]
- return 9999
-
- for scriptfile in sorted(scripts_list, key=lambda x: [orderby(x.basedir), x]):
+ # here the scripts_list is already ordered according to dependencies
+ # processing scripts are not part of that ordering
+ for scriptfile in scripts_list:
try:
if scriptfile.basedir != paths.script_path:
sys.path = [scriptfile.basedir] + sys.path
@@ -349,10 +545,17 @@ class ScriptRunner:
self.selectable_scripts = []
self.alwayson_scripts = []
self.titles = []
+ self.title_map = {}
self.infotext_fields = []
self.paste_field_names = []
self.inputs = [None]
+ self.on_before_component_elem_id = {}
+ """dict of callbacks to be called before an element is created; key=elem_id, value=list of callbacks"""
+
+ self.on_after_component_elem_id = {}
+ """dict of callbacks to be called after an element is created; key=elem_id, value=list of callbacks"""
+
def initialize_scripts(self, is_img2img):
from modules import scripts_auto_postprocessing
@@ -363,10 +566,16 @@ class ScriptRunner:
auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data()
for script_data in auto_processing_scripts + scripts_data:
- script = script_data.script_class()
+ try:
+ script = script_data.script_class()
+ except Exception:
+ errors.report(f"Error # failed to initialize Script {script_data.module}: ", exc_info=True)
+ continue
+
script.filename = script_data.path
script.is_txt2img = not is_img2img
script.is_img2img = is_img2img
+ script.tabname = "img2img" if is_img2img else "txt2img"
visibility = script.show(script.is_img2img)
@@ -379,18 +588,48 @@ class ScriptRunner:
self.scripts.append(script)
self.selectable_scripts.append(script)
+ self.apply_on_before_component_callbacks()
+
+ def apply_on_before_component_callbacks(self):
+ for script in self.scripts:
+ on_before = script.on_before_component_elem_id or []
+ on_after = script.on_after_component_elem_id or []
+
+ for elem_id, callback in on_before:
+ if elem_id not in self.on_before_component_elem_id:
+ self.on_before_component_elem_id[elem_id] = []
+
+ self.on_before_component_elem_id[elem_id].append((callback, script))
+
+ for elem_id, callback in on_after:
+ if elem_id not in self.on_after_component_elem_id:
+ self.on_after_component_elem_id[elem_id] = []
+
+ self.on_after_component_elem_id[elem_id].append((callback, script))
+
+ on_before.clear()
+ on_after.clear()
+
def create_script_ui(self, script):
- import modules.api.models as api_models
script.args_from = len(self.inputs)
script.args_to = len(self.inputs)
+ try:
+ self.create_script_ui_inner(script)
+ except Exception:
+ errors.report(f"Error creating UI for {script.name}: ", exc_info=True)
+
+ def create_script_ui_inner(self, script):
+ import modules.api.models as api_models
+
controls = wrap_call(script.ui, script.filename, "ui", script.is_img2img)
if controls is None:
return
script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower()
+
api_args = []
for control in controls:
@@ -398,11 +637,15 @@ class ScriptRunner:
arg_info = api_models.ScriptArg(label=control.label or "")
- for field in ("value", "minimum", "maximum", "step", "choices"):
+ for field in ("value", "minimum", "maximum", "step"):
v = getattr(control, field, None)
if v is not None:
setattr(arg_info, field, v)
+ choices = getattr(control, 'choices', None) # as of gradio 3.41, some items in choices are strings, and some are tuples where the first elem is the string
+ if choices is not None:
+ arg_info.choices = [x[0] if isinstance(x, tuple) else x for x in choices]
+
api_args.append(arg_info)
script.api_info = api_models.ScriptInfo(
@@ -429,15 +672,20 @@ class ScriptRunner:
if script.alwayson and script.section != section:
continue
- with gr.Group(visible=script.alwayson) as group:
- self.create_script_ui(script)
+ if script.create_group:
+ with gr.Group(visible=script.alwayson) as group:
+ self.create_script_ui(script)
- script.group = group
+ script.group = group
+ else:
+ self.create_script_ui(script)
def prepare_ui(self):
self.inputs = [None]
def setup_ui(self):
+ all_titles = [wrap_call(script.title, script.filename, "title") or script.filename for script in self.scripts]
+ self.title_map = {title.lower(): script for title, script in zip(all_titles, self.scripts)}
self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.selectable_scripts]
self.setup_ui_for_section(None)
@@ -448,6 +696,8 @@ class ScriptRunner:
self.setup_ui_for_section(None, self.selectable_scripts)
def select_script(script_index):
+ if script_index is None:
+ script_index = 0
selected_script = self.selectable_scripts[script_index - 1] if script_index>0 else None
return [gr.update(visible=selected_script == s) for s in self.selectable_scripts]
@@ -484,12 +734,14 @@ class ScriptRunner:
self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None'))))
self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts])
+ self.apply_on_before_component_callbacks()
+
return self.inputs
def run(self, p, *args):
script_index = args[0]
- if script_index == 0:
+ if script_index == 0 or script_index is None:
return None
script = self.selectable_scripts[script_index-1]
@@ -568,6 +820,22 @@ class ScriptRunner:
except Exception:
errors.report(f"Error running postprocess_batch_list: {script.filename}", exc_info=True)
+ def post_sample(self, p, ps: PostSampleArgs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.post_sample(p, ps, *script_args)
+ except Exception:
+ errors.report(f"Error running post_sample: {script.filename}", exc_info=True)
+
+ def on_mask_blend(self, p, mba: MaskBlendArgs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.on_mask_blend(p, mba, *script_args)
+ except Exception:
+ errors.report(f"Error running post_sample: {script.filename}", exc_info=True)
+
def postprocess_image(self, p, pp: PostprocessImageArgs):
for script in self.alwayson_scripts:
try:
@@ -576,7 +844,21 @@ class ScriptRunner:
except Exception:
errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True)
+ def postprocess_maskoverlay(self, p, ppmo: PostProcessMaskOverlayArgs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.postprocess_maskoverlay(p, ppmo, *script_args)
+ except Exception:
+ errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True)
+
def before_component(self, component, **kwargs):
+ for callback, script in self.on_before_component_elem_id.get(kwargs.get("elem_id"), []):
+ try:
+ callback(OnComponent(component=component))
+ except Exception:
+ errors.report(f"Error running on_before_component: {script.filename}", exc_info=True)
+
for script in self.scripts:
try:
script.before_component(component, **kwargs)
@@ -584,12 +866,21 @@ class ScriptRunner:
errors.report(f"Error running before_component: {script.filename}", exc_info=True)
def after_component(self, component, **kwargs):
+ for callback, script in self.on_after_component_elem_id.get(component.elem_id, []):
+ try:
+ callback(OnComponent(component=component))
+ except Exception:
+ errors.report(f"Error running on_after_component: {script.filename}", exc_info=True)
+
for script in self.scripts:
try:
script.after_component(component, **kwargs)
except Exception:
errors.report(f"Error running after_component: {script.filename}", exc_info=True)
+ def script(self, title):
+ return self.title_map.get(title.lower())
+
def reload_sources(self, cache):
for si, script in list(enumerate(self.scripts)):
args_from = script.args_from
@@ -608,7 +899,6 @@ class ScriptRunner:
self.scripts[si].args_from = args_from
self.scripts[si].args_to = args_to
-
def before_hr(self, p):
for script in self.alwayson_scripts:
try:
@@ -617,6 +907,17 @@ class ScriptRunner:
except Exception:
errors.report(f"Error running before_hr: {script.filename}", exc_info=True)
+ def setup_scrips(self, p, *, is_ui=True):
+ for script in self.alwayson_scripts:
+ if not is_ui and script.setup_for_ui_only:
+ continue
+
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.setup(p, *script_args)
+ except Exception:
+ errors.report(f"Error running setup: {script.filename}", exc_info=True)
+
scripts_txt2img: ScriptRunner = None
scripts_img2img: ScriptRunner = None
@@ -631,49 +932,3 @@ def reload_script_body_only():
reload_scripts = load_scripts # compatibility alias
-
-
-def add_classes_to_gradio_component(comp):
- """
- this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others
- """
-
- comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
-
- if getattr(comp, 'multiselect', False):
- comp.elem_classes.append('multiselect')
-
-
-
-def IOComponent_init(self, *args, **kwargs):
- if scripts_current is not None:
- scripts_current.before_component(self, **kwargs)
-
- script_callbacks.before_component_callback(self, **kwargs)
-
- res = original_IOComponent_init(self, *args, **kwargs)
-
- add_classes_to_gradio_component(self)
-
- script_callbacks.after_component_callback(self, **kwargs)
-
- if scripts_current is not None:
- scripts_current.after_component(self, **kwargs)
-
- return res
-
-
-original_IOComponent_init = gr.components.IOComponent.__init__
-gr.components.IOComponent.__init__ = IOComponent_init
-
-
-def BlockContext_init(self, *args, **kwargs):
- res = original_BlockContext_init(self, *args, **kwargs)
-
- add_classes_to_gradio_component(self)
-
- return res
-
-
-original_BlockContext_init = gr.blocks.BlockContext.__init__
-gr.blocks.BlockContext.__init__ = BlockContext_init
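The new loading order is driven by topological_sort(); a quick illustration of its contract (unknown dependencies are ignored rather than raising), assuming it is imported from modules.scripts:

```
from modules.scripts import topological_sort

# each key maps a script name to the scripts it should load after
dependencies = {
    "a.py": [],
    "b.py": ["a.py"],
    "c.py": ["b.py", "missing.py"],  # unknown names are silently ignored
}

print(topological_sort(dependencies))  # ['a.py', 'b.py', 'c.py']
```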
diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py
index bac1335d..901cad08 100644
--- a/modules/scripts_postprocessing.py
+++ b/modules/scripts_postprocessing.py
@@ -1,13 +1,56 @@
+import dataclasses
import os
import gradio as gr
from modules import errors, shared
+@dataclasses.dataclass
+class PostprocessedImageSharedInfo:
+ target_width: int = None
+ target_height: int = None
+
+
class PostprocessedImage:
def __init__(self, image):
self.image = image
self.info = {}
+ self.shared = PostprocessedImageSharedInfo()
+ self.extra_images = []
+ self.nametags = []
+ self.disable_processing = False
+ self.caption = None
+
+ def get_suffix(self, used_suffixes=None):
+ used_suffixes = {} if used_suffixes is None else used_suffixes
+ suffix = "-".join(self.nametags)
+ if suffix:
+ suffix = "-" + suffix
+
+ if suffix not in used_suffixes:
+ used_suffixes[suffix] = 1
+ return suffix
+
+ for i in range(1, 100):
+ proposed_suffix = suffix + "-" + str(i)
+
+ if proposed_suffix not in used_suffixes:
+ used_suffixes[proposed_suffix] = 1
+ return proposed_suffix
+
+ return suffix
+
+ def create_copy(self, new_image, *, nametags=None, disable_processing=False):
+ pp = PostprocessedImage(new_image)
+ pp.shared = self.shared
+ pp.nametags = self.nametags.copy()
+ pp.info = self.info.copy()
+ pp.disable_processing = disable_processing
+
+ if nametags is not None:
+ pp.nametags += nametags
+
+ return pp
class ScriptPostprocessing:
@@ -42,10 +85,17 @@ class ScriptPostprocessing:
pass
- def image_changed(self):
- pass
+ def process_firstpass(self, pp: PostprocessedImage, **args):
+ """
+ Called for all scripts before calling process(). Scripts can examine the image here and set fields
+ of the pp object to communicate things to other scripts.
+ args contains a dictionary with all values returned by components from ui()
+ """
+ pass
+ def image_changed(self):
+ pass
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
@@ -118,16 +168,42 @@ class ScriptPostprocessingRunner:
return inputs
def run(self, pp: PostprocessedImage, args):
- for script in self.scripts_in_preferred_order():
- shared.state.job = script.name
+ scripts = []
+ for script in self.scripts_in_preferred_order():
script_args = args[script.args_from:script.args_to]
process_args = {}
for (name, _component), value in zip(script.controls.items(), script_args):
process_args[name] = value
- script.process(pp, **process_args)
+ scripts.append((script, process_args))
+
+ for script, process_args in scripts:
+ script.process_firstpass(pp, **process_args)
+
+ all_images = [pp]
+
+ for script, process_args in scripts:
+ if shared.state.skipped:
+ break
+
+ shared.state.job = script.name
+
+ for single_image in all_images.copy():
+
+ if not single_image.disable_processing:
+ script.process(single_image, **process_args)
+
+ for extra_image in single_image.extra_images:
+ if not isinstance(extra_image, PostprocessedImage):
+ extra_image = single_image.create_copy(extra_image)
+
+ all_images.append(extra_image)
+
+ single_image.extra_images.clear()
+
+ pp.extra_images = all_images[1:]
def create_args_for_run(self, scripts_args):
if not self.ui_created:
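A sketch of how an extras script could use the new process_firstpass/extra_images flow. The name and ui() attributes are assumed from the rest of the ScriptPostprocessing API (not shown in this excerpt); the shared-info, create_copy, and extra_images parts are the ones added above, and the nametag feeds get_suffix().

```
from PIL import ImageOps

from modules import scripts_postprocessing


class ScriptPostprocessingMirror(scripts_postprocessing.ScriptPostprocessing):
    name = "Mirror (sketch)"

    def ui(self):
        return {}  # no controls for this sketch

    def process_firstpass(self, pp, **args):
        # runs for every script before any process(); stash info for other scripts here
        pp.shared.target_width = pp.image.width
        pp.shared.target_height = pp.image.height

    def process(self, pp, **args):
        mirrored = ImageOps.mirror(pp.image)
        # copies added to extra_images are picked up by the runner and flow
        # through the remaining scripts unless disable_processing is set
        pp.extra_images.append(pp.create_copy(mirrored, nametags=["mirrored"]))
```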
diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index 9fc89dc6..273a7edd 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -3,8 +3,31 @@ import open_clip
import torch
import transformers.utils.hub
+from modules import shared
-class DisableInitialization:
+
+class ReplaceHelper:
+ def __init__(self):
+ self.replaced = []
+
+ def replace(self, obj, field, func):
+ original = getattr(obj, field, None)
+ if original is None:
+ return None
+
+ self.replaced.append((obj, field, original))
+ setattr(obj, field, func)
+
+ return original
+
+ def restore(self):
+ for obj, field, original in self.replaced:
+ setattr(obj, field, original)
+
+ self.replaced.clear()
+
+
+class DisableInitialization(ReplaceHelper):
"""
When an object of this class enters a `with` block, it starts:
- preventing torch's layer initialization functions from working
@@ -21,7 +44,7 @@ class DisableInitialization:
"""
def __init__(self, disable_clip=True):
- self.replaced = []
+ super().__init__()
self.disable_clip = disable_clip
def replace(self, obj, field, func):
@@ -86,8 +109,124 @@ class DisableInitialization:
self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache)
def __exit__(self, exc_type, exc_val, exc_tb):
- for obj, field, original in self.replaced:
- setattr(obj, field, original)
+ self.restore()
- self.replaced.clear()
+class InitializeOnMeta(ReplaceHelper):
+ """
+ Context manager that causes all parameters for linear/conv2d/mha layers to be allocated on meta device,
+ which results in those parameters having no values and taking no memory. model.to() will be broken and
+ will need to be repaired by using LoadStateDictOnMeta below when loading params from state dict.
+
+ Usage:
+ ```
+ with sd_disable_initialization.InitializeOnMeta():
+ sd_model = instantiate_from_config(sd_config.model)
+ ```
+ """
+
+ def __enter__(self):
+ if shared.cmd_opts.disable_model_loading_ram_optimization:
+ return
+
+ def set_device(x):
+ x["device"] = "meta"
+ return x
+
+ linear_init = self.replace(torch.nn.Linear, '__init__', lambda *args, **kwargs: linear_init(*args, **set_device(kwargs)))
+ conv2d_init = self.replace(torch.nn.Conv2d, '__init__', lambda *args, **kwargs: conv2d_init(*args, **set_device(kwargs)))
+ mha_init = self.replace(torch.nn.MultiheadAttention, '__init__', lambda *args, **kwargs: mha_init(*args, **set_device(kwargs)))
+ self.replace(torch.nn.Module, 'to', lambda *args, **kwargs: None)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.restore()
+
+
+class LoadStateDictOnMeta(ReplaceHelper):
+ """
+ Context manager that allows reading parameters from state_dict into a model that has some of its parameters on the meta device.
+ As those parameters are read from state_dict, they will be deleted from it, so by the end state_dict will be mostly empty, to save memory.
+ Meant to be used together with InitializeOnMeta above.
+
+ Usage:
+ ```
+ with sd_disable_initialization.LoadStateDictOnMeta(state_dict):
+ model.load_state_dict(state_dict, strict=False)
+ ```
+ """
+
+ def __init__(self, state_dict, device, weight_dtype_conversion=None):
+ super().__init__()
+ self.state_dict = state_dict
+ self.device = device
+ self.weight_dtype_conversion = weight_dtype_conversion or {}
+ self.default_dtype = self.weight_dtype_conversion.get('')
+
+ def get_weight_dtype(self, key):
+ key_first_term, _ = key.split('.', 1)
+ return self.weight_dtype_conversion.get(key_first_term, self.default_dtype)
+
+ def __enter__(self):
+ if shared.cmd_opts.disable_model_loading_ram_optimization:
+ return
+
+ sd = self.state_dict
+ device = self.device
+
+ def load_from_state_dict(original, module, state_dict, prefix, *args, **kwargs):
+ used_param_keys = []
+
+ for name, param in module._parameters.items():
+ if param is None:
+ continue
+
+ key = prefix + name
+ sd_param = sd.pop(key, None)
+ if sd_param is not None:
+ state_dict[key] = sd_param.to(dtype=self.get_weight_dtype(key))
+ used_param_keys.append(key)
+
+ if param.is_meta:
+ dtype = sd_param.dtype if sd_param is not None else param.dtype
+ module._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device, dtype=dtype), requires_grad=param.requires_grad)
+
+ for name in module._buffers:
+ key = prefix + name
+
+ sd_param = sd.pop(key, None)
+ if sd_param is not None:
+ state_dict[key] = sd_param
+ used_param_keys.append(key)
+
+ original(module, state_dict, prefix, *args, **kwargs)
+
+ for key in used_param_keys:
+ state_dict.pop(key, None)
+
+ def load_state_dict(original, module, state_dict, strict=True):
+ """torch makes a lot of copies of the dictionary with weights, so just deleting entries from state_dict does not help
+ because the same values are stored in multiple copies of the dict. The trick used here is to give torch a dict with
+ all weights on meta device, i.e. deleted, and then it doesn't matter how many copies torch makes.
+
+ In _load_from_state_dict, the correct weight will be obtained from a single dict with the right weights (sd).
+
+ The dangerous thing about this is that if _load_from_state_dict is not called (for example, if some exotic module
+ overloads the function and does not call the original), the state dict will simply fail to load because the weights
+ would be on the meta device.
+ """
+
+ if state_dict is sd:
+ state_dict = {k: v.to(device="meta", dtype=v.dtype) for k, v in state_dict.items()}
+
+ original(module, state_dict, strict=strict)
+
+ module_load_state_dict = self.replace(torch.nn.Module, 'load_state_dict', lambda *args, **kwargs: load_state_dict(module_load_state_dict, *args, **kwargs))
+ module_load_from_state_dict = self.replace(torch.nn.Module, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(module_load_from_state_dict, *args, **kwargs))
+ linear_load_from_state_dict = self.replace(torch.nn.Linear, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(linear_load_from_state_dict, *args, **kwargs))
+ conv2d_load_from_state_dict = self.replace(torch.nn.Conv2d, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(conv2d_load_from_state_dict, *args, **kwargs))
+ mha_load_from_state_dict = self.replace(torch.nn.MultiheadAttention, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(mha_load_from_state_dict, *args, **kwargs))
+ layer_norm_load_from_state_dict = self.replace(torch.nn.LayerNorm, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(layer_norm_load_from_state_dict, *args, **kwargs))
+ group_norm_load_from_state_dict = self.replace(torch.nn.GroupNorm, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(group_norm_load_from_state_dict, *args, **kwargs))
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.restore()
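ReplaceHelper is the generalized mechanism factored out of DisableInitialization; a minimal sketch of the replace/restore contract (the patched target here is arbitrary, chosen only to mirror the "skip torch init" use case):

```
import torch

from modules.sd_disable_initialization import ReplaceHelper

helper = ReplaceHelper()

# swap out a torch init function, keeping a handle to the original
original = helper.replace(torch.nn.init, "kaiming_uniform_", lambda *args, **kwargs: None)

try:
    layer = torch.nn.Linear(8, 8)  # weight init above is skipped while the patch is active
finally:
    helper.restore()  # puts every replaced attribute back
```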
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index c8fdd4f1..e139d996 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -2,15 +2,15 @@ import torch
from torch.nn.functional import silu
from types import MethodType
-import modules.textual_inversion.textual_inversion
-from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet
+from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet, patches
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
-from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr
+from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, xlmr_m18
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
import ldm.modules.diffusionmodules.openaimodel
+import ldm.models.diffusion.ddpm
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
import ldm.modules.encoders.modules
@@ -30,12 +30,20 @@ ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.Cros
ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
# silence new console spam from SD2
-ldm.modules.attention.print = lambda *args: None
-ldm.modules.diffusionmodules.model.print = lambda *args: None
+ldm.modules.attention.print = shared.ldm_print
+ldm.modules.diffusionmodules.model.print = shared.ldm_print
+ldm.util.print = shared.ldm_print
+ldm.models.diffusion.ddpm.print = shared.ldm_print
optimizers = []
current_optimizer: sd_hijack_optimizations.SdOptimization = None
+ldm_patched_forward = sd_unet.create_unet_forward(ldm.modules.diffusionmodules.openaimodel.UNetModel.forward)
+ldm_original_forward = patches.patch(__file__, ldm.modules.diffusionmodules.openaimodel.UNetModel, "forward", ldm_patched_forward)
+
+sgm_patched_forward = sd_unet.create_unet_forward(sgm.modules.diffusionmodules.openaimodel.UNetModel.forward)
+sgm_original_forward = patches.patch(__file__, sgm.modules.diffusionmodules.openaimodel.UNetModel, "forward", sgm_patched_forward)
+
def list_optimizers():
new_optimizers = script_callbacks.list_optimizers_callback()
@@ -164,12 +172,13 @@ class StableDiffusionModelHijack:
clip = None
optimization_method = None
- embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase()
-
def __init__(self):
+ import modules.textual_inversion.textual_inversion
+
self.extra_generation_params = {}
self.comments = []
+ self.embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase()
self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
def apply_optimizations(self, option=None):
@@ -179,6 +188,20 @@ class StableDiffusionModelHijack:
errors.display(e, "applying cross attention optimization")
undo_optimizations()
+ def convert_sdxl_to_ssd(self, m):
+ """Converts an SDXL model to a Segmind Stable Diffusion model (see https://huggingface.co/segmind/SSD-1B)"""
+
+ delattr(m.model.diffusion_model.middle_block, '1')
+ delattr(m.model.diffusion_model.middle_block, '2')
+ for i in ['9', '8', '7', '6', '5', '4']:
+ delattr(m.model.diffusion_model.input_blocks[7][1].transformer_blocks, i)
+ delattr(m.model.diffusion_model.input_blocks[8][1].transformer_blocks, i)
+ delattr(m.model.diffusion_model.output_blocks[0][1].transformer_blocks, i)
+ delattr(m.model.diffusion_model.output_blocks[1][1].transformer_blocks, i)
+ delattr(m.model.diffusion_model.output_blocks[4][1].transformer_blocks, '1')
+ delattr(m.model.diffusion_model.output_blocks[5][1].transformer_blocks, '1')
+ devices.torch_gc()
+
def hijack(self, m):
conditioner = getattr(m, 'conditioner', None)
if conditioner:
@@ -197,7 +220,7 @@ class StableDiffusionModelHijack:
conditioner.embedders[i] = sd_hijack_clip.FrozenCLIPEmbedderForSDXLWithCustomWords(embedder, self)
text_cond_models.append(conditioner.embedders[i])
if typename == 'FrozenOpenCLIPEmbedder2':
- embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self)
+ embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self, textual_inversion_key='clip_g')
conditioner.embedders[i] = sd_hijack_open_clip.FrozenOpenCLIPEmbedder2WithCustomWords(embedder, self)
text_cond_models.append(conditioner.embedders[i])
@@ -206,7 +229,7 @@ class StableDiffusionModelHijack:
else:
m.cond_stage_model = conditioner
- if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
+ if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation or type(m.cond_stage_model) == xlmr_m18.BertSeriesModelWithTransformation:
model_embeddings = m.cond_stage_model.roberta.embeddings
model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self)
m.cond_stage_model = sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords(m.cond_stage_model, self)
@@ -237,13 +260,34 @@ class StableDiffusionModelHijack:
self.layers = flatten(m)
- if not hasattr(ldm.modules.diffusionmodules.openaimodel, 'copy_of_UNetModel_forward_for_webui'):
- ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui = ldm.modules.diffusionmodules.openaimodel.UNetModel.forward
+ import modules.models.diffusion.ddpm_edit
+
+ if isinstance(m, ldm.models.diffusion.ddpm.LatentDiffusion):
+ sd_unet.original_forward = ldm_original_forward
+ elif isinstance(m, modules.models.diffusion.ddpm_edit.LatentDiffusion):
+ sd_unet.original_forward = ldm_original_forward
+ elif isinstance(m, sgm.models.diffusion.DiffusionEngine):
+ sd_unet.original_forward = sgm_original_forward
+ else:
+ sd_unet.original_forward = None
- ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward
def undo_hijack(self, m):
- if type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords:
+ conditioner = getattr(m, 'conditioner', None)
+ if conditioner:
+ for i in range(len(conditioner.embedders)):
+ embedder = conditioner.embedders[i]
+ if isinstance(embedder, (sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords, sd_hijack_open_clip.FrozenOpenCLIPEmbedder2WithCustomWords)):
+ embedder.wrapped.model.token_embedding = embedder.wrapped.model.token_embedding.wrapped
+ conditioner.embedders[i] = embedder.wrapped
+ if isinstance(embedder, sd_hijack_clip.FrozenCLIPEmbedderForSDXLWithCustomWords):
+ embedder.wrapped.transformer.text_model.embeddings.token_embedding = embedder.wrapped.transformer.text_model.embeddings.token_embedding.wrapped
+ conditioner.embedders[i] = embedder.wrapped
+
+ if hasattr(m, 'cond_stage_model'):
+ delattr(m, 'cond_stage_model')
+
+ elif type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped
elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
@@ -263,7 +307,6 @@ class StableDiffusionModelHijack:
self.layers = None
self.clip = None
- ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui
def apply_circular(self, enable):
if self.circular_enabled == enable:
@@ -292,10 +335,11 @@ class StableDiffusionModelHijack:
class EmbeddingsWithFixes(torch.nn.Module):
- def __init__(self, wrapped, embeddings):
+ def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'):
super().__init__()
self.wrapped = wrapped
self.embeddings = embeddings
+ self.textual_inversion_key = textual_inversion_key
def forward(self, input_ids):
batch_fixes = self.embeddings.fixes
@@ -309,7 +353,8 @@ class EmbeddingsWithFixes(torch.nn.Module):
vecs = []
for fixes, tensor in zip(batch_fixes, inputs_embeds):
for offset, embedding in fixes:
- emb = devices.cond_cast_unet(embedding.vec)
+ vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec
+ emb = devices.cond_cast_unet(vec)
emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
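The UNet forward hijack now goes through modules.patches; judging from the call above, patches.patch(key, obj, field, replacement) swaps the attribute and returns the original so the replacement can delegate to it. A generic sketch of that pattern (Greeter is a made-up class for illustration):

```
from modules import patches


class Greeter:
    def hello(self):
        return "hello"


# keep the returned original so the replacement can call through to it
original_hello = patches.patch(__name__, Greeter, "hello", lambda self: original_hello(self).upper())

print(Greeter().hello())  # "HELLO"
```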
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 16a5500e..8f29057a 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -161,7 +161,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
position += 1
continue
- emb_len = int(embedding.vec.shape[0])
+ emb_len = int(embedding.vectors)
if len(chunk.tokens) + emb_len > self.chunk_length:
next_chunk()
@@ -245,6 +245,8 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
hashes.append(f"{name}: {shorthash}")
if hashes:
+ if self.hijack.extra_generation_params.get("TI hashes"):
+ hashes.append(self.hijack.extra_generation_params.get("TI hashes"))
self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)
if getattr(self.wrapped, 'return_pooled', False):
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py
deleted file mode 100644
index c1977b19..00000000
--- a/modules/sd_hijack_inpainting.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import torch
-
-import ldm.models.diffusion.ddpm
-import ldm.models.diffusion.ddim
-import ldm.models.diffusion.plms
-
-from ldm.models.diffusion.ddim import noise_like
-from ldm.models.diffusion.sampling_util import norm_thresholding
-
-
-@torch.no_grad()
-def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, dynamic_threshold=None):
- b, *_, device = *x.shape, x.device
-
- def get_model_output(x, t):
- if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
- e_t = self.model.apply_model(x, t, c)
- else:
- x_in = torch.cat([x] * 2)
- t_in = torch.cat([t] * 2)
-
- if isinstance(c, dict):
- assert isinstance(unconditional_conditioning, dict)
- c_in = {}
- for k in c:
- if isinstance(c[k], list):
- c_in[k] = [
- torch.cat([unconditional_conditioning[k][i], c[k][i]])
- for i in range(len(c[k]))
- ]
- else:
- c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
- else:
- c_in = torch.cat([unconditional_conditioning, c])
-
- e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
- e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
- if score_corrector is not None:
- assert self.model.parameterization == "eps"
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
- return e_t
-
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-
- def get_x_prev_and_pred_x0(e_t, index):
- # select parameters corresponding to the currently considered timestep
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
- # current prediction for x_0
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
- if quantize_denoised:
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
- if dynamic_threshold is not None:
- pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
- # direction pointing to x_t
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
- return x_prev, pred_x0
-
- e_t = get_model_output(x, t)
- if len(old_eps) == 0:
- # Pseudo Improved Euler (2nd order)
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
- e_t_next = get_model_output(x_prev, t_next)
- e_t_prime = (e_t + e_t_next) / 2
- elif len(old_eps) == 1:
- # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (3 * e_t - old_eps[-1]) / 2
- elif len(old_eps) == 2:
- # 3nd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
- elif len(old_eps) >= 3:
- # 4nd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
-
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
-
- return x_prev, pred_x0, e_t
-
-
-def do_inpainting_hijack():
- # p_sample_plms is needed because PLMS can't work with dicts as conditionings
-
- ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index b5f85ba5..7f9e328d 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import math
import psutil
+import platform
import torch
from torch import einsum
@@ -94,7 +95,10 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
class SdOptimizationSubQuad(SdOptimization):
name = "sub-quadratic"
cmd_opt = "opt_sub_quad_attention"
- priority = 10
+
+ @property
+ def priority(self):
+ return 1000 if shared.device.type == 'mps' else 10
def apply(self):
ldm.modules.attention.CrossAttention.forward = sub_quad_attention_forward
@@ -120,7 +124,7 @@ class SdOptimizationInvokeAI(SdOptimization):
@property
def priority(self):
- return 1000 if not torch.cuda.is_available() else 10
+ return 1000 if shared.device.type != 'mps' and not torch.cuda.is_available() else 10
def apply(self):
ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_invokeAI
@@ -256,9 +260,9 @@ def split_cross_attention_forward(self, x, context=None, mask=None, **kwargs):
raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
- slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
+ slice_size = q.shape[1] // steps
for i in range(0, q.shape[1], slice_size):
- end = i + slice_size
+ end = min(i + slice_size, q.shape[1])
s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k)
s2 = s1.softmax(dim=-1, dtype=q.dtype)
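With the change above, slice_size = q.shape[1] // steps is used unconditionally and end is clamped to q.shape[1], so the loop still covers every query row when the row count is not divisible by steps (provided steps <= q.shape[1], so slice_size is at least 1). A small arithmetic sketch:

```
def chunk_bounds(n_rows, steps):
    """Reproduce the loop bounds from split_cross_attention_forward after this change."""
    slice_size = n_rows // steps
    return [(i, min(i + slice_size, n_rows)) for i in range(0, n_rows, slice_size)]

print(chunk_bounds(10, 4))  # [(0, 2), (2, 4), (4, 6), (6, 8), (8, 10)] - all 10 rows covered
```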
@@ -427,7 +431,10 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_
qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
if chunk_threshold is None:
- chunk_threshold_bytes = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7)
+ if q.device.type == 'mps':
+ chunk_threshold_bytes = 268435456 * (2 if platform.processor() == 'i386' else bytes_per_token)
+ else:
+ chunk_threshold_bytes = int(get_available_vram() * 0.7)
elif chunk_threshold == 0:
chunk_threshold_bytes = None
else:
diff --git a/modules/sd_models.py b/modules/sd_models.py
index fb31a793..50bc209e 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -1,23 +1,22 @@
import collections
import os.path
import sys
-import gc
import threading
import torch
import re
import safetensors.torch
-from omegaconf import OmegaConf
+from omegaconf import OmegaConf, ListConfig
from os import mkdir
from urllib import request
import ldm.modules.midas as midas
from ldm.util import instantiate_from_config
-from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl
-from modules.sd_hijack_inpainting import do_inpainting_hijack
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches
from modules.timer import Timer
import tomesd
+import numpy as np
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(paths.models_path, model_dir))
@@ -28,13 +27,34 @@ checkpoint_alisases = checkpoint_aliases # for compatibility with old name
checkpoints_loaded = collections.OrderedDict()
+def replace_key(d, key, new_key, value):
+ keys = list(d.keys())
+
+ d[new_key] = value
+
+ if key not in keys:
+ return d
+
+ index = keys.index(key)
+ keys[index] = new_key
+
+ new_d = {k: d[k] for k in keys}
+
+ d.clear()
+ d.update(new_d)
+ return d
+
+
class CheckpointInfo:
def __init__(self, filename):
self.filename = filename
abspath = os.path.abspath(filename)
+ abs_ckpt_dir = os.path.abspath(shared.cmd_opts.ckpt_dir) if shared.cmd_opts.ckpt_dir is not None else None
+
+ self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"
- if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
- name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
+ if abs_ckpt_dir and abspath.startswith(abs_ckpt_dir):
+ name = abspath.replace(abs_ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
@@ -43,6 +63,19 @@ class CheckpointInfo:
if name.startswith("\\") or name.startswith("/"):
name = name[1:]
+ def read_metadata():
+ metadata = read_metadata_from_safetensors(filename)
+ self.modelspec_thumbnail = metadata.pop('modelspec.thumbnail', None)
+
+ return metadata
+
+ self.metadata = {}
+ if self.is_safetensors:
+ try:
+ self.metadata = cache.cached_data_for_file('safetensors-metadata', "checkpoint/" + name, filename, read_metadata)
+ except Exception as e:
+ errors.display(e, f"reading metadata for {filename}")
+
self.name = name
self.name_for_extra = os.path.splitext(os.path.basename(filename))[0]
self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
@@ -52,17 +85,11 @@ class CheckpointInfo:
self.shorthash = self.sha256[0:10] if self.sha256 else None
self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'
+ self.short_title = self.name_for_extra if self.shorthash is None else f'{self.name_for_extra} [{self.shorthash}]'
- self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else [])
-
- self.metadata = {}
-
- _, ext = os.path.splitext(self.filename)
- if ext.lower() == ".safetensors":
- try:
- self.metadata = read_metadata_from_safetensors(filename)
- except Exception as e:
- errors.display(e, f"reading checkpoint metadata: {filename}")
+ self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]']
+ if self.shorthash:
+ self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']
def register(self):
checkpoints_list[self.title] = self
@@ -74,13 +101,20 @@ class CheckpointInfo:
if self.sha256 is None:
return
- self.shorthash = self.sha256[0:10]
+ shorthash = self.sha256[0:10]
+ if self.shorthash == self.sha256[0:10]:
+ return self.shorthash
+
+ self.shorthash = shorthash
if self.shorthash not in self.ids:
- self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]']
+ self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']
- checkpoints_list.pop(self.title)
+ old_title = self.title
self.title = f'{self.name} [{self.shorthash}]'
+ self.short_title = f'{self.name_for_extra} [{self.shorthash}]'
+
+ replace_key(checkpoints_list, old_title, self.title, self)
self.register()
return self.shorthash
@@ -96,19 +130,16 @@ except Exception:
def setup_model():
+ """called once at startup to do various one-time tasks related to SD models"""
+
os.makedirs(model_path, exist_ok=True)
enable_midas_autodownload()
+ patch_given_betas()
-def checkpoint_tiles():
- def convert(name):
- return int(name) if name.isdigit() else name.lower()
-
- def alphanumeric_key(key):
- return [convert(c) for c in re.split('([0-9]+)', key)]
-
- return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key)
+def checkpoint_tiles(use_short=False):
+ return [x.short_title if use_short else x.title for x in checkpoints_list.values()]
def list_models():
@@ -131,12 +162,18 @@ def list_models():
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
- for filename in sorted(model_list, key=str.lower):
+ for filename in model_list:
checkpoint_info = CheckpointInfo(filename)
checkpoint_info.register()
+re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")
+
+
def get_closet_checkpoint_match(search_string):
+ if not search_string:
+ return None
+
checkpoint_info = checkpoint_aliases.get(search_string, None)
if checkpoint_info is not None:
return checkpoint_info
@@ -145,6 +182,11 @@ def get_closet_checkpoint_match(search_string):
if found:
return found[0]
+ search_string_without_checksum = re.sub(re_strip_checksum, '', search_string)
+ found = sorted([info for info in checkpoints_list.values() if search_string_without_checksum in info.title], key=lambda x: len(x.title))
+ if found:
+ return found[0]
+
return None
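A small sketch of what the new checksum-stripping fallback does, assuming only the re_strip_checksum pattern defined above: a search string that still carries a "[hash]" suffix can match a checkpoint whose title does not have that exact hash. The file name is made up for illustration.

import re

re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")

# "model.safetensors [31e35c80fc]" -> "model.safetensors"
print(re_strip_checksum.sub('', "model.safetensors [31e35c80fc]"))

# strings without a trailing checksum pass through unchanged
print(re_strip_checksum.sub('', "model.safetensors"))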
@@ -188,15 +230,19 @@ def select_checkpoint():
return checkpoint_info
-checkpoint_dict_replacements = {
+checkpoint_dict_replacements_sd1 = {
'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}
+checkpoint_dict_replacements_sd2_turbo = { # Converts SD 2.1 Turbo from SGM to LDM format.
+ 'conditioner.embedders.0.': 'cond_stage_model.',
+}
+
-def transform_checkpoint_dict_key(k):
- for text, replacement in checkpoint_dict_replacements.items():
+def transform_checkpoint_dict_key(k, replacements):
+ for text, replacement in replacements.items():
if k.startswith(text):
k = replacement + k[len(text):]
@@ -207,9 +253,14 @@ def get_state_dict_from_checkpoint(pl_sd):
pl_sd = pl_sd.pop("state_dict", pl_sd)
pl_sd.pop("state_dict", None)
+ is_sd2_turbo = 'conditioner.embedders.0.model.ln_final.weight' in pl_sd and pl_sd['conditioner.embedders.0.model.ln_final.weight'].size()[0] == 1024
+
sd = {}
for k, v in pl_sd.items():
- new_key = transform_checkpoint_dict_key(k)
+ if is_sd2_turbo:
+ new_key = transform_checkpoint_dict_key(k, checkpoint_dict_replacements_sd2_turbo)
+ else:
+ new_key = transform_checkpoint_dict_key(k, checkpoint_dict_replacements_sd1)
if new_key is not None:
sd[new_key] = v
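A minimal, self-contained sketch of the key transformation above; the trailing return of the unchanged key is assumed from the surrounding code that this hunk does not show, and the state dict keys are illustrative.

checkpoint_dict_replacements_sd1 = {
    'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
    'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
    'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
}

def transform_checkpoint_dict_key(k, replacements):
    # rewrite any key that starts with one of the known old-style prefixes
    for text, replacement in replacements.items():
        if k.startswith(text):
            k = replacement + k[len(text):]
    return k

# old-style SD1 key is rewritten to the text_model layout
print(transform_checkpoint_dict_key('cond_stage_model.transformer.encoder.layers.0.mlp.fc1.weight', checkpoint_dict_replacements_sd1))
# keys that match no prefix are returned unchanged
print(transform_checkpoint_dict_key('model.diffusion_model.input_blocks.0.0.weight', checkpoint_dict_replacements_sd1))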
@@ -271,6 +322,8 @@ def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
if checkpoint_info in checkpoints_loaded:
# use checkpoint cache
print(f"Loading weights [{sd_model_hash}] from cache")
+ # move to end as latest
+ checkpoints_loaded.move_to_end(checkpoint_info)
return checkpoints_loaded[checkpoint_info]
print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
@@ -280,11 +333,45 @@ def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
return res
+class SkipWritingToConfig:
+ """This context manager prevents load_model_weights from writing checkpoint name to the config when it loads weight."""
+
+ skip = False
+ previous = None
+
+ def __enter__(self):
+ self.previous = SkipWritingToConfig.skip
+ SkipWritingToConfig.skip = True
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ SkipWritingToConfig.skip = self.previous
+
+
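A short usage sketch of the context manager above, mirroring how apply_refiner uses it later in this patch: weight loads performed inside the with-block do not overwrite shared.opts.data["sd_model_checkpoint"]. The checkpoint name is illustrative.

# hypothetical call site: temporarily load other weights without making them
# the remembered "current" checkpoint in the settings
with SkipWritingToConfig():
    reload_model_weights(info=get_closet_checkpoint_match("refiner.safetensors"))
# on exit, SkipWritingToConfig.skip is restored to its previous value,
# so subsequent loads write the checkpoint name to the config again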
+def check_fp8(model):
+ if model is None:
+ return None
+ if devices.get_optimal_device_name() == "mps":
+ enable_fp8 = False
+ elif shared.opts.fp8_storage == "Enable":
+ enable_fp8 = True
+ elif getattr(model, "is_sdxl", False) and shared.opts.fp8_storage == "Enable for SDXL":
+ enable_fp8 = True
+ else:
+ enable_fp8 = False
+ return enable_fp8
+
+
def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
sd_model_hash = checkpoint_info.calculate_shorthash()
timer.record("calculate hash")
- shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title
+ if devices.fp8:
+ # prevent the model from loading the state dict in fp8
+ model.half()
+
+ if not SkipWritingToConfig.skip:
+ shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title
if state_dict is None:
state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
@@ -292,23 +379,32 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
model.is_sdxl = hasattr(model, 'conditioner')
model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
model.is_sd1 = not model.is_sdxl and not model.is_sd2
-
+ model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys()
if model.is_sdxl:
sd_models_xl.extend_sdxl(model)
- model.load_state_dict(state_dict, strict=False)
- del state_dict
- timer.record("apply weights to model")
+ if model.is_ssd:
+ sd_hijack.model_hijack.convert_sdxl_to_ssd(model)
if shared.opts.sd_checkpoint_cache > 0:
# cache newly loaded model
- checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
+ checkpoints_loaded[checkpoint_info] = state_dict.copy()
+
+ model.load_state_dict(state_dict, strict=False)
+ timer.record("apply weights to model")
+
+ del state_dict
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
timer.record("apply channels_last")
- if not shared.cmd_opts.no_half:
+ if shared.cmd_opts.no_half:
+ model.float()
+ model.alphas_cumprod_original = model.alphas_cumprod
+ devices.dtype_unet = torch.float32
+ timer.record("apply float()")
+ else:
vae = model.first_stage_model
depth_model = getattr(model, 'depth_model', None)
@@ -319,14 +415,40 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
if shared.cmd_opts.upcast_sampling and depth_model:
model.depth_model = None
+ alphas_cumprod = model.alphas_cumprod
+ model.alphas_cumprod = None
model.half()
+ model.alphas_cumprod = alphas_cumprod
+ model.alphas_cumprod_original = alphas_cumprod
model.first_stage_model = vae
if depth_model:
model.depth_model = depth_model
+ devices.dtype_unet = torch.float16
timer.record("apply half()")
- devices.dtype_unet = torch.float16 if model.is_sdxl and not shared.cmd_opts.no_half else model.model.diffusion_model.dtype
+ for module in model.modules():
+ if hasattr(module, 'fp16_weight'):
+ del module.fp16_weight
+ if hasattr(module, 'fp16_bias'):
+ del module.fp16_bias
+
+ if check_fp8(model):
+ devices.fp8 = True
+ first_stage = model.first_stage_model
+ model.first_stage_model = None
+ for module in model.modules():
+ if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
+ if shared.opts.cache_fp16_weight:
+ module.fp16_weight = module.weight.data.clone().cpu().half()
+ if module.bias is not None:
+ module.fp16_bias = module.bias.data.clone().cpu().half()
+ module.to(torch.float8_e4m3fn)
+ model.first_stage_model = first_stage
+ timer.record("apply fp8")
+ else:
+ devices.fp8 = False
+
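A self-contained sketch of the same fp8 storage idea applied to a toy module, assuming a PyTorch build that provides torch.float8_e4m3fn (2.1 or newer); the helper name and option flag are stand-ins, not webui objects.

import torch

def cast_linear_and_conv_to_fp8(model: torch.nn.Module, cache_fp16_weight: bool = False):
    # mirror of the loop above: optionally keep an fp16 CPU copy, then store weights as float8
    for module in model.modules():
        if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
            if cache_fp16_weight:
                module.fp16_weight = module.weight.data.clone().cpu().half()
                if module.bias is not None:
                    module.fp16_bias = module.bias.data.clone().cpu().half()
            module.to(torch.float8_e4m3fn)

toy = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU(), torch.nn.Linear(8, 4))
cast_linear_and_conv_to_fp8(toy, cache_fp16_weight=True)
print(toy[0].weight.dtype)  # torch.float8_e4m3fn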
devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
model.first_stage_model.to(devices.dtype_vae)
@@ -346,7 +468,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
sd_vae.delete_base_vae()
sd_vae.clear_loaded_vae()
- vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename)
+ vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename).tuple()
sd_vae.load_vae(model, vae_file, vae_source)
timer.record("load VAE")
@@ -394,6 +516,20 @@ def enable_midas_autodownload():
midas.api.load_model = load_model_wrapper
+def patch_given_betas():
+ import ldm.models.diffusion.ddpm
+
+ def patched_register_schedule(*args, **kwargs):
+ """a modified version of register_schedule function that converts plain list from Omegaconf into numpy"""
+
+ if isinstance(args[1], ListConfig):
+ args = (args[0], np.array(args[1]), *args[2:])
+
+ original_register_schedule(*args, **kwargs)
+
+ original_register_schedule = patches.patch(__name__, ldm.models.diffusion.ddpm.DDPM, 'register_schedule', patched_register_schedule)
+
+
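The patch_given_betas helper above relies on modules.patches to wrap DDPM.register_schedule. Below is a generic sketch of the same wrap-and-convert pattern using plain attribute assignment instead of the webui patches module; the Scheduler class and its arguments are invented for illustration.

import numpy as np

class Scheduler:
    def register_schedule(self, given_betas=None):
        # the original implementation expects an ndarray (or None), not a plain list
        assert given_betas is None or isinstance(given_betas, np.ndarray)
        self.betas = given_betas

original_register_schedule = Scheduler.register_schedule

def patched_register_schedule(self, given_betas=None):
    # convert a plain list (as an OmegaConf ListConfig would appear) into a numpy array
    if isinstance(given_betas, list):
        given_betas = np.array(given_betas)
    original_register_schedule(self, given_betas=given_betas)

Scheduler.register_schedule = patched_register_schedule

Scheduler().register_schedule(given_betas=[0.1, 0.2, 0.3])  # no longer trips the assert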
def repair_config(sd_config):
if not hasattr(sd_config.model.params, "use_ema"):
@@ -423,6 +559,7 @@ sdxl_refiner_clip_weight = 'conditioner.embedders.0.model.ln_final.weight'
class SdModelData:
def __init__(self):
self.sd_model = None
+ self.loaded_sd_models = []
self.was_loaded_at_least_once = False
self.lock = threading.Lock()
@@ -437,6 +574,7 @@ class SdModelData:
try:
load_model()
+
except Exception as e:
errors.display(e, "loading stable diffusion model", full_traceback=True)
print("", file=sys.stderr)
@@ -445,14 +583,30 @@ class SdModelData:
return self.sd_model
- def set_sd_model(self, v):
+ def set_sd_model(self, v, already_loaded=False):
self.sd_model = v
+ if already_loaded:
+ sd_vae.base_vae = getattr(v, "base_vae", None)
+ sd_vae.loaded_vae_file = getattr(v, "loaded_vae_file", None)
+ sd_vae.checkpoint_info = v.sd_checkpoint_info
+
+ try:
+ self.loaded_sd_models.remove(v)
+ except ValueError:
+ pass
+
+ if v is not None:
+ self.loaded_sd_models.insert(0, v)
model_data = SdModelData()
def get_empty_cond(sd_model):
+
+ p = processing.StableDiffusionProcessingTxt2Img()
+ extra_networks.activate(p, {})
+
if hasattr(sd_model, 'conditioner'):
d = sd_model.get_learned_conditioning([""])
return d['crossattn']
@@ -460,20 +614,46 @@ def get_empty_cond(sd_model):
return sd_model.cond_stage_model([""])
+def send_model_to_cpu(m):
+ if m.lowvram:
+ lowvram.send_everything_to_cpu()
+ else:
+ m.to(devices.cpu)
+
+ devices.torch_gc()
+
+
+def model_target_device(m):
+ if lowvram.is_needed(m):
+ return devices.cpu
+ else:
+ return devices.device
+
+
+def send_model_to_device(m):
+ lowvram.apply(m)
+
+ if not m.lowvram:
+ m.to(shared.device)
+
+
+def send_model_to_trash(m):
+ m.to(device="meta")
+ devices.torch_gc()
+
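A minimal sketch of the "send to trash" idea used by send_model_to_trash above, assuming a recent PyTorch: moving a module onto the meta device drops the parameter storage so the memory can be reclaimed, while the module object and its shapes survive. The toy module is illustrative.

import torch

toy = torch.nn.Linear(4, 4)
toy.to(device="meta")      # parameters become meta tensors with no backing storage
print(toy.weight.device)   # meta
print(toy.weight.numel())  # shape metadata is kept, but there is no data to read
# a follow-up garbage collection pass (what devices.torch_gc() does) then releases the memory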
def load_model(checkpoint_info=None, already_loaded_state_dict=None):
- from modules import lowvram, sd_hijack
+ from modules import sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
+ timer = Timer()
+
if model_data.sd_model:
- sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
+ send_model_to_trash(model_data.sd_model)
model_data.sd_model = None
- gc.collect()
devices.torch_gc()
- do_inpainting_hijack()
-
- timer = Timer()
+ timer.record("unload existing model")
if already_loaded_state_dict is not None:
state_dict = already_loaded_state_dict
@@ -495,25 +675,36 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
sd_model = None
try:
with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip):
- sd_model = instantiate_from_config(sd_config.model)
- except Exception:
- pass
+ with sd_disable_initialization.InitializeOnMeta():
+ sd_model = instantiate_from_config(sd_config.model)
+
+ except Exception as e:
+ errors.display(e, "creating model quickly", full_traceback=True)
if sd_model is None:
print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
- sd_model = instantiate_from_config(sd_config.model)
+
+ with sd_disable_initialization.InitializeOnMeta():
+ sd_model = instantiate_from_config(sd_config.model)
sd_model.used_config = checkpoint_config
timer.record("create model")
- load_model_weights(sd_model, checkpoint_info, state_dict, timer)
-
- if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
- lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
+ if shared.cmd_opts.no_half:
+ weight_dtype_conversion = None
else:
- sd_model.to(shared.device)
+ weight_dtype_conversion = {
+ 'first_stage_model': None,
+ 'alphas_cumprod': None,
+ '': torch.float16,
+ }
+
+ with sd_disable_initialization.LoadStateDictOnMeta(state_dict, device=model_target_device(sd_model), weight_dtype_conversion=weight_dtype_conversion):
+ load_model_weights(sd_model, checkpoint_info, state_dict, timer)
+ timer.record("load weights from state dict")
+ send_model_to_device(sd_model)
timer.record("move model to device")
sd_hijack.model_hijack.hijack(sd_model)
@@ -521,7 +712,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
timer.record("hijack")
sd_model.eval()
- model_data.sd_model = sd_model
+ model_data.set_sd_model(sd_model)
model_data.was_loaded_at_least_once = True
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model
@@ -542,10 +733,70 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
return sd_model
-def reload_model_weights(sd_model=None, info=None):
- from modules import lowvram, devices, sd_hijack
+def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
+ """
+ Checks whether the checkpoint described by checkpoint_info is already loaded in model_data.loaded_sd_models.
+ If it is, returns it (moving it to GPU if necessary, and moving the currently loaded model to CPU if necessary).
+ If not, returns a model that can be used to load weights from checkpoint_info's file.
+ If no such model exists, returns None.
+ Additionally deletes loaded models that are over the limit set in settings (sd_checkpoints_limit).
+ """
+
+ already_loaded = None
+ for i in reversed(range(len(model_data.loaded_sd_models))):
+ loaded_model = model_data.loaded_sd_models[i]
+ if loaded_model.sd_checkpoint_info.filename == checkpoint_info.filename:
+ already_loaded = loaded_model
+ continue
+
+ if len(model_data.loaded_sd_models) > shared.opts.sd_checkpoints_limit > 0:
+ print(f"Unloading model {len(model_data.loaded_sd_models)} over the limit of {shared.opts.sd_checkpoints_limit}: {loaded_model.sd_checkpoint_info.title}")
+ model_data.loaded_sd_models.pop()
+ send_model_to_trash(loaded_model)
+ timer.record("send model to trash")
+
+ if shared.opts.sd_checkpoints_keep_in_cpu:
+ send_model_to_cpu(sd_model)
+ timer.record("send model to cpu")
+
+ if already_loaded is not None:
+ send_model_to_device(already_loaded)
+ timer.record("send model to device")
+
+ model_data.set_sd_model(already_loaded, already_loaded=True)
+
+ if not SkipWritingToConfig.skip:
+ shared.opts.data["sd_model_checkpoint"] = already_loaded.sd_checkpoint_info.title
+ shared.opts.data["sd_checkpoint_hash"] = already_loaded.sd_checkpoint_info.sha256
+
+ print(f"Using already loaded model {already_loaded.sd_checkpoint_info.title}: done in {timer.summary()}")
+ sd_vae.reload_vae_weights(already_loaded)
+ return model_data.sd_model
+ elif shared.opts.sd_checkpoints_limit > 1 and len(model_data.loaded_sd_models) < shared.opts.sd_checkpoints_limit:
+ print(f"Loading model {checkpoint_info.title} ({len(model_data.loaded_sd_models) + 1} out of {shared.opts.sd_checkpoints_limit})")
+
+ model_data.sd_model = None
+ load_model(checkpoint_info)
+ return model_data.sd_model
+ elif len(model_data.loaded_sd_models) > 0:
+ sd_model = model_data.loaded_sd_models.pop()
+ model_data.sd_model = sd_model
+
+ sd_vae.base_vae = getattr(sd_model, "base_vae", None)
+ sd_vae.loaded_vae_file = getattr(sd_model, "loaded_vae_file", None)
+ sd_vae.checkpoint_info = sd_model.sd_checkpoint_info
+
+ print(f"Reusing loaded model {sd_model.sd_checkpoint_info.title} to load {checkpoint_info.title}")
+ return sd_model
+ else:
+ return None
+
+
+def reload_model_weights(sd_model=None, info=None, forced_reload=False):
checkpoint_info = info or select_checkpoint()
+ timer = Timer()
+
if not sd_model:
sd_model = model_data.sd_model
@@ -553,20 +804,21 @@ def reload_model_weights(sd_model=None, info=None):
current_checkpoint_info = None
else:
current_checkpoint_info = sd_model.sd_checkpoint_info
- if sd_model.sd_model_checkpoint == checkpoint_info.filename:
- return
-
- sd_unet.apply_unet("None")
+ if check_fp8(sd_model) != devices.fp8:
+ # load from state dict again to prevent extra numerical errors
+ forced_reload = True
+ elif sd_model.sd_model_checkpoint == checkpoint_info.filename and not forced_reload:
+ return sd_model
- if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
- lowvram.send_everything_to_cpu()
- else:
- sd_model.to(devices.cpu)
+ sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
+ if not forced_reload and sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename:
+ return sd_model
+ if sd_model is not None:
+ sd_unet.apply_unet("None")
+ send_model_to_cpu(sd_model)
sd_hijack.model_hijack.undo_hijack(sd_model)
- timer = Timer()
-
state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
@@ -574,7 +826,9 @@ def reload_model_weights(sd_model=None, info=None):
timer.record("find config")
if sd_model is None or checkpoint_config != sd_model.used_config:
- del sd_model
+ if sd_model is not None:
+ send_model_to_trash(sd_model)
+
load_model(checkpoint_info, already_loaded_state_dict=state_dict)
return model_data.sd_model
@@ -591,28 +845,20 @@ def reload_model_weights(sd_model=None, info=None):
script_callbacks.model_loaded_callback(sd_model)
timer.record("script callbacks")
- if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
+ if not sd_model.lowvram:
sd_model.to(devices.device)
timer.record("move model to device")
print(f"Weights loaded in {timer.summary()}.")
+ model_data.set_sd_model(sd_model)
+ sd_unet.apply_unet()
+
return sd_model
def unload_model_weights(sd_model=None, info=None):
- from modules import devices, sd_hijack
- timer = Timer()
-
- if model_data.sd_model:
- model_data.sd_model.to(devices.cpu)
- sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
- model_data.sd_model = None
- sd_model = None
- gc.collect()
- devices.torch_gc()
-
- print(f"Unloaded weights {timer.summary()}.")
+ send_model_to_cpu(sd_model or shared.sd_model)
return sd_model
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
index 8266fa39..b38137eb 100644
--- a/modules/sd_models_config.py
+++ b/modules/sd_models_config.py
@@ -2,7 +2,7 @@ import os
import torch
-from modules import shared, paths, sd_disable_initialization
+from modules import shared, paths, sd_disable_initialization, devices
sd_configs_path = shared.sd_configs_path
sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
@@ -15,13 +15,14 @@ config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
config_sdxl = os.path.join(sd_xl_repo_configs_path, "sd_xl_base.yaml")
config_sdxl_refiner = os.path.join(sd_xl_repo_configs_path, "sd_xl_refiner.yaml")
+config_sdxl_inpainting = os.path.join(sd_configs_path, "sd_xl_inpaint.yaml")
config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
-
+config_alt_diffusion_m18 = os.path.join(sd_configs_path, "alt-diffusion-m18-inference.yaml")
def is_using_v_parameterization_for_sd2(state_dict):
"""
@@ -29,7 +30,6 @@ def is_using_v_parameterization_for_sd2(state_dict):
"""
import ldm.modules.diffusionmodules.openaimodel
- from modules import devices
device = devices.cpu
@@ -72,7 +72,10 @@ def guess_model_config_from_state_dict(sd, filename):
sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)
if sd.get('conditioner.embedders.1.model.ln_final.weight', None) is not None:
- return config_sdxl
+ if diffusion_model_input.shape[1] == 9:
+ return config_sdxl_inpainting
+ else:
+ return config_sdxl
if sd.get('conditioner.embedders.0.model.ln_final.weight', None) is not None:
return config_sdxl_refiner
elif sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
@@ -96,7 +99,10 @@ def guess_model_config_from_state_dict(sd, filename):
if diffusion_model_input.shape[1] == 8:
return config_instruct_pix2pix
+
if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
+ if sd.get('cond_stage_model.transformation.weight').size()[0] == 1024:
+ return config_alt_diffusion_m18
return config_alt_diffusion
return config_default
diff --git a/modules/sd_models_types.py b/modules/sd_models_types.py
new file mode 100644
index 00000000..f911fbb6
--- /dev/null
+++ b/modules/sd_models_types.py
@@ -0,0 +1,34 @@
+from ldm.models.diffusion.ddpm import LatentDiffusion
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+ from modules.sd_models import CheckpointInfo
+
+
+class WebuiSdModel(LatentDiffusion):
+ """This class is not actually instantinated, but its fields are created and fieeld by webui"""
+
+ lowvram: bool
+ """True if lowvram/medvram optimizations are enabled -- see modules.lowvram for more info"""
+
+ sd_model_hash: str
+ """short hash, 10 first characters of SHA1 hash of the model file; may be None if --no-hashing flag is used"""
+
+ sd_model_checkpoint: str
+ """path to the file on disk that model weights were obtained from"""
+
+ sd_checkpoint_info: 'CheckpointInfo'
+ """structure with additional information about the file with model's weights"""
+
+ is_sdxl: bool
+ """True if the model's architecture is SDXL or SSD"""
+
+ is_ssd: bool
+ """True if the model is SSD"""
+
+ is_sd2: bool
+ """True if the model's architecture is SD 2.x"""
+
+ is_sd1: bool
+ """True if the model's architecture is SD 1.x"""
diff --git a/modules/sd_models_xl.py b/modules/sd_models_xl.py
index 40559208..0de17af3 100644
--- a/modules/sd_models_xl.py
+++ b/modules/sd_models_xl.py
@@ -6,6 +6,7 @@ import sgm.models.diffusion
import sgm.modules.diffusionmodules.denoiser_scaling
import sgm.modules.diffusionmodules.discretizer
from modules import devices, shared, prompt_parser
+from modules import torch_utils
def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch: prompt_parser.SdConditioning | list[str]):
@@ -34,6 +35,12 @@ def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch:
def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond):
+ sd = self.model.state_dict()
+ diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+ if diffusion_model_input is not None:
+ if diffusion_model_input.shape[1] == 9:
+ x = torch.cat([x] + cond['c_concat'], dim=1)
+
return self.model(x, t, cond)
@@ -56,6 +63,14 @@ def encode_embedding_init_text(self: sgm.modules.GeneralConditioner, init_text,
return torch.cat(res, dim=1)
+def tokenize(self: sgm.modules.GeneralConditioner, texts):
+ for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'tokenize')]:
+ return embedder.tokenize(texts)
+
+ raise AssertionError('no tokenizer available')
+
+
+
def process_texts(self, texts):
for embedder in [embedder for embedder in self.embedders if hasattr(embedder, 'process_texts')]:
return embedder.process_texts(texts)
@@ -68,6 +83,7 @@ def get_target_prompt_token_count(self, token_count):
# those additions to GeneralConditioner make it possible to use it as model.cond_stage_model from SD1.5 in exist
sgm.modules.GeneralConditioner.encode_embedding_init_text = encode_embedding_init_text
+sgm.modules.GeneralConditioner.tokenize = tokenize
sgm.modules.GeneralConditioner.process_texts = process_texts
sgm.modules.GeneralConditioner.get_target_prompt_token_count = get_target_prompt_token_count
@@ -75,7 +91,7 @@ sgm.modules.GeneralConditioner.get_target_prompt_token_count = get_target_prompt
def extend_sdxl(model):
"""this adds a bunch of parameters to make SDXL model look a bit more like SD1.5 to the rest of the codebase."""
- dtype = next(model.model.diffusion_model.parameters()).dtype
+ dtype = torch_utils.get_param(model.model.diffusion_model).dtype
model.model.diffusion_model.dtype = dtype
model.model.conditioning_key = 'crossattn'
model.cond_stage_key = 'txt'
@@ -84,15 +100,15 @@ def extend_sdxl(model):
model.parameterization = "v" if isinstance(model.denoiser.scaling, sgm.modules.diffusionmodules.denoiser_scaling.VScaling) else "eps"
discretization = sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization()
- model.alphas_cumprod = torch.asarray(discretization.alphas_cumprod, device=devices.device, dtype=dtype)
+ model.alphas_cumprod = torch.asarray(discretization.alphas_cumprod, device=devices.device, dtype=torch.float32)
model.conditioner.wrapped = torch.nn.Module()
-sgm.modules.attention.print = lambda *args: None
-sgm.modules.diffusionmodules.model.print = lambda *args: None
-sgm.modules.diffusionmodules.openaimodel.print = lambda *args: None
-sgm.modules.encoders.modules.print = lambda *args: None
+sgm.modules.attention.print = shared.ldm_print
+sgm.modules.diffusionmodules.model.print = shared.ldm_print
+sgm.modules.diffusionmodules.openaimodel.print = shared.ldm_print
+sgm.modules.encoders.modules.print = shared.ldm_print
# this gets the code to load the vanilla attention that we override
sgm.modules.attention.SDP_IS_AVAILABLE = True
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index bea2684c..45faae62 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -1,17 +1,18 @@
-from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared
+from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, shared
# imports for functions that previously were here and are used by other modules
from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401
all_samplers = [
*sd_samplers_kdiffusion.samplers_data_k_diffusion,
- *sd_samplers_compvis.samplers_data_compvis,
+ *sd_samplers_timesteps.samplers_data_timesteps,
]
all_samplers_map = {x.name: x for x in all_samplers}
samplers = []
samplers_for_img2img = []
samplers_map = {}
+samplers_hidden = {}
def find_sampler_config(name):
@@ -38,13 +39,11 @@ def create_sampler(name, model):
def set_samplers():
- global samplers, samplers_for_img2img
+ global samplers, samplers_for_img2img, samplers_hidden
- hidden = set(shared.opts.hide_samplers)
- hidden_img2img = set(shared.opts.hide_samplers + ['PLMS', 'UniPC'])
-
- samplers = [x for x in all_samplers if x.name not in hidden]
- samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
+ samplers_hidden = set(shared.opts.hide_samplers)
+ samplers = all_samplers
+ samplers_for_img2img = all_samplers
samplers_map.clear()
for sampler in all_samplers:
@@ -53,4 +52,8 @@ def set_samplers():
samplers_map[alias.lower()] = sampler.name
+def visible_sampler_names():
+ return [x.name for x in samplers if x.name not in samplers_hidden]
+
+
set_samplers()
diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py
new file mode 100644
index 00000000..eb9d5daf
--- /dev/null
+++ b/modules/sd_samplers_cfg_denoiser.py
@@ -0,0 +1,247 @@
+import torch
+from modules import prompt_parser, devices, sd_samplers_common
+
+from modules.shared import opts, state
+import modules.shared as shared
+from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
+from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
+from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
+
+
+def catenate_conds(conds):
+ if not isinstance(conds[0], dict):
+ return torch.cat(conds)
+
+ return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()}
+
+
+def subscript_cond(cond, a, b):
+ if not isinstance(cond, dict):
+ return cond[a:b]
+
+ return {key: vec[a:b] for key, vec in cond.items()}
+
+
+def pad_cond(tensor, repeats, empty):
+ if not isinstance(tensor, dict):
+ return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1)
+
+ tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty)
+ return tensor
+
+
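A short sketch of what the three helpers above do for the two conditioning layouts the denoiser sees, assuming they are in scope: plain tensors (SD1/SD2) and dicts of tensors (SDXL-style, with entries such as 'crossattn' and 'vector'). Tensor shapes are illustrative.

import torch

cond_a = torch.zeros(2, 77, 768)
cond_b = torch.ones(2, 77, 768)

# plain-tensor conds are concatenated along the batch dimension
print(catenate_conds([cond_a, cond_b]).shape)  # torch.Size([4, 77, 768])

# dict conds are concatenated key by key
dict_a = {'crossattn': cond_a, 'vector': torch.zeros(2, 1280)}
dict_b = {'crossattn': cond_b, 'vector': torch.ones(2, 1280)}
print(catenate_conds([dict_a, dict_b])['crossattn'].shape)  # torch.Size([4, 77, 768])

# subscript_cond slices either layout the same way
print(subscript_cond(catenate_conds([dict_a, dict_b]), 0, 2)['vector'].shape)  # torch.Size([2, 1280])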
+class CFGDenoiser(torch.nn.Module):
+ """
+ Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet)
+ that can take a noisy picture and produce a noise-free picture using two guidances (prompts)
+ instead of one. Originally, the second prompt is just an empty string, but we use non-empty
+ negative prompt.
+ """
+
+ def __init__(self, sampler):
+ super().__init__()
+ self.model_wrap = None
+ self.mask = None
+ self.nmask = None
+ self.init_latent = None
+ self.steps = None
+ """number of steps as specified by user in UI"""
+
+ self.total_steps = None
+ """expected number of calls to denoiser calculated from self.steps and specifics of the selected sampler"""
+
+ self.step = 0
+ self.image_cfg_scale = None
+ self.padded_cond_uncond = False
+ self.sampler = sampler
+ self.model_wrap = None
+ self.p = None
+
+ # NOTE: masking before denoising can cause the original latents to be oversmoothed
+ # as the original latents do not have noise
+ self.mask_before_denoising = False
+
+ @property
+ def inner_model(self):
+ raise NotImplementedError()
+
+ def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
+ denoised_uncond = x_out[-uncond.shape[0]:]
+ denoised = torch.clone(denoised_uncond)
+
+ for i, conds in enumerate(conds_list):
+ for cond_index, weight in conds:
+ denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
+
+ return denoised
+
+ def combine_denoised_for_edit_model(self, x_out, cond_scale):
+ out_cond, out_img_cond, out_uncond = x_out.chunk(3)
+ denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond)
+
+ return denoised
+
+ def get_pred_x0(self, x_in, x_out, sigma):
+ return x_out
+
+ def update_inner_model(self):
+ self.model_wrap = None
+
+ c, uc = self.p.get_conds()
+ self.sampler.sampler_extra_args['cond'] = c
+ self.sampler.sampler_extra_args['uncond'] = uc
+
+ def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond):
+ if state.interrupted or state.skipped:
+ raise sd_samplers_common.InterruptedException
+
+ if sd_samplers_common.apply_refiner(self):
+ cond = self.sampler.sampler_extra_args['cond']
+ uncond = self.sampler.sampler_extra_args['uncond']
+
+ # at self.image_cfg_scale == 1.0, the results produced for the edit model are the same as with normal sampling,
+ # so is_edit_model is set to False to support AND composition.
+ is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0
+
+ conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
+ uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
+
+ assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
+
+ # If we use masks, blending between the denoised and original latent images occurs here.
+ def apply_blend(current_latent):
+ blended_latent = current_latent * self.nmask + self.init_latent * self.mask
+
+ if self.p.scripts is not None:
+ from modules import scripts
+ mba = scripts.MaskBlendArgs(current_latent, self.nmask, self.init_latent, self.mask, blended_latent, denoiser=self, sigma=sigma)
+ self.p.scripts.on_mask_blend(self.p, mba)
+ blended_latent = mba.blended_latent
+
+ return blended_latent
+
+ # Blend in the original latents (before)
+ if self.mask_before_denoising and self.mask is not None:
+ x = apply_blend(x)
+
+ batch_size = len(conds_list)
+ repeats = [len(conds_list[i]) for i in range(batch_size)]
+
+ if shared.sd_model.model.conditioning_key == "crossattn-adm":
+ image_uncond = torch.zeros_like(image_cond)
+ make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm}
+ else:
+ image_uncond = image_cond
+ if isinstance(uncond, dict):
+ make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]}
+ else:
+ make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]}
+
+ if not is_edit_model:
+ x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
+ sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
+ image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond])
+ else:
+ x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x])
+ sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma])
+ image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)])
+
+ denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond)
+ cfg_denoiser_callback(denoiser_params)
+ x_in = denoiser_params.x
+ image_cond_in = denoiser_params.image_cond
+ sigma_in = denoiser_params.sigma
+ tensor = denoiser_params.text_cond
+ uncond = denoiser_params.text_uncond
+ skip_uncond = False
+
+ # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
+ if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
+ skip_uncond = True
+ x_in = x_in[:-batch_size]
+ sigma_in = sigma_in[:-batch_size]
+
+ self.padded_cond_uncond = False
+ if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
+ empty = shared.sd_model.cond_stage_model_empty_prompt
+ num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
+
+ if num_repeats < 0:
+ tensor = pad_cond(tensor, -num_repeats, empty)
+ self.padded_cond_uncond = True
+ elif num_repeats > 0:
+ uncond = pad_cond(uncond, num_repeats, empty)
+ self.padded_cond_uncond = True
+
+ if tensor.shape[1] == uncond.shape[1] or skip_uncond:
+ if is_edit_model:
+ cond_in = catenate_conds([tensor, uncond, uncond])
+ elif skip_uncond:
+ cond_in = tensor
+ else:
+ cond_in = catenate_conds([tensor, uncond])
+
+ if shared.opts.batch_cond_uncond:
+ x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
+ else:
+ x_out = torch.zeros_like(x_in)
+ for batch_offset in range(0, x_out.shape[0], batch_size):
+ a = batch_offset
+ b = a + batch_size
+ x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
+ else:
+ x_out = torch.zeros_like(x_in)
+ batch_size = batch_size*2 if shared.opts.batch_cond_uncond else batch_size
+ for batch_offset in range(0, tensor.shape[0], batch_size):
+ a = batch_offset
+ b = min(a + batch_size, tensor.shape[0])
+
+ if not is_edit_model:
+ c_crossattn = subscript_cond(tensor, a, b)
+ else:
+ c_crossattn = torch.cat([tensor[a:b], uncond])
+
+ x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
+
+ if not skip_uncond:
+ x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
+
+ denoised_image_indexes = [x[0][0] for x in conds_list]
+ if skip_uncond:
+ fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
+ x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put the cond-denoised image where the uncond-denoised image should be
+
+ denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
+ cfg_denoised_callback(denoised_params)
+
+ devices.test_for_nans(x_out, "unet")
+
+ if is_edit_model:
+ denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
+ elif skip_uncond:
+ denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
+ else:
+ denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+
+ # Blend in the original latents (after)
+ if not self.mask_before_denoising and self.mask is not None:
+ denoised = apply_blend(denoised)
+
+ self.sampler.last_latent = self.get_pred_x0(torch.cat([x_in[i:i + 1] for i in denoised_image_indexes]), torch.cat([x_out[i:i + 1] for i in denoised_image_indexes]), sigma)
+
+ if opts.live_preview_content == "Prompt":
+ preview = self.sampler.last_latent
+ elif opts.live_preview_content == "Negative prompt":
+ preview = self.get_pred_x0(x_in[-uncond.shape[0]:], x_out[-uncond.shape[0]:], sigma)
+ else:
+ preview = self.get_pred_x0(torch.cat([x_in[i:i+1] for i in denoised_image_indexes]), torch.cat([denoised[i:i+1] for i in denoised_image_indexes]), sigma)
+
+ sd_samplers_common.store_latent(preview)
+
+ after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
+ cfg_after_cfg_callback(after_cfg_callback_params)
+ denoised = after_cfg_callback_params.x
+
+ self.step += 1
+ return denoised
+
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 763829f1..58efcad2 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -1,13 +1,22 @@
+import inspect
from collections import namedtuple
import numpy as np
import torch
from PIL import Image
-from modules import devices, processing, images, sd_vae_approx, sd_samplers, sd_vae_taesd
-
+from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared, sd_models
from modules.shared import opts, state
-import modules.shared as shared
+import k_diffusion.sampling
+
+
+SamplerDataTuple = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
-SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
+
+class SamplerData(SamplerDataTuple):
+ def total_steps(self, steps):
+ if self.options.get("second_order", False):
+ steps = steps * 2
+
+ return steps
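A quick sketch of what total_steps does: samplers marked as second order call the denoiser twice per step, so the expected number of denoiser calls is doubled. The sampler names and options dicts here are illustrative.

first_order = SamplerData('Euler a', None, ['k_euler_a'], {})
second_order = SamplerData('Heun', None, ['k_heun'], {'second_order': True})

print(first_order.total_steps(20))   # 20
print(second_order.total_steps(20))  # 40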
def setup_img2img_steps(p, steps=None):
@@ -25,19 +34,34 @@ def setup_img2img_steps(p, steps=None):
approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}
-def single_sample_to_image(sample, approximation=None):
- if approximation is None:
+def samples_to_images_tensor(sample, approximation=None, model=None):
+ """Transforms 4-channel latent space images into 3-channel RGB image tensors, with values in range [-1, 1]."""
+
+ if approximation is None or (shared.state.interrupted and opts.live_preview_fast_interrupt):
approximation = approximation_indexes.get(opts.show_progress_type, 0)
+ from modules import lowvram
+ if approximation == 0 and lowvram.is_enabled(shared.sd_model) and not shared.opts.live_preview_allow_lowvram_full:
+ approximation = 1
+
if approximation == 2:
- x_sample = sd_vae_approx.cheap_approximation(sample) * 0.5 + 0.5
+ x_sample = sd_vae_approx.cheap_approximation(sample)
elif approximation == 1:
- x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() * 0.5 + 0.5
+ x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype)).detach()
elif approximation == 3:
- x_sample = sample * 1.5
- x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
+ x_sample = sd_vae_taesd.decoder_model()(sample.to(devices.device, devices.dtype)).detach()
+ x_sample = x_sample * 2 - 1
else:
- x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5
+ if model is None:
+ model = shared.sd_model
+ with devices.without_autocast(): # fixes an issue with unstable VAEs that are flaky even in fp32
+ x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype))
+
+ return x_sample
+
+
+def single_sample_to_image(sample, approximation=None):
+ x_sample = samples_to_images_tensor(sample.unsqueeze(0), approximation)[0] * 0.5 + 0.5
x_sample = torch.clamp(x_sample, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
@@ -46,6 +70,12 @@ def single_sample_to_image(sample, approximation=None):
return Image.fromarray(x_sample)
+def decode_first_stage(model, x):
+ x = x.to(devices.dtype_vae)
+ approx_index = approximation_indexes.get(opts.sd_vae_decode_method, 0)
+ return samples_to_images_tensor(x, approx_index, model)
+
+
def sample_to_image(samples, index=0, approximation=None):
return single_sample_to_image(samples[index], approximation)
@@ -54,6 +84,34 @@ def samples_to_image_grid(samples, approximation=None):
return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
+def images_tensor_to_samples(image, approximation=None, model=None):
+ '''Transforms an RGB image tensor with values in [0, 1] into a latent space tensor.'''
+ if approximation is None:
+ approximation = approximation_indexes.get(opts.sd_vae_encode_method, 0)
+
+ if approximation == 3:
+ image = image.to(devices.device, devices.dtype)
+ x_latent = sd_vae_taesd.encoder_model()(image)
+ else:
+ if model is None:
+ model = shared.sd_model
+ model.first_stage_model.to(devices.dtype_vae)
+
+ image = image.to(shared.device, dtype=devices.dtype_vae)
+ image = image * 2 - 1
+ if len(image) > 1:
+ x_latent = torch.stack([
+ model.get_first_stage_encoding(
+ model.encode_first_stage(torch.unsqueeze(img, 0))
+ )[0]
+ for img in image
+ ])
+ else:
+ x_latent = model.get_first_stage_encoding(model.encode_first_stage(image))
+
+ return x_latent
+
+
def store_latent(decoded):
state.current_latent = decoded
@@ -85,11 +143,195 @@ class InterruptedException(BaseException):
pass
-if opts.randn_source == "CPU":
+def replace_torchsde_browinan():
import torchsde._brownian.brownian_interval
def torchsde_randn(size, dtype, device, seed):
- generator = torch.Generator(devices.cpu).manual_seed(int(seed))
- return torch.randn(size, dtype=dtype, device=devices.cpu, generator=generator).to(device)
+ return devices.randn_local(seed, size).to(device=device, dtype=dtype)
torchsde._brownian.brownian_interval._randn = torchsde_randn
+
+
+replace_torchsde_browinan()
+
+
+def apply_refiner(cfg_denoiser):
+ completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps
+ refiner_switch_at = cfg_denoiser.p.refiner_switch_at
+ refiner_checkpoint_info = cfg_denoiser.p.refiner_checkpoint_info
+
+ if refiner_switch_at is not None and completed_ratio < refiner_switch_at:
+ return False
+
+ if refiner_checkpoint_info is None or shared.sd_model.sd_checkpoint_info == refiner_checkpoint_info:
+ return False
+
+ if getattr(cfg_denoiser.p, "enable_hr", False):
+ is_second_pass = cfg_denoiser.p.is_hr_pass
+
+ if opts.hires_fix_refiner_pass == "first pass" and is_second_pass:
+ return False
+
+ if opts.hires_fix_refiner_pass == "second pass" and not is_second_pass:
+ return False
+
+ if opts.hires_fix_refiner_pass != "second pass":
+ cfg_denoiser.p.extra_generation_params['Hires refiner'] = opts.hires_fix_refiner_pass
+
+ cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title
+ cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at
+
+ with sd_models.SkipWritingToConfig():
+ sd_models.reload_model_weights(info=refiner_checkpoint_info)
+
+ devices.torch_gc()
+ cfg_denoiser.p.setup_conds()
+ cfg_denoiser.update_inner_model()
+
+ return True
+
+
+class TorchHijack:
+ """This is here to replace torch.randn_like of k-diffusion.
+
+ k-diffusion has a random_sampler argument for most samplers, but not for all, so
+ this is needed to properly replace every use of torch.randn_like.
+
+ We need to replace it so that images generated in batches are the same as images generated individually."""
+
+ def __init__(self, p):
+ self.rng = p.rng
+
+ def __getattr__(self, item):
+ if item == 'randn_like':
+ return self.randn_like
+
+ if hasattr(torch, item):
+ return getattr(torch, item)
+
+ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
+
+ def randn_like(self, x):
+ return self.rng.next()
+
+
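A small sketch of the delegation behaviour of TorchHijack above: randn_like is answered from the processing object's RNG, while every other attribute falls through to torch. The stub classes stand in for the real processing object and its rng.

import torch

class _StubRng:
    def next(self):
        # stand-in for the per-image RNG used by processing; always returns zeros here
        return torch.zeros(2, 3)

class _StubProcessing:
    rng = _StubRng()

hijacked = TorchHijack(_StubProcessing())
print(hijacked.randn_like(torch.ones(2, 3)))          # comes from the stub RNG, ignoring its argument's values
print(hijacked.cat([torch.ones(1), torch.zeros(1)]))  # any other attribute resolves to plain torch, here torch.cat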
+class Sampler:
+ def __init__(self, funcname):
+ self.funcname = funcname
+ self.func = funcname
+ self.extra_params = []
+ self.sampler_noises = None
+ self.stop_at = None
+ self.eta = None
+ self.config: SamplerData = None # set by the function calling the constructor
+ self.last_latent = None
+ self.s_min_uncond = None
+ self.s_churn = 0.0
+ self.s_tmin = 0.0
+ self.s_tmax = float('inf')
+ self.s_noise = 1.0
+
+ self.eta_option_field = 'eta_ancestral'
+ self.eta_infotext_field = 'Eta'
+ self.eta_default = 1.0
+
+ self.conditioning_key = shared.sd_model.model.conditioning_key
+
+ self.p = None
+ self.model_wrap_cfg = None
+ self.sampler_extra_args = None
+ self.options = {}
+
+ def callback_state(self, d):
+ step = d['i']
+
+ if self.stop_at is not None and step > self.stop_at:
+ raise InterruptedException
+
+ state.sampling_step = step
+ shared.total_tqdm.update()
+
+ def launch_sampling(self, steps, func):
+ self.model_wrap_cfg.steps = steps
+ self.model_wrap_cfg.total_steps = self.config.total_steps(steps)
+ state.sampling_steps = steps
+ state.sampling_step = 0
+
+ try:
+ return func()
+ except RecursionError:
+ print(
+ 'Encountered RecursionError during sampling, returning last latent. '
+ 'rho >5 with a polyexponential scheduler may cause this error. '
+ 'You should try to use a smaller rho value instead.'
+ )
+ return self.last_latent
+ except InterruptedException:
+ return self.last_latent
+
+ def number_of_needed_noises(self, p):
+ return p.steps
+
+ def initialize(self, p) -> dict:
+ self.p = p
+ self.model_wrap_cfg.p = p
+ self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
+ self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
+ self.model_wrap_cfg.step = 0
+ self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
+ self.eta = p.eta if p.eta is not None else getattr(opts, self.eta_option_field, 0.0)
+ self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
+
+ k_diffusion.sampling.torch = TorchHijack(p)
+
+ extra_params_kwargs = {}
+ for param_name in self.extra_params:
+ if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
+ extra_params_kwargs[param_name] = getattr(p, param_name)
+
+ if 'eta' in inspect.signature(self.func).parameters:
+ if self.eta != self.eta_default:
+ p.extra_generation_params[self.eta_infotext_field] = self.eta
+
+ extra_params_kwargs['eta'] = self.eta
+
+ if len(self.extra_params) > 0:
+ s_churn = getattr(opts, 's_churn', p.s_churn)
+ s_tmin = getattr(opts, 's_tmin', p.s_tmin)
+ s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf
+ s_noise = getattr(opts, 's_noise', p.s_noise)
+
+ if 's_churn' in extra_params_kwargs and s_churn != self.s_churn:
+ extra_params_kwargs['s_churn'] = s_churn
+ p.s_churn = s_churn
+ p.extra_generation_params['Sigma churn'] = s_churn
+ if 's_tmin' in extra_params_kwargs and s_tmin != self.s_tmin:
+ extra_params_kwargs['s_tmin'] = s_tmin
+ p.s_tmin = s_tmin
+ p.extra_generation_params['Sigma tmin'] = s_tmin
+ if 's_tmax' in extra_params_kwargs and s_tmax != self.s_tmax:
+ extra_params_kwargs['s_tmax'] = s_tmax
+ p.s_tmax = s_tmax
+ p.extra_generation_params['Sigma tmax'] = s_tmax
+ if 's_noise' in extra_params_kwargs and s_noise != self.s_noise:
+ extra_params_kwargs['s_noise'] = s_noise
+ p.s_noise = s_noise
+ p.extra_generation_params['Sigma noise'] = s_noise
+
+ return extra_params_kwargs
+
+ def create_noise_sampler(self, x, sigmas, p):
+ """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
+ if shared.opts.no_dpmpp_sde_batch_determinism:
+ return None
+
+ from k_diffusion.sampling import BrownianTreeNoiseSampler
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+ current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
+ return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)
+
+ def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ raise NotImplementedError()
+
+ def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ raise NotImplementedError()
diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py
index 4a8396f9..e69de29b 100644
--- a/modules/sd_samplers_compvis.py
+++ b/modules/sd_samplers_compvis.py
@@ -1,224 +0,0 @@
-import math
-import ldm.models.diffusion.ddim
-import ldm.models.diffusion.plms
-
-import numpy as np
-import torch
-
-from modules.shared import state
-from modules import sd_samplers_common, prompt_parser, shared
-import modules.models.diffusion.uni_pc
-
-
-samplers_data_compvis = [
- sd_samplers_common.SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {"default_eta_is_0": True, "uses_ensd": True, "no_sdxl": True}),
- sd_samplers_common.SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {"no_sdxl": True}),
- sd_samplers_common.SamplerData('UniPC', lambda model: VanillaStableDiffusionSampler(modules.models.diffusion.uni_pc.UniPCSampler, model), [], {"no_sdxl": True}),
-]
-
-
-class VanillaStableDiffusionSampler:
- def __init__(self, constructor, sd_model):
- self.sampler = constructor(sd_model)
- self.is_ddim = hasattr(self.sampler, 'p_sample_ddim')
- self.is_plms = hasattr(self.sampler, 'p_sample_plms')
- self.is_unipc = isinstance(self.sampler, modules.models.diffusion.uni_pc.UniPCSampler)
- self.orig_p_sample_ddim = None
- if self.is_plms:
- self.orig_p_sample_ddim = self.sampler.p_sample_plms
- elif self.is_ddim:
- self.orig_p_sample_ddim = self.sampler.p_sample_ddim
- self.mask = None
- self.nmask = None
- self.init_latent = None
- self.sampler_noises = None
- self.step = 0
- self.stop_at = None
- self.eta = None
- self.config = None
- self.last_latent = None
-
- self.conditioning_key = sd_model.model.conditioning_key
-
- def number_of_needed_noises(self, p):
- return 0
-
- def launch_sampling(self, steps, func):
- state.sampling_steps = steps
- state.sampling_step = 0
-
- try:
- return func()
- except sd_samplers_common.InterruptedException:
- return self.last_latent
-
- def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
- x_dec, ts, cond, unconditional_conditioning = self.before_sample(x_dec, ts, cond, unconditional_conditioning)
-
- res = self.orig_p_sample_ddim(x_dec, cond, ts, *args, unconditional_conditioning=unconditional_conditioning, **kwargs)
-
- x_dec, ts, cond, unconditional_conditioning, res = self.after_sample(x_dec, ts, cond, unconditional_conditioning, res)
-
- return res
-
- def before_sample(self, x, ts, cond, unconditional_conditioning):
- if state.interrupted or state.skipped:
- raise sd_samplers_common.InterruptedException
-
- if self.stop_at is not None and self.step > self.stop_at:
- raise sd_samplers_common.InterruptedException
-
- # Have to unwrap the inpainting conditioning here to perform pre-processing
- image_conditioning = None
- uc_image_conditioning = None
- if isinstance(cond, dict):
- if self.conditioning_key == "crossattn-adm":
- image_conditioning = cond["c_adm"]
- uc_image_conditioning = unconditional_conditioning["c_adm"]
- else:
- image_conditioning = cond["c_concat"][0]
- cond = cond["c_crossattn"][0]
- unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
-
- conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
- unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
-
- assert all(len(conds) == 1 for conds in conds_list), 'composition via AND is not supported for DDIM/PLMS samplers'
- cond = tensor
-
- # for DDIM, shapes must match, we can't just process cond and uncond independently;
- # filling unconditional_conditioning with repeats of the last vector to match length is
- # not 100% correct but should work well enough
- if unconditional_conditioning.shape[1] < cond.shape[1]:
- last_vector = unconditional_conditioning[:, -1:]
- last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
- unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
- elif unconditional_conditioning.shape[1] > cond.shape[1]:
- unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
-
- if self.mask is not None:
- img_orig = self.sampler.model.q_sample(self.init_latent, ts)
- x = img_orig * self.mask + self.nmask * x
-
- # Wrap the image conditioning back up since the DDIM code can accept the dict directly.
- # Note that they need to be lists because it just concatenates them later.
- if image_conditioning is not None:
- if self.conditioning_key == "crossattn-adm":
- cond = {"c_adm": image_conditioning, "c_crossattn": [cond]}
- unconditional_conditioning = {"c_adm": uc_image_conditioning, "c_crossattn": [unconditional_conditioning]}
- else:
- cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
- unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
-
- return x, ts, cond, unconditional_conditioning
-
- def update_step(self, last_latent):
- if self.mask is not None:
- self.last_latent = self.init_latent * self.mask + self.nmask * last_latent
- else:
- self.last_latent = last_latent
-
- sd_samplers_common.store_latent(self.last_latent)
-
- self.step += 1
- state.sampling_step = self.step
- shared.total_tqdm.update()
-
- def after_sample(self, x, ts, cond, uncond, res):
- if not self.is_unipc:
- self.update_step(res[1])
-
- return x, ts, cond, uncond, res
-
- def unipc_after_update(self, x, model_x):
- self.update_step(x)
-
- def initialize(self, p):
- if self.is_ddim:
- self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim
- else:
- self.eta = 0.0
-
- if self.eta != 0.0:
- p.extra_generation_params["Eta DDIM"] = self.eta
-
- if self.is_unipc:
- keys = [
- ('UniPC variant', 'uni_pc_variant'),
- ('UniPC skip type', 'uni_pc_skip_type'),
- ('UniPC order', 'uni_pc_order'),
- ('UniPC lower order final', 'uni_pc_lower_order_final'),
- ]
-
- for name, key in keys:
- v = getattr(shared.opts, key)
- if v != shared.opts.get_default(key):
- p.extra_generation_params[name] = v
-
- for fieldname in ['p_sample_ddim', 'p_sample_plms']:
- if hasattr(self.sampler, fieldname):
- setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
- if self.is_unipc:
- self.sampler.set_hooks(lambda x, t, c, u: self.before_sample(x, t, c, u), lambda x, t, c, u, r: self.after_sample(x, t, c, u, r), lambda x, mx: self.unipc_after_update(x, mx))
-
- self.mask = p.mask if hasattr(p, 'mask') else None
- self.nmask = p.nmask if hasattr(p, 'nmask') else None
-
-
- def adjust_steps_if_invalid(self, p, num_steps):
- if ((self.config.name == 'DDIM') and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS') or (self.config.name == 'UniPC'):
- if self.config.name == 'UniPC' and num_steps < shared.opts.uni_pc_order:
- num_steps = shared.opts.uni_pc_order
- valid_step = 999 / (1000 // num_steps)
- if valid_step == math.floor(valid_step):
- return int(valid_step) + 1
-
- return num_steps
-
- def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
- steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
- steps = self.adjust_steps_if_invalid(p, steps)
- self.initialize(p)
-
- self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
- x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
-
- self.init_latent = x
- self.last_latent = x
- self.step = 0
-
- # Wrap the conditioning models with additional image conditioning for inpainting model
- if image_conditioning is not None:
- if self.conditioning_key == "crossattn-adm":
- conditioning = {"c_adm": image_conditioning, "c_crossattn": [conditioning]}
- unconditional_conditioning = {"c_adm": torch.zeros_like(image_conditioning), "c_crossattn": [unconditional_conditioning]}
- else:
- conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
- unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
-
- samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
-
- return samples
-
- def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
- self.initialize(p)
-
- self.init_latent = None
- self.last_latent = x
- self.step = 0
-
- steps = self.adjust_steps_if_invalid(p, steps or p.steps)
-
- # Wrap the conditioning models with additional image conditioning for inpainting model
- # dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape
- if image_conditioning is not None:
- if self.conditioning_key == "crossattn-adm":
- conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_adm": image_conditioning}
- unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_adm": torch.zeros_like(image_conditioning)}
- else:
- conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]}
- unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]}
-
- samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
-
- return samples_ddim
diff --git a/modules/sd_samplers_extra.py b/modules/sd_samplers_extra.py
new file mode 100644
index 00000000..72fd0aa5
--- /dev/null
+++ b/modules/sd_samplers_extra.py
@@ -0,0 +1,74 @@
+import torch
+import tqdm
+import k_diffusion.sampling
+
+
+@torch.no_grad()
+def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None):
+ """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)
+ Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}
+ If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list
+ """
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ step_id = 0
+ from k_diffusion.sampling import to_d, get_sigmas_karras
+
+ def heun_step(x, old_sigma, new_sigma, second_order=True):
+ nonlocal step_id
+ denoised = model(x, old_sigma * s_in, **extra_args)
+ d = to_d(x, old_sigma, denoised)
+ if callback is not None:
+ callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised})
+ dt = new_sigma - old_sigma
+ if new_sigma == 0 or not second_order:
+ # Euler method
+ x = x + d * dt
+ else:
+ # Heun's method
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, new_sigma * s_in, **extra_args)
+ d_2 = to_d(x_2, new_sigma, denoised_2)
+ d_prime = (d + d_2) / 2
+ x = x + d_prime * dt
+ step_id += 1
+ return x
+
+ steps = sigmas.shape[0] - 1
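+ # pick a default restart schedule from the step count; fewer than 20 steps means no restarts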
+ if restart_list is None:
+ if steps >= 20:
+ restart_steps = 9
+ restart_times = 1
+ if steps >= 36:
+ restart_steps = steps // 4
+ restart_times = 2
+ sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device)
+ restart_list = {0.1: [restart_steps + 1, restart_times, 2]}
+ else:
+ restart_list = {}
+
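+ # replace the sigma-valued keys with the index of the closest sigma in the schedule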
+ restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in restart_list.items()}
+
+ step_list = []
+ for i in range(len(sigmas) - 1):
+ step_list.append((sigmas[i], sigmas[i + 1]))
+ if i + 1 in restart_list:
+ restart_steps, restart_times, restart_max = restart_list[i + 1]
+ min_idx = i + 1
+ max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
+ if max_idx < min_idx:
+ sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1]
+ while restart_times > 0:
+ restart_times -= 1
+ step_list.extend(zip(sigma_restart[:-1], sigma_restart[1:]))
+
+ last_sigma = None
+ for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable):
+ if last_sigma is None:
+ last_sigma = old_sigma
+ elif last_sigma < old_sigma:
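+ # sigma went back up: this is a restart, so add fresh noise with standard deviation sqrt(old_sigma^2 - last_sigma^2), scaled by s_noise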
+ x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5
+ x = heun_step(x, old_sigma, new_sigma)
+ last_sigma = new_sigma
+
+ return x
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 5552a8dc..8a8c87e0 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -1,47 +1,60 @@
-from collections import deque
import torch
import inspect
import k_diffusion.sampling
-from modules import prompt_parser, devices, sd_samplers_common
+from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser
+from modules.sd_samplers_cfg_denoiser import CFGDenoiser # noqa: F401
+from modules.script_callbacks import ExtraNoiseParams, extra_noise_callback
-from modules.shared import opts, state
+from modules.shared import opts
import modules.shared as shared
-from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
-from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
-from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
samplers_k_diffusion = [
+ ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
+ ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
+ ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}),
+ ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}),
('Euler', 'sample_euler', ['k_euler'], {}),
('LMS', 'sample_lms', ['k_lms'], {}),
('Heun', 'sample_heun', ['k_heun'], {"second_order": True}),
- ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
- ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}),
+ ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True, "second_order": True}),
+ ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}),
('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}),
('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}),
+ ('DPM++ 2M SDE Heun', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun'], {"brownian_noise": True, "solver_type": "heun"}),
+ ('DPM++ 2M SDE Heun Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun_ka'], {'scheduler': 'karras', "brownian_noise": True, "solver_type": "heun"}),
+ ('DPM++ 2M SDE Heun Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun_exp'], {'scheduler': 'exponential', "brownian_noise": True, "solver_type": "heun"}),
+ ('DPM++ 3M SDE', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde'], {'discard_next_to_last_sigma': True, "brownian_noise": True}),
+ ('DPM++ 3M SDE Karras', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "brownian_noise": True}),
+ ('DPM++ 3M SDE Exponential', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_exp'], {'scheduler': 'exponential', 'discard_next_to_last_sigma': True, "brownian_noise": True}),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}),
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}),
- ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
- ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
- ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
+ ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras', "second_order": True}),
]
+
samplers_data_k_diffusion = [
sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
for label, funcname, aliases, options in samplers_k_diffusion
- if hasattr(k_diffusion.sampling, funcname)
+ if callable(funcname) or hasattr(k_diffusion.sampling, funcname)
]
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
+ 'sample_dpm_fast': ['s_noise'],
+ 'sample_dpm_2_ancestral': ['s_noise'],
+ 'sample_dpmpp_2s_ancestral': ['s_noise'],
+ 'sample_dpmpp_sde': ['s_noise'],
+ 'sample_dpmpp_2m_sde': ['s_noise'],
+ 'sample_dpmpp_3m_sde': ['s_noise'],
}
k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion}
@@ -53,289 +66,27 @@ k_diffusion_scheduler = {
}
-def catenate_conds(conds):
- if not isinstance(conds[0], dict):
- return torch.cat(conds)
-
- return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()}
-
-
-def subscript_cond(cond, a, b):
- if not isinstance(cond, dict):
- return cond[a:b]
-
- return {key: vec[a:b] for key, vec in cond.items()}
-
-
-def pad_cond(tensor, repeats, empty):
- if not isinstance(tensor, dict):
- return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1)
-
- tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty)
- return tensor
-
-
-class CFGDenoiser(torch.nn.Module):
- """
- Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet)
- that can take a noisy picture and produce a noise-free picture using two guidances (prompts)
- instead of one. Originally, the second prompt is just an empty string, but we use non-empty
- negative prompt.
- """
-
- def __init__(self, model):
- super().__init__()
- self.inner_model = model
- self.mask = None
- self.nmask = None
- self.init_latent = None
- self.step = 0
- self.image_cfg_scale = None
- self.padded_cond_uncond = False
-
- def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
- denoised_uncond = x_out[-uncond.shape[0]:]
- denoised = torch.clone(denoised_uncond)
-
- for i, conds in enumerate(conds_list):
- for cond_index, weight in conds:
- denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
-
- return denoised
-
- def combine_denoised_for_edit_model(self, x_out, cond_scale):
- out_cond, out_img_cond, out_uncond = x_out.chunk(3)
- denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond)
-
- return denoised
-
- def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond):
- if state.interrupted or state.skipped:
- raise sd_samplers_common.InterruptedException
-
- # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling,
- # so is_edit_model is set to False to support AND composition.
- is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0
-
- conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
- uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
-
- assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
-
- batch_size = len(conds_list)
- repeats = [len(conds_list[i]) for i in range(batch_size)]
-
- if shared.sd_model.model.conditioning_key == "crossattn-adm":
- image_uncond = torch.zeros_like(image_cond)
- make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm}
- else:
- image_uncond = image_cond
- if isinstance(uncond, dict):
- make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]}
- else:
- make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]}
-
- if not is_edit_model:
- x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
- sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
- image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond])
- else:
- x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x])
- sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma])
- image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)])
-
- denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond)
- cfg_denoiser_callback(denoiser_params)
- x_in = denoiser_params.x
- image_cond_in = denoiser_params.image_cond
- sigma_in = denoiser_params.sigma
- tensor = denoiser_params.text_cond
- uncond = denoiser_params.text_uncond
- skip_uncond = False
-
- # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
- if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
- skip_uncond = True
- x_in = x_in[:-batch_size]
- sigma_in = sigma_in[:-batch_size]
-
- self.padded_cond_uncond = False
- if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
- empty = shared.sd_model.cond_stage_model_empty_prompt
- num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
-
- if num_repeats < 0:
- tensor = pad_cond(tensor, -num_repeats, empty)
- self.padded_cond_uncond = True
- elif num_repeats > 0:
- uncond = pad_cond(uncond, num_repeats, empty)
- self.padded_cond_uncond = True
-
- if tensor.shape[1] == uncond.shape[1] or skip_uncond:
- if is_edit_model:
- cond_in = catenate_conds([tensor, uncond, uncond])
- elif skip_uncond:
- cond_in = tensor
- else:
- cond_in = catenate_conds([tensor, uncond])
-
- if shared.batch_cond_uncond:
- x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
- else:
- x_out = torch.zeros_like(x_in)
- for batch_offset in range(0, x_out.shape[0], batch_size):
- a = batch_offset
- b = a + batch_size
- x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
- else:
- x_out = torch.zeros_like(x_in)
- batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
- for batch_offset in range(0, tensor.shape[0], batch_size):
- a = batch_offset
- b = min(a + batch_size, tensor.shape[0])
-
- if not is_edit_model:
- c_crossattn = subscript_cond(tensor, a, b)
- else:
- c_crossattn = torch.cat([tensor[a:b]], uncond)
-
- x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
-
- if not skip_uncond:
- x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
+class CFGDenoiserKDiffusion(sd_samplers_cfg_denoiser.CFGDenoiser):
+ @property
+ def inner_model(self):
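+ # lazily wrap the current checkpoint in a k-diffusion denoiser; v-prediction models use CompVisVDenoiser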
+ if self.model_wrap is None:
+ denoiser = k_diffusion.external.CompVisVDenoiser if shared.sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
+ self.model_wrap = denoiser(shared.sd_model, quantize=shared.opts.enable_quantization)
- denoised_image_indexes = [x[0][0] for x in conds_list]
- if skip_uncond:
- fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
- x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
+ return self.model_wrap
- denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
- cfg_denoised_callback(denoised_params)
- devices.test_for_nans(x_out, "unet")
-
- if opts.live_preview_content == "Prompt":
- sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes]))
- elif opts.live_preview_content == "Negative prompt":
- sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
-
- if is_edit_model:
- denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
- elif skip_uncond:
- denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
- else:
- denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
-
- if self.mask is not None:
- denoised = self.init_latent * self.mask + self.nmask * denoised
-
- after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
- cfg_after_cfg_callback(after_cfg_callback_params)
- denoised = after_cfg_callback_params.x
-
- self.step += 1
- return denoised
-
-
-class TorchHijack:
- def __init__(self, sampler_noises):
- # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
- # implementation.
- self.sampler_noises = deque(sampler_noises)
-
- def __getattr__(self, item):
- if item == 'randn_like':
- return self.randn_like
-
- if hasattr(torch, item):
- return getattr(torch, item)
-
- raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
-
- def randn_like(self, x):
- if self.sampler_noises:
- noise = self.sampler_noises.popleft()
- if noise.shape == x.shape:
- return noise
-
- if opts.randn_source == "CPU" or x.device.type == 'mps':
- return torch.randn_like(x, device=devices.cpu).to(x.device)
- else:
- return torch.randn_like(x)
+class KDiffusionSampler(sd_samplers_common.Sampler):
+ def __init__(self, funcname, sd_model, options=None):
+ super().__init__(funcname)
+ self.extra_params = sampler_extra_params.get(funcname, [])
-class KDiffusionSampler:
- def __init__(self, funcname, sd_model):
- denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
+ self.options = options or {}
+ self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
- self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
- self.funcname = funcname
- self.func = getattr(k_diffusion.sampling, self.funcname)
- self.extra_params = sampler_extra_params.get(funcname, [])
- self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
- self.sampler_noises = None
- self.stop_at = None
- self.eta = None
- self.config = None # set by the function calling the constructor
- self.last_latent = None
- self.s_min_uncond = None
-
- self.conditioning_key = sd_model.model.conditioning_key
-
- def callback_state(self, d):
- step = d['i']
- latent = d["denoised"]
- if opts.live_preview_content == "Combined":
- sd_samplers_common.store_latent(latent)
- self.last_latent = latent
-
- if self.stop_at is not None and step > self.stop_at:
- raise sd_samplers_common.InterruptedException
-
- state.sampling_step = step
- shared.total_tqdm.update()
-
- def launch_sampling(self, steps, func):
- state.sampling_steps = steps
- state.sampling_step = 0
-
- try:
- return func()
- except RecursionError:
- print(
- 'Encountered RecursionError during sampling, returning last latent. '
- 'rho >5 with a polyexponential scheduler may cause this error. '
- 'You should try to use a smaller rho value instead.'
- )
- return self.last_latent
- except sd_samplers_common.InterruptedException:
- return self.last_latent
-
- def number_of_needed_noises(self, p):
- return p.steps
-
- def initialize(self, p):
- self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
- self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
- self.model_wrap_cfg.step = 0
- self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
- self.eta = p.eta if p.eta is not None else opts.eta_ancestral
- self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
-
- k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
-
- extra_params_kwargs = {}
- for param_name in self.extra_params:
- if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
- extra_params_kwargs[param_name] = getattr(p, param_name)
-
- if 'eta' in inspect.signature(self.func).parameters:
- if self.eta != 1.0:
- p.extra_generation_params["Eta"] = self.eta
-
- extra_params_kwargs['eta'] = self.eta
-
- return extra_params_kwargs
+ self.model_wrap_cfg = CFGDenoiserKDiffusion(self)
+ self.model_wrap = self.model_wrap_cfg.inner_model
def get_sigmas(self, p, steps):
discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
@@ -376,6 +127,9 @@ class KDiffusionSampler:
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
+ elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential':
+ m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
+ sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
@@ -384,24 +138,21 @@ class KDiffusionSampler:
return sigmas
- def create_noise_sampler(self, x, sigmas, p):
- """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
- if shared.opts.no_dpmpp_sde_batch_determinism:
- return None
-
- from k_diffusion.sampling import BrownianTreeNoiseSampler
- sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
- current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
- return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)
-
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
sigmas = self.get_sigmas(p, steps)
-
sigma_sched = sigmas[steps - t_enc - 1:]
+
xi = x + noise * sigma_sched[0]
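+ # optionally add extra noise on top of the already-noised img2img latent; recorded as "Extra noise" in the generation parameters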
+ if opts.img2img_extra_noise > 0:
+ p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise
+ extra_noise_params = ExtraNoiseParams(noise, x, xi)
+ extra_noise_callback(extra_noise_params)
+ noise = extra_noise_params.noise
+ xi += noise * opts.img2img_extra_noise
+
extra_params_kwargs = self.initialize(p)
parameters = inspect.signature(self.func).parameters
@@ -421,9 +172,12 @@ class KDiffusionSampler:
noise_sampler = self.create_noise_sampler(x, sigmas, p)
extra_params_kwargs['noise_sampler'] = noise_sampler
+ if self.config.options.get('solver_type', None) == 'heun':
+ extra_params_kwargs['solver_type'] = 'heun'
+
self.model_wrap_cfg.init_latent = x
self.last_latent = x
- extra_args = {
+ self.sampler_extra_args = {
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
@@ -431,7 +185,7 @@ class KDiffusionSampler:
's_min_uncond': self.s_min_uncond
}
- samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
+ samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
if self.model_wrap_cfg.padded_cond_uncond:
p.extra_generation_params["Pad conds"] = True
@@ -443,34 +197,46 @@ class KDiffusionSampler:
sigmas = self.get_sigmas(p, steps)
- x = x * sigmas[0]
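+ # scale the initial noise by sqrt(1 + sigma_max^2) (SGM convention) rather than by sigma_max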
+ if opts.sgm_noise_multiplier:
+ p.extra_generation_params["SGM noise multiplier"] = True
+ x = x * torch.sqrt(1.0 + sigmas[0] ** 2.0)
+ else:
+ x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
parameters = inspect.signature(self.func).parameters
+ if 'n' in parameters:
+ extra_params_kwargs['n'] = steps
+
if 'sigma_min' in parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
- if 'n' in parameters:
- extra_params_kwargs['n'] = steps
- else:
+
+ if 'sigmas' in parameters:
extra_params_kwargs['sigmas'] = sigmas
if self.config.options.get('brownian_noise', False):
noise_sampler = self.create_noise_sampler(x, sigmas, p)
extra_params_kwargs['noise_sampler'] = noise_sampler
+ if self.config.options.get('solver_type', None) == 'heun':
+ extra_params_kwargs['solver_type'] = 'heun'
+
self.last_latent = x
- samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
+ self.sampler_extra_args = {
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale,
's_min_uncond': self.s_min_uncond
- }, disable=False, callback=self.callback_state, **extra_params_kwargs))
+ }
+
+ samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
if self.model_wrap_cfg.padded_cond_uncond:
p.extra_generation_params["Pad conds"] = True
return samples
+
diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py
new file mode 100644
index 00000000..777dd8d0
--- /dev/null
+++ b/modules/sd_samplers_timesteps.py
@@ -0,0 +1,168 @@
+import torch
+import inspect
+import sys
+from modules import devices, sd_samplers_common, sd_samplers_timesteps_impl
+from modules.sd_samplers_cfg_denoiser import CFGDenoiser
+from modules.script_callbacks import ExtraNoiseParams, extra_noise_callback
+
+from modules.shared import opts
+import modules.shared as shared
+
+samplers_timesteps = [
+ ('DDIM', sd_samplers_timesteps_impl.ddim, ['ddim'], {}),
+ ('PLMS', sd_samplers_timesteps_impl.plms, ['plms'], {}),
+ ('UniPC', sd_samplers_timesteps_impl.unipc, ['unipc'], {}),
+]
+
+
+samplers_data_timesteps = [
+ sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: CompVisSampler(funcname, model), aliases, options)
+ for label, funcname, aliases, options in samplers_timesteps
+]
+
+
+class CompVisTimestepsDenoiser(torch.nn.Module):
+ def __init__(self, model, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.inner_model = model
+
+ def forward(self, input, timesteps, **kwargs):
+ return self.inner_model.apply_model(input, timesteps, **kwargs)
+
+
+class CompVisTimestepsVDenoiser(torch.nn.Module):
+ def __init__(self, model, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.inner_model = model
+
+ def predict_eps_from_z_and_v(self, x_t, t, v):
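+ # convert the model's v-prediction into an epsilon prediction: eps = sqrt(alpha_bar_t) * v + sqrt(1 - alpha_bar_t) * x_t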
+ return torch.sqrt(self.inner_model.alphas_cumprod)[t.to(torch.int), None, None, None] * v + torch.sqrt(1 - self.inner_model.alphas_cumprod)[t.to(torch.int), None, None, None] * x_t
+
+ def forward(self, input, timesteps, **kwargs):
+ model_output = self.inner_model.apply_model(input, timesteps, **kwargs)
+ e_t = self.predict_eps_from_z_and_v(input, timesteps, model_output)
+ return e_t
+
+
+class CFGDenoiserTimesteps(CFGDenoiser):
+
+ def __init__(self, sampler):
+ super().__init__(sampler)
+
+ self.alphas = shared.sd_model.alphas_cumprod
+ self.mask_before_denoising = True
+
+ def get_pred_x0(self, x_in, x_out, sigma):
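+ # here "sigma" carries the integer timestep; x0 is recovered from the eps prediction with the DDIM formula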
+ ts = sigma.to(dtype=int)
+
+ a_t = self.alphas[ts][:, None, None, None]
+ sqrt_one_minus_at = (1 - a_t).sqrt()
+
+ pred_x0 = (x_in - sqrt_one_minus_at * x_out) / a_t.sqrt()
+
+ return pred_x0
+
+ @property
+ def inner_model(self):
+ if self.model_wrap is None:
+ denoiser = CompVisTimestepsVDenoiser if shared.sd_model.parameterization == "v" else CompVisTimestepsDenoiser
+ self.model_wrap = denoiser(shared.sd_model)
+
+ return self.model_wrap
+
+
+class CompVisSampler(sd_samplers_common.Sampler):
+ def __init__(self, funcname, sd_model):
+ super().__init__(funcname)
+
+ self.eta_option_field = 'eta_ddim'
+ self.eta_infotext_field = 'Eta DDIM'
+ self.eta_default = 0.0
+
+ self.model_wrap_cfg = CFGDenoiserTimesteps(self)
+ self.model_wrap = self.model_wrap_cfg.inner_model
+
+ def get_timesteps(self, p, steps):
+ discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
+ if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
+ discard_next_to_last_sigma = True
+ p.extra_generation_params["Discard penultimate sigma"] = True
+
+ steps += 1 if discard_next_to_last_sigma else 0
+
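+ # evenly spaced integer timesteps across [0, 1000), shifted by +1 and clipped to 999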
+ timesteps = torch.clip(torch.asarray(list(range(0, 1000, 1000 // steps)), device=devices.device) + 1, 0, 999)
+
+ return timesteps
+
+ def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
+
+ timesteps = self.get_timesteps(p, steps)
+ timesteps_sched = timesteps[:t_enc]
+
+ alphas_cumprod = shared.sd_model.alphas_cumprod
+ sqrt_alpha_cumprod = torch.sqrt(alphas_cumprod[timesteps[t_enc]])
+ sqrt_one_minus_alpha_cumprod = torch.sqrt(1 - alphas_cumprod[timesteps[t_enc]])
+
+ xi = x * sqrt_alpha_cumprod + noise * sqrt_one_minus_alpha_cumprod
+
+ if opts.img2img_extra_noise > 0:
+ p.extra_generation_params["Extra noise"] = opts.img2img_extra_noise
+ extra_noise_params = ExtraNoiseParams(noise, x, xi)
+ extra_noise_callback(extra_noise_params)
+ noise = extra_noise_params.noise
+ xi += noise * opts.img2img_extra_noise * sqrt_alpha_cumprod
+
+ extra_params_kwargs = self.initialize(p)
+ parameters = inspect.signature(self.func).parameters
+
+ if 'timesteps' in parameters:
+ extra_params_kwargs['timesteps'] = timesteps_sched
+ if 'is_img2img' in parameters:
+ extra_params_kwargs['is_img2img'] = True
+
+ self.model_wrap_cfg.init_latent = x
+ self.last_latent = x
+ self.sampler_extra_args = {
+ 'cond': conditioning,
+ 'image_cond': image_conditioning,
+ 'uncond': unconditional_conditioning,
+ 'cond_scale': p.cfg_scale,
+ 's_min_uncond': self.s_min_uncond
+ }
+
+ samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+ if self.model_wrap_cfg.padded_cond_uncond:
+ p.extra_generation_params["Pad conds"] = True
+
+ return samples
+
+ def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ steps = steps or p.steps
+ timesteps = self.get_timesteps(p, steps)
+
+ extra_params_kwargs = self.initialize(p)
+ parameters = inspect.signature(self.func).parameters
+
+ if 'timesteps' in parameters:
+ extra_params_kwargs['timesteps'] = timesteps
+
+ self.last_latent = x
+ self.sampler_extra_args = {
+ 'cond': conditioning,
+ 'image_cond': image_conditioning,
+ 'uncond': unconditional_conditioning,
+ 'cond_scale': p.cfg_scale,
+ 's_min_uncond': self.s_min_uncond
+ }
+ samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+ if self.model_wrap_cfg.padded_cond_uncond:
+ p.extra_generation_params["Pad conds"] = True
+
+ return samples
+
+
+sys.modules['modules.sd_samplers_compvis'] = sys.modules[__name__]
+VanillaStableDiffusionSampler = CompVisSampler # temp. compatibility with older extensions
diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py
new file mode 100644
index 00000000..930a64af
--- /dev/null
+++ b/modules/sd_samplers_timesteps_impl.py
@@ -0,0 +1,137 @@
+import torch
+import tqdm
+import k_diffusion.sampling
+import numpy as np
+
+from modules import shared
+from modules.models.diffusion.uni_pc import uni_pc
+
+
+@torch.no_grad()
+def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0):
+ alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
+ alphas = alphas_cumprod[timesteps]
+ alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
+ sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
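+ # DDIM noise scale sigma_t = eta * sqrt((1 - a_prev)/(1 - a_t)) * sqrt(1 - a_t/a_prev); eta = 0 gives deterministic DDIM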
+ sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy()))
+
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones((x.shape[0]))
+ s_x = x.new_ones((x.shape[0], 1, 1, 1))
+ for i in tqdm.trange(len(timesteps) - 1, disable=disable):
+ index = len(timesteps) - 1 - i
+
+ e_t = model(x, timesteps[index].item() * s_in, **extra_args)
+
+ a_t = alphas[index].item() * s_x
+ a_prev = alphas_prev[index].item() * s_x
+ sigma_t = sigmas[index].item() * s_x
+ sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_x
+
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
+ dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
+ noise = sigma_t * k_diffusion.sampling.torch.randn_like(x)
+ x = a_prev.sqrt() * pred_x0 + dir_xt + noise
+
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': 0, 'sigma_hat': 0, 'denoised': pred_x0})
+
+ return x
+
+
+@torch.no_grad()
+def plms(model, x, timesteps, extra_args=None, callback=None, disable=None):
+ alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
+ alphas = alphas_cumprod[timesteps]
+ alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
+ sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
+
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ s_x = x.new_ones((x.shape[0], 1, 1, 1))
+ old_eps = []
+
+ def get_x_prev_and_pred_x0(e_t, index):
+ # select parameters corresponding to the currently considered timestep
+ a_t = alphas[index].item() * s_x
+ a_prev = alphas_prev[index].item() * s_x
+ sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_x
+
+ # current prediction for x_0
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
+
+ # direction pointing to x_t
+ dir_xt = (1. - a_prev).sqrt() * e_t
+ x_prev = a_prev.sqrt() * pred_x0 + dir_xt
+ return x_prev, pred_x0
+
+ for i in tqdm.trange(len(timesteps) - 1, disable=disable):
+ index = len(timesteps) - 1 - i
+ ts = timesteps[index].item() * s_in
+ t_next = timesteps[max(index - 1, 0)].item() * s_in
+
+ e_t = model(x, ts, **extra_args)
+
+ if len(old_eps) == 0:
+ # Pseudo Improved Euler (2nd order)
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
+ e_t_next = model(x_prev, t_next, **extra_args)
+ e_t_prime = (e_t + e_t_next) / 2
+ elif len(old_eps) == 1:
+ # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
+ e_t_prime = (3 * e_t - old_eps[-1]) / 2
+ elif len(old_eps) == 2:
+ # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
+ e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
+ else:
+ # 4th order Pseudo Linear Multistep (Adams-Bashforth)
+ e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
+
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
+
+ old_eps.append(e_t)
+ if len(old_eps) >= 4:
+ old_eps.pop(0)
+
+ x = x_prev
+
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': 0, 'sigma_hat': 0, 'denoised': pred_x0})
+
+ return x
+
+
+class UniPCCFG(uni_pc.UniPC):
+ def __init__(self, cfg_model, extra_args, callback, *args, **kwargs):
+ super().__init__(None, *args, **kwargs)
+
+ def after_update(x, model_x):
+ callback({'x': x, 'i': self.index, 'sigma': 0, 'sigma_hat': 0, 'denoised': model_x})
+ self.index += 1
+
+ self.cfg_model = cfg_model
+ self.extra_args = extra_args
+ self.callback = callback
+ self.index = 0
+ self.after_update = after_update
+
+ def get_model_input_time(self, t_continuous):
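+ # map continuous time in (0, 1] onto the discrete model's [0, 1000) timestep range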
+ return (t_continuous - 1. / self.noise_schedule.total_N) * 1000.
+
+ def model(self, x, t):
+ t_input = self.get_model_input_time(t)
+
+ res = self.cfg_model(x, t_input, **self.extra_args)
+
+ return res
+
+
+def unipc(model, x, timesteps, extra_args=None, callback=None, disable=None, is_img2img=False):
+ alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
+
+ ns = uni_pc.NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
+ t_start = timesteps[-1] / 1000 + 1 / 1000 if is_img2img else None # this is likely off by a bit - if someone wants to fix it, please do
+ unipc_sampler = UniPCCFG(model, extra_args, callback, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant)
+ x = unipc_sampler.sample(x, steps=len(timesteps), t_start=t_start, skip_type=shared.opts.uni_pc_skip_type, method="multistep", order=shared.opts.uni_pc_order, lower_order_final=shared.opts.uni_pc_lower_order_final)
+
+ return x
diff --git a/modules/sd_unet.py b/modules/sd_unet.py
index 6d708ad2..a771849c 100644
--- a/modules/sd_unet.py
+++ b/modules/sd_unet.py
@@ -1,12 +1,11 @@
import torch.nn
-import ldm.modules.diffusionmodules.openaimodel
from modules import script_callbacks, shared, devices
unet_options = []
current_unet_option = None
current_unet = None
-
+original_forward = None # not used, only left temporarily for compatibility
def list_unets():
new_unets = script_callbacks.list_unets_callback()
@@ -47,7 +46,7 @@ def apply_unet(option=None):
if current_unet_option is None:
current_unet = None
- if not (shared.cmd_opts.lowvram or shared.cmd_opts.medvram):
+ if not shared.sd_model.lowvram:
shared.sd_model.model.diffusion_model.to(devices.device)
return
@@ -84,9 +83,12 @@ class SdUnet(torch.nn.Module):
pass
-def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs):
- if current_unet is not None:
- return current_unet.forward(x, timesteps, context, *args, **kwargs)
+def create_unet_forward(original_forward):
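+ # build a UNet forward that dispatches to the currently active replacement UNet, falling back to the original forward otherwise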
+ def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs):
+ if current_unet is not None:
+ return current_unet.forward(x, timesteps, context, *args, **kwargs)
+
+ return original_forward(self, x, timesteps, context, *args, **kwargs)
- return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs)
+ return UNetModel_forward
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index e4ff2994..31306d8b 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -1,6 +1,9 @@
import os
import collections
-from modules import paths, shared, devices, script_callbacks, sd_models
+from dataclasses import dataclass
+
+from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack, hashes
+
import glob
from copy import deepcopy
@@ -16,6 +19,23 @@ checkpoint_info = None
checkpoints_loaded = collections.OrderedDict()
+
+def get_loaded_vae_name():
+ if loaded_vae_file is None:
+ return None
+
+ return os.path.basename(loaded_vae_file)
+
+
+def get_loaded_vae_hash():
+ if loaded_vae_file is None:
+ return None
+
+ sha256 = hashes.sha256(loaded_vae_file, 'vae')
+
+ return sha256[0:10] if sha256 else None
+
+
def get_base_vae(model):
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model:
return base_vae
@@ -83,6 +103,8 @@ def refresh_vae_list():
name = get_filename(filepath)
vae_dict[name] = filepath
+ vae_dict.update(dict(sorted(vae_dict.items(), key=lambda item: shared.natural_sort_key(item[0]))))
+
def find_vae_near_checkpoint(checkpoint_file):
checkpoint_path = os.path.basename(checkpoint_file).rsplit('.', 1)[0]
@@ -93,27 +115,74 @@ def find_vae_near_checkpoint(checkpoint_file):
return None
-def resolve_vae(checkpoint_file):
- if shared.cmd_opts.vae_path is not None:
- return shared.cmd_opts.vae_path, 'from commandline argument'
+@dataclass
+class VaeResolution:
+ vae: str = None
+ source: str = None
+ resolved: bool = True
- is_automatic = shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config
+ def tuple(self):
+ return self.vae, self.source
- vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file)
- if vae_near_checkpoint is not None and (shared.opts.sd_vae_as_default or is_automatic):
- return vae_near_checkpoint, 'found near the checkpoint'
+def is_automatic():
+ return shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config
+
+
+def resolve_vae_from_setting() -> VaeResolution:
if shared.opts.sd_vae == "None":
- return None, None
+ return VaeResolution()
vae_from_options = vae_dict.get(shared.opts.sd_vae, None)
if vae_from_options is not None:
- return vae_from_options, 'specified in settings'
+ return VaeResolution(vae_from_options, 'specified in settings')
- if not is_automatic:
+ if not is_automatic():
print(f"Couldn't find VAE named {shared.opts.sd_vae}; using None instead")
- return None, None
+ return VaeResolution(resolved=False)
+
+
+def resolve_vae_from_user_metadata(checkpoint_file) -> VaeResolution:
+ metadata = extra_networks.get_user_metadata(checkpoint_file)
+ vae_metadata = metadata.get("vae", None)
+ if vae_metadata is not None and vae_metadata != "Automatic":
+ if vae_metadata == "None":
+ return VaeResolution()
+
+ vae_from_metadata = vae_dict.get(vae_metadata, None)
+ if vae_from_metadata is not None:
+ return VaeResolution(vae_from_metadata, "from user metadata")
+
+ return VaeResolution(resolved=False)
+
+
+def resolve_vae_near_checkpoint(checkpoint_file) -> VaeResolution:
+ vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file)
+ if vae_near_checkpoint is not None and (not shared.opts.sd_vae_overrides_per_model_preferences or is_automatic()):
+ return VaeResolution(vae_near_checkpoint, 'found near the checkpoint')
+
+ return VaeResolution(resolved=False)
+
+
+def resolve_vae(checkpoint_file) -> VaeResolution:
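+ # resolution order: command-line VAE path, the settings value when it overrides per-model preferences, per-checkpoint user metadata, a VAE file next to the checkpoint, then the settings value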
+ if shared.cmd_opts.vae_path is not None:
+ return VaeResolution(shared.cmd_opts.vae_path, 'from commandline argument')
+
+ if shared.opts.sd_vae_overrides_per_model_preferences and not is_automatic():
+ return resolve_vae_from_setting()
+
+ res = resolve_vae_from_user_metadata(checkpoint_file)
+ if res.resolved:
+ return res
+
+ res = resolve_vae_near_checkpoint(checkpoint_file)
+ if res.resolved:
+ return res
+
+ res = resolve_vae_from_setting()
+
+ return res
def load_vae_dict(filename, map_location):
@@ -123,7 +192,7 @@ def load_vae_dict(filename, map_location):
def load_vae(model, vae_file=None, vae_source="from unknown source"):
- global vae_dict, loaded_vae_file
+ global vae_dict, base_vae, loaded_vae_file
# save_settings = False
cache_enabled = shared.opts.sd_vae_checkpoint_cache > 0
@@ -161,6 +230,8 @@ def load_vae(model, vae_file=None, vae_source="from unknown source"):
restore_base_vae(model)
loaded_vae_file = vae_file
+ model.base_vae = base_vae
+ model.loaded_vae_file = loaded_vae_file
# don't call this from outside
@@ -178,8 +249,6 @@ unspecified = object()
def reload_vae_weights(sd_model=None, vae_file=unspecified):
- from modules import lowvram, devices, sd_hijack
-
if not sd_model:
sd_model = shared.sd_model
@@ -187,14 +256,14 @@ def reload_vae_weights(sd_model=None, vae_file=unspecified):
checkpoint_file = checkpoint_info.filename
if vae_file == unspecified:
- vae_file, vae_source = resolve_vae(checkpoint_file)
+ vae_file, vae_source = resolve_vae(checkpoint_file).tuple()
else:
vae_source = "from function argument"
if loaded_vae_file == vae_file:
return
- if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ if sd_model.lowvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
@@ -206,7 +275,7 @@ def reload_vae_weights(sd_model=None, vae_file=unspecified):
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
- if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
+ if not sd_model.lowvram:
sd_model.to(devices.device)
print("VAE weights loaded.")
diff --git a/modules/sd_vae_approx.py b/modules/sd_vae_approx.py
index 86bd658a..3965e223 100644
--- a/modules/sd_vae_approx.py
+++ b/modules/sd_vae_approx.py
@@ -81,6 +81,6 @@ def cheap_approximation(sample):
coefs = torch.tensor(coeffs).to(sample.device)
- x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs)
+ x_sample = torch.einsum("...lxy,lr -> ...rxy", sample, coefs)
return x_sample
diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py
index 5bf7c76e..808eb362 100644
--- a/modules/sd_vae_taesd.py
+++ b/modules/sd_vae_taesd.py
@@ -44,7 +44,17 @@ def decoder():
)
-class TAESD(nn.Module):
+def encoder():
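+ # tiny encoder: three stride-2 stages downscale the RGB input 8x into a 4-channel latent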
+ return nn.Sequential(
+ conv(3, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 4),
+ )
+
+
+class TAESDDecoder(nn.Module):
latent_magnitude = 3
latent_shift = 0.5
@@ -55,21 +65,28 @@ class TAESD(nn.Module):
self.decoder.load_state_dict(
torch.load(decoder_path, map_location='cpu' if devices.device.type != 'cuda' else None))
- @staticmethod
- def unscale_latents(x):
- """[0, 1] -> raw latents"""
- return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude)
+
+class TAESDEncoder(nn.Module):
+ latent_magnitude = 3
+ latent_shift = 0.5
+
+ def __init__(self, encoder_path="taesd_encoder.pth"):
+ """Initialize pretrained TAESD on the given device from the given checkpoints."""
+ super().__init__()
+ self.encoder = encoder()
+ self.encoder.load_state_dict(
+ torch.load(encoder_path, map_location='cpu' if devices.device.type != 'cuda' else None))
def download_model(model_path, model_url):
if not os.path.exists(model_path):
os.makedirs(os.path.dirname(model_path), exist_ok=True)
- print(f'Downloading TAESD decoder to: {model_path}')
+ print(f'Downloading TAESD model to: {model_path}')
torch.hub.download_url_to_file(model_url, model_path)
-def model():
+def decoder_model():
model_name = "taesdxl_decoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_decoder.pth"
loaded_model = sd_vae_taesd_models.get(model_name)
@@ -78,7 +95,7 @@ def model():
download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name)
if os.path.exists(model_path):
- loaded_model = TAESD(model_path)
+ loaded_model = TAESDDecoder(model_path)
loaded_model.eval()
loaded_model.to(devices.device, devices.dtype)
sd_vae_taesd_models[model_name] = loaded_model
@@ -86,3 +103,22 @@ def model():
raise FileNotFoundError('TAESD model not found')
return loaded_model.decoder
+
+
+def encoder_model():
+ model_name = "taesdxl_encoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_encoder.pth"
+ loaded_model = sd_vae_taesd_models.get(model_name)
+
+ if loaded_model is None:
+ model_path = os.path.join(paths_internal.models_path, "VAE-taesd", model_name)
+ download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name)
+
+ if os.path.exists(model_path):
+ loaded_model = TAESDEncoder(model_path)
+ loaded_model.eval()
+ loaded_model.to(devices.device, devices.dtype)
+ sd_vae_taesd_models[model_name] = loaded_model
+ else:
+ raise FileNotFoundError('TAESD model not found')
+
+ return loaded_model.encoder
diff --git a/modules/shared.py b/modules/shared.py
index aa72c9c8..63661939 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -1,771 +1,51 @@
-import datetime
-import json
-import os
-import re
import sys
-import threading
-import time
-import logging
import gradio as gr
-import torch
-import tqdm
-import launch
-import modules.interrogate
-import modules.memmon
-import modules.styles
-import modules.devices as devices
-from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args
+from modules import shared_cmd_options, shared_gradio_themes, options, shared_items, sd_models_types
from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
-from ldm.models.diffusion.ddpm import LatentDiffusion
-from typing import Optional
+from modules import util
-log = logging.getLogger(__name__)
+cmd_opts = shared_cmd_options.cmd_opts
+parser = shared_cmd_options.parser
-demo = None
-
-parser = cmd_args.parser
-
-script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
-script_loading.preload_extensions(extensions_builtin_dir, parser)
-
-if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
- cmd_opts = parser.parse_args()
-else:
- cmd_opts, _ = parser.parse_known_args()
-
-
-restricted_opts = {
- "samples_filename_pattern",
- "directories_filename_pattern",
- "outdir_samples",
- "outdir_txt2img_samples",
- "outdir_img2img_samples",
- "outdir_extras_samples",
- "outdir_grids",
- "outdir_txt2img_grids",
- "outdir_save",
- "outdir_init_images"
-}
-
-# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
-gradio_hf_hub_themes = [
- "gradio/glass",
- "gradio/monochrome",
- "gradio/seafoam",
- "gradio/soft",
- "freddyaboulton/dracula_revamped",
- "gradio/dracula_test",
- "abidlabs/dracula_test",
- "abidlabs/pakistan",
- "dawood/microsoft_windows",
- "ysharma/steampunk"
-]
-
-
-cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
+batch_cond_uncond = True # old field, unused now in favor of shared.opts.batch_cond_uncond
+parallel_processing_allowed = True
+styles_filename = cmd_opts.styles_file
+config_filename = cmd_opts.ui_settings_file
+hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
-devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
- (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
+demo = None
-devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
-devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
+device = None
-device = devices.device
-weight_load_location = None if cmd_opts.lowram else "cpu"
+weight_load_location = None
-batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
-parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
-config_filename = cmd_opts.ui_settings_file
-os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
hypernetworks = {}
-loaded_hypernetworks = []
-
-
-def reload_hypernetworks():
- from modules.hypernetworks import hypernetwork
- global hypernetworks
-
- hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
-
-
-class State:
- skipped = False
- interrupted = False
- job = ""
- job_no = 0
- job_count = 0
- processing_has_refined_job_count = False
- job_timestamp = '0'
- sampling_step = 0
- sampling_steps = 0
- current_latent = None
- current_image = None
- current_image_sampling_step = 0
- id_live_preview = 0
- textinfo = None
- time_start = None
- server_start = None
- _server_command_signal = threading.Event()
- _server_command: Optional[str] = None
-
- @property
- def need_restart(self) -> bool:
- # Compatibility getter for need_restart.
- return self.server_command == "restart"
-
- @need_restart.setter
- def need_restart(self, value: bool) -> None:
- # Compatibility setter for need_restart.
- if value:
- self.server_command = "restart"
-
- @property
- def server_command(self):
- return self._server_command
-
- @server_command.setter
- def server_command(self, value: Optional[str]) -> None:
- """
- Set the server command to `value` and signal that it's been set.
- """
- self._server_command = value
- self._server_command_signal.set()
-
- def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]:
- """
- Wait for server command to get set; return and clear the value and signal.
- """
- if self._server_command_signal.wait(timeout):
- self._server_command_signal.clear()
- req = self._server_command
- self._server_command = None
- return req
- return None
-
- def request_restart(self) -> None:
- self.interrupt()
- self.server_command = "restart"
- log.info("Received restart request")
-
- def skip(self):
- self.skipped = True
- log.info("Received skip request")
-
- def interrupt(self):
- self.interrupted = True
- log.info("Received interrupt request")
-
- def nextjob(self):
- if opts.live_previews_enable and opts.show_progress_every_n_steps == -1:
- self.do_set_current_image()
-
- self.job_no += 1
- self.sampling_step = 0
- self.current_image_sampling_step = 0
-
- def dict(self):
- obj = {
- "skipped": self.skipped,
- "interrupted": self.interrupted,
- "job": self.job,
- "job_count": self.job_count,
- "job_timestamp": self.job_timestamp,
- "job_no": self.job_no,
- "sampling_step": self.sampling_step,
- "sampling_steps": self.sampling_steps,
- }
-
- return obj
-
- def begin(self, job: str = "(unknown)"):
- self.sampling_step = 0
- self.job_count = -1
- self.processing_has_refined_job_count = False
- self.job_no = 0
- self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
- self.current_latent = None
- self.current_image = None
- self.current_image_sampling_step = 0
- self.id_live_preview = 0
- self.skipped = False
- self.interrupted = False
- self.textinfo = None
- self.time_start = time.time()
- self.job = job
- devices.torch_gc()
- log.info("Starting job %s", job)
-
- def end(self):
- duration = time.time() - self.time_start
- log.info("Ending job %s (%.2f seconds)", self.job, duration)
- self.job = ""
- self.job_count = 0
-
- devices.torch_gc()
-
- def set_current_image(self):
- """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
- if not parallel_processing_allowed:
- return
-
- if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1:
- self.do_set_current_image()
-
- def do_set_current_image(self):
- if self.current_latent is None:
- return
-
- import modules.sd_samplers
- if opts.show_progress_grid:
- self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
- else:
- self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
-
- self.current_image_sampling_step = self.sampling_step
-
- def assign_current_image(self, image):
- self.current_image = image
- self.id_live_preview += 1
+loaded_hypernetworks = []
-state = State()
-state.server_start = time.time()
+state = None
-styles_filename = cmd_opts.styles_file
-prompt_styles = modules.styles.StyleDatabase(styles_filename)
+prompt_styles = None
-interrogator = modules.interrogate.InterrogateModels("interrogate")
+interrogator = None
face_restorers = []
+options_templates = None
+opts = None
+restricted_opts = None
-class OptionInfo:
- def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''):
- self.default = default
- self.label = label
- self.component = component
- self.component_args = component_args
- self.onchange = onchange
- self.section = section
- self.refresh = refresh
-
- self.comment_before = comment_before
- """HTML text that will be added after label in UI"""
-
- self.comment_after = comment_after
- """HTML text that will be added before label in UI"""
-
- def link(self, label, url):
- self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]"
- return self
-
- def js(self, label, js_func):
- self.comment_before += f"[<a onclick='{js_func}(); return false'>{label}</a>]"
- return self
-
- def info(self, info):
- self.comment_after += f"<span class='info'>({info})</span>"
- return self
-
- def html(self, html):
- self.comment_after += html
- return self
-
- def needs_restart(self):
- self.comment_after += " <span class='info'>(requires restart)</span>"
- return self
-
-
-
-
-def options_section(section_identifier, options_dict):
- for v in options_dict.values():
- v.section = section_identifier
-
- return options_dict
-
-
-def list_checkpoint_tiles():
- import modules.sd_models
- return modules.sd_models.checkpoint_tiles()
-
-
-def refresh_checkpoints():
- import modules.sd_models
- return modules.sd_models.list_models()
-
-
-def list_samplers():
- import modules.sd_samplers
- return modules.sd_samplers.all_samplers
-
-
-hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
-tab_names = []
-
-options_templates = {}
-
-options_templates.update(options_section(('saving-images', "Saving images/grids"), {
- "samples_save": OptionInfo(True, "Always save all generated images"),
- "samples_format": OptionInfo('png', 'File format for images'),
- "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
- "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
-
- "grid_save": OptionInfo(True, "Always save all generated image grids"),
- "grid_format": OptionInfo('png', 'File format for grids'),
- "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
- "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
- "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
- "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
- "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
- "font": OptionInfo("", "Font for image grids that have text"),
- "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}),
- "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}),
- "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}),
-
- "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
- "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
- "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
- "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
- "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
- "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
- "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
- "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
- "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
- "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"),
- "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
- "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
- "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"),
-
- "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
- "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
- "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
- "save_init_img": OptionInfo(False, "Save init images when using img2img"),
-
- "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
- "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"),
-
-}))
-
-options_templates.update(options_section(('saving-paths', "Paths for saving"), {
- "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
- "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
- "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
- "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
- "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
- "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
- "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
- "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
- "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs),
-}))
-
-options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
- "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
- "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
- "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
- "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
- "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
-}))
-
-options_templates.update(options_section(('upscaling', "Upscaling"), {
- "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
- "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
- "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
- "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
-}))
-
-options_templates.update(options_section(('face-restoration', "Face restoration"), {
- "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
- "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"),
- "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
-}))
-
-options_templates.update(options_section(('system', "System"), {
- "show_warnings": OptionInfo(False, "Show warnings in console."),
- "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"),
- "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
- "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
- "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
- "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""),
- "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"),
-}))
-
-options_templates.update(options_section(('training', "Training"), {
- "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
- "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
- "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
- "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
- "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
- "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
- "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
- "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
- "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
- "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
- "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
- "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
-}))
-
-options_templates.update(options_section(('sd', "Stable Diffusion"), {
- "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
- "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
- "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
- "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
- "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
- "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
- "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
- "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
- "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"),
- "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", ui_components.FormColorPicker, {}),
- "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
- "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
- "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
- "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
- "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
- "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
- "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
- "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"),
-}))
-
-options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
- "sdxl_crop_top": OptionInfo(0, "crop top coordinate"),
- "sdxl_crop_left": OptionInfo(0, "crop left coordinate"),
- "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"),
- "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"),
-}))
-
-options_templates.update(options_section(('optimizations', "Optimizations"), {
- "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
- "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
- "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
- "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
- "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
- "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
- "experimental_persistent_cond_cache": OptionInfo(False, "persistent cond cache").info("Experimental, keep cond caches across jobs, reduce overhead."),
-}))
-
-options_templates.update(options_section(('compatibility', "Compatibility"), {
- "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
- "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
- "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
- "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
- "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."),
- "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."),
-}))
-
-options_templates.update(options_section(('interrogate', "Interrogate Options"), {
- "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"),
- "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"),
- "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
- "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
- "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
- "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"),
- "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types),
- "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
- "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"),
- "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"),
- "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"),
- "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"),
-}))
-
-options_templates.update(options_section(('extra_networks', "Extra Networks"), {
- "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
- "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
- "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
- "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"),
- "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
- "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
- "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
- "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
- "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_restart(),
- "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
- "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
- "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks),
-}))
-
-options_templates.update(options_section(('ui', "User interface"), {
- "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_restart(),
- "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_restart(),
- "img2img_editor_height": OptionInfo(720, "img2img: height of image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_restart(),
- "return_grid": OptionInfo(True, "Show grid in results for web"),
- "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
- "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
- "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
- "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
- "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
- "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
- "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
- "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"),
- "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"),
- "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
- "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_restart(),
- "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_restart(),
- "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
- "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
- "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
- "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"),
- "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_restart(),
- "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
- "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
- "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_restart(),
- "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires sampler selection").needs_restart(),
- "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_restart(),
- "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_restart(),
-}))
-
-options_templates.update(options_section(('infotext', "Infotext"), {
- "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
- "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
- "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"),
- "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"),
- "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
- "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""<ul style='margin-left: 1.5em'>
-<li>Ignore: keep prompt and styles dropdown as it is.</li>
-<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
-<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
-<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
-</ul>"""),
-
-}))
-
-options_templates.update(options_section(('ui', "Live previews"), {
- "show_progressbar": OptionInfo(True, "Show progressbar"),
- "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
- "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
- "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
- "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"),
- "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"),
- "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
- "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
-}))
-
-options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
- "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_restart(),
- "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"),
- "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"),
- "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
- 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
- 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"),
- 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise schedule"),
- 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a more steep noise schedule (decreases faster)"),
- 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
- 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
- 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}),
- 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}),
- 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"),
- 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"),
-}))
-
-options_templates.update(options_section(('postprocessing', "Postprocessing"), {
- 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
- 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
- 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
-}))
-
-options_templates.update(options_section((None, "Hidden options"), {
- "disabled_extensions": OptionInfo([], "Disable these extensions"),
- "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}),
- "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"),
- "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
-}))
-
-
-options_templates.update()
-
-
-class Options:
- data = None
- data_labels = options_templates
- typemap = {int: float}
-
- def __init__(self):
- self.data = {k: v.default for k, v in self.data_labels.items()}
-
- def __setattr__(self, key, value):
- if self.data is not None:
- if key in self.data or key in self.data_labels:
- assert not cmd_opts.freeze_settings, "changing settings is disabled"
-
- info = opts.data_labels.get(key, None)
- comp_args = info.component_args if info else None
- if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
- raise RuntimeError(f"not possible to set {key} because it is restricted")
-
- if cmd_opts.hide_ui_dir_config and key in restricted_opts:
- raise RuntimeError(f"not possible to set {key} because it is restricted")
-
- self.data[key] = value
- return
-
- return super(Options, self).__setattr__(key, value)
-
- def __getattr__(self, item):
- if self.data is not None:
- if item in self.data:
- return self.data[item]
-
- if item in self.data_labels:
- return self.data_labels[item].default
-
- return super(Options, self).__getattribute__(item)
-
- def set(self, key, value):
- """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
-
- oldval = self.data.get(key, None)
- if oldval == value:
- return False
-
- try:
- setattr(self, key, value)
- except RuntimeError:
- return False
-
- if self.data_labels[key].onchange is not None:
- try:
- self.data_labels[key].onchange()
- except Exception as e:
- errors.display(e, f"changing setting {key} to {value}")
- setattr(self, key, oldval)
- return False
-
- return True
-
- def get_default(self, key):
- """returns the default value for the key"""
-
- data_label = self.data_labels.get(key)
- if data_label is None:
- return None
-
- return data_label.default
-
- def save(self, filename):
- assert not cmd_opts.freeze_settings, "saving settings is disabled"
-
- with open(filename, "w", encoding="utf8") as file:
- json.dump(self.data, file, indent=4)
-
- def same_type(self, x, y):
- if x is None or y is None:
- return True
-
- type_x = self.typemap.get(type(x), type(x))
- type_y = self.typemap.get(type(y), type(y))
-
- return type_x == type_y
-
- def load(self, filename):
- with open(filename, "r", encoding="utf8") as file:
- self.data = json.load(file)
-
- # 1.1.1 quicksettings list migration
- if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
- self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
-
- # 1.4.0 ui_reorder
- if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
- self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
-
- bad_settings = 0
- for k, v in self.data.items():
- info = self.data_labels.get(k, None)
- if info is not None and not self.same_type(info.default, v):
- print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
- bad_settings += 1
-
- if bad_settings > 0:
- print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
-
- def onchange(self, key, func, call=True):
- item = self.data_labels.get(key)
- item.onchange = func
-
- if call:
- func()
-
- def dumpjson(self):
- d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
- d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
- d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
- return json.dumps(d)
-
- def add_option(self, key, info):
- self.data_labels[key] = info
-
- def reorder(self):
- """reorder settings so that all items related to section always go together"""
-
- section_ids = {}
- settings_items = self.data_labels.items()
- for _, item in settings_items:
- if item.section not in section_ids:
- section_ids[item.section] = len(section_ids)
-
- self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
-
- def cast_value(self, key, value):
- """casts an arbitrary to the same type as this setting's value with key
- Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
- """
-
- if value is None:
- return None
-
- default_value = self.data_labels[key].default
- if default_value is None:
- default_value = getattr(self, key, None)
- if default_value is None:
- return None
-
- expected_type = type(default_value)
- if expected_type == bool and value == "False":
- value = False
- else:
- value = expected_type(value)
-
- return value
-
-
-opts = Options()
-if os.path.exists(config_filename):
- opts.load(config_filename)
-
-
-class Shared(sys.modules[__name__].__class__):
- """
- this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than
- at program startup.
- """
-
- sd_model_val = None
-
- @property
- def sd_model(self):
- import modules.sd_models
-
- return modules.sd_models.model_data.get_sd_model()
-
- @sd_model.setter
- def sd_model(self, value):
- import modules.sd_models
-
- modules.sd_models.model_data.set_sd_model(value)
-
-
-sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead
-sys.modules[__name__].__class__ = Shared
+sd_model: sd_models_types.WebuiSdModel = None
settings_components = None
"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings"""
+tab_names = []
+
latent_upscale_default_mode = "Latent"
latent_upscale_modes = {
"Latent": {"mode": "bilinear", "antialias": False},
@@ -784,108 +64,24 @@ progress_print_out = sys.stdout
gradio_theme = gr.themes.Base()
+total_tqdm = None
-def reload_gradio_theme(theme_name=None):
- global gradio_theme
- if not theme_name:
- theme_name = opts.gradio_theme
-
- default_theme_args = dict(
- font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'],
- font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
- )
-
- if theme_name == "Default":
- gradio_theme = gr.themes.Default(**default_theme_args)
- else:
- try:
- gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
- except Exception as e:
- errors.display(e, "changing gradio theme")
- gradio_theme = gr.themes.Default(**default_theme_args)
-
-
-
-class TotalTQDM:
- def __init__(self):
- self._tqdm = None
-
- def reset(self):
- self._tqdm = tqdm.tqdm(
- desc="Total progress",
- total=state.job_count * state.sampling_steps,
- position=1,
- file=progress_print_out
- )
-
- def update(self):
- if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
- return
- if self._tqdm is None:
- self.reset()
- self._tqdm.update()
-
- def updateTotal(self, new_total):
- if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
- return
- if self._tqdm is None:
- self.reset()
- self._tqdm.total = new_total
-
- def clear(self):
- if self._tqdm is not None:
- self._tqdm.refresh()
- self._tqdm.close()
- self._tqdm = None
-
-
-total_tqdm = TotalTQDM()
-
-mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
-mem_mon.start()
-
-
-def natural_sort_key(s, regex=re.compile('([0-9]+)')):
- return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]
-
-
-def listfiles(dirname):
- filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")]
- return [file for file in filenames if os.path.isfile(file)]
-
-
-def html_path(filename):
- return os.path.join(script_path, "html", filename)
-
-
-def html(filename):
- path = html_path(filename)
-
- if os.path.exists(path):
- with open(path, encoding="utf8") as file:
- return file.read()
-
- return ""
-
-
-def walk_files(path, allowed_extensions=None):
- if not os.path.exists(path):
- return
-
- if allowed_extensions is not None:
- allowed_extensions = set(allowed_extensions)
+mem_mon = None
- items = list(os.walk(path, followlinks=True))
- items = sorted(items, key=lambda x: natural_sort_key(x[0]))
+options_section = options.options_section
+OptionInfo = options.OptionInfo
+OptionHTML = options.OptionHTML
- for root, _, files in items:
- for filename in sorted(files, key=natural_sort_key):
- if allowed_extensions is not None:
- _, ext = os.path.splitext(filename)
- if ext not in allowed_extensions:
- continue
+natural_sort_key = util.natural_sort_key
+listfiles = util.listfiles
+html_path = util.html_path
+html = util.html
+walk_files = util.walk_files
+ldm_print = util.ldm_print
- if not opts.list_hidden_files and ("/." in root or "\\." in root):
- continue
+reload_gradio_theme = shared_gradio_themes.reload_gradio_theme
- yield os.path.join(root, filename)
+list_checkpoint_tiles = shared_items.list_checkpoint_tiles
+refresh_checkpoints = shared_items.refresh_checkpoints
+list_samplers = shared_items.list_samplers
+reload_hypernetworks = shared_items.reload_hypernetworks
diff --git a/modules/shared_cmd_options.py b/modules/shared_cmd_options.py
new file mode 100644
index 00000000..c9626667
--- /dev/null
+++ b/modules/shared_cmd_options.py
@@ -0,0 +1,18 @@
+import os
+
+import launch
+from modules import cmd_args, script_loading
+from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
+
+parser = cmd_args.parser
+
+script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
+script_loading.preload_extensions(extensions_builtin_dir, parser)
+
+if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
+ cmd_opts = parser.parse_args()
+else:
+ cmd_opts, _ = parser.parse_known_args()
+
+cmd_opts.webui_is_non_local = any([cmd_opts.share, cmd_opts.listen, cmd_opts.ngrok, cmd_opts.server_name])
+cmd_opts.disable_extension_access = cmd_opts.webui_is_non_local and not cmd_opts.enable_insecure_extension_access
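For orientation, a minimal sketch (the consumer function below is illustrative and not part of this change) of how other modules are now expected to read the parsed command-line options from this dedicated module rather than from modules.shared:

from modules.shared_cmd_options import cmd_opts

def extension_access_allowed() -> bool:
    # disable_extension_access is derived above from the share/listen/ngrok/server-name
    # flags unless --enable-insecure-extension-access is passed
    return not cmd_opts.disable_extension_access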
diff --git a/modules/shared_gradio_themes.py b/modules/shared_gradio_themes.py
new file mode 100644
index 00000000..822db0a9
--- /dev/null
+++ b/modules/shared_gradio_themes.py
@@ -0,0 +1,67 @@
+import os
+
+import gradio as gr
+
+from modules import errors, shared
+from modules.paths_internal import script_path
+
+
+# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
+gradio_hf_hub_themes = [
+ "gradio/base",
+ "gradio/glass",
+ "gradio/monochrome",
+ "gradio/seafoam",
+ "gradio/soft",
+ "gradio/dracula_test",
+ "abidlabs/dracula_test",
+ "abidlabs/Lime",
+ "abidlabs/pakistan",
+ "Ama434/neutral-barlow",
+ "dawood/microsoft_windows",
+ "finlaymacklon/smooth_slate",
+ "Franklisi/darkmode",
+ "freddyaboulton/dracula_revamped",
+ "freddyaboulton/test-blue",
+ "gstaff/xkcd",
+ "Insuz/Mocha",
+ "Insuz/SimpleIndigo",
+ "JohnSmith9982/small_and_pretty",
+ "nota-ai/theme",
+ "nuttea/Softblue",
+ "ParityError/Anime",
+ "reilnuud/polite",
+ "remilia/Ghostly",
+ "rottenlittlecreature/Moon_Goblin",
+ "step-3-profit/Midnight-Deep",
+ "Taithrah/Minimal",
+ "ysharma/huggingface",
+ "ysharma/steampunk",
+ "NoCrypt/miku"
+]
+
+
+def reload_gradio_theme(theme_name=None):
+ if not theme_name:
+ theme_name = shared.opts.gradio_theme
+
+ default_theme_args = dict(
+ font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'],
+ font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
+ )
+
+ if theme_name == "Default":
+ shared.gradio_theme = gr.themes.Default(**default_theme_args)
+ else:
+ try:
+ theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes')
+ theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json')
+ if shared.opts.gradio_themes_cache and os.path.exists(theme_cache_path):
+ shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path)
+ else:
+ os.makedirs(theme_cache_dir, exist_ok=True)
+ shared.gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
+ shared.gradio_theme.dump(theme_cache_path)
+ except Exception as e:
+ errors.display(e, "changing gradio theme")
+ shared.gradio_theme = gr.themes.Default(**default_theme_args)
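A brief note on the theme cache introduced above, with an illustrative value: a hub theme name maps to a JSON file under tmp/gradio_themes, and that file is only consulted when the gradio_themes_cache option is enabled.

import os
from modules.paths_internal import script_path

theme_name = "gradio/soft"  # illustrative hub theme from the list above
theme_cache_path = os.path.join(script_path, 'tmp', 'gradio_themes', f'{theme_name.replace("/", "_")}.json')
# -> <script_path>/tmp/gradio_themes/gradio_soft.json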
diff --git a/modules/shared_init.py b/modules/shared_init.py
new file mode 100644
index 00000000..d3fb687e
--- /dev/null
+++ b/modules/shared_init.py
@@ -0,0 +1,49 @@
+import os
+
+import torch
+
+from modules import shared
+from modules.shared import cmd_opts
+
+
+def initialize():
+ """Initializes fields inside the shared module in a controlled manner.
+
+    Should be called early because some other modules you can import might need these fields to be already set.
+ """
+
+ os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
+
+ from modules import options, shared_options
+ shared.options_templates = shared_options.options_templates
+ shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts)
+ shared.restricted_opts = shared_options.restricted_opts
+ if os.path.exists(shared.config_filename):
+ shared.opts.load(shared.config_filename)
+
+ from modules import devices
+ devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
+ (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
+
+ devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
+ devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
+
+ shared.device = devices.device
+ shared.weight_load_location = None if cmd_opts.lowram else "cpu"
+
+ from modules import shared_state
+ shared.state = shared_state.State()
+
+ from modules import styles
+ shared.prompt_styles = styles.StyleDatabase(shared.styles_filename)
+
+ from modules import interrogate
+ shared.interrogator = interrogate.InterrogateModels("interrogate")
+
+ from modules import shared_total_tqdm
+ shared.total_tqdm = shared_total_tqdm.TotalTQDM()
+
+ from modules import memmon, devices
+ shared.mem_mon = memmon.MemUsageMonitor("MemMon", devices.device, shared.opts)
+ shared.mem_mon.start()
+
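The docstring above stresses call order; a minimal sketch of the intended startup sequence, assuming a hypothetical launcher function (not part of this change):

from modules import shared_init

def start_webui():
    shared_init.initialize()        # populates shared.opts, shared.state, devices.dtype, shared.mem_mon, etc.
    from modules import processing  # noqa: F401 - modules imported after this point can rely on those fields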
diff --git a/modules/shared_items.py b/modules/shared_items.py
index 89792e88..e1392472 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -1,3 +1,6 @@
+import sys
+
+from modules.shared_cmd_options import cmd_opts
def realesrgan_models_names():
@@ -41,13 +44,54 @@ def refresh_unet_list():
modules.sd_unet.list_unets()
+def list_checkpoint_tiles(use_short=False):
+ import modules.sd_models
+ return modules.sd_models.checkpoint_tiles(use_short)
+
+
+def refresh_checkpoints():
+ import modules.sd_models
+ return modules.sd_models.list_models()
+
+
+def list_samplers():
+ import modules.sd_samplers
+ return modules.sd_samplers.all_samplers
+
+
+def reload_hypernetworks():
+ from modules.hypernetworks import hypernetwork
+ from modules import shared
+
+ shared.hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
+
+
+def get_infotext_names():
+ from modules import infotext, shared
+ res = {}
+
+ for info in shared.opts.data_labels.values():
+ if info.infotext:
+ res[info.infotext] = 1
+
+ for tab_data in infotext.paste_fields.values():
+ for _, name in tab_data.get("fields") or []:
+ if isinstance(name, str):
+ res[name] = 1
+
+ return list(res)
+
+
ui_reorder_categories_builtin_items = [
+ "prompt",
+ "image",
"inpaint",
"sampler",
+ "accordions",
"checkboxes",
- "hires_fix",
"dimensions",
"cfg",
+ "denoising",
"seed",
"batch",
"override_settings",
@@ -61,9 +105,33 @@ def ui_reorder_categories():
sections = {}
for script in scripts.scripts_txt2img.scripts + scripts.scripts_img2img.scripts:
- if isinstance(script.section, str):
+ if isinstance(script.section, str) and script.section not in ui_reorder_categories_builtin_items:
sections[script.section] = 1
yield from sections
yield "scripts"
+
+
+class Shared(sys.modules[__name__].__class__):
+ """
+ this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than
+ at program startup.
+ """
+
+ sd_model_val = None
+
+ @property
+ def sd_model(self):
+ import modules.sd_models
+
+ return modules.sd_models.model_data.get_sd_model()
+
+ @sd_model.setter
+ def sd_model(self, value):
+ import modules.sd_models
+
+ modules.sd_models.model_data.set_sd_model(value)
+
+
+sys.modules['modules.shared'].__class__ = Shared
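The class swap above makes modules.shared.sd_model behave like a lazily evaluated module attribute; a self-contained sketch of the same pattern with standalone names (nothing below comes from this codebase):

import sys

class _LazyModule(sys.modules[__name__].__class__):
    @property
    def answer(self):
        # computed on first access instead of at import time
        return 42

sys.modules[__name__].__class__ = _LazyModule
# importers now trigger the property when they read this_module.answer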
diff --git a/modules/shared_options.py b/modules/shared_options.py
new file mode 100644
index 00000000..ce06f022
--- /dev/null
+++ b/modules/shared_options.py
@@ -0,0 +1,377 @@
+import os
+import gradio as gr
+
+from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes, util
+from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir, default_output_dir # noqa: F401
+from modules.shared_cmd_options import cmd_opts
+from modules.options import options_section, OptionInfo, OptionHTML, categories
+
+options_templates = {}
+hide_dirs = shared.hide_dirs
+
+restricted_opts = {
+ "samples_filename_pattern",
+ "directories_filename_pattern",
+ "outdir_samples",
+ "outdir_txt2img_samples",
+ "outdir_img2img_samples",
+ "outdir_extras_samples",
+ "outdir_grids",
+ "outdir_txt2img_grids",
+ "outdir_save",
+ "outdir_init_images"
+}
+
+categories.register_category("saving", "Saving images")
+categories.register_category("sd", "Stable Diffusion")
+categories.register_category("ui", "User Interface")
+categories.register_category("system", "System")
+categories.register_category("postprocessing", "Postprocessing")
+categories.register_category("training", "Training")
+
+options_templates.update(options_section(('saving-images', "Saving images/grids", "saving"), {
+ "samples_save": OptionInfo(True, "Always save all generated images"),
+ "samples_format": OptionInfo('png', 'File format for images'),
+ "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+ "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
+ "save_images_replace_action": OptionInfo("Replace", "Saving the image to an existing file", gr.Radio, {"choices": ["Replace", "Add number suffix"], **hide_dirs}),
+ "grid_save": OptionInfo(True, "Always save all generated image grids"),
+ "grid_format": OptionInfo('png', 'File format for grids'),
+ "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
+ "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
+ "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
+ "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+ "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
+ "font": OptionInfo("", "Font for image grids that have text"),
+ "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}),
+ "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}),
+ "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}),
+
+ "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
+ "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
+ "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
+ "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
+ "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
+ "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
+ "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
+ "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"),
+ "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
+ "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
+ "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"),
+
+ "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
+ "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
+ "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
+ "save_init_img": OptionInfo(False, "Save init images when using img2img"),
+
+ "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
+ "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"),
+
+    "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that have been interrupted mid-generation; even if not saved, they will still show up in webui output."),
+
+ "notification_audio": OptionInfo(True, "Play notification sound after image generation").info("notification.mp3 should be present in the root directory").needs_reload_ui(),
+ "notification_volume": OptionInfo(100, "Notification sound volume", gr.Slider, {"minimum": 0, "maximum": 100, "step": 1}).info("in %"),
+}))
+
+options_templates.update(options_section(('saving-paths', "Paths for saving", "saving"), {
+ "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
+ "outdir_txt2img_samples": OptionInfo(util.truncate_path(os.path.join(default_output_dir, 'txt2img-images')), 'Output directory for txt2img images', component_args=hide_dirs),
+ "outdir_img2img_samples": OptionInfo(util.truncate_path(os.path.join(default_output_dir, 'img2img-images')), 'Output directory for img2img images', component_args=hide_dirs),
+ "outdir_extras_samples": OptionInfo(util.truncate_path(os.path.join(default_output_dir, 'extras-images')), 'Output directory for images from extras tab', component_args=hide_dirs),
+ "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
+ "outdir_txt2img_grids": OptionInfo(util.truncate_path(os.path.join(default_output_dir, 'txt2img-grids')), 'Output directory for txt2img grids', component_args=hide_dirs),
+ "outdir_img2img_grids": OptionInfo(util.truncate_path(os.path.join(default_output_dir, 'img2img-grids')), 'Output directory for img2img grids', component_args=hide_dirs),
+ "outdir_save": OptionInfo(util.truncate_path(os.path.join(data_path, 'log', 'images')), "Directory for saving images using the Save button", component_args=hide_dirs),
+ "outdir_init_images": OptionInfo(util.truncate_path(os.path.join(default_output_dir, 'init-images')), "Directory for saving init images when using img2img", component_args=hide_dirs),
+}))
+
+options_templates.update(options_section(('saving-to-dirs', "Saving to a directory", "saving"), {
+ "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
+ "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
+ "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
+ "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+ "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
+}))
+
+options_templates.update(options_section(('upscaling', "Upscaling", "postprocessing"), {
+ "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
+ "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
+ "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
+ "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}),
+}))
+
+options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), {
+ "face_restoration": OptionInfo(False, "Restore faces", infotext='Face restoration').info("will use a third-party model on generation result to reconstruct faces"),
+ "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}),
+ "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"),
+ "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
+}))
+
+options_templates.update(options_section(('system', "System", "system"), {
+ "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}),
+ "enable_console_prompts": OptionInfo(shared.cmd_opts.enable_console_prompts, "Print prompts to console when generating with txt2img and img2img."),
+ "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(),
+ "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(),
+ "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"),
+ "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
+ "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
+ "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
+ "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""),
+ "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"),
+ "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."),
+ "dump_stacks_on_signal": OptionInfo(False, "Print stack traces before exiting the program with ctrl+c."),
+}))
+
+options_templates.update(options_section(('API', "API", "system"), {
+ "api_enable_requests": OptionInfo(True, "Allow http:// and https:// URLs for input images in API", restrict_api=True),
+ "api_forbid_local_requests": OptionInfo(True, "Forbid URLs to local resources", restrict_api=True),
+ "api_useragent": OptionInfo("", "User agent for requests", restrict_api=True),
+}))
+
+options_templates.update(options_section(('training', "Training", "training"), {
+ "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
+ "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
+ "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
+ "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
+ "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
+ "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
+ "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
+ "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
+ "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
+ "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
+ "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
+ "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
+}))
+
+options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), {
+ "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles(shared.opts.sd_checkpoint_dropdown_use_short)}, refresh=shared_items.refresh_checkpoints, infotext='Model hash'),
+ "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
+ "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
+ "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"),
+ "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
+ "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(),
+ "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
+ "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
+ "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
+ "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
+ "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
+ "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}, infotext="RNG").info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
+ "tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"),
+ "hires_fix_refiner_pass": OptionInfo("second pass", "Hires fix: which pass to enable refiner for", gr.Radio, {"choices": ["first pass", "second pass", "both passes"]}, infotext="Hires refiner"),
+}))
+
+options_templates.update(options_section(('sdxl', "Stable Diffusion XL", "sd"), {
+ "sdxl_crop_top": OptionInfo(0, "crop top coordinate"),
+ "sdxl_crop_left": OptionInfo(0, "crop left coordinate"),
+ "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"),
+ "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"),
+}))
+
+options_templates.update(options_section(('vae', "VAE", "sd"), {
+ "sd_vae_explanation": OptionHTML("""
+<abbr title='Variational autoencoder'>VAE</abbr> is a neural network that transforms a standard <abbr title='red/green/blue'>RGB</abbr>
+image into latent space representation and back. Latent space representation is what stable diffusion works on during sampling
+(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create the resulting image after sampling is finished.
+For img2img, VAE is used to process the user's input image before sampling, and to create an image after sampling.
+"""),
+ "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+ "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list, infotext='VAE').info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
+ "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"),
+ "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
+ "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Encoder').info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"),
+ "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Decoder').info("method to decode latent to image"),
+}))
+
+options_templates.update(options_section(('img2img', "img2img", "sd"), {
+ "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Conditional mask weight'),
+ "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.0, "maximum": 1.5, "step": 0.001}, infotext='Noise multiplier'),
+ "img2img_extra_noise": OptionInfo(0.0, "Extra noise multiplier for img2img and hires fix", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Extra noise').info("0 = disabled (default); should be lower than denoising strength"),
+ "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
+ "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"),
+ "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}),
+ "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(),
+ "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(),
+ "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(),
+ "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(),
+ "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
+ "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
+ "img2img_batch_show_results_limit": OptionInfo(32, "Show the first N batch img2img results in UI", gr.Slider, {"minimum": -1, "maximum": 1000, "step": 1}).info('0: disable, -1: show all images. Too many images can cause lag'),
+}))
+
+options_templates.update(options_section(('optimizations', "Optimizations", "sd"), {
+ "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
+ "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
+ "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
+ "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
+ "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"),
+ "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
+ "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"),
+ "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
+ "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Radio, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."),
+ "cache_fp16_weight": OptionInfo(False, "Cache FP16 weight for LoRA").info("Cache fp16 weight when enabling FP8, will increase the quality of LoRA. Use more system ram."),
+}))
+
+options_templates.update(options_section(('compatibility', "Compatibility", "sd"), {
+ "auto_backcompat": OptionInfo(True, "Automatic backward compatibility").info("automatically enable options for backwards compatibility when importing generation parameters from infotext that has program version."),
+ "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
+ "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
+ "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
+ "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
+ "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."),
+ "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."),
+ "use_old_scheduling": OptionInfo(False, "Use old prompt editing timelines.", infotext="Old prompt editing timelines").info("For [red:green:N]; old: If N < 1, it's a fraction of steps (and hires fix uses range from 0 to 1), if N >= 1, it's an absolute number of steps; new: If N has a decimal point in it, it's a fraction of steps (and hires fix uses range from 1 to 2), othewrwise it's an absolute number of steps"),
+ "use_downcasted_alpha_bar": OptionInfo(False, "Downcast model alphas_cumprod to fp16 before sampling. For reproducing old seeds.", infotext="Downcast alphas_cumprod")
+}))
+
+options_templates.update(options_section(('interrogate', "Interrogate"), {
+ "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"),
+ "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"),
+ "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
+ "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
+ "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
+ "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"),
+ "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": interrogate.category_types()}, refresh=interrogate.category_types),
+ "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
+ "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"),
+ "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"),
+ "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"),
+ "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"),
+}))
+
+options_templates.update(options_section(('extra_networks', "Extra Networks", "sd"), {
+ "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
+ "extra_networks_dir_button_function": OptionInfo(False, "Add a '/' to the beginning of directory buttons").info("Buttons will display the contents of the selected directory without acting as a search filter."),
+ "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
+ "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
+ "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"),
+ "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
+ "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
+ "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
+ "extra_networks_card_order_field": OptionInfo("Path", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(),
+ "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(),
+ "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
+ "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(),
+ "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
+ "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
+ "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks),
+}))
+
+options_templates.update(options_section(('ui_prompt_editing', "Prompt editing", "ui"), {
+ "keyedit_precision_attention": OptionInfo(0.1, "Precision for (attention:1.1) when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "keyedit_precision_extra": OptionInfo(0.05, "Precision for <extra networks:0.9> when editing the prompt with Ctrl+up/down", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Word delimiters when editing the prompt with Ctrl+up/down"),
+ "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}),
+ "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"),
+ "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
+}))
+
+options_templates.update(options_section(('ui_gallery', "Gallery", "ui"), {
+ "return_grid": OptionInfo(True, "Show grid in gallery"),
+ "do_not_show_images": OptionInfo(False, "Do not show any images in gallery"),
+ "js_modal_lightbox": OptionInfo(True, "Full page image viewer: enable"),
+ "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Full page image viewer: show images zoomed in by default"),
+ "js_modal_lightbox_gamepad": OptionInfo(False, "Full page image viewer: navigate with gamepad"),
+ "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Full page image viewer: gamepad repeat period").info("in milliseconds"),
+ "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("can be any valid CSS value, for example 768px or 20em").needs_reload_ui(),
+}))
+
+options_templates.update(options_section(('ui_alternatives', "UI alternatives", "ui"), {
+ "compact_prompt_box": OptionInfo(False, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(),
+ "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(),
+ "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(),
+ "sd_checkpoint_dropdown_use_short": OptionInfo(False, "Checkpoint dropdown: use filenames without paths").info("models in subdirectories like photo/sd15.ckpt will be listed as just sd15.ckpt"),
+ "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(),
+ "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(),
+ "txt2img_settings_accordion": OptionInfo(False, "Settings in txt2img hidden under Accordion").needs_reload_ui(),
+ "img2img_settings_accordion": OptionInfo(False, "Settings in img2img hidden under Accordion").needs_reload_ui(),
+}))
+
+options_templates.update(options_section(('ui', "User interface", "ui"), {
+ "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
+ "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(),
+ "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
+ "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
+ "ui_reorder_list": OptionInfo([], "UI item order for txt2img/img2img tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(),
+ "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the <a href='https://huggingface.co/spaces/gradio/theme-gallery'>gallery</a>.").needs_reload_ui(),
+ "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"),
+ "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
+ "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
+ "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
+}))
+
+
+options_templates.update(options_section(('infotext', "Infotext", "ui"), {
+ "infotext_explanation": OptionHTML("""
+Infotext is what this software calls the text that contains generation parameters and can be used to generate the same picture again.
+It is displayed in the UI below the image. To use infotext, paste it into the prompt and click the ↙️ paste button.
+"""),
+ "enable_pnginfo": OptionInfo(True, "Write infotext to metadata of the generated image"),
+ "save_txt": OptionInfo(False, "Create a text file with infotext next to every generated image"),
+
+ "add_model_name_to_info": OptionInfo(True, "Add model name to infotext"),
+ "add_model_hash_to_info": OptionInfo(True, "Add model hash to infotext"),
+ "add_vae_name_to_info": OptionInfo(True, "Add VAE name to infotext"),
+ "add_vae_hash_to_info": OptionInfo(True, "Add VAE hash to infotext"),
+ "add_user_name_to_info": OptionInfo(False, "Add user name to infotext when authenticated"),
+ "add_version_to_infotext": OptionInfo(True, "Add program version to infotext"),
+ "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
+ "infotext_skip_pasting": OptionInfo([], "Disregard fields from pasted infotext", ui_components.DropdownMulti, lambda: {"choices": shared_items.get_infotext_names()}),
+ "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""<ul style='margin-left: 1.5em'>
+<li>Ignore: keep prompt and styles dropdown as it is.</li>
+<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
+<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
+<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
+</ul>"""),
+
+}))
+
+options_templates.update(options_section(('ui', "Live previews", "ui"), {
+ "show_progressbar": OptionInfo(True, "Show progressbar"),
+ "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
+ "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
+ "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
+ "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"),
+ "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"),
+ "live_preview_allow_lowvram_full": OptionInfo(False, "Allow Full live preview method with lowvram/medvram").info("If not, Approx NN will be used instead; Full live preview method is very detrimental to speed if lowvram/medvram optimizations are enabled"),
+ "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
+ "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
+ "live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"),
+ "js_live_preview_in_modal_lightbox": OptionInfo(False, "Show Live preview in full page image viewer"),
+}))
+
+options_templates.update(options_section(('sampler-params', "Sampler parameters", "sd"), {
+ "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(),
+ "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unpredictable results"),
+ "eta_ancestral": OptionInfo(1.0, "Eta for k-diffusion samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; currently only applies to ancestral samplers (i.e. Euler a) and SDE samplers"),
+ "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
+ 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}, infotext='Sigma churn').info('amount of stochasticity; only applies to Euler, Heun, and DPM2'),
+ 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}, infotext='Sigma tmin').info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'),
+ 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}, infotext='Sigma tmax').info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"),
+ 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}, infotext='Sigma noise').info('amount of additional noise to counteract loss of detail during sampling'),
+ 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}, infotext='Schedule type').info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
+ 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number, infotext='Schedule min sigma').info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"),
+ 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number, infotext='Schedule max sigma').info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"),
+ 'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"),
+ 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}, infotext='ENSD').info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
+ 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma", infotext='Discard penultimate sigma').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
+ 'sgm_noise_multiplier': OptionInfo(False, "SGM noise multiplier", infotext='SGM noise multiplier').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818").info("Match initial noise to official SDXL implementation - only useful for reproducing images"),
+ 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}, infotext='UniPC variant'),
+ 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}, infotext='UniPC skip type'),
+ 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"),
+ 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'),
+ 'sd_noise_schedule': OptionInfo("Default", "Noise schedule for sampling", gr.Radio, {"choices": ["Default", "Zero Terminal SNR"]}, infotext="Noise Schedule").info("for use with zero terminal SNR trained models")
+}))
+
+options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), {
+ 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
+ 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
+ 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+ 'postprocessing_existing_caption_action': OptionInfo("Ignore", "Action for existing captions", gr.Radio, {"choices": ["Ignore", "Keep", "Prepend", "Append"]}).info("when generating captions using postprocessing; Ignore = use generated; Keep = use original; Prepend/Append = combine both"),
+}))
+
+options_templates.update(options_section((None, "Hidden options"), {
+ "disabled_extensions": OptionInfo([], "Disable these extensions"),
+ "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}),
+ "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"),
+ "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
+}))
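
A minimal sketch (not part of this patch) of how the entries registered above are consumed: each key passed to options_section becomes an attribute on shared.opts, which is the access pattern shared_state.py below relies on.

from modules import shared

def live_preview_due(sampling_step, last_preview_step):
    # Mirrors the checks in State.set_current_image() further down in this diff.
    return (
        shared.opts.live_previews_enable
        and shared.opts.show_progress_every_n_steps != -1
        and sampling_step - last_preview_step >= shared.opts.show_progress_every_n_steps
    )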
diff --git a/modules/shared_state.py b/modules/shared_state.py
new file mode 100644
index 00000000..a68789cc
--- /dev/null
+++ b/modules/shared_state.py
@@ -0,0 +1,159 @@
+import datetime
+import logging
+import threading
+import time
+
+from modules import errors, shared, devices
+from typing import Optional
+
+log = logging.getLogger(__name__)
+
+
+class State:
+ skipped = False
+ interrupted = False
+ job = ""
+ job_no = 0
+ job_count = 0
+ processing_has_refined_job_count = False
+ job_timestamp = '0'
+ sampling_step = 0
+ sampling_steps = 0
+ current_latent = None
+ current_image = None
+ current_image_sampling_step = 0
+ id_live_preview = 0
+ textinfo = None
+ time_start = None
+ server_start = None
+ _server_command_signal = threading.Event()
+ _server_command: Optional[str] = None
+
+ def __init__(self):
+ self.server_start = time.time()
+
+ @property
+ def need_restart(self) -> bool:
+ # Compatibility getter for need_restart.
+ return self.server_command == "restart"
+
+ @need_restart.setter
+ def need_restart(self, value: bool) -> None:
+ # Compatibility setter for need_restart.
+ if value:
+ self.server_command = "restart"
+
+ @property
+ def server_command(self):
+ return self._server_command
+
+ @server_command.setter
+ def server_command(self, value: Optional[str]) -> None:
+ """
+ Set the server command to `value` and signal that it's been set.
+ """
+ self._server_command = value
+ self._server_command_signal.set()
+
+ def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]:
+ """
+ Wait for server command to get set; return and clear the value and signal.
+ """
+ if self._server_command_signal.wait(timeout):
+ self._server_command_signal.clear()
+ req = self._server_command
+ self._server_command = None
+ return req
+ return None
+
+ def request_restart(self) -> None:
+ self.interrupt()
+ self.server_command = "restart"
+ log.info("Received restart request")
+
+ def skip(self):
+ self.skipped = True
+ log.info("Received skip request")
+
+ def interrupt(self):
+ self.interrupted = True
+ log.info("Received interrupt request")
+
+ def nextjob(self):
+ if shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps == -1:
+ self.do_set_current_image()
+
+ self.job_no += 1
+ self.sampling_step = 0
+ self.current_image_sampling_step = 0
+
+ def dict(self):
+ obj = {
+ "skipped": self.skipped,
+ "interrupted": self.interrupted,
+ "job": self.job,
+ "job_count": self.job_count,
+ "job_timestamp": self.job_timestamp,
+ "job_no": self.job_no,
+ "sampling_step": self.sampling_step,
+ "sampling_steps": self.sampling_steps,
+ }
+
+ return obj
+
+ def begin(self, job: str = "(unknown)"):
+ self.sampling_step = 0
+ self.time_start = time.time()
+ self.job_count = -1
+ self.processing_has_refined_job_count = False
+ self.job_no = 0
+ self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+ self.current_latent = None
+ self.current_image = None
+ self.current_image_sampling_step = 0
+ self.id_live_preview = 0
+ self.skipped = False
+ self.interrupted = False
+ self.textinfo = None
+ self.job = job
+ devices.torch_gc()
+ log.info("Starting job %s", job)
+
+ def end(self):
+ duration = time.time() - self.time_start
+ log.info("Ending job %s (%.2f seconds)", self.job, duration)
+ self.job = ""
+ self.job_count = 0
+
+ devices.torch_gc()
+
+ def set_current_image(self):
+ """if enough sampling steps have been made after the last call to this, sets self.current_image from self.current_latent, and modifies self.id_live_preview accordingly"""
+ if not shared.parallel_processing_allowed:
+ return
+
+ if self.sampling_step - self.current_image_sampling_step >= shared.opts.show_progress_every_n_steps and shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps != -1:
+ self.do_set_current_image()
+
+ def do_set_current_image(self):
+ if self.current_latent is None:
+ return
+
+ import modules.sd_samplers
+
+ try:
+ if shared.opts.show_progress_grid:
+ self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
+ else:
+ self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
+
+ self.current_image_sampling_step = self.sampling_step
+
+ except Exception:
+ # when switching models during generation, VAE would be on CPU, so creating an image will fail.
+ # we silently ignore this error
+ errors.record_exception()
+
+ def assign_current_image(self, image):
+ self.current_image = image
+ self.id_live_preview += 1
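
A hedged usage sketch of the State class defined above; the method and attribute names come from the code, while the surrounding loop and its job list are hypothetical. Elsewhere in this diff the instance is reachable as shared.state (see TotalTQDM.reset below).

from modules import shared

def run_jobs(jobs):
    shared.state.begin(job="batch")        # reset counters, record job_timestamp, torch_gc()
    shared.state.job_count = len(jobs)     # replace the placeholder -1 set by begin()
    for _ in jobs:
        if shared.state.interrupted:
            break
        # ... do the work for one job, advancing shared.state.sampling_step ...
        shared.state.nextjob()             # bump job_no, reset sampling_step
    shared.state.end()                     # log duration, torch_gc() again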
diff --git a/modules/shared_total_tqdm.py b/modules/shared_total_tqdm.py
new file mode 100644
index 00000000..cf82e104
--- /dev/null
+++ b/modules/shared_total_tqdm.py
@@ -0,0 +1,37 @@
+import tqdm
+
+from modules import shared
+
+
+class TotalTQDM:
+ def __init__(self):
+ self._tqdm = None
+
+ def reset(self):
+ self._tqdm = tqdm.tqdm(
+ desc="Total progress",
+ total=shared.state.job_count * shared.state.sampling_steps,
+ position=1,
+ file=shared.progress_print_out
+ )
+
+ def update(self):
+ if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
+ return
+ if self._tqdm is None:
+ self.reset()
+ self._tqdm.update()
+
+ def updateTotal(self, new_total):
+ if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
+ return
+ if self._tqdm is None:
+ self.reset()
+ self._tqdm.total = new_total
+
+ def clear(self):
+ if self._tqdm is not None:
+ self._tqdm.refresh()
+ self._tqdm.close()
+ self._tqdm = None
+
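A small driver sketch for TotalTQDM; the calls follow the class above, and shared.total_tqdm is assumed to be the module-level instance.

from modules import shared

bar = shared.total_tqdm                    # assumed module-level TotalTQDM instance
bar.updateTotal(shared.state.job_count * shared.state.sampling_steps)
for _ in range(shared.state.sampling_steps):
    bar.update()                           # returns early if multiple_tqdm is off or console progressbars are disabled
bar.clear()                                # close and drop the inner tqdm bar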
diff --git a/modules/styles.py b/modules/styles.py
index ec0e1bc5..026c4300 100644
--- a/modules/styles.py
+++ b/modules/styles.py
@@ -1,7 +1,7 @@
import csv
+import fnmatch
import os
import os.path
-import re
import typing
import shutil
@@ -10,6 +10,7 @@ class PromptStyle(typing.NamedTuple):
name: str
prompt: str
negative_prompt: str
+ path: str = None
def merge_prompts(style_prompt: str, prompt: str) -> str:
@@ -29,12 +30,17 @@ def apply_styles_to_prompt(prompt, styles):
return prompt
-re_spaces = re.compile(" +")
+def extract_style_text_from_prompt(style_text, prompt):
+ """This function extracts the text from a given prompt based on a provided style text. It checks if the style text contains the placeholder {prompt} or if it appears at the end of the prompt. If a match is found, it returns True along with the extracted text. Otherwise, it returns False and the original prompt.
+ extract_style_text_from_prompt("masterpiece", "1girl, art by greg, masterpiece") outputs (True, "1girl, art by greg")
+ extract_style_text_from_prompt("masterpiece, {prompt}", "masterpiece, 1girl, art by greg") outputs (True, "1girl, art by greg")
+ extract_style_text_from_prompt("masterpiece, {prompt}", "exquisite, 1girl, art by greg") outputs (False, "exquisite, 1girl, art by greg")
+ """
+
+ stripped_prompt = prompt.strip()
+ stripped_style_text = style_text.strip()
-def extract_style_text_from_prompt(style_text, prompt):
- stripped_prompt = re.sub(re_spaces, " ", prompt.strip())
- stripped_style_text = re.sub(re_spaces, " ", style_text.strip())
if "{prompt}" in stripped_style_text:
left, right = stripped_style_text.split("{prompt}", 2)
if stripped_prompt.startswith(left) and stripped_prompt.endswith(right):
@@ -52,7 +58,12 @@ def extract_style_text_from_prompt(style_text, prompt):
return False, prompt
-def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt):
+def extract_original_prompts(style: PromptStyle, prompt, negative_prompt):
+ """
+ Takes a style and compares it to the prompt and negative prompt. If the style
+ matches, returns True plus the prompt and negative prompt with the style text
+ removed. Otherwise, returns False with the original prompt and negative prompt.
+ """
if not style.prompt and not style.negative_prompt:
return False, prompt, negative_prompt
@@ -69,25 +80,84 @@ def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt):
class StyleDatabase:
def __init__(self, path: str):
- self.no_style = PromptStyle("None", "", "")
+ self.no_style = PromptStyle("None", "", "", None)
self.styles = {}
self.path = path
+ folder, file = os.path.split(self.path)
+ filename, _, ext = file.partition('*')
+ self.default_path = os.path.join(folder, filename + ext)
+
+ self.prompt_fields = [field for field in PromptStyle._fields if field != "path"]
+
self.reload()
def reload(self):
+ """
+ Clears the style database and reloads the styles from the CSV file(s)
+ matching the path used to initialize the database.
+ """
self.styles.clear()
- if not os.path.exists(self.path):
+ path, filename = os.path.split(self.path)
+
+ if "*" in filename:
+ fileglob = filename.split("*")[0] + "*.csv"
+ filelist = []
+ for file in os.listdir(path):
+ if fnmatch.fnmatch(file, fileglob):
+ filelist.append(file)
+ # Add a visible divider to the style list
+ half_len = round(len(file) / 2)
+ divider = f"{'-' * (20 - half_len)} {file.upper()}"
+ divider = f"{divider} {'-' * (40 - len(divider))}"
+ self.styles[divider] = PromptStyle(
+ f"{divider}", None, None, "do_not_save"
+ )
+ # Add styles from this CSV file
+ self.load_from_csv(os.path.join(path, file))
+ if len(filelist) == 0:
+ print(f"No styles found in {path} matching {fileglob}")
+ return
+ elif not os.path.exists(self.path):
+ print(f"Style database not found: {self.path}")
return
+ else:
+ self.load_from_csv(self.path)
- with open(self.path, "r", encoding="utf-8-sig", newline='') as file:
+ def load_from_csv(self, path: str):
+ with open(path, "r", encoding="utf-8-sig", newline="") as file:
reader = csv.DictReader(file, skipinitialspace=True)
for row in reader:
+ # Ignore empty rows or rows starting with a comment
+ if not row or row["name"].startswith("#"):
+ continue
# Support loading old CSV format with "name, text"-columns
prompt = row["prompt"] if "prompt" in row else row["text"]
negative_prompt = row.get("negative_prompt", "")
- self.styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt)
+ # Add style to database
+ self.styles[row["name"]] = PromptStyle(
+ row["name"], prompt, negative_prompt, path
+ )
+
+ def get_style_paths(self) -> set:
+ """Returns a set of all distinct paths of files that styles are loaded from."""
+ # Update any styles without a path to the default path
+ for style in list(self.styles.values()):
+ if not style.path:
+ self.styles[style.name] = style._replace(path=self.default_path)
+
+ # Create a list of all distinct paths, including the default path
+ style_paths = set()
+ style_paths.add(self.default_path)
+ for _, style in self.styles.items():
+ if style.path:
+ style_paths.add(style.path)
+
+ # Remove any paths for styles that are just list dividers
+ style_paths.discard("do_not_save")
+
+ return style_paths
def get_style_prompts(self, styles):
return [self.styles.get(x, self.no_style).prompt for x in styles]
@@ -96,23 +166,40 @@ class StyleDatabase:
return [self.styles.get(x, self.no_style).negative_prompt for x in styles]
def apply_styles_to_prompt(self, prompt, styles):
- return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).prompt for x in styles])
+ return apply_styles_to_prompt(
+ prompt, [self.styles.get(x, self.no_style).prompt for x in styles]
+ )
def apply_negative_styles_to_prompt(self, prompt, styles):
- return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles])
-
- def save_styles(self, path: str) -> None:
- # Always keep a backup file around
- if os.path.exists(path):
- shutil.copy(path, f"{path}.bak")
-
- fd = os.open(path, os.O_RDWR | os.O_CREAT)
- with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file:
- # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple,
- # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict()
- writer = csv.DictWriter(file, fieldnames=PromptStyle._fields)
- writer.writeheader()
- writer.writerows(style._asdict() for k, style in self.styles.items())
+ return apply_styles_to_prompt(
+ prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]
+ )
+
+ def save_styles(self, path: str = None) -> None:
+ # The path argument is deprecated, but kept for backwards compatibility
+ _ = path
+
+ style_paths = self.get_style_paths()
+
+ csv_names = [os.path.split(path)[1].lower() for path in style_paths]
+
+ for style_path in style_paths:
+ # Always keep a backup file around
+ if os.path.exists(style_path):
+ shutil.copy(style_path, f"{style_path}.bak")
+
+ # Write the styles to the CSV file
+ with open(style_path, "w", encoding="utf-8-sig", newline="") as file:
+ writer = csv.DictWriter(file, fieldnames=self.prompt_fields)
+ writer.writeheader()
+ for style in (s for s in self.styles.values() if s.path == style_path):
+ # Skip style list dividers, e.g. "STYLES.CSV"
+ if style.name.lower().strip("# ") in csv_names:
+ continue
+ # Write style fields, ignoring the path field
+ writer.writerow(
+ {k: v for k, v in style._asdict().items() if k != "path"}
+ )
def extract_styles_from_prompt(self, prompt, negative_prompt):
extracted = []
@@ -123,7 +210,9 @@ class StyleDatabase:
found_style = None
for style in applicable_styles:
- is_match, new_prompt, new_neg_prompt = extract_style_from_prompts(style, prompt, negative_prompt)
+ is_match, new_prompt, new_neg_prompt = extract_original_prompts(
+ style, prompt, negative_prompt
+ )
if is_match:
found_style = style
prompt = new_prompt
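
A sketch of the new wildcard behaviour in StyleDatabase (the file names are assumptions): every CSV matching the glob is loaded, each style remembers its source file via the new path field, and save_styles() writes each style back to the file it came from.

from modules.styles import StyleDatabase

db = StyleDatabase("./styles*.csv")   # loads e.g. styles.csv and styles_portraits.csv
print(db.get_style_paths())           # distinct source files; "do_not_save" divider entries excluded
db.save_styles()                      # makes a per-file .bak backup, then rewrites each CSV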
diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py
index 497568eb..4cb561ef 100644
--- a/modules/sub_quadratic_attention.py
+++ b/modules/sub_quadratic_attention.py
@@ -15,7 +15,7 @@ import torch
from torch import Tensor
from torch.utils.checkpoint import checkpoint
import math
-from typing import Optional, NamedTuple, List
+from typing import Optional, NamedTuple
def narrow_trunc(
@@ -58,7 +58,7 @@ def _summarize_chunk(
scale: float,
) -> AttnChunk:
attn_weights = torch.baddbmm(
- torch.empty(1, 1, 1, device=query.device, dtype=query.dtype),
+ torch.zeros(1, 1, 1, device=query.device, dtype=query.dtype),
query,
key.transpose(1,2),
alpha=scale,
@@ -97,7 +97,7 @@ def _query_chunk_attention(
)
return summarize_chunk(query, key_chunk, value_chunk)
- chunks: List[AttnChunk] = [
+ chunks: list[AttnChunk] = [
chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size)
]
acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks)))
@@ -121,7 +121,7 @@ def _get_attention_scores_no_kv_chunking(
scale: float,
) -> Tensor:
attn_scores = torch.baddbmm(
- torch.empty(1, 1, 1, device=query.device, dtype=query.dtype),
+ torch.zeros(1, 1, 1, device=query.device, dtype=query.dtype),
query,
key.transpose(1,2),
alpha=scale,
diff --git a/modules/sysinfo.py b/modules/sysinfo.py
index 5f15ac4f..5abf616b 100644
--- a/modules/sysinfo.py
+++ b/modules/sysinfo.py
@@ -1,7 +1,6 @@
import json
import os
import sys
-import traceback
import platform
import hashlib
@@ -10,7 +9,7 @@ import psutil
import re
import launch
-from modules import paths_internal, timer
+from modules import paths_internal, timer, shared, extensions, errors
checksum_token = "DontStealMyGamePlz__WINNERS_DONT_USE_DRUGS__DONT_COPY_THAT_FLOPPY"
environment_whitelist = {
@@ -23,16 +22,13 @@ environment_whitelist = {
"TORCH_COMMAND",
"REQS_FILE",
"XFORMERS_PACKAGE",
- "GFPGAN_PACKAGE",
"CLIP_PACKAGE",
"OPENCLIP_PACKAGE",
"STABLE_DIFFUSION_REPO",
"K_DIFFUSION_REPO",
- "CODEFORMER_REPO",
"BLIP_REPO",
"STABLE_DIFFUSION_COMMIT_HASH",
"K_DIFFUSION_COMMIT_HASH",
- "CODEFORMER_COMMIT_HASH",
"BLIP_COMMIT_HASH",
"COMMANDLINE_ARGS",
"IGNORE_CMD_ARGS_ERRORS",
@@ -83,9 +79,9 @@ def get_dict():
"Data path": paths_internal.data_path,
"Extensions dir": paths_internal.extensions_dir,
"Checksum": checksum_token,
- "Commandline": sys.argv,
+ "Commandline": get_argv(),
"Torch env info": get_torch_sysinfo(),
- "Exceptions": get_exceptions(),
+ "Exceptions": errors.get_exceptions(),
"CPU": {
"model": platform.processor(),
"count logical": psutil.cpu_count(logical=True),
@@ -105,22 +101,25 @@ def get_dict():
return res
-def format_traceback(tb):
- return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]
+def get_environment():
+ return {k: os.environ[k] for k in sorted(os.environ) if k in environment_whitelist}
-def get_exceptions():
- try:
- from modules import errors
+def get_argv():
+ res = []
- return [{"exception": str(e), "traceback": format_traceback(tb)} for e, tb in reversed(errors.exception_records)]
- except Exception as e:
- return str(e)
+ for v in sys.argv:
+ if shared.cmd_opts.gradio_auth and shared.cmd_opts.gradio_auth == v:
+ res.append("<hidden>")
+ continue
+ if shared.cmd_opts.api_auth and shared.cmd_opts.api_auth == v:
+ res.append("<hidden>")
+ continue
-def get_environment():
- return {k: os.environ[k] for k in sorted(os.environ) if k in environment_whitelist}
+ res.append(v)
+ return res
re_newline = re.compile(r"\r*\n")
@@ -138,8 +137,6 @@ def get_torch_sysinfo():
def get_extensions(*, enabled):
try:
- from modules import extensions
-
def to_json(x: extensions.Extension):
return {
"name": x.name,
@@ -156,7 +153,6 @@ def get_extensions(*, enabled):
def get_config():
try:
- from modules import shared
return shared.opts.data
except Exception as e:
return str(e)
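
A worked example (launch values assumed) of the credential masking performed by get_argv() above:

import sys
from modules.sysinfo import get_argv

# Launched with: python launch.py --gradio-auth user:secret
print(sys.argv)    # ['launch.py', '--gradio-auth', 'user:secret']
print(get_argv())  # ['launch.py', '--gradio-auth', '<hidden>']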
diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py
index 1675e39a..e223a2e0 100644
--- a/modules/textual_inversion/autocrop.py
+++ b/modules/textual_inversion/autocrop.py
@@ -3,6 +3,8 @@ import requests
import os
import numpy as np
from PIL import ImageDraw
+from modules import paths_internal
+from pkg_resources import parse_version
GREEN = "#0F0"
BLUE = "#00F"
@@ -25,7 +27,6 @@ def crop_image(im, settings):
elif is_portrait(settings.crop_width, settings.crop_height):
scale_by = settings.crop_height / im.height
-
im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
im_debug = im.copy()
@@ -69,6 +70,7 @@ def crop_image(im, settings):
return results
+
def focal_point(im, settings):
corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
@@ -78,118 +80,120 @@ def focal_point(im, settings):
weight_pref_total = 0
if corner_points:
- weight_pref_total += settings.corner_points_weight
+ weight_pref_total += settings.corner_points_weight
if entropy_points:
- weight_pref_total += settings.entropy_points_weight
+ weight_pref_total += settings.entropy_points_weight
if face_points:
- weight_pref_total += settings.face_points_weight
+ weight_pref_total += settings.face_points_weight
corner_centroid = None
if corner_points:
- corner_centroid = centroid(corner_points)
- corner_centroid.weight = settings.corner_points_weight / weight_pref_total
- pois.append(corner_centroid)
+ corner_centroid = centroid(corner_points)
+ corner_centroid.weight = settings.corner_points_weight / weight_pref_total
+ pois.append(corner_centroid)
entropy_centroid = None
if entropy_points:
- entropy_centroid = centroid(entropy_points)
- entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
- pois.append(entropy_centroid)
+ entropy_centroid = centroid(entropy_points)
+ entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
+ pois.append(entropy_centroid)
face_centroid = None
if face_points:
- face_centroid = centroid(face_points)
- face_centroid.weight = settings.face_points_weight / weight_pref_total
- pois.append(face_centroid)
+ face_centroid = centroid(face_points)
+ face_centroid.weight = settings.face_points_weight / weight_pref_total
+ pois.append(face_centroid)
average_point = poi_average(pois, settings)
if settings.annotate_image:
- d = ImageDraw.Draw(im)
- max_size = min(im.width, im.height) * 0.07
- if corner_centroid is not None:
- color = BLUE
- box = corner_centroid.bounding(max_size * corner_centroid.weight)
- d.text((box[0], box[1]-15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
- d.ellipse(box, outline=color)
- if len(corner_points) > 1:
- for f in corner_points:
- d.rectangle(f.bounding(4), outline=color)
- if entropy_centroid is not None:
- color = "#ff0"
- box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
- d.text((box[0], box[1]-15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
- d.ellipse(box, outline=color)
- if len(entropy_points) > 1:
- for f in entropy_points:
- d.rectangle(f.bounding(4), outline=color)
- if face_centroid is not None:
- color = RED
- box = face_centroid.bounding(max_size * face_centroid.weight)
- d.text((box[0], box[1]-15), f"Face: {face_centroid.weight:.02f}", fill=color)
- d.ellipse(box, outline=color)
- if len(face_points) > 1:
- for f in face_points:
- d.rectangle(f.bounding(4), outline=color)
-
- d.ellipse(average_point.bounding(max_size), outline=GREEN)
+ d = ImageDraw.Draw(im)
+ max_size = min(im.width, im.height) * 0.07
+ if corner_centroid is not None:
+ color = BLUE
+ box = corner_centroid.bounding(max_size * corner_centroid.weight)
+ d.text((box[0], box[1] - 15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
+ d.ellipse(box, outline=color)
+ if len(corner_points) > 1:
+ for f in corner_points:
+ d.rectangle(f.bounding(4), outline=color)
+ if entropy_centroid is not None:
+ color = "#ff0"
+ box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
+ d.text((box[0], box[1] - 15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
+ d.ellipse(box, outline=color)
+ if len(entropy_points) > 1:
+ for f in entropy_points:
+ d.rectangle(f.bounding(4), outline=color)
+ if face_centroid is not None:
+ color = RED
+ box = face_centroid.bounding(max_size * face_centroid.weight)
+ d.text((box[0], box[1] - 15), f"Face: {face_centroid.weight:.02f}", fill=color)
+ d.ellipse(box, outline=color)
+ if len(face_points) > 1:
+ for f in face_points:
+ d.rectangle(f.bounding(4), outline=color)
+
+ d.ellipse(average_point.bounding(max_size), outline=GREEN)
return average_point
def image_face_points(im, settings):
if settings.dnn_model_path is not None:
- detector = cv2.FaceDetectorYN.create(
- settings.dnn_model_path,
- "",
- (im.width, im.height),
- 0.9, # score threshold
- 0.3, # nms threshold
- 5000 # keep top k before nms
- )
- faces = detector.detect(np.array(im))
- results = []
- if faces[1] is not None:
- for face in faces[1]:
- x = face[0]
- y = face[1]
- w = face[2]
- h = face[3]
- results.append(
- PointOfInterest(
- int(x + (w * 0.5)), # face focus left/right is center
- int(y + (h * 0.33)), # face focus up/down is close to the top of the head
- size = w,
- weight = 1/len(faces[1])
- )
- )
- return results
+ detector = cv2.FaceDetectorYN.create(
+ settings.dnn_model_path,
+ "",
+ (im.width, im.height),
+ 0.9, # score threshold
+ 0.3, # nms threshold
+ 5000 # keep top k before nms
+ )
+ faces = detector.detect(np.array(im))
+ results = []
+ if faces[1] is not None:
+ for face in faces[1]:
+ x = face[0]
+ y = face[1]
+ w = face[2]
+ h = face[3]
+ results.append(
+ PointOfInterest(
+ int(x + (w * 0.5)), # face focus left/right is center
+ int(y + (h * 0.33)), # face focus up/down is close to the top of the head
+ size=w,
+ weight=1 / len(faces[1])
+ )
+ )
+ return results
else:
- np_im = np.array(im)
- gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
-
- tries = [
- [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
- [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
- ]
- for t in tries:
- classifier = cv2.CascadeClassifier(t[0])
- minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
- try:
- faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
- minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
- except Exception:
- continue
-
- if faces:
- rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
- return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
+ np_im = np.array(im)
+ gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
+
+ tries = [
+ [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
+ [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
+ ]
+ for t in tries:
+ classifier = cv2.CascadeClassifier(t[0])
+ minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
+ try:
+ faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
+ minNeighbors=7, minSize=(minsize, minsize),
+ flags=cv2.CASCADE_SCALE_IMAGE)
+ except Exception:
+ continue
+
+ if faces:
+ rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
+ return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]),
+ weight=1 / len(rects)) for r in rects]
return []
@@ -198,7 +202,7 @@ def image_corner_points(im, settings):
# naive attempt at preventing focal points from collecting at watermarks near the bottom
gd = ImageDraw.Draw(grayscale)
- gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
+ gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999")
np_im = np.array(grayscale)
@@ -206,7 +210,7 @@ def image_corner_points(im, settings):
np_im,
maxCorners=100,
qualityLevel=0.04,
- minDistance=min(grayscale.width, grayscale.height)*0.06,
+ minDistance=min(grayscale.width, grayscale.height) * 0.06,
useHarrisDetector=False,
)
@@ -215,8 +219,8 @@ def image_corner_points(im, settings):
focal_points = []
for point in points:
- x, y = point.ravel()
- focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
+ x, y = point.ravel()
+ focal_points.append(PointOfInterest(x, y, size=4, weight=1 / len(points)))
return focal_points
@@ -225,13 +229,13 @@ def image_entropy_points(im, settings):
landscape = im.height < im.width
portrait = im.height > im.width
if landscape:
- move_idx = [0, 2]
- move_max = im.size[0]
+ move_idx = [0, 2]
+ move_max = im.size[0]
elif portrait:
- move_idx = [1, 3]
- move_max = im.size[1]
+ move_idx = [1, 3]
+ move_max = im.size[1]
else:
- return []
+ return []
e_max = 0
crop_current = [0, 0, settings.crop_width, settings.crop_height]
@@ -241,14 +245,14 @@ def image_entropy_points(im, settings):
e = image_entropy(crop)
if (e > e_max):
- e_max = e
- crop_best = list(crop_current)
+ e_max = e
+ crop_best = list(crop_current)
crop_current[move_idx[0]] += 4
crop_current[move_idx[1]] += 4
- x_mid = int(crop_best[0] + settings.crop_width/2)
- y_mid = int(crop_best[1] + settings.crop_height/2)
+ x_mid = int(crop_best[0] + settings.crop_width / 2)
+ y_mid = int(crop_best[1] + settings.crop_height / 2)
return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
@@ -294,22 +298,23 @@ def is_square(w, h):
return w == h
-def download_and_cache_models(dirname):
- download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
- model_file_name = 'face_detection_yunet.onnx'
+model_dir_opencv = os.path.join(paths_internal.models_path, 'opencv')
+if parse_version(cv2.__version__) >= parse_version('4.8'):
+ model_file_path = os.path.join(model_dir_opencv, 'face_detection_yunet_2023mar.onnx')
+ model_url = 'https://github.com/opencv/opencv_zoo/blob/b6e370b10f641879a87890d44e42173077154a05/models/face_detection_yunet/face_detection_yunet_2023mar.onnx?raw=true'
+else:
+ model_file_path = os.path.join(model_dir_opencv, 'face_detection_yunet.onnx')
+ model_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
- os.makedirs(dirname, exist_ok=True)
- cache_file = os.path.join(dirname, model_file_name)
- if not os.path.exists(cache_file):
- print(f"downloading face detection model from '{download_url}' to '{cache_file}'")
- response = requests.get(download_url)
- with open(cache_file, "wb") as f:
+def download_and_cache_models():
+ if not os.path.exists(model_file_path):
+ os.makedirs(model_dir_opencv, exist_ok=True)
+ print(f"downloading face detection model from '{model_url}' to '{model_file_path}'")
+ response = requests.get(model_url)
+ with open(model_file_path, "wb") as f:
f.write(response.content)
-
- if os.path.exists(cache_file):
- return cache_file
- return None
+ return model_file_path
class PointOfInterest:
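
With this change the module picks the YuNet model matching the installed OpenCV version (the 2023mar file for OpenCV 4.8 and later, the 2022mar file otherwise), and download_and_cache_models() no longer takes a target directory: it just ensures the file is cached and returns its path. A minimal usage sketch, assuming the webui modules are importable; the input path is illustrative and the Settings fields are the ones used by the (removed) preprocess code further below:

    from PIL import Image

    from modules.textual_inversion import autocrop

    model_path = autocrop.download_and_cache_models()   # downloads on first call, then reuses the cached file

    settings = autocrop.Settings(
        crop_width=512,
        crop_height=512,
        face_points_weight=0.9,
        entropy_points_weight=0.15,
        corner_points_weight=0.5,
        annotate_image=False,
        dnn_model_path=model_path,
    )

    img = Image.open("input.png").convert("RGB")         # illustrative input image
    for crop in autocrop.crop_image(img, settings):
        crop.save("cropped.png")
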
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
deleted file mode 100644
index dbd856bd..00000000
--- a/modules/textual_inversion/preprocess.py
+++ /dev/null
@@ -1,232 +0,0 @@
-import os
-from PIL import Image, ImageOps
-import math
-import tqdm
-
-from modules import paths, shared, images, deepbooru
-from modules.textual_inversion import autocrop
-
-
-def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.15, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
- try:
- if process_caption:
- shared.interrogator.load()
-
- if process_caption_deepbooru:
- deepbooru.model.start()
-
- preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
-
- finally:
-
- if process_caption:
- shared.interrogator.send_blip_to_ram()
-
- if process_caption_deepbooru:
- deepbooru.model.stop()
-
-
-def listfiles(dirname):
- return os.listdir(dirname)
-
-
-class PreprocessParams:
- src = None
- dstdir = None
- subindex = 0
- flip = False
- process_caption = False
- process_caption_deepbooru = False
- preprocess_txt_action = None
-
-
-def save_pic_with_caption(image, index, params: PreprocessParams, existing_caption=None):
- caption = ""
-
- if params.process_caption:
- caption += shared.interrogator.generate_caption(image)
-
- if params.process_caption_deepbooru:
- if caption:
- caption += ", "
- caption += deepbooru.model.tag_multi(image)
-
- filename_part = params.src
- filename_part = os.path.splitext(filename_part)[0]
- filename_part = os.path.basename(filename_part)
-
- basename = f"{index:05}-{params.subindex}-{filename_part}"
- image.save(os.path.join(params.dstdir, f"{basename}.png"))
-
- if params.preprocess_txt_action == 'prepend' and existing_caption:
- caption = f"{existing_caption} {caption}"
- elif params.preprocess_txt_action == 'append' and existing_caption:
- caption = f"{caption} {existing_caption}"
- elif params.preprocess_txt_action == 'copy' and existing_caption:
- caption = existing_caption
-
- caption = caption.strip()
-
- if caption:
- with open(os.path.join(params.dstdir, f"{basename}.txt"), "w", encoding="utf8") as file:
- file.write(caption)
-
- params.subindex += 1
-
-
-def save_pic(image, index, params, existing_caption=None):
- save_pic_with_caption(image, index, params, existing_caption=existing_caption)
-
- if params.flip:
- save_pic_with_caption(ImageOps.mirror(image), index, params, existing_caption=existing_caption)
-
-
-def split_pic(image, inverse_xy, width, height, overlap_ratio):
- if inverse_xy:
- from_w, from_h = image.height, image.width
- to_w, to_h = height, width
- else:
- from_w, from_h = image.width, image.height
- to_w, to_h = width, height
- h = from_h * to_w // from_w
- if inverse_xy:
- image = image.resize((h, to_w))
- else:
- image = image.resize((to_w, h))
-
- split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
- y_step = (h - to_h) / (split_count - 1)
- for i in range(split_count):
- y = int(y_step * i)
- if inverse_xy:
- splitted = image.crop((y, 0, y + to_h, to_w))
- else:
- splitted = image.crop((0, y, to_w, y + to_h))
- yield splitted
-
-# not using torchvision.transforms.CenterCrop because it doesn't allow float regions
-def center_crop(image: Image, w: int, h: int):
- iw, ih = image.size
- if ih / h < iw / w:
- sw = w * ih / h
- box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
- else:
- sh = h * iw / w
- box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
- return image.resize((w, h), Image.Resampling.LANCZOS, box)
-
-
-def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
- iw, ih = image.size
- err = lambda w, h: 1-(lambda x: x if x < 1 else 1/x)(iw/ih/(w/h))
- wh = max(((w, h) for w in range(mindim, maxdim+1, 64) for h in range(mindim, maxdim+1, 64)
- if minarea <= w * h <= maxarea and err(w, h) <= threshold),
- key= lambda wh: (wh[0]*wh[1], -err(*wh))[::1 if objective=='Maximize area' else -1],
- default=None
- )
- return wh and center_crop(image, *wh)
-
-
-def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
- width = process_width
- height = process_height
- src = os.path.abspath(process_src)
- dst = os.path.abspath(process_dst)
- split_threshold = max(0.0, min(1.0, split_threshold))
- overlap_ratio = max(0.0, min(0.9, overlap_ratio))
-
- assert src != dst, 'same directory specified as source and destination'
-
- os.makedirs(dst, exist_ok=True)
-
- files = listfiles(src)
-
- shared.state.job = "preprocess"
- shared.state.textinfo = "Preprocessing..."
- shared.state.job_count = len(files)
-
- params = PreprocessParams()
- params.dstdir = dst
- params.flip = process_flip
- params.process_caption = process_caption
- params.process_caption_deepbooru = process_caption_deepbooru
- params.preprocess_txt_action = preprocess_txt_action
-
- pbar = tqdm.tqdm(files)
- for index, imagefile in enumerate(pbar):
- params.subindex = 0
- filename = os.path.join(src, imagefile)
- try:
- img = Image.open(filename)
- img = ImageOps.exif_transpose(img)
- img = img.convert("RGB")
- except Exception:
- continue
-
- description = f"Preprocessing [Image {index}/{len(files)}]"
- pbar.set_description(description)
- shared.state.textinfo = description
-
- params.src = filename
-
- existing_caption = None
- existing_caption_filename = f"{os.path.splitext(filename)[0]}.txt"
- if os.path.exists(existing_caption_filename):
- with open(existing_caption_filename, 'r', encoding="utf8") as file:
- existing_caption = file.read()
-
- if shared.state.interrupted:
- break
-
- if img.height > img.width:
- ratio = (img.width * height) / (img.height * width)
- inverse_xy = False
- else:
- ratio = (img.height * width) / (img.width * height)
- inverse_xy = True
-
- process_default_resize = True
-
- if process_split and ratio < 1.0 and ratio <= split_threshold:
- for splitted in split_pic(img, inverse_xy, width, height, overlap_ratio):
- save_pic(splitted, index, params, existing_caption=existing_caption)
- process_default_resize = False
-
- if process_focal_crop and img.height != img.width:
-
- dnn_model_path = None
- try:
- dnn_model_path = autocrop.download_and_cache_models(os.path.join(paths.models_path, "opencv"))
- except Exception as e:
- print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)
-
- autocrop_settings = autocrop.Settings(
- crop_width = width,
- crop_height = height,
- face_points_weight = process_focal_crop_face_weight,
- entropy_points_weight = process_focal_crop_entropy_weight,
- corner_points_weight = process_focal_crop_edges_weight,
- annotate_image = process_focal_crop_debug,
- dnn_model_path = dnn_model_path,
- )
- for focal in autocrop.crop_image(img, autocrop_settings):
- save_pic(focal, index, params, existing_caption=existing_caption)
- process_default_resize = False
-
- if process_multicrop:
- cropped = multicrop_pic(img, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
- if cropped is not None:
- save_pic(cropped, index, params, existing_caption=existing_caption)
- else:
- print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)")
- process_default_resize = False
-
- if process_keep_original_size:
- save_pic(img, index, params, existing_caption=existing_caption)
- process_default_resize = False
-
- if process_default_resize:
- img = images.resize_image(1, img, width, height)
- save_pic(img, index, params, existing_caption=existing_caption)
-
- shared.state.nextjob()
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 6166c76f..c6bcab15 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -11,9 +11,8 @@ import safetensors.torch
import numpy as np
from PIL import Image, PngImagePlugin
-from torch.utils.tensorboard import SummaryWriter
-from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes
+from modules import shared, devices, sd_hijack, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes
import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler
@@ -181,31 +180,7 @@ class EmbeddingDatabase:
else:
return
- # textual inversion embeddings
- if 'string_to_param' in data:
- param_dict = data['string_to_param']
- param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11
- assert len(param_dict) == 1, 'embedding file has multiple terms in it'
- emb = next(iter(param_dict.items()))[1]
- # diffuser concepts
- elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
- assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
-
- emb = next(iter(data.values()))
- if len(emb.shape) == 1:
- emb = emb.unsqueeze(0)
- else:
- raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
-
- vec = emb.detach().to(devices.device, dtype=torch.float32)
- embedding = Embedding(vec, name)
- embedding.step = data.get('step', None)
- embedding.sd_checkpoint = data.get('sd_checkpoint', None)
- embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
- embedding.vectors = vec.shape[0]
- embedding.shape = vec.shape[-1]
- embedding.filename = path
- embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '')
+ embedding = create_embedding_from_data(data, name, filename=filename, filepath=path)
if self.expected_shape == -1 or self.expected_shape == embedding.shape:
self.register_embedding(embedding, shared.sd_model)
@@ -304,6 +279,45 @@ def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
return fn
+def create_embedding_from_data(data, name, filename='unknown embedding file', filepath=None):
+ if 'string_to_param' in data: # textual inversion embeddings
+ param_dict = data['string_to_param']
+ param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11
+ assert len(param_dict) == 1, 'embedding file has multiple terms in it'
+ emb = next(iter(param_dict.items()))[1]
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
+ shape = vec.shape[-1]
+ vectors = vec.shape[0]
+ elif type(data) == dict and 'clip_g' in data and 'clip_l' in data: # SDXL embedding
+ vec = {k: v.detach().to(devices.device, dtype=torch.float32) for k, v in data.items()}
+ shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1]
+ vectors = data['clip_g'].shape[0]
+ elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: # diffuser concepts
+ assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
+
+ emb = next(iter(data.values()))
+ if len(emb.shape) == 1:
+ emb = emb.unsqueeze(0)
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
+ shape = vec.shape[-1]
+ vectors = vec.shape[0]
+ else:
+        raise Exception(f"Couldn't identify {filename} as either a textual inversion embedding or a diffuser concept.")
+
+ embedding = Embedding(vec, name)
+ embedding.step = data.get('step', None)
+ embedding.sd_checkpoint = data.get('sd_checkpoint', None)
+ embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
+ embedding.vectors = vectors
+ embedding.shape = shape
+
+ if filepath:
+ embedding.filename = filepath
+ embedding.set_hash(hashes.sha256(filepath, "textual_inversion/" + name) or '')
+
+ return embedding
+
+
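
A short sketch of loading an embedding through the new helper, assuming the webui environment is initialized (the helper moves tensors to devices.device); the file path and embedding name are hypothetical:

    import torch

    from modules.textual_inversion import textual_inversion

    path = "embeddings/my-style.pt"                      # hypothetical embedding file
    data = torch.load(path, map_location="cpu")
    embedding = textual_inversion.create_embedding_from_data(data, "my-style", filename="my-style.pt", filepath=path)
    print(embedding.vectors, embedding.shape)            # number of vectors and per-token dimension

The same helper also handles SDXL embeddings (a dict with 'clip_g' and 'clip_l' tensors), in which case the reported shape is the sum of the two widths.
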
def write_loss(log_directory, filename, step, epoch_len, values):
if shared.opts.training_write_csv_every == 0:
return
@@ -329,6 +343,7 @@ def write_loss(log_directory, filename, step, epoch_len, values):
})
def tensorboard_setup(log_directory):
+ from torch.utils.tensorboard import SummaryWriter
os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True)
return SummaryWriter(
log_dir=os.path.join(log_directory, "tensorboard"),
@@ -377,7 +392,9 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
assert log_directory, "Log directory is empty"
-def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_name, preview_cfg_scale, preview_seed, preview_width, preview_height):
+ from modules import processing
+
save_embedding_every = save_embedding_every or 0
create_image_every = create_image_every or 0
template_file = textual_inversion_templates.get(template_filename, None)
@@ -431,8 +448,12 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
old_parallel_processing_allowed = shared.parallel_processing_allowed
+ tensorboard_writer = None
if shared.opts.training_enable_tensorboard:
- tensorboard_writer = tensorboard_setup(log_directory)
+ try:
+ tensorboard_writer = tensorboard_setup(log_directory)
+ except ImportError:
+ errors.report("Error initializing tensorboard", exc_info=True)
pin_memory = shared.opts.pin_memory
@@ -579,7 +600,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
p.steps = preview_steps
- p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
+ p.sampler_name = sd_samplers.samplers_map[preview_sampler_name.lower()]
p.cfg_scale = preview_cfg_scale
p.seed = preview_seed
p.width = preview_width
@@ -605,7 +626,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
- if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
+ if tensorboard_writer and shared.opts.training_tensorboard_save_images:
tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, embedding.step)
if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
index 35c4feef..f149ad1f 100644
--- a/modules/textual_inversion/ui.py
+++ b/modules/textual_inversion/ui.py
@@ -3,7 +3,6 @@ import html
import gradio as gr
import modules.textual_inversion.textual_inversion
-import modules.textual_inversion.preprocess
from modules import sd_hijack, shared
@@ -15,12 +14,6 @@ def create_embedding(name, initialization_text, nvpt, overwrite_old):
return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", ""
-def preprocess(*args):
- modules.textual_inversion.preprocess.preprocess(*args)
-
- return f"Preprocessing {'interrupted' if shared.state.interrupted else 'finished'}.", ""
-
-
def train_embedding(*args):
assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible'
diff --git a/modules/timer.py b/modules/timer.py
index da99e49f..1d38595c 100644
--- a/modules/timer.py
+++ b/modules/timer.py
@@ -1,4 +1,5 @@
import time
+import argparse
class TimerSubcategory:
@@ -11,20 +12,27 @@ class TimerSubcategory:
def __enter__(self):
self.start = time.time()
self.timer.base_category = self.original_base_category + self.category + "/"
+ self.timer.subcategory_level += 1
+
+ if self.timer.print_log:
+ print(f"{' ' * self.timer.subcategory_level}{self.category}:")
def __exit__(self, exc_type, exc_val, exc_tb):
elapsed_for_subcategory = time.time() - self.start
self.timer.base_category = self.original_base_category
self.timer.add_time_to_record(self.original_base_category + self.category, elapsed_for_subcategory)
- self.timer.record(self.category)
+ self.timer.subcategory_level -= 1
+ self.timer.record(self.category, disable_log=True)
class Timer:
- def __init__(self):
+ def __init__(self, print_log=False):
self.start = time.time()
self.records = {}
self.total = 0
self.base_category = ''
+ self.print_log = print_log
+ self.subcategory_level = 0
def elapsed(self):
end = time.time()
@@ -38,13 +46,16 @@ class Timer:
self.records[category] += amount
- def record(self, category, extra_time=0):
+ def record(self, category, extra_time=0, disable_log=False):
e = self.elapsed()
self.add_time_to_record(self.base_category + category, e + extra_time)
self.total += e + extra_time
+ if self.print_log and not disable_log:
+ print(f"{' ' * self.subcategory_level}{category}: done in {e + extra_time:.3f}s")
+
def subcategory(self, name):
self.elapsed()
@@ -71,6 +82,10 @@ class Timer:
self.__init__()
-startup_timer = Timer()
+parser = argparse.ArgumentParser(add_help=False)
+parser.add_argument("--log-startup", action='store_true', help="print a detailed log of what's happening at startup")
+args = parser.parse_known_args()[0]
+
+startup_timer = Timer(print_log=args.log_startup)
startup_record = None
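
The startup timer now honours a --log-startup flag (read with a standalone parse_known_args scan so it works before the main argument parser exists): with print_log enabled, each record() prints "<category>: done in N.NNNs" and entries inside a subcategory are indented one space per nesting level. A small sketch on a standalone timer, with illustrative category names:

    from modules.timer import Timer

    timer = Timer(print_log=True)

    with timer.subcategory("load model"):
        timer.record("read checkpoint")                  # printed indented under "load model:"
        timer.record("apply weights")
    timer.record("load scripts")                         # printed at the top level
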
diff --git a/modules/torch_utils.py b/modules/torch_utils.py
new file mode 100644
index 00000000..e5b52393
--- /dev/null
+++ b/modules/torch_utils.py
@@ -0,0 +1,17 @@
+from __future__ import annotations
+
+import torch.nn
+
+
+def get_param(model) -> torch.nn.Parameter:
+ """
+ Find the first parameter in a model or module.
+ """
+ if hasattr(model, "model") and hasattr(model.model, "parameters"):
+ # Unpeel a model descriptor to get at the actual Torch module.
+ model = model.model
+
+ for param in model.parameters():
+ return param
+
+ raise ValueError(f"No parameters found in model {model!r}")
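
get_param also unpeels wrapper objects that expose the real torch module as .model, which makes it convenient for reading a model's device or dtype. A quick sketch; the SimpleNamespace wrapper merely stands in for such a descriptor:

    import types

    import torch.nn

    from modules.torch_utils import get_param

    net = torch.nn.Linear(4, 4)
    wrapped = types.SimpleNamespace(model=net)           # illustrative wrapper with a .model attribute

    p = get_param(wrapped)
    print(p.device, p.dtype)                             # device/dtype of the first parameter found
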
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 29d94e8c..3a481915 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -1,15 +1,15 @@
from contextlib import closing
import modules.scripts
-from modules import sd_samplers, processing
-from modules.generation_parameters_copypaste import create_override_settings_dict
-from modules.shared import opts, cmd_opts
+from modules import processing
+from modules.infotext import create_override_settings_dict
+from modules.shared import opts
import modules.shared as shared
from modules.ui import plaintext_to_html
import gradio as gr
-def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
override_settings = create_override_settings_dict(override_settings_texts)
p = processing.StableDiffusionProcessingTxt2Img(
@@ -19,21 +19,13 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
prompt=prompt,
styles=prompt_styles,
negative_prompt=negative_prompt,
- seed=seed,
- subseed=subseed,
- subseed_strength=subseed_strength,
- seed_resize_from_h=seed_resize_from_h,
- seed_resize_from_w=seed_resize_from_w,
- seed_enable_extras=seed_enable_extras,
- sampler_name=sd_samplers.samplers[sampler_index].name,
+ sampler_name=sampler_name,
batch_size=batch_size,
n_iter=n_iter,
steps=steps,
cfg_scale=cfg_scale,
width=width,
height=height,
- restore_faces=restore_faces,
- tiling=tiling,
enable_hr=enable_hr,
denoising_strength=denoising_strength if enable_hr else None,
hr_scale=hr_scale,
@@ -41,7 +33,8 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
hr_second_pass_steps=hr_second_pass_steps,
hr_resize_x=hr_resize_x,
hr_resize_y=hr_resize_y,
- hr_sampler_name=sd_samplers.samplers_for_img2img[hr_sampler_index - 1].name if hr_sampler_index != 0 else None,
+ hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name,
+ hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name,
hr_prompt=hr_prompt,
hr_negative_prompt=hr_negative_prompt,
override_settings=override_settings,
@@ -52,7 +45,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
p.user = request.username
- if cmd_opts.enable_console_prompts:
+ if shared.opts.enable_console_prompts:
print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
with closing(p):
diff --git a/modules/ui.py b/modules/ui.py
index 07ecee7b..6451e14c 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1,10 +1,10 @@
import datetime
-import json
import mimetypes
import os
import sys
from functools import reduce
import warnings
+from contextlib import ExitStack
import gradio as gr
import gradio.utils
@@ -12,39 +12,36 @@ import numpy as np
from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo
-from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
+from modules import gradio_extensons # noqa: F401
+from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, scripts, sd_samplers, processing, ui_extra_networks, ui_toprow
+from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion, ResizeHandleRow
from modules.paths import script_path
from modules.ui_common import create_refresh_button
from modules.ui_gradio_extensions import reload_javascript
-
from modules.shared import opts, cmd_opts
-import modules.codeformer_model
-import modules.generation_parameters_copypaste as parameters_copypaste
-import modules.gfpgan_model
-import modules.hypernetworks.ui
-import modules.scripts
+import modules.infotext as parameters_copypaste
+import modules.hypernetworks.ui as hypernetworks_ui
+import modules.textual_inversion.ui as textual_inversion_ui
+import modules.textual_inversion.textual_inversion as textual_inversion
import modules.shared as shared
-import modules.styles
-import modules.textual_inversion.ui
from modules import prompt_parser
from modules.sd_hijack import model_hijack
-from modules.sd_samplers import samplers, samplers_for_img2img
-from modules.textual_inversion import textual_inversion
-import modules.hypernetworks.ui
-from modules.generation_parameters_copypaste import image_from_url_text
-import modules.extras
+from modules.infotext import image_from_url_text, PasteField
create_setting_component = ui_settings.create_setting_component
warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning)
+warnings.filterwarnings("default" if opts.show_gradio_deprecation_warnings else "ignore", category=gr.deprecation.GradioDeprecationWarning)
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
+# Likewise, add explicit content-type header for certain missing image types
+mimetypes.add_type('image/webp', '.webp')
+
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
@@ -80,7 +77,6 @@ extra_networks_symbol = '\U0001F3B4' # 🎴
switch_values_symbol = '\U000021C5' # ⇅
restore_progress_symbol = '\U0001F300' # 🌀
detect_image_size_symbol = '\U0001F4D0' # 📐
-up_down_symbol = '\u2195\ufe0f' # ↕️
plaintext_to_html = ui_common.plaintext_to_html
@@ -92,31 +88,14 @@ def send_gradio_gallery_to_image(x):
return image_from_url_text(x[0])
-def add_style(name: str, prompt: str, negative_prompt: str):
- if name is None:
- return [gr_show() for x in range(4)]
-
- style = modules.styles.PromptStyle(name, prompt, negative_prompt)
- shared.prompt_styles.styles[style.name] = style
- # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
- # reserialize all styles every time we save them
- shared.prompt_styles.save_styles(shared.styles_filename)
-
- return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)]
-
-
def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y):
- from modules import processing, devices
-
if not enable:
return ""
p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y)
+ p.calculate_target_resolution()
- with devices.autocast():
- p.init([""], [0], [0])
-
- return f"resize: from <span class='resolution'>{p.width}x{p.height}</span> to <span class='resolution'>{p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}</span>"
+ return f"from <span class='resolution'>{p.width}x{p.height}</span> to <span class='resolution'>{p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}</span>"
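
The hires-resolution preview now asks the processing object for its target resolution directly instead of running an autocast init pass. A rough sketch of the same computation outside the UI, with illustrative sizes:

    from modules import processing

    p = processing.StableDiffusionProcessingTxt2Img(
        width=512, height=512, enable_hr=True, hr_scale=2.0, hr_resize_x=0, hr_resize_y=0,
    )
    p.calculate_target_resolution()
    print(p.hr_upscale_to_x, p.hr_upscale_to_y)          # e.g. 1024 1024 for a 2x upscale
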
def resize_from_to_html(width, height, scale_by):
@@ -129,13 +108,6 @@ def resize_from_to_html(width, height, scale_by):
return f"resize: from <span class='resolution'>{width}x{height}</span> to <span class='resolution'>{target_width}x{target_height}</span>"
-def apply_styles(prompt, prompt_neg, styles):
- prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
- prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles)
-
- return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])]
-
-
def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles):
if mode in {0, 1, 3, 4}:
return [interrogation_function(ii_singles[mode]), None]
@@ -169,43 +141,6 @@ def interrogate_deepbooru(image):
return gr.update() if prompt is None else prompt
-def create_seed_inputs(target_interface):
- with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"):
- seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed")
- seed.style(container=False)
- random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed')
- reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed')
-
- seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False)
-
- # Components to show/hide based on the 'Extra' checkbox
- seed_extras = []
-
- with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1:
- seed_extras.append(seed_extra_row_1)
- subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed")
- subseed.style(container=False)
- random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed")
- reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed")
- subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength")
-
- with FormRow(visible=False) as seed_extra_row_2:
- seed_extras.append(seed_extra_row_2)
- seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w")
- seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h")
-
- random_seed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_seed')}", show_progress=False, inputs=[], outputs=[])
- random_subseed.click(fn=None, _js="function(){setRandomSeed('" + target_interface + "_subseed')}", show_progress=False, inputs=[], outputs=[])
-
- def change_visibility(show):
- return {comp: gr_show(show) for comp in seed_extras}
-
- seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
-
- return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
-
-
-
def connect_clear_prompt(button):
"""Given clear button, prompt, and token_counter objects, setup clear prompt button click event"""
button.click(
@@ -216,44 +151,15 @@ def connect_clear_prompt(button):
)
-def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
- """ Connects a 'reuse (sub)seed' button's click event so that it copies last used
- (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength
- was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
- def copy_seed(gen_info_string: str, index):
- res = -1
-
- try:
- gen_info = json.loads(gen_info_string)
- index -= gen_info.get('index_of_first_image', 0)
-
- if is_subseed and gen_info.get('subseed_strength', 0) > 0:
- all_subseeds = gen_info.get('all_subseeds', [-1])
- res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
- else:
- all_seeds = gen_info.get('all_seeds', [-1])
- res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
-
- except json.decoder.JSONDecodeError:
- if gen_info_string:
- errors.report(f"Error parsing JSON generation info: {gen_info_string}")
-
- return [res, gr_show(False)]
-
- reuse_seed.click(
- fn=copy_seed,
- _js="(x, y) => [x, selected_gallery_index()]",
- show_progress=False,
- inputs=[generation_info, dummy_component],
- outputs=[seed, dummy_component]
- )
-
-
-def update_token_counter(text, steps):
+def update_token_counter(text, steps, *, is_positive=True):
try:
text, _ = extra_networks.parse_prompt(text)
- _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
+ if is_positive:
+ _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
+ else:
+ prompt_flat_list = [text]
+
prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
except Exception:
@@ -267,71 +173,9 @@ def update_token_counter(text, steps):
return f"<span class='gr-box gr-text-input'>{token_count}/{max_length}</span>"
-def create_toprow(is_img2img):
- id_part = "img2img" if is_img2img else "txt2img"
+def update_negative_prompt_token_counter(text, steps):
+ return update_token_counter(text, steps, is_positive=False)
- with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
- with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6):
- with gr.Row():
- with gr.Column(scale=80):
- with gr.Row():
- prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])
-
- with gr.Row():
- with gr.Column(scale=80):
- with gr.Row():
- negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])
-
- button_interrogate = None
- button_deepbooru = None
- if is_img2img:
- with gr.Column(scale=1, elem_classes="interrogate-col"):
- button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
- button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
-
- with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"):
- with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"):
- interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt")
- skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip")
- submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
-
- skip.click(
- fn=lambda: shared.state.skip(),
- inputs=[],
- outputs=[],
- )
-
- interrupt.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
-
- with gr.Row(elem_id=f"{id_part}_tools"):
- paste = ToolButton(value=paste_symbol, elem_id="paste")
- clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt")
- extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks")
- prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply")
- save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create")
- restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False)
-
- token_counter = gr.HTML(value="<span>0/75</span>", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"])
- token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
- negative_token_counter = gr.HTML(value="<span>0/75</span>", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"])
- negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button")
-
- clear_prompt_button.click(
- fn=lambda *x: x,
- _js="confirm_clear_prompt",
- inputs=[prompt, negative_prompt],
- outputs=[prompt, negative_prompt],
- )
-
- with gr.Row(elem_id=f"{id_part}_styles_row"):
- prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True)
- create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles")
-
- return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button
def setup_progressbar(*args, **kwargs):
@@ -371,21 +215,21 @@ def apply_setting(key, value):
return getattr(opts, key)
-def create_output_panel(tabname, outdir):
- return ui_common.create_output_panel(tabname, outdir)
+def create_output_panel(tabname, outdir, toprow=None):
+ return ui_common.create_output_panel(tabname, outdir, toprow)
def create_sampler_and_steps_selection(choices, tabname):
if opts.samplers_in_dropdown:
with FormRow(elem_id=f"sampler_selection_{tabname}"):
- sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
+ sampler_name = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0])
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
else:
with FormGroup(elem_id=f"sampler_selection_{tabname}"):
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
- sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
+ sampler_name = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0])
- return steps, sampler_index
+ return steps, sampler_name
def ordered_ui_categories():
@@ -415,26 +259,31 @@ def create_ui():
parameters_copypaste.reset()
- modules.scripts.scripts_current = modules.scripts.scripts_txt2img
- modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False)
+ scripts.scripts_current = scripts.scripts_txt2img
+ scripts.scripts_txt2img.initialize_scripts(is_img2img=False)
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
- txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False)
+ toprow = ui_toprow.Toprow(is_img2img=False, is_compact=shared.opts.compact_prompt_box)
dummy_component = gr.Label(visible=False)
- txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False)
- with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks:
- from modules import ui_extra_networks
- extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img')
+ extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs")
+ extra_tabs.__enter__()
+
+ with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False):
+ with ExitStack() as stack:
+ if shared.opts.txt2img_settings_accordion:
+ stack.enter_context(gr.Accordion("Open for Settings", open=False))
+ stack.enter_context(gr.Column(variant='compact', elem_id="txt2img_settings"))
- with gr.Row().style(equal_height=False):
- with gr.Column(variant='compact', elem_id="txt2img_settings"):
- modules.scripts.scripts_txt2img.prepare_ui()
+ scripts.scripts_txt2img.prepare_ui()
for category in ordered_ui_categories():
+ if category == "prompt":
+ toprow.create_inline_toprow_prompts()
+
if category == "sampler":
- steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")
+ steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img")
elif category == "dimensions":
with FormRow():
@@ -443,7 +292,7 @@ def create_ui():
height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
- res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims")
+ res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", tooltip="Switch width/height")
if opts.dimensions_and_batch_together:
with gr.Column(elem_id="txt2img_column_batch"):
@@ -451,40 +300,45 @@ def create_ui():
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
elif category == "cfg":
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")
-
- elif category == "seed":
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img')
+ with gr.Row():
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")
elif category == "checkboxes":
with FormRow(elem_classes="checkboxes-row", variant="compact"):
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
- tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
- enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
- hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
-
- elif category == "hires_fix":
- with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options:
- with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"):
- hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
- hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps")
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
-
- with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"):
- hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
- hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
- hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
-
- with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container:
- hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index")
-
- with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
- with gr.Column(scale=80):
- with gr.Row():
- hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
- with gr.Column(scale=80):
- with gr.Row():
- hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
+ pass
+
+ elif category == "accordions":
+ with gr.Row(elem_id="txt2img_accordions", elem_classes="accordions"):
+ with InputAccordion(False, label="Hires. fix", elem_id="txt2img_hr") as enable_hr:
+ with enable_hr.extra():
+ hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0)
+
+ with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"):
+ hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+ hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
+
+ with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"):
+ hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
+ hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
+ hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
+
+ with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container:
+
+ hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint")
+ create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh")
+
+ hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler")
+
+ with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
+ with gr.Column(scale=80):
+ with gr.Row():
+ hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
+ with gr.Column(scale=80):
+ with gr.Row():
+ hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
+
+ scripts.scripts_txt2img.setup_ui_for_section(category)
elif category == "batch":
if not opts.dimensions_and_batch_together:
@@ -498,10 +352,10 @@ def create_ui():
elif category == "scripts":
with FormGroup(elem_id="txt2img_script_container"):
- custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
+ custom_inputs = scripts.scripts_txt2img.setup_ui()
- else:
- modules.scripts.scripts_txt2img.setup_ui_for_section(category)
+ if category not in {"accordions"}:
+ scripts.scripts_txt2img.setup_ui_for_section(category)
hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y]
@@ -522,28 +376,21 @@ def create_ui():
show_progress=False,
)
- txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
-
- connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
- connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
+ txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples, toprow)
txt2img_args = dict(
fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']),
_js="submit",
inputs=[
dummy_component,
- txt2img_prompt,
- txt2img_negative_prompt,
- txt2img_prompt_styles,
+ toprow.prompt,
+ toprow.negative_prompt,
+ toprow.ui_styles.dropdown,
steps,
- sampler_index,
- restore_faces,
- tiling,
+ sampler_name,
batch_count,
batch_size,
cfg_scale,
- seed,
- subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
enable_hr,
@@ -553,7 +400,8 @@ def create_ui():
hr_second_pass_steps,
hr_resize_x,
hr_resize_y,
- hr_sampler_index,
+ hr_checkpoint_name,
+ hr_sampler_name,
hr_prompt,
hr_negative_prompt,
override_settings,
@@ -569,12 +417,12 @@ def create_ui():
show_progress=False,
)
- txt2img_prompt.submit(**txt2img_args)
- submit.click(**txt2img_args)
+ toprow.prompt.submit(**txt2img_args)
+ toprow.submit.click(**txt2img_args)
res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('txt2img')}", inputs=None, outputs=None, show_progress=False)
- restore_progress_button.click(
+ toprow.restore_progress_button.click(
fn=progress.restore_progress,
_js="restoreProgressTxt2img",
inputs=[dummy_component],
@@ -587,91 +435,70 @@ def create_ui():
show_progress=False,
)
- txt_prompt_img.change(
- fn=modules.images.image_data,
- inputs=[
- txt_prompt_img
- ],
- outputs=[
- txt2img_prompt,
- txt_prompt_img
- ],
- show_progress=False,
- )
-
- enable_hr.change(
- fn=lambda x: gr_show(x),
- inputs=[enable_hr],
- outputs=[hr_options],
- show_progress = False,
- )
-
txt2img_paste_fields = [
- (txt2img_prompt, "Prompt"),
- (txt2img_negative_prompt, "Negative prompt"),
- (steps, "Steps"),
- (sampler_index, "Sampler"),
- (restore_faces, "Face restoration"),
- (cfg_scale, "CFG scale"),
- (seed, "Seed"),
- (width, "Size-1"),
- (height, "Size-2"),
- (batch_size, "Batch size"),
- (subseed, "Variation seed"),
- (subseed_strength, "Variation seed strength"),
- (seed_resize_from_w, "Seed resize from-1"),
- (seed_resize_from_h, "Seed resize from-2"),
- (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
- (denoising_strength, "Denoising strength"),
- (enable_hr, lambda d: "Denoising strength" in d),
- (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
- (hr_scale, "Hires upscale"),
- (hr_upscaler, "Hires upscaler"),
- (hr_second_pass_steps, "Hires steps"),
- (hr_resize_x, "Hires resize-1"),
- (hr_resize_y, "Hires resize-2"),
- (hr_sampler_index, "Hires sampler"),
- (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()),
- (hr_prompt, "Hires prompt"),
- (hr_negative_prompt, "Hires negative prompt"),
- (hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
- *modules.scripts.scripts_txt2img.infotext_fields
+ PasteField(toprow.prompt, "Prompt", api="prompt"),
+ PasteField(toprow.negative_prompt, "Negative prompt", api="negative_prompt"),
+ PasteField(steps, "Steps", api="steps"),
+ PasteField(sampler_name, "Sampler", api="sampler_name"),
+ PasteField(cfg_scale, "CFG scale", api="cfg_scale"),
+ PasteField(width, "Size-1", api="width"),
+ PasteField(height, "Size-2", api="height"),
+ PasteField(batch_size, "Batch size", api="batch_size"),
+ PasteField(toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update(), api="styles"),
+ PasteField(denoising_strength, "Denoising strength", api="denoising_strength"),
+ PasteField(enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d), api="enable_hr"),
+ PasteField(hr_scale, "Hires upscale", api="hr_scale"),
+ PasteField(hr_upscaler, "Hires upscaler", api="hr_upscaler"),
+ PasteField(hr_second_pass_steps, "Hires steps", api="hr_second_pass_steps"),
+ PasteField(hr_resize_x, "Hires resize-1", api="hr_resize_x"),
+ PasteField(hr_resize_y, "Hires resize-2", api="hr_resize_y"),
+ PasteField(hr_checkpoint_name, "Hires checkpoint", api="hr_checkpoint_name"),
+ PasteField(hr_sampler_name, "Hires sampler", api="hr_sampler_name"),
+ PasteField(hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" or d.get("Hires checkpoint", "Use same checkpoint") != "Use same checkpoint" else gr.update()),
+ PasteField(hr_prompt, "Hires prompt", api="hr_prompt"),
+ PasteField(hr_negative_prompt, "Hires negative prompt", api="hr_negative_prompt"),
+ PasteField(hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
+ *scripts.scripts_txt2img.infotext_fields
]
parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings)
parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
- paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None,
+ paste_button=toprow.paste, tabname="txt2img", source_text_component=toprow.prompt, source_image_component=None,
))
txt2img_preview_params = [
- txt2img_prompt,
- txt2img_negative_prompt,
+ toprow.prompt,
+ toprow.negative_prompt,
steps,
- sampler_index,
+ sampler_name,
cfg_scale,
- seed,
+ scripts.scripts_txt2img.script('Seed').seed,
width,
height,
]
- token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter])
- negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter])
+ toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter])
+ toprow.negative_token_button.click(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter])
- ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery)
+ extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img')
+ ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery)
- modules.scripts.scripts_current = modules.scripts.scripts_img2img
- modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True)
+ extra_tabs.__exit__()
+
+ scripts.scripts_current = scripts.scripts_img2img
+ scripts.scripts_img2img.initialize_scripts(is_img2img=True)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
- img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True)
+ toprow = ui_toprow.Toprow(is_img2img=True, is_compact=shared.opts.compact_prompt_box)
- img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False)
+ extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs")
+ extra_tabs.__enter__()
- with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks:
- from modules import ui_extra_networks
- extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img')
+ with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False):
+ with ExitStack() as stack:
+ if shared.opts.img2img_settings_accordion:
+ stack.enter_context(gr.Accordion("Open for Settings", open=False))
+ stack.enter_context(gr.Column(variant='compact', elem_id="img2img_settings"))
- with FormRow().style(equal_height=False):
- with gr.Column(variant='compact', elem_id="img2img_settings"):
copy_image_buttons = []
copy_image_destinations = {}
@@ -688,87 +515,91 @@ def create_ui():
button = gr.Button(title)
copy_image_buttons.append((button, name, elem))
- with gr.Tabs(elem_id="mode_img2img"):
- img2img_selected_tab = gr.State(0)
-
- with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
- init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=opts.img2img_editor_height)
- add_copy_image_controls('img2img', init_img)
-
- with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
- sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height)
- add_copy_image_controls('sketch', sketch)
-
- with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
- init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=opts.img2img_editor_height)
- add_copy_image_controls('inpaint', init_img_with_mask)
-
- with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
- inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=opts.img2img_editor_height)
- inpaint_color_sketch_orig = gr.State(None)
- add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
-
- def update_orig(image, state):
- if image is not None:
- same_size = state is not None and state.size == image.size
- has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
- edited = same_size and has_exact_match
- return image if not edited or state is None else state
-
- inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig)
-
- with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
- init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
- init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask")
-
- with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
- hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
- gr.HTML(
- "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
- "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
- f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
- f"{hidden}</p>"
- )
- img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
- img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
- img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
- with gr.Accordion("PNG info", open=False):
- img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info")
- img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir")
- img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.")
-
- img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]
-
- for i, tab in enumerate(img2img_tabs):
- tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab])
-
- def copy_image(img):
- if isinstance(img, dict) and 'image' in img:
- return img['image']
-
- return img
-
- for button, name, elem in copy_image_buttons:
- button.click(
- fn=copy_image,
- inputs=[elem],
- outputs=[copy_image_destinations[name]],
- )
- button.click(
- fn=lambda: None,
- _js=f"switch_to_{name.replace(' ', '_')}",
- inputs=[],
- outputs=[],
- )
-
- with FormRow():
- resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
-
- modules.scripts.scripts_img2img.prepare_ui()
+ scripts.scripts_img2img.prepare_ui()
for category in ordered_ui_categories():
+ if category == "prompt":
+ toprow.create_inline_toprow_prompts()
+
+ if category == "image":
+ with gr.Tabs(elem_id="mode_img2img"):
+ img2img_selected_tab = gr.State(0)
+
+ with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
+ init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height)
+ add_copy_image_controls('img2img', init_img)
+
+ with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
+ sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
+ add_copy_image_controls('sketch', sketch)
+
+ with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
+ init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color)
+ add_copy_image_controls('inpaint', init_img_with_mask)
+
+ with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
+ inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
+ inpaint_color_sketch_orig = gr.State(None)
+ add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
+
+ def update_orig(image, state):
+ if image is not None:
+ same_size = state is not None and state.size == image.size
+ has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
+ edited = same_size and has_exact_match
+ return image if not edited or state is None else state
+
+ inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig)
+
+ with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
+ init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
+ init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask")
+
+ with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
+ hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
+ gr.HTML(
+ "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
+ "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+ f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
+ f"{hidden}</p>"
+ )
+ img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
+ img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
+ img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
+ with gr.Accordion("PNG info", open=False):
+ img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info")
+ img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir")
+ img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.")
+
+ img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]
+
+ for i, tab in enumerate(img2img_tabs):
+ tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab])
+
+ def copy_image(img):
+ if isinstance(img, dict) and 'image' in img:
+ return img['image']
+
+ return img
+
+ for button, name, elem in copy_image_buttons:
+ button.click(
+ fn=copy_image,
+ inputs=[elem],
+ outputs=[copy_image_destinations[name]],
+ )
+ button.click(
+ fn=lambda: None,
+ _js=f"switch_to_{name.replace(' ', '_')}",
+ inputs=[],
+ outputs=[],
+ )
+
+ with FormRow():
+ resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
+
if category == "sampler":
- steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img")
+ steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img")
elif category == "dimensions":
with FormRow():
@@ -782,8 +613,8 @@ def create_ui():
width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
- res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
- detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn")
+ res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height")
+ detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img")
with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by:
scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale")
@@ -804,12 +635,6 @@ def create_ui():
scale_by.release(**on_change_args)
button_update_resize_to.click(**on_change_args)
- # the code below is meant to update the resolution label after the image in the image selection UI has changed.
- # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests.
- # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs.
- for component in [init_img, sketch]:
- component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False)
-
tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab])
tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab])
@@ -818,20 +643,21 @@ def create_ui():
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
- elif category == "cfg":
- with FormGroup():
- with FormRow():
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
- image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False)
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
+ elif category == "denoising":
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
- elif category == "seed":
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
+ elif category == "cfg":
+ with gr.Row():
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
+ image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False)
elif category == "checkboxes":
with FormRow(elem_classes="checkboxes-row", variant="compact"):
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
- tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
+ pass
+
+ elif category == "accordions":
+ with gr.Row(elem_id="img2img_accordions", elem_classes="accordions"):
+ scripts.scripts_img2img.setup_ui_for_section(category)
elif category == "batch":
if not opts.dimensions_and_batch_together:
@@ -845,7 +671,7 @@ def create_ui():
elif category == "scripts":
with FormGroup(elem_id="img2img_script_container"):
- custom_inputs = modules.scripts.scripts_img2img.setup_ui()
+ custom_inputs = scripts.scripts_img2img.setup_ui()
elif category == "inpaint":
with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls:
@@ -866,34 +692,26 @@ def create_ui():
with gr.Column(scale=4):
inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding")
- def select_img2img_tab(tab):
- return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3),
+ if category not in {"accordions"}:
+ scripts.scripts_img2img.setup_ui_for_section(category)
- for i, elem in enumerate(img2img_tabs):
- elem.select(
- fn=lambda tab=i: select_img2img_tab(tab),
- inputs=[],
- outputs=[inpaint_controls, mask_alpha],
- )
- else:
- modules.scripts.scripts_img2img.setup_ui_for_section(category)
+ # the code below is meant to update the resolution label after the image in the image selection UI has changed.
+ # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests.
+ # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs.
+ for component in [init_img, sketch]:
+ component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False)
- img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
+ def select_img2img_tab(tab):
+ return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3),
- connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
- connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
+ for i, elem in enumerate(img2img_tabs):
+ elem.select(
+ fn=lambda tab=i: select_img2img_tab(tab),
+ inputs=[],
+ outputs=[inpaint_controls, mask_alpha],
+ )
- img2img_prompt_img.change(
- fn=modules.images.image_data,
- inputs=[
- img2img_prompt_img
- ],
- outputs=[
- img2img_prompt,
- img2img_prompt_img
- ],
- show_progress=False,
- )
+ img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples, toprow)
img2img_args = dict(
fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']),
@@ -901,9 +719,9 @@ def create_ui():
inputs=[
dummy_component,
dummy_component,
- img2img_prompt,
- img2img_negative_prompt,
- img2img_prompt_styles,
+ toprow.prompt,
+ toprow.negative_prompt,
+ toprow.ui_styles.dropdown,
init_img,
sketch,
init_img_with_mask,
@@ -912,19 +730,15 @@ def create_ui():
init_img_inpaint,
init_mask_inpaint,
steps,
- sampler_index,
+ sampler_name,
mask_blur,
mask_alpha,
inpainting_fill,
- restore_faces,
- tiling,
batch_count,
batch_size,
cfg_scale,
image_cfg_scale,
denoising_strength,
- seed,
- subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
selected_scale_tab,
height,
width,
@@ -962,11 +776,11 @@ def create_ui():
inpaint_color_sketch,
init_img_inpaint,
],
- outputs=[img2img_prompt, dummy_component],
+ outputs=[toprow.prompt, dummy_component],
)
- img2img_prompt.submit(**img2img_args)
- submit.click(**img2img_args)
+ toprow.prompt.submit(**img2img_args)
+ toprow.submit.click(**img2img_args)
res_switch_btn.click(fn=None, _js="function(){switchWidthHeight('img2img')}", inputs=None, outputs=None, show_progress=False)
@@ -978,7 +792,7 @@ def create_ui():
show_progress=False,
)
- restore_progress_button.click(
+ toprow.restore_progress_button.click(
fn=progress.restore_progress,
_js="restoreProgressImg2img",
inputs=[dummy_component],
@@ -991,77 +805,52 @@ def create_ui():
show_progress=False,
)
- img2img_interrogate.click(
+ toprow.button_interrogate.click(
fn=lambda *args: process_interrogate(interrogate, *args),
**interrogate_args,
)
- img2img_deepbooru.click(
+ toprow.button_deepbooru.click(
fn=lambda *args: process_interrogate(interrogate_deepbooru, *args),
**interrogate_args,
)
- prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
- style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles]
- style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
-
- for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
- button.click(
- fn=add_style,
- _js="ask_for_style_name",
- # Have to pass empty dummy component here, because the JavaScript and Python function have to accept
- # the same number of parameters, but we only know the style-name after the JavaScript prompt
- inputs=[dummy_component, prompt, negative_prompt],
- outputs=[txt2img_prompt_styles, img2img_prompt_styles],
- )
-
- for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
- button.click(
- fn=apply_styles,
- _js=js_func,
- inputs=[prompt, negative_prompt, styles],
- outputs=[prompt, negative_prompt, styles],
- )
-
- token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
- negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter])
-
- ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)
+ toprow.token_button.click(fn=update_token_counter, inputs=[toprow.prompt, steps], outputs=[toprow.token_counter])
+ toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter])
img2img_paste_fields = [
- (img2img_prompt, "Prompt"),
- (img2img_negative_prompt, "Negative prompt"),
+ (toprow.prompt, "Prompt"),
+ (toprow.negative_prompt, "Negative prompt"),
(steps, "Steps"),
- (sampler_index, "Sampler"),
- (restore_faces, "Face restoration"),
+ (sampler_name, "Sampler"),
(cfg_scale, "CFG scale"),
(image_cfg_scale, "Image CFG scale"),
- (seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
(batch_size, "Batch size"),
- (subseed, "Variation seed"),
- (subseed_strength, "Variation seed strength"),
- (seed_resize_from_w, "Seed resize from-1"),
- (seed_resize_from_h, "Seed resize from-2"),
- (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
+ (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
(denoising_strength, "Denoising strength"),
(mask_blur, "Mask blur"),
- *modules.scripts.scripts_img2img.infotext_fields
+ *scripts.scripts_img2img.infotext_fields
]
parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings)
parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings)
parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
- paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None,
+ paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None,
))
- modules.scripts.scripts_current = None
+ extra_networks_ui_img2img = ui_extra_networks.create_ui(img2img_interface, [img2img_generation_tab], 'img2img')
+ ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)
+
+ extra_tabs.__exit__()
+
+ scripts.scripts_current = None
with gr.Blocks(analytics_enabled=False) as extras_interface:
ui_postprocessing.create_ui()
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
- with gr.Row().style(equal_height=False):
+ with gr.Row(equal_height=False):
with gr.Column(variant='panel'):
image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
@@ -1083,64 +872,13 @@ def create_ui():
outputs=[html, generation_info, html2],
)
- def update_interp_description(value):
- interp_description_css = "<p style='margin-bottom: 2.5em'>{}</p>"
- interp_descriptions = {
- "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."),
- "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"),
- "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M")
- }
- return interp_descriptions[value]
-
- with gr.Blocks(analytics_enabled=False) as modelmerger_interface:
- with gr.Row().style(equal_height=False):
- with gr.Column(variant='compact'):
- interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description")
-
- with FormRow(elem_id="modelmerger_models"):
- primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)")
- create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A")
-
- secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)")
- create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B")
-
- tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)")
- create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C")
-
- custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name")
- interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount")
- interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method")
- interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description])
-
- with FormRow():
- checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format")
- save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half")
- save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata")
-
- with FormRow():
- with gr.Column():
- config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method")
-
- with gr.Column():
- with FormRow():
- bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae")
- create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae")
-
- with FormRow():
- discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights")
-
- with gr.Row():
- modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary')
-
- with gr.Column(variant='compact', elem_id="modelmerger_results_container"):
- with gr.Group(elem_id="modelmerger_results_panel"):
- modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False)
+ modelmerger_ui = ui_checkpoint_merger.UiCheckpointMerger()
with gr.Blocks(analytics_enabled=False) as train_interface:
- with gr.Row().style(equal_height=False):
+ with gr.Row(equal_height=False):
gr.HTML(value="<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>")
- with gr.Row(variant="compact").style(equal_height=False):
+ with gr.Row(variant="compact", equal_height=False):
with gr.Tabs(elem_id="train_tabs"):
with gr.Tab(label="Create embedding", id="create_embedding"):
@@ -1160,7 +898,7 @@ def create_ui():
new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name")
new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes")
new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure")
- new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func")
+ new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=hypernetworks_ui.keys, elem_id="train_new_hypernetwork_activation_func")
new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option")
new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm")
new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout")
@@ -1174,71 +912,6 @@ def create_ui():
with gr.Column():
create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
- with gr.Tab(label="Preprocess images", id="preprocess_images"):
- process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
- process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
- process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
- process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
- preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")
-
- with gr.Row():
- process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size")
- process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
- process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
- process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
- process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
- process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
- process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
-
- with gr.Row(visible=False) as process_split_extra_row:
- process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
- process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")
-
- with gr.Row(visible=False) as process_focal_crop_row:
- process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
- process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
- process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
- process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
-
- with gr.Column(visible=False) as process_multicrop_col:
- gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
- with gr.Row():
- process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
- process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
- with gr.Row():
- process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
- process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
- with gr.Row():
- process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
- process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
-
- with gr.Row():
- with gr.Column(scale=3):
- gr.HTML(value="")
-
- with gr.Column():
- with gr.Row():
- interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
- run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
-
- process_split.change(
- fn=lambda show: gr_show(show),
- inputs=[process_split],
- outputs=[process_split_extra_row],
- )
-
- process_focal_crop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_focal_crop],
- outputs=[process_focal_crop_row],
- )
-
- process_multicrop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_multicrop],
- outputs=[process_multicrop_col],
- )
-
def get_textual_inversion_template_names():
return sorted(textual_inversion.textual_inversion_templates)
@@ -1300,12 +973,12 @@ def create_ui():
with gr.Column(elem_id='ti_gallery_container'):
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
- gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
+ gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery', columns=4)
gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
create_embedding.click(
- fn=modules.textual_inversion.ui.create_embedding,
+ fn=textual_inversion_ui.create_embedding,
inputs=[
new_embedding_name,
initialization_text,
@@ -1320,7 +993,7 @@ def create_ui():
)
create_hypernetwork.click(
- fn=modules.hypernetworks.ui.create_hypernetwork,
+ fn=hypernetworks_ui.create_hypernetwork,
inputs=[
new_hypernetwork_name,
new_hypernetwork_sizes,
@@ -1339,44 +1012,8 @@ def create_ui():
]
)
- run_preprocess.click(
- fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
- _js="start_training_textual_inversion",
- inputs=[
- dummy_component,
- process_src,
- process_dst,
- process_width,
- process_height,
- preprocess_txt_action,
- process_keep_original_size,
- process_flip,
- process_split,
- process_caption,
- process_caption_deepbooru,
- process_split_threshold,
- process_overlap_ratio,
- process_focal_crop,
- process_focal_crop_face_weight,
- process_focal_crop_entropy_weight,
- process_focal_crop_edges_weight,
- process_focal_crop_debug,
- process_multicrop,
- process_multicrop_mindim,
- process_multicrop_maxdim,
- process_multicrop_minarea,
- process_multicrop_maxarea,
- process_multicrop_objective,
- process_multicrop_threshold,
- ],
- outputs=[
- ti_output,
- ti_outcome,
- ],
- )
-
train_embedding.click(
- fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
+ fn=wrap_gradio_gpu_call(textual_inversion_ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
dummy_component,
@@ -1410,7 +1047,7 @@ def create_ui():
)
train_hypernetwork.click(
- fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]),
+ fn=wrap_gradio_gpu_call(hypernetworks_ui.train_hypernetwork, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
dummy_component,
@@ -1448,12 +1085,6 @@ def create_ui():
outputs=[],
)
- interrupt_preprocessing.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
-
loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file)
settings = ui_settings.UiSettings()
@@ -1464,7 +1095,7 @@ def create_ui():
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
- (modelmerger_interface, "Checkpoint Merger", "modelmerger"),
+ (modelmerger_ui.blocks, "Checkpoint Merger", "modelmerger"),
(train_interface, "Train", "train"),
]
@@ -1493,17 +1124,14 @@ def create_ui():
with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"):
interface.render()
- for interface, _label, ifid in interfaces:
- if ifid in ["extensions", "settings"]:
- continue
-
- loadsave.add_block(interface, ifid)
+ if ifid not in ["extensions", "settings"]:
+ loadsave.add_block(interface, ifid)
loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs)
loadsave.setup_ui()
- if os.path.exists(os.path.join(script_path, "notification.mp3")):
+ if os.path.exists(os.path.join(script_path, "notification.mp3")) and shared.opts.notification_audio:
gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
footer = shared.html("footer.html")
@@ -1516,49 +1144,11 @@ def create_ui():
settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
- def modelmerger(*args):
- try:
- results = modules.extras.run_modelmerger(*args)
- except Exception as e:
- errors.report("Error loading/saving model file", exc_info=True)
- modules.sd_models.list_models() # to remove the potentially missing models from the list
- return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
- return results
-
- modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result])
- modelmerger_merge.click(
- fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]),
- _js='modelmerger',
- inputs=[
- dummy_component,
- primary_model_name,
- secondary_model_name,
- tertiary_model_name,
- interp_method,
- interp_amount,
- save_as_half,
- custom_name,
- checkpoint_format,
- config_source,
- bake_in_vae,
- discard_weights,
- save_metadata,
- ],
- outputs=[
- primary_model_name,
- secondary_model_name,
- tertiary_model_name,
- settings.component_dict['sd_model_checkpoint'],
- modelmerger_result,
- ]
- )
+ modelmerger_ui.setup_ui(dummy_component=dummy_component, sd_model_checkpoint_component=settings.component_dict['sd_model_checkpoint'])
loadsave.dump_defaults()
demo.ui_loadsave = loadsave
- # Required as a workaround for change() event not triggering when loading values from ui-config.json
- interp_description.value = update_interp_description(interp_method.value)
-
return demo
@@ -1593,7 +1183,6 @@ checkpoint: <a id="sd_checkpoint_hash">N/A</a>
def setup_ui_api(app):
from pydantic import BaseModel, Field
- from typing import List
class QuicksettingsHint(BaseModel):
name: str = Field(title="Name of the quicksettings field")
@@ -1602,7 +1191,7 @@ def setup_ui_api(app):
def quicksettings_hint():
return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()]
- app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint])
+ app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=list[QuicksettingsHint])
app.add_api_route("/internal/ping", lambda: {}, methods=["GET"])
@@ -1612,7 +1201,7 @@ def setup_ui_api(app):
from fastapi.responses import PlainTextResponse
text = sysinfo.get()
- filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt"
+ filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json"
return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'})
diff --git a/modules/ui_checkpoint_merger.py b/modules/ui_checkpoint_merger.py
new file mode 100644
index 00000000..f9c5dd6b
--- /dev/null
+++ b/modules/ui_checkpoint_merger.py
@@ -0,0 +1,124 @@
+
+import gradio as gr
+
+from modules import sd_models, sd_vae, errors, extras, call_queue
+from modules.ui_components import FormRow
+from modules.ui_common import create_refresh_button
+
+
+def update_interp_description(value):
+ interp_description_css = "<p style='margin-bottom: 2.5em'>{}</p>"
+ interp_descriptions = {
+ "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."),
+ "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"),
+ "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M")
+ }
+ return interp_descriptions[value]
+
+
+def modelmerger(*args):
+ try:
+ results = extras.run_modelmerger(*args)
+ except Exception as e:
+ errors.report("Error loading/saving model file", exc_info=True)
+ sd_models.list_models() # to remove the potentially missing models from the list
+ return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
+ return results
+
+
+class UiCheckpointMerger:
+ def __init__(self):
+ with gr.Blocks(analytics_enabled=False) as modelmerger_interface:
+ with gr.Row(equal_height=False):
+ with gr.Column(variant='compact'):
+ self.interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description")
+
+ with FormRow(elem_id="modelmerger_models"):
+ self.primary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)")
+ create_refresh_button(self.primary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_A")
+
+ self.secondary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)")
+ create_refresh_button(self.secondary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_B")
+
+ self.tertiary_model_name = gr.Dropdown(sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)")
+ create_refresh_button(self.tertiary_model_name, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, "refresh_checkpoint_C")
+
+ self.custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name")
+ self.interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount")
+ self.interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method")
+ self.interp_method.change(fn=update_interp_description, inputs=[self.interp_method], outputs=[self.interp_description])
+
+ with FormRow():
+ self.checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format")
+ self.save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half")
+
+ with FormRow():
+ with gr.Column():
+ self.config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method")
+
+ with gr.Column():
+ with FormRow():
+ self.bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae")
+ create_refresh_button(self.bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae")
+
+ with FormRow():
+ self.discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights")
+
+ with gr.Accordion("Metadata", open=False) as metadata_editor:
+ with FormRow():
+ self.save_metadata = gr.Checkbox(value=True, label="Save metadata", elem_id="modelmerger_save_metadata")
+ self.add_merge_recipe = gr.Checkbox(value=True, label="Add merge recipe metadata", elem_id="modelmerger_add_recipe")
+ self.copy_metadata_fields = gr.Checkbox(value=True, label="Copy metadata from merged models", elem_id="modelmerger_copy_metadata")
+
+ self.metadata_json = gr.TextArea('{}', label="Metadata in JSON format")
+ self.read_metadata = gr.Button("Read metadata from selected checkpoints")
+
+ with FormRow():
+ self.modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary')
+
+ with gr.Column(variant='compact', elem_id="modelmerger_results_container"):
+ with gr.Group(elem_id="modelmerger_results_panel"):
+ self.modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False)
+
+ self.metadata_editor = metadata_editor
+ self.blocks = modelmerger_interface
+
+ def setup_ui(self, dummy_component, sd_model_checkpoint_component):
+ self.checkpoint_format.change(lambda fmt: gr.update(visible=fmt == 'safetensors'), inputs=[self.checkpoint_format], outputs=[self.metadata_editor], show_progress=False)
+
+ self.read_metadata.click(extras.read_metadata, inputs=[self.primary_model_name, self.secondary_model_name, self.tertiary_model_name], outputs=[self.metadata_json])
+
+ self.modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[self.modelmerger_result])
+ self.modelmerger_merge.click(
+ fn=call_queue.wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]),
+ _js='modelmerger',
+ inputs=[
+ dummy_component,
+ self.primary_model_name,
+ self.secondary_model_name,
+ self.tertiary_model_name,
+ self.interp_method,
+ self.interp_amount,
+ self.save_as_half,
+ self.custom_name,
+ self.checkpoint_format,
+ self.config_source,
+ self.bake_in_vae,
+ self.discard_weights,
+ self.save_metadata,
+ self.add_merge_recipe,
+ self.copy_metadata_fields,
+ self.metadata_json,
+ ],
+ outputs=[
+ self.primary_model_name,
+ self.secondary_model_name,
+ self.tertiary_model_name,
+ sd_model_checkpoint_component,
+ self.modelmerger_result,
+ ]
+ )
+
+ # Required as a workaround for change() event not triggering when loading values from ui-config.json
+ self.interp_description.value = update_interp_description(self.interp_method.value)
+
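+# Orientation sketch: how modules/ui.py consumes this class, per the ui.py hunks earlier
+# in this diff (comment only, not executable module code):
+#
+#   modelmerger_ui = ui_checkpoint_merger.UiCheckpointMerger()
+#   ...
+#   interfaces = [..., (modelmerger_ui.blocks, "Checkpoint Merger", "modelmerger"), ...]
+#   ...
+#   modelmerger_ui.setup_ui(dummy_component=dummy_component,
+#                           sd_model_checkpoint_component=settings.component_dict['sd_model_checkpoint'])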
diff --git a/modules/ui_common.py b/modules/ui_common.py
index 11eb2a4b..fd32676f 100644
--- a/modules/ui_common.py
+++ b/modules/ui_common.py
@@ -8,10 +8,10 @@ import gradio as gr
import subprocess as sp
from modules import call_queue, shared
-from modules.generation_parameters_copypaste import image_from_url_text
+from modules.infotext import image_from_url_text
import modules.images
from modules.ui_components import ToolButton
-
+import modules.infotext as parameters_copypaste
folder_symbol = '\U0001f4c2' # 📂
refresh_symbol = '\U0001f504' # 🔄
@@ -104,9 +104,7 @@ def save_files(js_data, images, do_make_zip, index):
return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}")
-def create_output_panel(tabname, outdir):
- from modules import shared
- import modules.generation_parameters_copypaste as parameters_copypaste
+def create_output_panel(tabname, outdir, toprow=None):
def open_folder(f):
if not os.path.exists(f):
@@ -132,20 +130,27 @@ Requested path was: {f}
else:
sp.Popen(["xdg-open", path])
- with gr.Column(variant='panel', elem_id=f"{tabname}_results"):
- with gr.Group(elem_id=f"{tabname}_gallery_container"):
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(columns=4)
+ with gr.Column(elem_id=f"{tabname}_results"):
+ if toprow:
+ toprow.create_inline_toprow_image()
+
+ with gr.Column(variant='panel', elem_id=f"{tabname}_results_panel"):
+ with gr.Group(elem_id=f"{tabname}_gallery_container"):
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4, preview=True, height=shared.opts.gallery_height or None)
- generation_info = None
- with gr.Column():
+ generation_info = None
with gr.Row(elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"):
- open_folder_button = gr.Button(folder_symbol, visible=not shared.cmd_opts.hide_ui_dir_config)
+ open_folder_button = ToolButton(folder_symbol, elem_id=f'{tabname}_open_folder', visible=not shared.cmd_opts.hide_ui_dir_config, tooltip="Open images output directory.")
if tabname != "extras":
- save = gr.Button('Save', elem_id=f'save_{tabname}')
- save_zip = gr.Button('Zip', elem_id=f'save_zip_{tabname}')
+ save = ToolButton('💾', elem_id=f'save_{tabname}', tooltip=f"Save the image to a dedicated directory ({shared.opts.outdir_save}).")
+ save_zip = ToolButton('🗃️', elem_id=f'save_zip_{tabname}', tooltip=f"Save zip archive with images to a dedicated directory ({shared.opts.outdir_save})")
- buttons = parameters_copypaste.create_buttons(["img2img", "inpaint", "extras"])
+ buttons = {
+ 'img2img': ToolButton('🖼️', elem_id=f'{tabname}_send_to_img2img', tooltip="Send image and generation parameters to img2img tab."),
+ 'inpaint': ToolButton('🎨️', elem_id=f'{tabname}_send_to_inpaint', tooltip="Send image and generation parameters to img2img inpaint tab."),
+ 'extras': ToolButton('📐', elem_id=f'{tabname}_send_to_extras', tooltip="Send image and generation parameters to extras tab.")
+ }
open_folder_button.click(
fn=lambda: open_folder(shared.opts.outdir_samples or outdir),
@@ -223,20 +228,44 @@ Requested path was: {f}
def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
+ refresh_components = refresh_component if isinstance(refresh_component, list) else [refresh_component]
+
+ label = None
+ for comp in refresh_components:
+ label = getattr(comp, 'label', None)
+ if label is not None:
+ break
+
def refresh():
refresh_method()
args = refreshed_args() if callable(refreshed_args) else refreshed_args
for k, v in args.items():
- setattr(refresh_component, k, v)
+ for comp in refresh_components:
+ setattr(comp, k, v)
- return gr.update(**(args or {}))
+ return [gr.update(**(args or {})) for _ in refresh_components] if len(refresh_components) > 1 else gr.update(**(args or {}))
- refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id)
+ refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id, tooltip=f"{label}: refresh" if label else "Refresh")
refresh_button.click(
fn=refresh,
inputs=[],
- outputs=[refresh_component]
+ outputs=refresh_components
)
return refresh_button
+
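
create_refresh_button now accepts either a single component or a list of components, applying the same refreshed_args to each and emitting one gr.update per component. A minimal sketch of the call pattern (the reload and choices functions are stand-ins; the styles editor added later in this patch does the same thing with [self.dropdown, self.selection]):

```
import gradio as gr
from modules.ui_common import create_refresh_button

def reload_styles():                 # stand-in for e.g. shared.prompt_styles.reload
    pass

def style_names():                   # stand-in for list(shared.prompt_styles.styles)
    return ["base", "anime"]

with gr.Blocks() as demo:
    main_dropdown = gr.Dropdown(label="Styles", choices=style_names(), multiselect=True)
    edit_dropdown = gr.Dropdown(label="Style to edit", choices=style_names())

    # one button pushes the same choices update into both dropdowns
    create_refresh_button([main_dropdown, edit_dropdown], reload_styles,
                          lambda: {"choices": style_names()}, "example_refresh_styles")
```
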
+def setup_dialog(button_show, dialog, *, button_close=None):
+    """Sets up the UI so that the dialog (gr.Box) is invisible, and is only shown when button_show is clicked, in a fullscreen modal window."""
+
+ dialog.visible = False
+
+ button_show.click(
+ fn=lambda: gr.update(visible=True),
+ inputs=[],
+ outputs=[dialog],
+ ).then(fn=None, _js="function(){ popupId('" + dialog.elem_id + "'); }")
+
+ if button_close:
+ button_close.click(fn=None, _js="closePopup")
+
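
setup_dialog itself only hides the box and delegates showing/closing to the popupId and closePopup JavaScript helpers, so callers pair it with a button and a gr.Box that has an elem_id. A minimal sketch with illustrative ids and contents (UiPromptStyles below wires its edit dialog this way):

```
import gradio as gr
from modules import ui_common

with gr.Blocks() as demo:
    edit_button = gr.Button("Edit styles")

    with gr.Box(elem_id="example_styles_dialog", elem_classes="popup-dialog") as dialog:
        gr.Markdown("Dialog contents go here.")
        close_button = gr.Button("Close")

    # hides the box on load and shows it as a fullscreen modal when edit_button is clicked
    ui_common.setup_dialog(button_show=edit_button, dialog=dialog, button_close=close_button)
```
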
diff --git a/modules/ui_components.py b/modules/ui_components.py
index 64451df7..55979f62 100644
--- a/modules/ui_components.py
+++ b/modules/ui_components.py
@@ -20,6 +20,18 @@ class ToolButton(FormComponent, gr.Button):
return "button"
+class ResizeHandleRow(gr.Row):
+    """Same as gr.Row but with a draggable resize handle between its children"""
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ self.elem_classes.append("resize-handle-row")
+
+ def get_block_name(self):
+ return "row"
+
+
class FormRow(FormComponent, gr.Row):
"""Same as gr.Row but fits inside gradio forms"""
@@ -35,7 +47,7 @@ class FormColumn(FormComponent, gr.Column):
class FormGroup(FormComponent, gr.Group):
- """Same as gr.Row but fits inside gradio forms"""
+ """Same as gr.Group but fits inside gradio forms"""
def get_block_name(self):
return "group"
@@ -72,3 +84,62 @@ class DropdownEditable(FormComponent, gr.Dropdown):
def get_block_name(self):
return "dropdown"
+
+class InputAccordion(gr.Checkbox):
+ """A gr.Accordion that can be used as an input - returns True if open, False if closed.
+
+    Actually just a hidden checkbox, but it creates an accordion whose open state follows, and is followed by, the state of the checkbox.
+ """
+
+ global_index = 0
+
+ def __init__(self, value, **kwargs):
+ self.accordion_id = kwargs.get('elem_id')
+ if self.accordion_id is None:
+ self.accordion_id = f"input-accordion-{InputAccordion.global_index}"
+ InputAccordion.global_index += 1
+
+ kwargs_checkbox = {
+ **kwargs,
+ "elem_id": f"{self.accordion_id}-checkbox",
+ "visible": False,
+ }
+ super().__init__(value, **kwargs_checkbox)
+
+ self.change(fn=None, _js='function(checked){ inputAccordionChecked("' + self.accordion_id + '", checked); }', inputs=[self])
+
+ kwargs_accordion = {
+ **kwargs,
+ "elem_id": self.accordion_id,
+ "label": kwargs.get('label', 'Accordion'),
+ "elem_classes": ['input-accordion'],
+ "open": value,
+ }
+ self.accordion = gr.Accordion(**kwargs_accordion)
+
+ def extra(self):
+ """Allows you to put something into the label of the accordion.
+
+ Use it like this:
+
+ ```
+ with InputAccordion(False, label="Accordion") as acc:
+ with acc.extra():
+ FormHTML(value="hello", min_width=0)
+
+ ...
+ ```
+ """
+
+ return gr.Column(elem_id=self.accordion_id + '-extra', elem_classes='input-accordion-extra', min_width=0)
+
+ def __enter__(self):
+ self.accordion.__enter__()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.accordion.__exit__(exc_type, exc_val, exc_tb)
+
+ def get_block_name(self):
+ return "checkbox"
+
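
Because InputAccordion ultimately registers itself as a checkbox, it can be passed directly to inputs= of an event handler and arrives there as a plain bool. A minimal sketch with illustrative labels:

```
import gradio as gr
from modules.ui_components import InputAccordion

with gr.Blocks() as demo:
    with InputAccordion(False, label="Optional post-processing") as enable_section:
        strength = gr.Slider(0.0, 1.0, value=0.5, label="Strength")

    out = gr.Textbox(label="Result")
    run = gr.Button("Run")

    # enable_section is True when the accordion is open, False when it is closed
    run.click(fn=lambda enabled, s: f"enabled={enabled}, strength={s}",
              inputs=[enable_section, strength], outputs=[out])
```
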
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index f3e4fba7..dc1e34c8 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -2,7 +2,7 @@ import json
import os
import threading
import time
-from datetime import datetime
+from datetime import datetime, timezone
import git
@@ -65,7 +65,7 @@ def save_config_state(name):
filename = os.path.join(config_states_dir, f"{timestamp}_{name}.json")
print(f"Saving backup of webui/extension state to {filename}.")
with open(filename, "w", encoding="utf-8") as f:
- json.dump(current_config_state, f)
+ json.dump(current_config_state, f, indent=4, ensure_ascii=False)
config_states.list_config_states()
new_value = next(iter(config_states.all_config_states.keys()), "Current")
new_choices = ["Current"] + list(config_states.all_config_states.keys())
@@ -164,7 +164,7 @@ def extension_table():
ext_status = ext.status
style = ""
- if shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.opts.disable_all_extensions == "all":
+ if shared.cmd_opts.disable_extra_extensions and not ext.is_builtin or shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.cmd_opts.disable_all_extensions or shared.opts.disable_all_extensions == "all":
style = STYLE_PRIMARY
version_link = ext.version
@@ -177,7 +177,7 @@ def extension_table():
<td>{remote}</td>
<td>{ext.branch}</td>
<td>{version_link}</td>
- <td>{time.asctime(time.gmtime(ext.commit_date))}</td>
+ <td>{datetime.fromtimestamp(ext.commit_date) if ext.commit_date else ""}</td>
<td{' class="extension_status"' if ext.remote is not None else ''}>{ext_status}</td>
</tr>
"""
@@ -197,122 +197,132 @@ def update_config_states_table(state_name):
config_state = config_states.all_config_states[state_name]
config_name = config_state.get("name", "Config")
- created_date = time.asctime(time.gmtime(config_state["created_at"]))
+ created_date = datetime.fromtimestamp(config_state["created_at"]).strftime('%Y-%m-%d %H:%M:%S')
filepath = config_state.get("filepath", "<unknown>")
- code = f"""<!-- {time.time()} -->"""
-
- webui_remote = config_state["webui"]["remote"] or ""
- webui_branch = config_state["webui"]["branch"]
- webui_commit_hash = config_state["webui"]["commit_hash"] or "<unknown>"
- webui_commit_date = config_state["webui"]["commit_date"]
- if webui_commit_date:
- webui_commit_date = time.asctime(time.gmtime(webui_commit_date))
- else:
- webui_commit_date = "<unknown>"
-
- remote = f"""<a href="{html.escape(webui_remote)}" target="_blank">{html.escape(webui_remote or '')}</a>"""
- commit_link = make_commit_link(webui_commit_hash, webui_remote)
- date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date)
-
- current_webui = config_states.get_webui_config()
-
- style_remote = ""
- style_branch = ""
- style_commit = ""
- if current_webui["remote"] != webui_remote:
- style_remote = STYLE_PRIMARY
- if current_webui["branch"] != webui_branch:
- style_branch = STYLE_PRIMARY
- if current_webui["commit_hash"] != webui_commit_hash:
- style_commit = STYLE_PRIMARY
-
- code += f"""<h2>Config Backup: {config_name}</h2>
- <div><b>Filepath:</b> {filepath}</div>
- <div><b>Created at:</b> {created_date}</div>"""
-
- code += f"""<h2>WebUI State</h2>
- <table id="config_state_webui">
- <thead>
- <tr>
- <th>URL</th>
- <th>Branch</th>
- <th>Commit</th>
- <th>Date</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td><label{style_remote}>{remote}</label></td>
- <td><label{style_branch}>{webui_branch}</label></td>
- <td><label{style_commit}>{commit_link}</label></td>
- <td><label{style_commit}>{date_link}</label></td>
- </tr>
- </tbody>
- </table>
- """
-
- code += """<h2>Extension State</h2>
- <table id="config_state_extensions">
- <thead>
- <tr>
- <th>Extension</th>
- <th>URL</th>
- <th>Branch</th>
- <th>Commit</th>
- <th>Date</th>
- </tr>
- </thead>
- <tbody>
- """
-
- ext_map = {ext.name: ext for ext in extensions.extensions}
-
- for ext_name, ext_conf in config_state["extensions"].items():
- ext_remote = ext_conf["remote"] or ""
- ext_branch = ext_conf["branch"] or "<unknown>"
- ext_enabled = ext_conf["enabled"]
- ext_commit_hash = ext_conf["commit_hash"] or "<unknown>"
- ext_commit_date = ext_conf["commit_date"]
- if ext_commit_date:
- ext_commit_date = time.asctime(time.gmtime(ext_commit_date))
+ try:
+ webui_remote = config_state["webui"]["remote"] or ""
+ webui_branch = config_state["webui"]["branch"]
+ webui_commit_hash = config_state["webui"]["commit_hash"] or "<unknown>"
+ webui_commit_date = config_state["webui"]["commit_date"]
+ if webui_commit_date:
+ webui_commit_date = time.asctime(time.gmtime(webui_commit_date))
else:
- ext_commit_date = "<unknown>"
+ webui_commit_date = "<unknown>"
+
+ remote = f"""<a href="{html.escape(webui_remote)}" target="_blank">{html.escape(webui_remote or '')}</a>"""
+ commit_link = make_commit_link(webui_commit_hash, webui_remote)
+ date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date)
- remote = f"""<a href="{html.escape(ext_remote)}" target="_blank">{html.escape(ext_remote or '')}</a>"""
- commit_link = make_commit_link(ext_commit_hash, ext_remote)
- date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date)
+ current_webui = config_states.get_webui_config()
- style_enabled = ""
style_remote = ""
style_branch = ""
style_commit = ""
- if ext_name in ext_map:
- current_ext = ext_map[ext_name]
- current_ext.read_info_from_repo()
- if current_ext.enabled != ext_enabled:
- style_enabled = STYLE_PRIMARY
- if current_ext.remote != ext_remote:
- style_remote = STYLE_PRIMARY
- if current_ext.branch != ext_branch:
- style_branch = STYLE_PRIMARY
- if current_ext.commit_hash != ext_commit_hash:
- style_commit = STYLE_PRIMARY
-
- code += f"""
- <tr>
- <td><label{style_enabled}><input class="gr-check-radio gr-checkbox" type="checkbox" disabled="true" {'checked="checked"' if ext_enabled else ''}>{html.escape(ext_name)}</label></td>
- <td><label{style_remote}>{remote}</label></td>
- <td><label{style_branch}>{ext_branch}</label></td>
- <td><label{style_commit}>{commit_link}</label></td>
- <td><label{style_commit}>{date_link}</label></td>
- </tr>
- """
-
- code += """
- </tbody>
- </table>
- """
+ if current_webui["remote"] != webui_remote:
+ style_remote = STYLE_PRIMARY
+ if current_webui["branch"] != webui_branch:
+ style_branch = STYLE_PRIMARY
+ if current_webui["commit_hash"] != webui_commit_hash:
+ style_commit = STYLE_PRIMARY
+
+ code = f"""<!-- {time.time()} -->
+<h2>Config Backup: {config_name}</h2>
+<div><b>Filepath:</b> {filepath}</div>
+<div><b>Created at:</b> {created_date}</div>
+<h2>WebUI State</h2>
+<table id="config_state_webui">
+ <thead>
+ <tr>
+ <th>URL</th>
+ <th>Branch</th>
+ <th>Commit</th>
+ <th>Date</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>
+ <label{style_remote}>{remote}</label>
+ </td>
+ <td>
+ <label{style_branch}>{webui_branch}</label>
+ </td>
+ <td>
+ <label{style_commit}>{commit_link}</label>
+ </td>
+ <td>
+ <label{style_commit}>{date_link}</label>
+ </td>
+ </tr>
+ </tbody>
+</table>
+<h2>Extension State</h2>
+<table id="config_state_extensions">
+ <thead>
+ <tr>
+ <th>Extension</th>
+ <th>URL</th>
+ <th>Branch</th>
+ <th>Commit</th>
+ <th>Date</th>
+ </tr>
+ </thead>
+ <tbody>
+"""
+
+ ext_map = {ext.name: ext for ext in extensions.extensions}
+
+ for ext_name, ext_conf in config_state["extensions"].items():
+ ext_remote = ext_conf["remote"] or ""
+ ext_branch = ext_conf["branch"] or "<unknown>"
+ ext_enabled = ext_conf["enabled"]
+ ext_commit_hash = ext_conf["commit_hash"] or "<unknown>"
+ ext_commit_date = ext_conf["commit_date"]
+ if ext_commit_date:
+ ext_commit_date = time.asctime(time.gmtime(ext_commit_date))
+ else:
+ ext_commit_date = "<unknown>"
+
+ remote = f"""<a href="{html.escape(ext_remote)}" target="_blank">{html.escape(ext_remote or '')}</a>"""
+ commit_link = make_commit_link(ext_commit_hash, ext_remote)
+ date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date)
+
+ style_enabled = ""
+ style_remote = ""
+ style_branch = ""
+ style_commit = ""
+ if ext_name in ext_map:
+ current_ext = ext_map[ext_name]
+ current_ext.read_info_from_repo()
+ if current_ext.enabled != ext_enabled:
+ style_enabled = STYLE_PRIMARY
+ if current_ext.remote != ext_remote:
+ style_remote = STYLE_PRIMARY
+ if current_ext.branch != ext_branch:
+ style_branch = STYLE_PRIMARY
+ if current_ext.commit_hash != ext_commit_hash:
+ style_commit = STYLE_PRIMARY
+
+ code += f""" <tr>
+ <td><label{style_enabled}><input class="gr-check-radio gr-checkbox" type="checkbox" disabled="true" {'checked="checked"' if ext_enabled else ''}>{html.escape(ext_name)}</label></td>
+ <td><label{style_remote}>{remote}</label></td>
+ <td><label{style_branch}>{ext_branch}</label></td>
+ <td><label{style_commit}>{commit_link}</label></td>
+ <td><label{style_commit}>{date_link}</label></td>
+ </tr>
+"""
+
+ code += """ </tbody>
+</table>"""
+
+ except Exception as e:
+ print(f"[ERROR]: Config states {filepath}, {e}")
+ code = f"""<!-- {time.time()} -->
+<h2>Config Backup: {config_name}</h2>
+<div><b>Filepath:</b> {filepath}</div>
+<div><b>Created at:</b> {created_date}</div>
+<h2>This file is corrupted</h2>"""
return code
@@ -325,6 +335,11 @@ def normalize_git_url(url):
return url
+def get_extension_dirname_from_url(url):
+ *parts, last_part = url.split('/')
+ return normalize_git_url(last_part)
+
+
def install_extension_from_url(dirname, url, branch_name=None):
check_access()
@@ -336,10 +351,7 @@ def install_extension_from_url(dirname, url, branch_name=None):
assert url, 'No URL specified'
if dirname is None or dirname == "":
- *parts, last_part = url.split('/')
- last_part = normalize_git_url(last_part)
-
- dirname = last_part
+ dirname = get_extension_dirname_from_url(url)
target_dir = os.path.join(extensions.extensions_dir, dirname)
assert not os.path.exists(target_dir), f'Extension directory already exists: {target_dir}'
@@ -432,14 +444,15 @@ sort_ordering = [
def get_date(info: dict, key):
try:
- return datetime.strptime(info.get(key), "%Y-%m-%dT%H:%M:%SZ").strftime("%Y-%m-%d")
+ return datetime.strptime(info.get(key), "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc).astimezone().strftime("%Y-%m-%d")
except (ValueError, TypeError):
return ''
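
get_date now interprets the timestamp from the extension index as UTC and converts it to the viewer's local time zone before formatting; in isolation the conversion chain is:

```
from datetime import datetime, timezone

stamp = "2023-11-05T13:30:00Z"   # illustrative value in the index's "%Y-%m-%dT%H:%M:%SZ" format
utc = datetime.strptime(stamp, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
print(utc.astimezone().strftime("%Y-%m-%d"))   # same instant, rendered in local time
```
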
def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=""):
extlist = available_extensions["extensions"]
- installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions}
+ installed_extensions = {extension.name for extension in extensions.extensions}
+ installed_extension_urls = {normalize_git_url(extension.remote) for extension in extensions.extensions if extension.remote is not None}
tags = available_extensions.get("tags", {})
tags_to_hide = set(hide_tags)
@@ -472,7 +485,7 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text="
if url is None:
continue
- existing = installed_extension_urls.get(normalize_git_url(url), None)
+ existing = get_extension_dirname_from_url(url) in installed_extensions or normalize_git_url(url) in installed_extension_urls
extension_tags = extension_tags + ["installed"] if existing else extension_tags
if any(x for x in extension_tags if x in tags_to_hide):
@@ -533,18 +546,26 @@ def create_ui():
apply = gr.Button(value=apply_label, variant="primary")
check = gr.Button(value="Check for updates")
extensions_disable_all = gr.Radio(label="Disable all extensions", choices=["none", "extra", "all"], value=shared.opts.disable_all_extensions, elem_id="extensions_disable_all")
- extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False)
- extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False)
+ extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False, container=False)
+ extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False, container=False)
html = ""
- if shared.opts.disable_all_extensions != "none":
- html = """
-<span style="color: var(--primary-400);">
- "Disable all extensions" was set, change it to "none" to load all extensions again
-</span>
- """
- info = gr.HTML(html)
- extensions_table = gr.HTML('Loading...')
+
+ if shared.cmd_opts.disable_all_extensions or shared.cmd_opts.disable_extra_extensions or shared.opts.disable_all_extensions != "none":
+ if shared.cmd_opts.disable_all_extensions:
+ msg = '"--disable-all-extensions" was used, remove it to load all extensions again'
+ elif shared.opts.disable_all_extensions != "none":
+ msg = '"Disable all extensions" was set, change it to "none" to load all extensions again'
+ elif shared.cmd_opts.disable_extra_extensions:
+ msg = '"--disable-extra-extensions" was used, remove it to load all extensions again'
+ html = f'<span style="color: var(--primary-400);">{msg}</span>'
+
+ with gr.Row():
+ info = gr.HTML(html)
+
+ with gr.Row(elem_classes="progress-container"):
+ extensions_table = gr.HTML('Loading...', elem_id="extensions_installed_html")
+
ui.load(fn=extension_table, inputs=[], outputs=[extensions_table])
apply.click(
@@ -565,7 +586,7 @@ def create_ui():
with gr.Row():
refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary")
extensions_index_url = os.environ.get('WEBUI_EXTENSIONS_INDEX', "https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui-extensions/master/index.json")
- available_extensions_index = gr.Text(value=extensions_index_url, label="Extension index URL").style(container=False)
+ available_extensions_index = gr.Text(value=extensions_index_url, label="Extension index URL", container=False)
extension_to_install = gr.Text(elem_id="extension_to_install", visible=False)
install_extension_button = gr.Button(elem_id="install_extension_button", visible=False)
@@ -574,7 +595,7 @@ def create_ui():
sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order",'update time', 'create time', "stars"], type="index")
with gr.Row():
- search_extensions_text = gr.Text(label="Search").style(container=False)
+ search_extensions_text = gr.Text(label="Search", container=False)
install_result = gr.HTML()
available_extensions_table = gr.HTML()
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index f2752f10..790af135 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -1,21 +1,32 @@
+import functools
import os.path
import urllib.parse
from pathlib import Path
-from modules import shared, ui_extra_networks_user_metadata, errors
+from modules import shared, ui_extra_networks_user_metadata, errors, extra_networks
from modules.images import read_info_from_image, save_image_with_geninfo
-from modules.ui import up_down_symbol
import gradio as gr
import json
import html
from fastapi.exceptions import HTTPException
-from modules.generation_parameters_copypaste import image_from_url_text
+from modules.infotext import image_from_url_text
from modules.ui_components import ToolButton
extra_pages = []
allowed_dirs = set()
+default_allowed_preview_extensions = ["png", "jpg", "jpeg", "webp", "gif"]
+
+
+@functools.cache
+def allowed_preview_extensions_with_extra(extra_extensions=None):
+ return set(default_allowed_preview_extensions) | set(extra_extensions or [])
+
+
+def allowed_preview_extensions():
+ return allowed_preview_extensions_with_extra((shared.opts.samples_format, ))
+
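
allowed_preview_extensions_with_extra is memoized with functools.cache, which is why allowed_preview_extensions passes the sample format as a one-element tuple: the argument must be hashable, and the merged set is only recomputed when the format actually changes. The behaviour in isolation:

```
import functools

default_allowed_preview_extensions = ["png", "jpg", "jpeg", "webp", "gif"]

@functools.cache
def allowed_preview_extensions_with_extra(extra_extensions=None):
    return set(default_allowed_preview_extensions) | set(extra_extensions or [])

print(allowed_preview_extensions_with_extra(("jpg",)))   # computed on the first call
print(allowed_preview_extensions_with_extra(("jpg",)))   # identical args -> served from the cache
```
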
def register_page(page):
"""registers extra networks page for the UI; recommend doing it in on_before_ui() callback for extensions"""
@@ -34,9 +45,9 @@ def fetch_file(filename: str = ""):
if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs):
raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")
- ext = os.path.splitext(filename)[1].lower()
- if ext not in (".png", ".jpg", ".jpeg", ".webp", ".gif"):
- raise ValueError(f"File cannot be fetched: {filename}. Only png, jpg, webp, and gif.")
+ ext = os.path.splitext(filename)[1].lower()[1:]
+ if ext not in allowed_preview_extensions():
+ raise ValueError(f"File cannot be fetched: {filename}. Extensions allowed: {allowed_preview_extensions()}.")
# would profit from returning 304
return FileResponse(filename, headers={"Accept-Ranges": "bytes"})
@@ -92,6 +103,7 @@ class ExtraNetworksPage:
self.name = title.lower()
self.id_page = self.name.replace(" ", "_")
self.card_page = shared.html("extra-networks-card.html")
+ self.allow_prompt = True
self.allow_negative_prompt = False
self.metadata = {}
self.items = {}
@@ -101,16 +113,7 @@ class ExtraNetworksPage:
def read_user_metadata(self, item):
filename = item.get("filename", None)
- basename, ext = os.path.splitext(filename)
- metadata_filename = basename + '.json'
-
- metadata = {}
- try:
- if os.path.isfile(metadata_filename):
- with open(metadata_filename, "r", encoding="utf8") as file:
- metadata = json.load(file)
- except Exception as e:
- errors.display(e, f"reading extra network user metadata from {metadata_filename}")
+ metadata = extra_networks.get_user_metadata(filename)
desc = metadata.get("description", None)
if desc is not None:
@@ -148,8 +151,13 @@ class ExtraNetworksPage:
continue
subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/")
- while subdir.startswith("/"):
- subdir = subdir[1:]
+
+ if shared.opts.extra_networks_dir_button_function:
+ if not subdir.startswith("/"):
+ subdir = "/" + subdir
+ else:
+ while subdir.startswith("/"):
+ subdir = subdir[1:]
is_empty = len(os.listdir(x)) == 0
if not is_empty and not subdir.endswith("/"):
@@ -164,7 +172,7 @@ class ExtraNetworksPage:
subdirs = {"": 1, **subdirs}
subdirs_html = "".join([f"""
-<button class='lg secondary gradio-button custom-button{" search-all" if subdir=="" else ""}' onclick='extraNetworksSearchButton("{tabname}_extra_tabs", event)'>
+<button class='lg secondary gradio-button custom-button{" search-all" if subdir=="" else ""}' onclick='extraNetworksSearchButton("{tabname}_extra_search", event)'>
{html.escape(subdir if subdir!="" else "all")}
</button>
""" for subdir in subdirs])
@@ -215,7 +223,10 @@ class ExtraNetworksPage:
onclick = item.get("onclick", None)
if onclick is None:
- onclick = '"' + html.escape(f"""return cardClicked({quote_js(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"'
+ if "negative_prompt" in item:
+ onclick = '"' + html.escape(f"""return cardClicked({quote_js(tabname)}, {item["prompt"]}, {item["negative_prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"'
+ else:
+ onclick = '"' + html.escape(f"""return cardClicked({quote_js(tabname)}, {item["prompt"]}, {'""'}, {"true" if self.allow_negative_prompt else "false"})""") + '"'
height = f"height: {shared.opts.extra_networks_card_height}px;" if shared.opts.extra_networks_card_height else ''
width = f"width: {shared.opts.extra_networks_card_width}px;" if shared.opts.extra_networks_card_width else ''
@@ -223,9 +234,9 @@ class ExtraNetworksPage:
metadata_button = ""
metadata = item.get("metadata")
if metadata:
- metadata_button = f"<div class='metadata-button card-button' title='Show internal metadata' onclick='extraNetworksRequestMetadata(event, {quote_js(self.name)}, {quote_js(item['name'])})'></div>"
+ metadata_button = f"<div class='metadata-button card-button' title='Show internal metadata' onclick='extraNetworksRequestMetadata(event, {quote_js(self.name)}, {quote_js(html.escape(item['name']))})'></div>"
- edit_button = f"<div class='edit-button card-button' title='Edit metadata' onclick='extraNetworksEditUserMetadata(event, {quote_js(tabname)}, {quote_js(self.id_page)}, {quote_js(item['name'])})'></div>"
+ edit_button = f"<div class='edit-button card-button' title='Edit metadata' onclick='extraNetworksEditUserMetadata(event, {quote_js(tabname)}, {quote_js(self.id_page)}, {quote_js(html.escape(item['name']))})'></div>"
local_path = ""
filename = item.get("filename", "")
@@ -245,7 +256,7 @@ class ExtraNetworksPage:
if search_only and shared.opts.extra_networks_hidden_models == "Never":
return ""
- sort_keys = " ".join([html.escape(f'data-sort-{k}={v}') for k, v in item.get("sort_keys", {}).items()]).strip()
+ sort_keys = " ".join([f'data-sort-{k}="{html.escape(str(v))}"' for k, v in item.get("sort_keys", {}).items()]).strip()
args = {
"background_image": background_image,
@@ -276,6 +287,7 @@ class ExtraNetworksPage:
"date_created": int(stat.st_ctime or 0),
"date_modified": int(stat.st_mtime or 0),
"name": pth.name.lower(),
+ "path": str(pth.parent).lower(),
}
def find_preview(self, path):
@@ -283,11 +295,7 @@ class ExtraNetworksPage:
Find a preview PNG for a given path (without extension) and call link_preview on it.
"""
- preview_extensions = ["png", "jpg", "jpeg", "webp"]
- if shared.opts.samples_format not in preview_extensions:
- preview_extensions.append(shared.opts.samples_format)
-
- potential_files = sum([[path + "." + ext, path + ".preview." + ext] for ext in preview_extensions], [])
+ potential_files = sum([[path + "." + ext, path + ".preview." + ext] for ext in allowed_preview_extensions()], [])
for file in potential_files:
if os.path.isfile(file):
@@ -356,7 +364,9 @@ def pages_in_preferred_order(pages):
return sorted(pages, key=lambda x: tab_scores[x.name])
-def create_ui(container, button, tabname):
+def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
+ from modules.ui import switch_values_symbol
+
ui = ExtraNetworksUi()
ui.pages = []
ui.pages_contents = []
@@ -364,48 +374,54 @@ def create_ui(container, button, tabname):
ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy())
ui.tabname = tabname
- with gr.Tabs(elem_id=tabname+"_extra_tabs"):
- for page in ui.stored_extra_pages:
- with gr.Tab(page.title, id=page.id_page):
- elem_id = f"{tabname}_{page.id_page}_cards_html"
- page_elem = gr.HTML('Loading...', elem_id=elem_id)
- ui.pages.append(page_elem)
+ related_tabs = []
+
+ for page in ui.stored_extra_pages:
+ with gr.Tab(page.title, elem_id=f"{tabname}_{page.id_page}", elem_classes=["extra-page"]) as tab:
+ with gr.Column(elem_id=f"{tabname}_{page.id_page}_prompts", elem_classes=["extra-page-prompts"]):
+ pass
+
+ elem_id = f"{tabname}_{page.id_page}_cards_html"
+ page_elem = gr.HTML('Loading...', elem_id=elem_id)
+ ui.pages.append(page_elem)
- page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + quote_js(tabname) + '); return []}', inputs=[], outputs=[])
+ page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + quote_js(tabname) + '); return []}', inputs=[], outputs=[])
- editor = page.create_user_metadata_editor(ui, tabname)
- editor.create_ui()
- ui.user_metadata_editors.append(editor)
+ editor = page.create_user_metadata_editor(ui, tabname)
+ editor.create_ui()
+ ui.user_metadata_editors.append(editor)
- gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False)
- gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", multiselect=False, visible=False, show_label=False, interactive=True)
- ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder")
- button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh")
+ related_tabs.append(tab)
+
+ edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True)
+ dropdown_sort = gr.Dropdown(choices=['Path', 'Name', 'Date Created', 'Date Modified', ], value=shared.opts.extra_networks_card_order_field, elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order")
+ button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes=["sortorder"] + ([] if shared.opts.extra_networks_card_order == "Ascending" else ["sortReverse"]), visible=False, tooltip="Invert sort order")
+ button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False)
+ checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False)
ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False)
ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False)
- def toggle_visibility(is_visible):
- is_visible = not is_visible
+ tab_controls = [edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs]
- return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary"))
+ for tab in unrelated_tabs:
+ tab.select(fn=lambda: [gr.update(visible=False) for _ in tab_controls], _js='function(){ extraNetworksUrelatedTabSelected("' + tabname + '"); }', inputs=[], outputs=tab_controls, show_progress=False)
- def fill_tabs(is_empty):
- """Creates HTML for extra networks' tabs when the extra networks button is clicked for the first time."""
+ for page, tab in zip(ui.stored_extra_pages, related_tabs):
+ allow_prompt = "true" if page.allow_prompt else "false"
+ allow_negative_prompt = "true" if page.allow_negative_prompt else "false"
- if not ui.pages_contents:
- refresh()
+ jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}_prompts" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');'
- if is_empty:
- return True, *ui.pages_contents
+ tab.select(fn=lambda: [gr.update(visible=True) for _ in tab_controls], _js='function(){ ' + jscode + ' }', inputs=[], outputs=tab_controls, show_progress=False)
- return True, *[gr.update() for _ in ui.pages_contents]
+ dropdown_sort.change(fn=lambda: None, _js="function(){ applyExtraNetworkSort('" + tabname + "'); }")
- state_visible = gr.State(value=False)
- button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button], show_progress=False)
+ def pages_html():
+ if not ui.pages_contents:
+ return refresh()
- state_empty = gr.State(value=True)
- button.click(fn=fill_tabs, inputs=[state_empty], outputs=[state_empty, *ui.pages], show_progress=False)
+ return ui.pages_contents
def refresh():
for pg in ui.stored_extra_pages:
@@ -415,6 +431,7 @@ def create_ui(container, button, tabname):
return ui.pages_contents
+ interface.load(fn=pages_html, inputs=[], outputs=[*ui.pages])
button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages)
return ui
diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py
index 76780cfd..1693e71f 100644
--- a/modules/ui_extra_networks_checkpoints.py
+++ b/modules/ui_extra_networks_checkpoints.py
@@ -3,33 +3,47 @@ import os
from modules import shared, ui_extra_networks, sd_models
from modules.ui_extra_networks import quote_js
+from modules.ui_extra_networks_checkpoints_user_metadata import CheckpointUserMetadataEditor
class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
def __init__(self):
super().__init__('Checkpoints')
+ self.allow_prompt = False
+
def refresh(self):
shared.refresh_checkpoints()
- def create_item(self, name, index=None):
+ def create_item(self, name, index=None, enable_filter=True):
checkpoint: sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name)
+ if checkpoint is None:
+ return
+
path, ext = os.path.splitext(checkpoint.filename)
return {
"name": checkpoint.name_for_extra,
"filename": checkpoint.filename,
+ "shorthash": checkpoint.shorthash,
"preview": self.find_preview(path),
"description": self.find_description(path),
"search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""),
"onclick": '"' + html.escape(f"""return selectCheckpoint({quote_js(name)})""") + '"',
"local_preview": f"{path}.{shared.opts.samples_format}",
+ "metadata": checkpoint.metadata,
"sort_keys": {'default': index, **self.get_sort_keys(checkpoint.filename)},
}
def list_items(self):
- for index, name in enumerate(sd_models.checkpoints_list):
- yield self.create_item(name, index)
+ # instantiate a list to protect against concurrent modification
+ names = list(sd_models.checkpoints_list)
+ for index, name in enumerate(names):
+ item = self.create_item(name, index)
+ if item is not None:
+ yield item
def allowed_directories_for_previews(self):
return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
+ def create_user_metadata_editor(self, ui, tabname):
+ return CheckpointUserMetadataEditor(ui, tabname, self)
diff --git a/modules/ui_extra_networks_checkpoints_user_metadata.py b/modules/ui_extra_networks_checkpoints_user_metadata.py
new file mode 100644
index 00000000..25df0a80
--- /dev/null
+++ b/modules/ui_extra_networks_checkpoints_user_metadata.py
@@ -0,0 +1,66 @@
+import gradio as gr
+
+from modules import ui_extra_networks_user_metadata, sd_vae, shared
+from modules.ui_common import create_refresh_button
+
+
+class CheckpointUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor):
+ def __init__(self, ui, tabname, page):
+ super().__init__(ui, tabname, page)
+
+ self.select_vae = None
+
+ def save_user_metadata(self, name, desc, notes, vae):
+ user_metadata = self.get_user_metadata(name)
+ user_metadata["description"] = desc
+ user_metadata["notes"] = notes
+ user_metadata["vae"] = vae
+
+ self.write_user_metadata(name, user_metadata)
+
+ def update_vae(self, name):
+ if name == shared.sd_model.sd_checkpoint_info.name_for_extra:
+ sd_vae.reload_vae_weights()
+
+ def put_values_into_components(self, name):
+ user_metadata = self.get_user_metadata(name)
+ values = super().put_values_into_components(name)
+
+ return [
+ *values[0:5],
+ user_metadata.get('vae', ''),
+ ]
+
+ def create_editor(self):
+ self.create_default_editor_elems()
+
+ with gr.Row():
+ self.select_vae = gr.Dropdown(choices=["Automatic", "None"] + list(sd_vae.vae_dict), value="None", label="Preferred VAE", elem_id="checpoint_edit_user_metadata_preferred_vae")
+ create_refresh_button(self.select_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["Automatic", "None"] + list(sd_vae.vae_dict)}, "checpoint_edit_user_metadata_refresh_preferred_vae")
+
+ self.edit_notes = gr.TextArea(label='Notes', lines=4)
+
+ self.create_default_buttons()
+
+ viewed_components = [
+ self.edit_name,
+ self.edit_description,
+ self.html_filedata,
+ self.html_preview,
+ self.edit_notes,
+ self.select_vae,
+ ]
+
+ self.button_edit\
+ .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=viewed_components)\
+ .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box])
+
+ edited_components = [
+ self.edit_description,
+ self.edit_notes,
+ self.select_vae,
+ ]
+
+ self.setup_save_handler(self.button_save, self.save_user_metadata, edited_components)
+ self.button_save.click(fn=self.update_vae, inputs=[self.edit_name_input])
+
diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py
index e53ccb42..c96c4fa3 100644
--- a/modules/ui_extra_networks_hypernets.py
+++ b/modules/ui_extra_networks_hypernets.py
@@ -2,6 +2,7 @@ import os
from modules import shared, ui_extra_networks
from modules.ui_extra_networks import quote_js
+from modules.hashes import sha256_from_cache
class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
@@ -11,24 +12,34 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
def refresh(self):
shared.reload_hypernetworks()
- def create_item(self, name, index=None):
- full_path = shared.hypernetworks[name]
+ def create_item(self, name, index=None, enable_filter=True):
+ full_path = shared.hypernetworks.get(name)
+ if full_path is None:
+ return
+
path, ext = os.path.splitext(full_path)
+ sha256 = sha256_from_cache(full_path, f'hypernet/{name}')
+ shorthash = sha256[0:10] if sha256 else None
return {
"name": name,
"filename": full_path,
+ "shorthash": shorthash,
"preview": self.find_preview(path),
"description": self.find_description(path),
- "search_term": self.search_terms_from_path(path),
+ "search_term": self.search_terms_from_path(path) + " " + (sha256 or ""),
"prompt": quote_js(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + quote_js(">"),
"local_preview": f"{path}.preview.{shared.opts.samples_format}",
"sort_keys": {'default': index, **self.get_sort_keys(path + ext)},
}
def list_items(self):
- for index, name in enumerate(shared.hypernetworks):
- yield self.create_item(name, index)
+ # instantiate a list to protect against concurrent modification
+ names = list(shared.hypernetworks)
+ for index, name in enumerate(names):
+ item = self.create_item(name, index)
+ if item is not None:
+ yield item
def allowed_directories_for_previews(self):
return [shared.cmd_opts.hypernetwork_dir]
diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py
index d1794e50..1b334fda 100644
--- a/modules/ui_extra_networks_textual_inversion.py
+++ b/modules/ui_extra_networks_textual_inversion.py
@@ -12,24 +12,31 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
def refresh(self):
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)
- def create_item(self, name, index=None):
+ def create_item(self, name, index=None, enable_filter=True):
embedding = sd_hijack.model_hijack.embedding_db.word_embeddings.get(name)
+ if embedding is None:
+ return
path, ext = os.path.splitext(embedding.filename)
return {
"name": name,
"filename": embedding.filename,
+ "shorthash": embedding.shorthash,
"preview": self.find_preview(path),
"description": self.find_description(path),
- "search_term": self.search_terms_from_path(embedding.filename),
+ "search_term": self.search_terms_from_path(embedding.filename) + " " + (embedding.hash or ""),
"prompt": quote_js(embedding.name),
"local_preview": f"{path}.preview.{shared.opts.samples_format}",
"sort_keys": {'default': index, **self.get_sort_keys(embedding.filename)},
}
def list_items(self):
- for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings):
- yield self.create_item(name, index)
+ # instantiate a list to protect against concurrent modification
+ names = list(sd_hijack.model_hijack.embedding_db.word_embeddings)
+ for index, name in enumerate(names):
+ item = self.create_item(name, index)
+ if item is not None:
+ yield item
def allowed_directories_for_previews(self):
return list(sd_hijack.model_hijack.embedding_db.embedding_dirs)
diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py
index 63d4b503..87aeb6f3 100644
--- a/modules/ui_extra_networks_user_metadata.py
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -5,7 +5,7 @@ import os.path
import gradio as gr
-from modules import generation_parameters_copypaste, images, sysinfo, errors
+from modules import infotext, images, sysinfo, errors, ui_extra_networks
class UserMetadataEditor:
@@ -36,8 +36,8 @@ class UserMetadataEditor:
item = self.page.items.get(name, {})
user_metadata = item.get('user_metadata', None)
- if user_metadata is None:
- user_metadata = {}
+ if not user_metadata:
+ user_metadata = {'description': item.get('description', '')}
item['user_metadata'] = user_metadata
return user_metadata
@@ -89,14 +89,24 @@ class UserMetadataEditor:
return preview
+ def relative_path(self, path):
+ for parent_path in self.page.allowed_directories_for_previews():
+ if ui_extra_networks.path_is_parent(parent_path, path):
+ return os.path.relpath(path, parent_path)
+
+ return os.path.basename(path)
+
def get_metadata_table(self, name):
item = self.page.items.get(name, {})
try:
filename = item["filename"]
+ shorthash = item.get("shorthash", None)
stats = os.stat(filename)
params = [
+ ('Filename: ', self.relative_path(filename)),
('File size: ', sysinfo.pretty_bytes(stats.st_size)),
+ ('Hash: ', shorthash),
('Modified: ', datetime.datetime.fromtimestamp(stats.st_mtime).strftime('%Y-%m-%d %H:%M')),
]
@@ -114,7 +124,7 @@ class UserMetadataEditor:
errors.display(e, f"reading metadata info for {name}")
params = []
- table = '<table class="file-metadata">' + "".join(f"<tr><th>{name}</th><td>{value}</td></tr>" for name, value in params) + '</table>'
+ table = '<table class="file-metadata">' + "".join(f"<tr><th>{name}</th><td>{value}</td></tr>" for name, value in params if value is not None) + '</table>'
return html.escape(name), user_metadata.get('description', ''), table, self.get_card_html(name), user_metadata.get('notes', '')
@@ -124,7 +134,7 @@ class UserMetadataEditor:
basename, ext = os.path.splitext(filename)
with open(basename + '.json', "w", encoding="utf8") as file:
- json.dump(metadata, file)
+ json.dump(metadata, file, indent=4, ensure_ascii=False)
def save_user_metadata(self, name, desc, notes):
user_metadata = self.get_user_metadata(name)
@@ -171,7 +181,7 @@ class UserMetadataEditor:
index = len(gallery) - 1 if index >= len(gallery) else index
img_info = gallery[index if index >= 0 else 0]
- image = generation_parameters_copypaste.image_from_url_text(img_info)
+ image = infotext.image_from_url_text(img_info)
geninfo, items = images.read_info_from_image(image)
images.save_image_with_geninfo(image, geninfo, item["local_preview"])
diff --git a/modules/ui_gradio_extensions.py b/modules/ui_gradio_extensions.py
index b824b113..a86c368e 100644
--- a/modules/ui_gradio_extensions.py
+++ b/modules/ui_gradio_extensions.py
@@ -1,17 +1,12 @@
import os
import gradio as gr
-from modules import localization, shared, scripts
+from modules import localization, shared, scripts, util
from modules.paths import script_path, data_path
def webpath(fn):
- if fn.startswith(script_path):
- web_path = os.path.relpath(fn, script_path).replace('\\', '/')
- else:
- web_path = os.path.abspath(fn)
-
- return f'file={web_path}?{os.path.getmtime(fn)}'
+ return f'file={util.truncate_path(fn)}?{os.path.getmtime(fn)}'
def javascript_html():
diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py
index 0052a5cc..7826786c 100644
--- a/modules/ui_loadsave.py
+++ b/modules/ui_loadsave.py
@@ -4,11 +4,15 @@ import os
import gradio as gr
from modules import errors
-from modules.ui_components import ToolButton
+from modules.ui_components import ToolButton, InputAccordion
+
+
+def radio_choices(comp): # gradio 3.41 changes choices from list of values to list of pairs
+ return [x[0] if isinstance(x, tuple) else x for x in getattr(comp, 'choices', [])]
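
radio_choices smooths over the gradio 3.41 change where Radio/Dropdown choices may arrive as pairs rather than bare values; it collapses each pair to its first element so the validation checks below can keep comparing against a flat list. A minimal sketch of the two shapes it accepts (values are illustrative):

```
def radio_choices_demo(choices):
    # mirrors radio_choices: bare values pass through, pairs collapse to their first element
    return [x[0] if isinstance(x, tuple) else x for x in choices]

print(radio_choices_demo(["Euler a", "DDIM"]))                         # pre-3.41 style
print(radio_choices_demo([("Euler a", "euler_a"), ("DDIM", "ddim")]))  # 3.41 pair style
```
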
class UiLoadsave:
- """allows saving and restorig default values for gradio components"""
+ """allows saving and restoring default values for gradio components"""
def __init__(self, filename):
self.filename = filename
@@ -37,23 +41,35 @@ class UiLoadsave:
key = f"{path}/{field}"
if getattr(obj, 'custom_script_source', None) is not None:
- key = f"customscript/{obj.custom_script_source}/{key}"
+ key = f"customscript/{obj.custom_script_source}/{key}"
if getattr(obj, 'do_not_save_to_config', False):
return
saved_value = self.ui_settings.get(key, None)
+
+ if isinstance(obj, gr.Accordion) and isinstance(x, InputAccordion) and field == 'value':
+ field = 'open'
+
if saved_value is None:
self.ui_settings[key] = getattr(obj, field)
elif condition and not condition(saved_value):
pass
else:
+ if isinstance(obj, gr.Textbox) and field == 'value': # due to an undesirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies
+ saved_value = str(saved_value)
+ elif isinstance(obj, gr.Number) and field == 'value':
+ try:
+ saved_value = float(saved_value)
+ except ValueError:
+ return
+
setattr(obj, field, saved_value)
if init_field is not None:
init_field(saved_value)
if field == 'value' and key not in self.component_mapping:
- self.component_mapping[key] = x
+ self.component_mapping[key] = obj
if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown, ToolButton, gr.Button] and x.visible:
apply_field(x, 'visible')
@@ -65,7 +81,7 @@ class UiLoadsave:
apply_field(x, 'step')
if type(x) == gr.Radio:
- apply_field(x, 'value', lambda val: val in x.choices)
+ apply_field(x, 'value', lambda val: val in radio_choices(x))
if type(x) == gr.Checkbox:
apply_field(x, 'value')
@@ -78,13 +94,20 @@ class UiLoadsave:
if type(x) == gr.Dropdown:
def check_dropdown(val):
+ choices = radio_choices(x)
if getattr(x, 'multiselect', False):
- return all(value in x.choices for value in val)
+ return all(value in choices for value in val)
else:
- return val in x.choices
+ return val in choices
apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None))
+ if type(x) == InputAccordion:
+ if x.accordion.visible:
+ apply_field(x.accordion, 'visible')
+ apply_field(x, 'value')
+ apply_field(x.accordion, 'value')
+
def check_tab_id(tab_id):
tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children))
if type(tab_id) == str:
@@ -118,7 +141,7 @@ class UiLoadsave:
def write_to_file(self, current_ui_settings):
with open(self.filename, "w", encoding="utf8") as file:
- json.dump(current_ui_settings, file, indent=4)
+ json.dump(current_ui_settings, file, indent=4, ensure_ascii=False)
def dump_defaults(self):
"""saves default values to a file unless tjhe file is present and there was an error loading default values at start"""
@@ -138,12 +161,14 @@ class UiLoadsave:
for (path, component), new_value in zip(self.component_mapping.items(), values):
old_value = current_ui_settings.get(path)
- choices = getattr(component, 'choices', None)
+ choices = radio_choices(component)
if isinstance(new_value, int) and choices:
if new_value >= len(choices):
continue
new_value = choices[new_value]
+ if isinstance(new_value, tuple):
+ new_value = new_value[0]
if new_value == old_value:
continue
diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py
index c7dc1154..b74a1532 100644
--- a/modules/ui_postprocessing.py
+++ b/modules/ui_postprocessing.py
@@ -1,12 +1,13 @@
import gradio as gr
-from modules import scripts, shared, ui_common, postprocessing, call_queue
-import modules.generation_parameters_copypaste as parameters_copypaste
+from modules import scripts, shared, ui_common, postprocessing, call_queue, ui_toprow
+import modules.infotext as parameters_copypaste
def create_ui():
+ dummy_component = gr.Label(visible=False)
tab_index = gr.State(value=0)
- with gr.Row().style(equal_height=False, variant='compact'):
+ with gr.Row(equal_height=False, variant='compact'):
with gr.Column(variant='compact'):
with gr.Tabs(elem_id="mode_extras"):
with gr.TabItem('Single Image', id="single_image", elem_id="extras_single_tab") as tab_single:
@@ -20,11 +21,13 @@ def create_ui():
extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir")
show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results")
- submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
-
script_inputs = scripts.scripts_postproc.setup_ui()
with gr.Column():
+ toprow = ui_toprow.Toprow(is_compact=True, is_img2img=False, id_part="extras")
+ toprow.create_inline_toprow_image()
+ submit = toprow.submit
+
result_images, html_info_x, html_info, html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples)
tab_single.select(fn=lambda: 0, inputs=[], outputs=[tab_index])
@@ -32,8 +35,10 @@ def create_ui():
tab_batch_dir.select(fn=lambda: 2, inputs=[], outputs=[tab_index])
submit.click(
- fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing, extra_outputs=[None, '']),
+ fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing_webui, extra_outputs=[None, '']),
+ _js="submit_extras",
inputs=[
+ dummy_component,
tab_index,
extras_image,
image_batch,
@@ -45,8 +50,9 @@ def create_ui():
outputs=[
result_images,
html_info_x,
- html_info,
- ]
+ html_log,
+ ],
+ show_progress=False,
)
parameters_copypaste.add_paste_fields("extras", extras_image, None)
diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py
new file mode 100644
index 00000000..0d74c23f
--- /dev/null
+++ b/modules/ui_prompt_styles.py
@@ -0,0 +1,120 @@
+import gradio as gr
+
+from modules import shared, ui_common, ui_components, styles
+
+styles_edit_symbol = '\U0001f58c\uFE0F' # 🖌️
+styles_materialize_symbol = '\U0001f4cb' # 📋
+styles_copy_symbol = '\U0001f4dd' # 📝
+
+
+def select_style(name):
+ style = shared.prompt_styles.styles.get(name)
+ existing = style is not None
+ empty = not name
+
+ prompt = style.prompt if style else gr.update()
+ negative_prompt = style.negative_prompt if style else gr.update()
+
+ return prompt, negative_prompt, gr.update(visible=existing), gr.update(visible=not empty)
+
+
+def save_style(name, prompt, negative_prompt):
+ if not name:
+ return gr.update(visible=False)
+
+ style = styles.PromptStyle(name, prompt, negative_prompt)
+ shared.prompt_styles.styles[style.name] = style
+ shared.prompt_styles.save_styles(shared.styles_filename)
+
+ return gr.update(visible=True)
+
+
+def delete_style(name):
+ if name == "":
+ return
+
+ shared.prompt_styles.styles.pop(name, None)
+ shared.prompt_styles.save_styles(shared.styles_filename)
+
+ return '', '', ''
+
+
+def materialize_styles(prompt, negative_prompt, styles):
+ prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
+ negative_prompt = shared.prompt_styles.apply_negative_styles_to_prompt(negative_prompt, styles)
+
+ return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=negative_prompt), gr.Dropdown.update(value=[])]
+
+
+def refresh_styles():
+ return gr.update(choices=list(shared.prompt_styles.styles)), gr.update(choices=list(shared.prompt_styles.styles))
+
+
+class UiPromptStyles:
+ def __init__(self, tabname, main_ui_prompt, main_ui_negative_prompt):
+ self.tabname = tabname
+ self.main_ui_prompt = main_ui_prompt
+ self.main_ui_negative_prompt = main_ui_negative_prompt
+
+ with gr.Row(elem_id=f"{tabname}_styles_row"):
+ self.dropdown = gr.Dropdown(label="Styles", show_label=False, elem_id=f"{tabname}_styles", choices=list(shared.prompt_styles.styles), value=[], multiselect=True, tooltip="Styles")
+ edit_button = ui_components.ToolButton(value=styles_edit_symbol, elem_id=f"{tabname}_styles_edit_button", tooltip="Edit styles")
+
+ with gr.Box(elem_id=f"{tabname}_styles_dialog", elem_classes="popup-dialog") as styles_dialog:
+ with gr.Row():
+                self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to the prompt. Use the {prompt} token in the style text, and it will be replaced with the user's prompt when applying the style. Otherwise, the style's text will be added to the end of the prompt.")
+ ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles")
+                self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply_dialog", tooltip="Apply all selected styles from the style selection dropdown in the main UI to the prompt.")
+ self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f"{tabname}_style_copy", tooltip="Copy main UI prompt to style.")
+
+ with gr.Row():
+ self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3, elem_classes=["prompt"])
+
+ with gr.Row():
+ self.neg_prompt = gr.Textbox(label="Negative prompt", show_label=True, elem_id=f"{tabname}_edit_style_neg_prompt", lines=3, elem_classes=["prompt"])
+
+ with gr.Row():
+ self.save = gr.Button('Save', variant='primary', elem_id=f'{tabname}_edit_style_save', visible=False)
+ self.delete = gr.Button('Delete', variant='primary', elem_id=f'{tabname}_edit_style_delete', visible=False)
+ self.close = gr.Button('Close', variant='secondary', elem_id=f'{tabname}_edit_style_close')
+
+ self.selection.change(
+ fn=select_style,
+ inputs=[self.selection],
+ outputs=[self.prompt, self.neg_prompt, self.delete, self.save],
+ show_progress=False,
+ )
+
+ self.save.click(
+ fn=save_style,
+ inputs=[self.selection, self.prompt, self.neg_prompt],
+ outputs=[self.delete],
+ show_progress=False,
+ ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False)
+
+ self.delete.click(
+ fn=delete_style,
+ _js='function(name){ if(name == "") return ""; return confirm("Delete style " + name + "?") ? name : ""; }',
+ inputs=[self.selection],
+ outputs=[self.selection, self.prompt, self.neg_prompt],
+ show_progress=False,
+ ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False)
+
+ self.setup_apply_button(self.materialize)
+
+ self.copy.click(
+ fn=lambda p, n: (p, n),
+ inputs=[main_ui_prompt, main_ui_negative_prompt],
+ outputs=[self.prompt, self.neg_prompt],
+ show_progress=False,
+ )
+
+ ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close)
+
+ def setup_apply_button(self, button):
+ button.click(
+ fn=materialize_styles,
+ inputs=[self.main_ui_prompt, self.main_ui_negative_prompt, self.dropdown],
+ outputs=[self.main_ui_prompt, self.main_ui_negative_prompt, self.dropdown],
+ show_progress=False,
+ ).then(fn=None, _js="function(){update_"+self.tabname+"_tokens(); closePopup();}", show_progress=False)
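
UiPromptStyles packages the styles dropdown, the edit button and the popup editor into one object, so a tab only needs to hand it its prompt boxes. A minimal sketch of the intended instantiation (the surrounding components are illustrative):

```
import gradio as gr
from modules import ui_prompt_styles

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt", lines=3)
    negative_prompt = gr.Textbox(label="Negative prompt", lines=3)

    # builds the styles dropdown, edit button and popup editor, and wires all their events
    styles = ui_prompt_styles.UiPromptStyles("txt2img", prompt, negative_prompt)
```
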
diff --git a/modules/ui_settings.py b/modules/ui_settings.py
index a6076bf3..e054d00a 100644
--- a/modules/ui_settings.py
+++ b/modules/ui_settings.py
@@ -1,10 +1,11 @@
import gradio as gr
-from modules import ui_common, shared, script_callbacks, scripts, sd_models, sysinfo
+from modules import ui_common, shared, script_callbacks, scripts, sd_models, sysinfo, timer
from modules.call_queue import wrap_gradio_call
from modules.shared import opts
from modules.ui_components import FormRow
from modules.ui_gradio_extensions import reload_javascript
+from concurrent.futures import ThreadPoolExecutor, as_completed
def get_value_for_setting(key):
@@ -63,6 +64,9 @@ class UiSettings:
quicksettings_list = None
quicksettings_names = None
text_settings = None
+ show_all_pages = None
+ show_one_page = None
+ search_input = None
def run_settings(self, *args):
changed = []
@@ -87,7 +91,7 @@ class UiSettings:
if not opts.same_type(value, opts.data_labels[key].default):
return gr.update(visible=True), opts.dumpjson()
- if not opts.set(key, value):
+ if value is None or not opts.set(key, value):
return gr.update(value=getattr(opts, key)), opts.dumpjson()
opts.save(shared.config_filename)
@@ -135,7 +139,7 @@ class UiSettings:
gr.Group()
current_tab = gr.TabItem(elem_id=f"settings_{elem_id}", label=text)
current_tab.__enter__()
- current_row = gr.Column(variant='compact')
+ current_row = gr.Column(elem_id=f"column_settings_{elem_id}", variant='compact')
current_row.__enter__()
previous_section = item.section
@@ -158,7 +162,7 @@ class UiSettings:
loadsave.create_ui()
with gr.TabItem("Sysinfo", id="sysinfo", elem_id="settings_tab_sysinfo"):
- gr.HTML('<a href="./internal/sysinfo-download" class="sysinfo_big_link" download>Download system info</a><br /><a href="./internal/sysinfo">(or open as text in a new page)</a>', elem_id="sysinfo_download")
+ gr.HTML('<a href="./internal/sysinfo-download" class="sysinfo_big_link" download>Download system info</a><br /><a href="./internal/sysinfo" target="_blank">(or open as text in a new page)</a>', elem_id="sysinfo_download")
with gr.Row():
with gr.Column(scale=1):
@@ -173,26 +177,43 @@ class UiSettings:
download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
with gr.Row():
- unload_sd_model = gr.Button(value='Unload SD checkpoint to free VRAM', elem_id="sett_unload_sd_model")
- reload_sd_model = gr.Button(value='Reload the last SD checkpoint back into VRAM', elem_id="sett_reload_sd_model")
+ unload_sd_model = gr.Button(value='Unload SD checkpoint to RAM', elem_id="sett_unload_sd_model")
+ reload_sd_model = gr.Button(value='Load SD checkpoint to VRAM from RAM', elem_id="sett_reload_sd_model")
+ with gr.Row():
+                calculate_all_checkpoint_hash = gr.Button(value='Calculate hash for all checkpoints', elem_id="calculate_all_checkpoint_hash")
+ calculate_all_checkpoint_hash_threads = gr.Number(value=1, label="Number of parallel calculations", elem_id="calculate_all_checkpoint_hash_threads", precision=0, minimum=1)
with gr.TabItem("Licenses", id="licenses", elem_id="settings_tab_licenses"):
gr.HTML(shared.html("licenses.html"), elem_id="licenses")
- gr.Button(value="Show all pages", elem_id="settings_show_all_pages")
+ self.show_all_pages = gr.Button(value="Show all pages", elem_id="settings_show_all_pages")
+ self.show_one_page = gr.Button(value="Show only one page", elem_id="settings_show_one_page", visible=False)
+ self.show_one_page.click(lambda: None)
+
+ self.search_input = gr.Textbox(value="", elem_id="settings_search", max_lines=1, placeholder="Search...", show_label=False)
self.text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
+ def call_func_and_return_text(func, text):
+ def handler():
+ t = timer.Timer()
+ func()
+ t.record(text)
+
+ return f'{text} in {t.total:.1f}s'
+
+ return handler
+
unload_sd_model.click(
- fn=sd_models.unload_model_weights,
+ fn=call_func_and_return_text(sd_models.unload_model_weights, 'Unloaded the checkpoint'),
inputs=[],
- outputs=[]
+ outputs=[self.result]
)
reload_sd_model.click(
- fn=sd_models.reload_model_weights,
+ fn=call_func_and_return_text(lambda: sd_models.send_model_to_device(shared.sd_model), 'Loaded the checkpoint'),
inputs=[],
- outputs=[]
+ outputs=[self.result]
)
request_notifications.click(
@@ -241,6 +262,21 @@ class UiSettings:
outputs=[sysinfo_check_output],
)
+ def calculate_all_checkpoint_hash_fn(max_thread):
+ checkpoints_list = sd_models.checkpoints_list.values()
+ with ThreadPoolExecutor(max_workers=max_thread) as executor:
+ futures = [executor.submit(checkpoint.calculate_shorthash) for checkpoint in checkpoints_list]
+ completed = 0
+ for _ in as_completed(futures):
+ completed += 1
+ print(f"{completed} / {len(checkpoints_list)}")
+ print("Finished calculating hashes for all checkpoints")
+
+ calculate_all_checkpoint_hash.click(
+ fn=calculate_all_checkpoint_hash_fn,
+ inputs=[calculate_all_checkpoint_hash_threads],
+ )
+
self.interface = settings_interface
def add_quicksettings(self):
@@ -294,3 +330,8 @@ class UiSettings:
outputs=[self.component_dict[k] for k in component_keys],
queue=False,
)
+
+ def search(self, text):
+ print(text)
+
+ return [gr.update(visible=text in (comp.label or "")) for comp in self.components]
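The new "Calculate hash for all checkpoints" button fans the hash computation out over a thread pool and reports progress as each future finishes. A minimal standalone sketch of that pattern, using only the standard library and a hypothetical compute_hash callable (not part of this patch):

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def hash_all(items, compute_hash, max_threads=1):
        # one job per item; report progress as each future completes
        with ThreadPoolExecutor(max_workers=max_threads) as executor:
            futures = [executor.submit(compute_hash, item) for item in items]
            for done, _ in enumerate(as_completed(futures), start=1):
                print(f"{done} / {len(futures)}")
        print("Finished calculating hashes")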
diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py
index fb75137e..85015db5 100644
--- a/modules/ui_tempdir.py
+++ b/modules/ui_tempdir.py
@@ -44,6 +44,8 @@ def save_pil_to_file(self, pil_image, dir=None, format="png"):
if shared.opts.temp_dir != "":
dir = shared.opts.temp_dir
+ else:
+ os.makedirs(dir, exist_ok=True)
use_metadata = False
metadata = PngImagePlugin.PngInfo()
@@ -57,8 +59,9 @@ def save_pil_to_file(self, pil_image, dir=None, format="png"):
return file_obj.name
-# override save to file function so that it also writes PNG info
-gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file
+def install_ui_tempdir_override():
+ """override save to file function so that it also writes PNG info"""
+ gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file
def on_tmpdir_changed():
diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py
new file mode 100644
index 00000000..9caf8faa
--- /dev/null
+++ b/modules/ui_toprow.py
@@ -0,0 +1,143 @@
+import gradio as gr
+
+from modules import shared, ui_prompt_styles
+import modules.images
+
+from modules.ui_components import ToolButton
+
+
+class Toprow:
+ """Creates the top-row UI with prompts, the generate button, styles and assorted tool buttons, and wires up the functionality related to their operation"""
+
+ prompt = None
+ prompt_img = None
+ negative_prompt = None
+
+ button_interrogate = None
+ button_deepbooru = None
+
+ interrupt = None
+ skip = None
+ submit = None
+
+ paste = None
+ clear_prompt_button = None
+ apply_styles = None
+ restore_progress_button = None
+
+ token_counter = None
+ token_button = None
+ negative_token_counter = None
+ negative_token_button = None
+
+ ui_styles = None
+
+ submit_box = None
+
+ def __init__(self, is_img2img, is_compact=False, id_part=None):
+ if id_part is None:
+ id_part = "img2img" if is_img2img else "txt2img"
+
+ self.id_part = id_part
+ self.is_img2img = is_img2img
+ self.is_compact = is_compact
+
+ if not is_compact:
+ with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
+ self.create_classic_toprow()
+ else:
+ self.create_submit_box()
+
+ def create_classic_toprow(self):
+ self.create_prompts()
+
+ with gr.Column(scale=1, elem_id=f"{self.id_part}_actions_column"):
+ self.create_submit_box()
+
+ self.create_tools_row()
+
+ self.create_styles_ui()
+
+ def create_inline_toprow_prompts(self):
+ if not self.is_compact:
+ return
+
+ self.create_prompts()
+
+ with gr.Row(elem_classes=["toprow-compact-stylerow"]):
+ with gr.Column(elem_classes=["toprow-compact-tools"]):
+ self.create_tools_row()
+ with gr.Column():
+ self.create_styles_ui()
+
+ def create_inline_toprow_image(self):
+ if not self.is_compact:
+ return
+
+ self.submit_box.render()
+
+ def create_prompts(self):
+ with gr.Column(elem_id=f"{self.id_part}_prompt_container", elem_classes=["prompt-container-compact"] if self.is_compact else [], scale=6):
+ with gr.Row(elem_id=f"{self.id_part}_prompt_row", elem_classes=["prompt-row"]):
+ self.prompt = gr.Textbox(label="Prompt", elem_id=f"{self.id_part}_prompt", show_label=False, lines=3, placeholder="Prompt\n(Press Ctrl+Enter to generate, Alt+Enter to skip, Esc to interrupt)", elem_classes=["prompt"])
+ self.prompt_img = gr.File(label="", elem_id=f"{self.id_part}_prompt_image", file_count="single", type="binary", visible=False)
+
+ with gr.Row(elem_id=f"{self.id_part}_neg_prompt_row", elem_classes=["prompt-row"]):
+ self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{self.id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt\n(Press Ctrl+Enter to generate, Alt+Enter to skip, Esc to interrupt)", elem_classes=["prompt"])
+
+ self.prompt_img.change(
+ fn=modules.images.image_data,
+ inputs=[self.prompt_img],
+ outputs=[self.prompt, self.prompt_img],
+ show_progress=False,
+ )
+
+ def create_submit_box(self):
+ with gr.Row(elem_id=f"{self.id_part}_generate_box", elem_classes=["generate-box"] + (["generate-box-compact"] if self.is_compact else []), render=not self.is_compact) as submit_box:
+ self.submit_box = submit_box
+
+ self.interrupt = gr.Button('Interrupt', elem_id=f"{self.id_part}_interrupt", elem_classes="generate-box-interrupt")
+ self.skip = gr.Button('Skip', elem_id=f"{self.id_part}_skip", elem_classes="generate-box-skip")
+ self.submit = gr.Button('Generate', elem_id=f"{self.id_part}_generate", variant='primary')
+
+ self.skip.click(
+ fn=lambda: shared.state.skip(),
+ inputs=[],
+ outputs=[],
+ )
+
+ self.interrupt.click(
+ fn=lambda: shared.state.interrupt(),
+ inputs=[],
+ outputs=[],
+ )
+
+ def create_tools_row(self):
+ with gr.Row(elem_id=f"{self.id_part}_tools"):
+ from modules.ui import paste_symbol, clear_prompt_symbol, restore_progress_symbol
+
+ self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from the prompt, or from the last generation if the prompt is empty, into the user interface.")
+ self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{self.id_part}_clear_prompt", tooltip="Clear prompt")
+ self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{self.id_part}_style_apply", tooltip="Apply all selected styles to prompts.")
+
+ if self.is_img2img:
+ self.button_interrogate = ToolButton('📎', tooltip='Interrogate CLIP - use the CLIP neural network to create a text description of the image and put it into the prompt field', elem_id="interrogate")
+ self.button_deepbooru = ToolButton('📦', tooltip='Interrogate DeepBooru - use the DeepBooru neural network to create a text description of the image and put it into the prompt field', elem_id="deepbooru")
+
+ self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{self.id_part}_restore_progress", visible=False, tooltip="Restore progress")
+
+ self.token_counter = gr.HTML(value="<span>0/75</span>", elem_id=f"{self.id_part}_token_counter", elem_classes=["token-counter"])
+ self.token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_token_button")
+ self.negative_token_counter = gr.HTML(value="<span>0/75</span>", elem_id=f"{self.id_part}_negative_token_counter", elem_classes=["token-counter"])
+ self.negative_token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_negative_token_button")
+
+ self.clear_prompt_button.click(
+ fn=lambda *x: x,
+ _js="confirm_clear_prompt",
+ inputs=[self.prompt, self.negative_prompt],
+ outputs=[self.prompt, self.negative_prompt],
+ )
+
+ def create_styles_ui(self):
+ self.ui_styles = ui_prompt_styles.UiPromptStyles(self.id_part, self.prompt, self.negative_prompt)
+ self.ui_styles.setup_apply_button(self.apply_styles)
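Toprow is meant to be instantiated inside a gradio Blocks context; a rough sketch of the two layouts, assuming the surrounding webui modules are importable (placement here is illustrative, not taken from this patch):

    import gradio as gr
    from modules import ui_toprow

    with gr.Blocks():
        # classic layout: prompts, generate/skip/interrupt box, tool row and styles
        classic = ui_toprow.Toprow(is_img2img=False)

        # compact layout defers rendering; prompts and the submit box are placed later
        compact = ui_toprow.Toprow(is_img2img=True, is_compact=True)
        compact.create_inline_toprow_prompts()
        compact.create_inline_toprow_image()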
diff --git a/modules/upscaler.py b/modules/upscaler.py
index e682bbaa..3aee69db 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -57,6 +57,9 @@ class Upscaler:
dest_h = int((img.height * scale) // 8 * 8)
for _ in range(3):
+ if img.width >= dest_w and img.height >= dest_h:
+ break
+
shape = (img.width, img.height)
img = self.do_upscale(img, selected_model)
@@ -64,9 +67,6 @@ class Upscaler:
if shape == (img.width, img.height):
break
- if img.width >= dest_w and img.height >= dest_h:
- break
-
if img.width != dest_w or img.height != dest_h:
img = img.resize((int(dest_w), int(dest_h)), resample=LANCZOS)
@@ -98,6 +98,9 @@ class UpscalerData:
self.scale = scale
self.model = model
+ def __repr__(self):
+ return f"<UpscalerData name={self.name} path={self.data_path} scale={self.scale}>"
+
class UpscalerNone(Upscaler):
name = "None"
diff --git a/modules/upscaler_utils.py b/modules/upscaler_utils.py
new file mode 100644
index 00000000..f5cb92d5
--- /dev/null
+++ b/modules/upscaler_utils.py
@@ -0,0 +1,140 @@
+import logging
+from typing import Callable
+
+import numpy as np
+import torch
+import tqdm
+from PIL import Image
+
+from modules import images, shared, torch_utils
+
+logger = logging.getLogger(__name__)
+
+
+def upscale_without_tiling(model, img: Image.Image):
+ img = np.array(img)
+ img = img[:, :, ::-1]
+ img = np.ascontiguousarray(np.transpose(img, (2, 0, 1))) / 255
+ img = torch.from_numpy(img).float()
+
+ param = torch_utils.get_param(model)
+ img = img.unsqueeze(0).to(device=param.device, dtype=param.dtype)
+
+ with torch.no_grad():
+ output = model(img)
+
+ output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
+ output = 255. * np.moveaxis(output, 0, 2)
+ output = output.astype(np.uint8)
+ output = output[:, :, ::-1]
+ return Image.fromarray(output, 'RGB')
+
+
+def upscale_with_model(
+ model: Callable[[torch.Tensor], torch.Tensor],
+ img: Image.Image,
+ *,
+ tile_size: int,
+ tile_overlap: int = 0,
+ desc="tiled upscale",
+) -> Image.Image:
+ if tile_size <= 0:
+ logger.debug("Upscaling %s without tiling", img)
+ output = upscale_without_tiling(model, img)
+ logger.debug("=> %s", output)
+ return output
+
+ grid = images.split_grid(img, tile_size, tile_size, tile_overlap)
+ newtiles = []
+
+ with tqdm.tqdm(total=grid.tile_count, desc=desc) as p:
+ for y, h, row in grid.tiles:
+ newrow = []
+ for x, w, tile in row:
+ logger.debug("Tile (%d, %d) %s...", x, y, tile)
+ output = upscale_without_tiling(model, tile)
+ scale_factor = output.width // tile.width
+ logger.debug("=> %s (scale factor %s)", output, scale_factor)
+ newrow.append([x * scale_factor, w * scale_factor, output])
+ p.update(1)
+ newtiles.append([y * scale_factor, h * scale_factor, newrow])
+
+ newgrid = images.Grid(
+ newtiles,
+ tile_w=grid.tile_w * scale_factor,
+ tile_h=grid.tile_h * scale_factor,
+ image_w=grid.image_w * scale_factor,
+ image_h=grid.image_h * scale_factor,
+ overlap=grid.overlap * scale_factor,
+ )
+ return images.combine_grid(newgrid)
+
+
+def tiled_upscale_2(
+ img,
+ model,
+ *,
+ tile_size: int,
+ tile_overlap: int,
+ scale: int,
+ device,
+ desc="Tiled upscale",
+):
+ # Alternative implementation of `upscale_with_model` originally used by
+ # SwinIR and ScuNET. It differs from `upscale_with_model` in that tiling and
+ # weighting are done in PyTorch space, as opposed to `images.Grid` doing it in
+ # Pillow space without weighting.
+ b, c, h, w = img.size()
+ tile_size = min(tile_size, h, w)
+
+ if tile_size <= 0:
+ logger.debug("Upscaling %s without tiling", img.shape)
+ return model(img)
+
+ stride = tile_size - tile_overlap
+ h_idx_list = list(range(0, h - tile_size, stride)) + [h - tile_size]
+ w_idx_list = list(range(0, w - tile_size, stride)) + [w - tile_size]
+ result = torch.zeros(
+ b,
+ c,
+ h * scale,
+ w * scale,
+ device=device,
+ ).type_as(img)
+ weights = torch.zeros_like(result)
+ logger.debug("Upscaling %s to %s with tiles", img.shape, result.shape)
+ with tqdm.tqdm(total=len(h_idx_list) * len(w_idx_list), desc=desc) as pbar:
+ for h_idx in h_idx_list:
+ if shared.state.interrupted or shared.state.skipped:
+ break
+
+ for w_idx in w_idx_list:
+ if shared.state.interrupted or shared.state.skipped:
+ break
+
+ in_patch = img[
+ ...,
+ h_idx : h_idx + tile_size,
+ w_idx : w_idx + tile_size,
+ ]
+ out_patch = model(in_patch)
+
+ result[
+ ...,
+ h_idx * scale : (h_idx + tile_size) * scale,
+ w_idx * scale : (w_idx + tile_size) * scale,
+ ].add_(out_patch)
+
+ out_patch_mask = torch.ones_like(out_patch)
+
+ weights[
+ ...,
+ h_idx * scale : (h_idx + tile_size) * scale,
+ w_idx * scale : (w_idx + tile_size) * scale,
+ ].add_(out_patch_mask)
+
+ pbar.update(1)
+
+ output = result.div_(weights)
+
+ return output
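A usage sketch for the new tiled helper, assuming model is an already-loaded torch module mapping (B, C, H, W) tensors to upscaled tensors and the file names are placeholders (neither is part of this patch):

    from PIL import Image
    from modules import upscaler_utils

    img = Image.open("input.png").convert("RGB")
    # a moderate tile size with a small overlap keeps memory bounded; tile_size <= 0 disables tiling
    result = upscaler_utils.upscale_with_model(model, img, tile_size=192, tile_overlap=8)
    result.save("output.png")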
diff --git a/modules/util.py b/modules/util.py
new file mode 100644
index 00000000..4861bcb0
--- /dev/null
+++ b/modules/util.py
@@ -0,0 +1,68 @@
+import os
+import re
+
+from modules import shared
+from modules.paths_internal import script_path, cwd
+
+
+def natural_sort_key(s, regex=re.compile('([0-9]+)')):
+ return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]
+
+
+def listfiles(dirname):
+ filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")]
+ return [file for file in filenames if os.path.isfile(file)]
+
+
+def html_path(filename):
+ return os.path.join(script_path, "html", filename)
+
+
+def html(filename):
+ path = html_path(filename)
+
+ if os.path.exists(path):
+ with open(path, encoding="utf8") as file:
+ return file.read()
+
+ return ""
+
+
+def walk_files(path, allowed_extensions=None):
+ if not os.path.exists(path):
+ return
+
+ if allowed_extensions is not None:
+ allowed_extensions = set(allowed_extensions)
+
+ items = list(os.walk(path, followlinks=True))
+ items = sorted(items, key=lambda x: natural_sort_key(x[0]))
+
+ for root, _, files in items:
+ for filename in sorted(files, key=natural_sort_key):
+ if allowed_extensions is not None:
+ _, ext = os.path.splitext(filename)
+ if ext not in allowed_extensions:
+ continue
+
+ if not shared.opts.list_hidden_files and ("/." in root or "\\." in root):
+ continue
+
+ yield os.path.join(root, filename)
+
+
+def ldm_print(*args, **kwargs):
+ if shared.opts.hide_ldm_prints:
+ return
+
+ print(*args, **kwargs)
+
+
+def truncate_path(target_path, base_path=cwd):
+ abs_target, abs_base = os.path.abspath(target_path), os.path.abspath(base_path)
+ try:
+ if os.path.commonpath([abs_target, abs_base]) == abs_base:
+ return os.path.relpath(abs_target, abs_base)
+ except ValueError:
+ pass
+ return abs_target
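The helpers in the new util.py rely only on the standard library at call time, so their behaviour is easy to check in isolation; a short sketch with made-up file names (assumes the webui modules import cleanly):

    from modules.util import natural_sort_key, truncate_path

    sorted(["frame10.png", "frame2.png", "frame1.png"], key=natural_sort_key)
    # -> ['frame1.png', 'frame2.png', 'frame10.png']  (numeric runs compare as numbers)

    truncate_path("/home/user/webui/models/Lora/x.safetensors", base_path="/home/user/webui")
    # -> 'models/Lora/x.safetensors'; paths outside base_path come back absolute and unchanged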
diff --git a/modules/xlmr.py b/modules/xlmr.py
index a407a3ca..319771b7 100644
--- a/modules/xlmr.py
+++ b/modules/xlmr.py
@@ -5,6 +5,9 @@ from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRoberta
from transformers import XLMRobertaModel,XLMRobertaTokenizer
from typing import Optional
+from modules import torch_utils
+
+
class BertSeriesConfig(BertConfig):
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None,project_dim=512, pooler_fn="average",learn_encoder=False,model_type='bert',**kwargs):
@@ -62,7 +65,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel):
self.post_init()
def encode(self,c):
- device = next(self.parameters()).device
+ device = torch_utils.get_param(self).device
text = self.tokenizer(c,
truncation=True,
max_length=77,
diff --git a/modules/xlmr_m18.py b/modules/xlmr_m18.py
new file mode 100644
index 00000000..f6055504
--- /dev/null
+++ b/modules/xlmr_m18.py
@@ -0,0 +1,166 @@
+from transformers import BertPreTrainedModel,BertConfig
+import torch.nn as nn
+import torch
+from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig
+from transformers import XLMRobertaModel,XLMRobertaTokenizer
+from typing import Optional
+from modules import torch_utils
+
+
+class BertSeriesConfig(BertConfig):
+ def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None,project_dim=512, pooler_fn="average",learn_encoder=False,model_type='bert',**kwargs):
+
+ super().__init__(vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, type_vocab_size, initializer_range, layer_norm_eps, pad_token_id, position_embedding_type, use_cache, classifier_dropout, **kwargs)
+ self.project_dim = project_dim
+ self.pooler_fn = pooler_fn
+ self.learn_encoder = learn_encoder
+
+class RobertaSeriesConfig(XLMRobertaConfig):
+ def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2,project_dim=512,pooler_fn='cls',learn_encoder=False, **kwargs):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+ self.project_dim = project_dim
+ self.pooler_fn = pooler_fn
+ self.learn_encoder = learn_encoder
+
+
+class BertSeriesModelWithTransformation(BertPreTrainedModel):
+
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
+ config_class = BertSeriesConfig
+
+ def __init__(self, config=None, **kargs):
+ # modify initialization for autoloading
+ if config is None:
+ config = XLMRobertaConfig()
+ config.attention_probs_dropout_prob= 0.1
+ config.bos_token_id=0
+ config.eos_token_id=2
+ config.hidden_act='gelu'
+ config.hidden_dropout_prob=0.1
+ config.hidden_size=1024
+ config.initializer_range=0.02
+ config.intermediate_size=4096
+ config.layer_norm_eps=1e-05
+ config.max_position_embeddings=514
+
+ config.num_attention_heads=16
+ config.num_hidden_layers=24
+ config.output_past=True
+ config.pad_token_id=1
+ config.position_embedding_type= "absolute"
+
+ config.type_vocab_size= 1
+ config.use_cache=True
+ config.vocab_size= 250002
+ config.project_dim = 1024
+ config.learn_encoder = False
+ super().__init__(config)
+ self.roberta = XLMRobertaModel(config)
+ self.transformation = nn.Linear(config.hidden_size,config.project_dim)
+ # self.pre_LN=nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large')
+ # self.pooler = lambda x: x[:,0]
+ # self.post_init()
+
+ self.has_pre_transformation = True
+ if self.has_pre_transformation:
+ self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
+ self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.post_init()
+
+ def encode(self,c):
+ device = torch_utils.get_param(self).device
+ text = self.tokenizer(c,
+ truncation=True,
+ max_length=77,
+ return_length=False,
+ return_overflowing_tokens=False,
+ padding="max_length",
+ return_tensors="pt")
+ text["input_ids"] = torch.tensor(text["input_ids"]).to(device)
+ text["attention_mask"] = torch.tensor(
+ text['attention_mask']).to(device)
+ features = self(**text)
+ return features['projection_state']
+
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ ) :
+ r"""
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+
+ outputs = self.roberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=True,
+ return_dict=return_dict,
+ )
+
+ # # last module outputs
+ # sequence_output = outputs[0]
+
+
+ # # project every module
+ # sequence_output_ln = self.pre_LN(sequence_output)
+
+ # # pooler
+ # pooler_output = self.pooler(sequence_output_ln)
+ # pooler_output = self.transformation(pooler_output)
+ # projection_state = self.transformation(outputs.last_hidden_state)
+
+ if self.has_pre_transformation:
+ sequence_output2 = outputs["hidden_states"][-2]
+ sequence_output2 = self.pre_LN(sequence_output2)
+ projection_state2 = self.transformation_pre(sequence_output2)
+
+ return {
+ "projection_state": projection_state2,
+ "last_hidden_state": outputs.last_hidden_state,
+ "hidden_states": outputs.hidden_states,
+ "attentions": outputs.attentions,
+ }
+ else:
+ projection_state = self.transformation(outputs.last_hidden_state)
+ return {
+ "projection_state": projection_state,
+ "last_hidden_state": outputs.last_hidden_state,
+ "hidden_states": outputs.hidden_states,
+ "attentions": outputs.attentions,
+ }
+
+
+ # return {
+ # 'pooler_output':pooler_output,
+ # 'last_hidden_state':outputs.last_hidden_state,
+ # 'hidden_states':outputs.hidden_states,
+ # 'attentions':outputs.attentions,
+ # 'projection_state':projection_state,
+ # 'sequence_out': sequence_output
+ # }
+
+
+class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation):
+ base_model_prefix = 'roberta'
+ config_class= RobertaSeriesConfig
diff --git a/modules/xpu_specific.py b/modules/xpu_specific.py
new file mode 100644
index 00000000..d8da94a0
--- /dev/null
+++ b/modules/xpu_specific.py
@@ -0,0 +1,59 @@
+from modules import shared
+from modules.sd_hijack_utils import CondFunc
+
+has_ipex = False
+try:
+ import torch
+ import intel_extension_for_pytorch as ipex # noqa: F401
+ has_ipex = True
+except Exception:
+ pass
+
+
+def check_for_xpu():
+ return has_ipex and hasattr(torch, 'xpu') and torch.xpu.is_available()
+
+
+def get_xpu_device_string():
+ if shared.cmd_opts.device_id is not None:
+ return f"xpu:{shared.cmd_opts.device_id}"
+ return "xpu"
+
+
+def torch_xpu_gc():
+ with torch.xpu.device(get_xpu_device_string()):
+ torch.xpu.empty_cache()
+
+
+has_xpu = check_for_xpu()
+
+if has_xpu:
+ # Workaround for https://github.com/intel/intel-extension-for-pytorch/issues/452: the torch.Generator API doesn't support the XPU device
+ CondFunc('torch.Generator',
+ lambda orig_func, device=None: torch.xpu.Generator(device),
+ lambda orig_func, device=None: device is not None and device.type == "xpu")
+
+ # Workaround for some ops that cannot handle mixed input dtypes
+ CondFunc('torch.nn.functional.layer_norm',
+ lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
+ orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs),
+ lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
+ weight is not None and input.dtype != weight.data.dtype)
+ CondFunc('torch.nn.modules.GroupNorm.forward',
+ lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+ lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
+ CondFunc('torch.nn.modules.linear.Linear.forward',
+ lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+ lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
+ CondFunc('torch.nn.modules.conv.Conv2d.forward',
+ lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+ lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
+ CondFunc('torch.bmm',
+ lambda orig_func, input, mat2, out=None: orig_func(input.to(mat2.dtype), mat2, out=out),
+ lambda orig_func, input, mat2, out=None: input.dtype != mat2.dtype)
+ CondFunc('torch.cat',
+ lambda orig_func, tensors, dim=0, out=None: orig_func([t.to(tensors[0].dtype) for t in tensors], dim=dim, out=out),
+ lambda orig_func, tensors, dim=0, out=None: not all(t.dtype == tensors[0].dtype for t in tensors))
+ CondFunc('torch.nn.functional.scaled_dot_product_attention',
+ lambda orig_func, query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False: orig_func(query, key.to(query.dtype), value.to(query.dtype), attn_mask, dropout_p, is_causal),
+ lambda orig_func, query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False: query.dtype != key.dtype or query.dtype != value.dtype)
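The dtype workarounds all follow the same shape: cast the input to the weight's (or first tensor's) dtype before calling the original op. A stripped-down illustration of that idea in plain PyTorch, independent of CondFunc (values are made up):

    import torch
    import torch.nn.functional as F

    x = torch.randn(2, 8, dtype=torch.float16)
    weight = torch.ones(8, dtype=torch.float32)
    bias = torch.zeros(8, dtype=torch.float32)

    # mixed dtypes can fail on XPU, so the patched call casts the input to the weight dtype first
    y = F.layer_norm(x.to(weight.dtype), (8,), weight, bias)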
diff --git a/pyproject.toml b/pyproject.toml
index 80541a8f..d03036e7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -16,6 +16,7 @@ exclude = [
ignore = [
"E501", # Line too long
+ "E721", # Do not compare types, use `isinstance`
"E731", # Do not assign a `lambda` expression, use a `def`
"I001", # Import block is un-sorted or un-formatted
diff --git a/requirements.txt b/requirements.txt
index b3f8a7f4..731a1be7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,12 +2,12 @@ GitPython
Pillow
accelerate
-basicsr
blendmodes
clean-fid
einops
-gfpgan
-gradio==3.32.0
+facexlib
+fastapi>=0.90.1
+gradio==3.41.2
inflection
jsonmerge
kornia
@@ -19,15 +19,13 @@ open-clip-torch
piexif
psutil
pytorch_lightning
-realesrgan
requests
resize-right
safetensors
scikit-image>=0.19
-timm
tomesd
torch
torchdiffeq
torchsde
-transformers==4.25.1
+transformers==4.30.2
diff --git a/requirements_versions.txt b/requirements_versions.txt
index d07ab456..2a922f28 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -1,31 +1,30 @@
-GitPython==3.1.30
+GitPython==3.1.32
Pillow==9.5.0
-accelerate==0.18.0
-basicsr==1.4.2
+accelerate==0.21.0
blendmodes==2022
clean-fid==0.1.35
einops==0.4.1
+facexlib==0.3.0
fastapi==0.94.0
-gfpgan==1.3.8
-gradio==3.32.0
+gradio==3.41.2
httpcore==0.15
inflection==0.5.1
jsonmerge==1.8.0
kornia==0.6.7
lark==1.1.2
-numpy==1.23.5
+numpy==1.26.2
omegaconf==2.2.3
open-clip-torch==2.20.0
piexif==1.1.3
psutil==5.9.5
pytorch_lightning==1.9.4
-realesrgan==0.3.0
resize-right==0.0.2
safetensors==0.3.1
-scikit-image==0.20.0
-timm==0.6.7
-tomesd==0.1.2
+scikit-image==0.21.0
+spandrel==0.1.6
+tomesd==0.1.3
torch
torchdiffeq==0.2.3
-torchsde==0.2.5
-transformers==4.25.1
+torchsde==0.2.6
+transformers==4.30.2
+httpx==0.24.1
diff --git a/script.js b/script.js
index 34cca765..be1bc317 100644
--- a/script.js
+++ b/script.js
@@ -121,22 +121,56 @@ document.addEventListener("DOMContentLoaded", function() {
});
/**
- * Add a ctrl+enter as a shortcut to start a generation
+ * Add keyboard shortcuts:
+ * Ctrl+Enter to start/restart a generation
+ * Alt/Option+Enter to skip a generation
+ * Esc to interrupt a generation
*/
document.addEventListener('keydown', function(e) {
- var handled = false;
- if (e.key !== undefined) {
- if ((e.key == "Enter" && (e.metaKey || e.ctrlKey || e.altKey))) handled = true;
- } else if (e.keyCode !== undefined) {
- if ((e.keyCode == 13 && (e.metaKey || e.ctrlKey || e.altKey))) handled = true;
- }
- if (handled) {
- var button = get_uiCurrentTabContent().querySelector('button[id$=_generate]');
- if (button) {
- button.click();
+ const isEnter = e.key === 'Enter' || e.keyCode === 13;
+ const isCtrlKey = e.metaKey || e.ctrlKey;
+ const isAltKey = e.altKey;
+ const isEsc = e.key === 'Escape';
+
+ const generateButton = get_uiCurrentTabContent().querySelector('button[id$=_generate]');
+ const interruptButton = get_uiCurrentTabContent().querySelector('button[id$=_interrupt]');
+ const skipButton = get_uiCurrentTabContent().querySelector('button[id$=_skip]');
+
+ if (isCtrlKey && isEnter) {
+ if (interruptButton.style.display === 'block') {
+ interruptButton.click();
+ const callback = (mutationList) => {
+ for (const mutation of mutationList) {
+ if (mutation.type === 'attributes' && mutation.attributeName === 'style') {
+ if (interruptButton.style.display === 'none') {
+ generateButton.click();
+ observer.disconnect();
+ }
+ }
+ }
+ };
+ const observer = new MutationObserver(callback);
+ observer.observe(interruptButton, {attributes: true});
+ } else {
+ generateButton.click();
}
e.preventDefault();
}
+
+ if (isAltKey && isEnter) {
+ skipButton.click();
+ e.preventDefault();
+ }
+
+ if (isEsc) {
+ const globalPopup = document.querySelector('.global-popup');
+ const lightboxModal = document.querySelector('#lightboxModal');
+ if (!globalPopup || globalPopup.style.display === 'none') {
+ if (document.activeElement === lightboxModal) return;
+ interruptButton.click();
+ e.preventDefault();
+ }
+ }
});
/**
diff --git a/scripts/postprocessing_caption.py b/scripts/postprocessing_caption.py
new file mode 100644
index 00000000..5592a898
--- /dev/null
+++ b/scripts/postprocessing_caption.py
@@ -0,0 +1,30 @@
+from modules import scripts_postprocessing, ui_components, deepbooru, shared
+import gradio as gr
+
+
+class ScriptPostprocessingCaption(scripts_postprocessing.ScriptPostprocessing):
+ name = "Caption"
+ order = 4040
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Caption") as enable:
+ option = gr.CheckboxGroup(value=["Deepbooru"], choices=["Deepbooru", "BLIP"], show_label=False)
+
+ return {
+ "enable": enable,
+ "option": option,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option):
+ if not enable:
+ return
+
+ captions = [pp.caption]
+
+ if "Deepbooru" in option:
+ captions.append(deepbooru.model.tag(pp.image))
+
+ if "BLIP" in option:
+ captions.append(shared.interrogator.interrogate(pp.image.convert("RGB")))
+
+ pp.caption = ", ".join([x for x in captions if x])
diff --git a/scripts/postprocessing_codeformer.py b/scripts/postprocessing_codeformer.py
index a7d80d40..e1e156dd 100644
--- a/scripts/postprocessing_codeformer.py
+++ b/scripts/postprocessing_codeformer.py
@@ -1,28 +1,28 @@
from PIL import Image
import numpy as np
-from modules import scripts_postprocessing, codeformer_model
+from modules import scripts_postprocessing, codeformer_model, ui_components
import gradio as gr
-from modules.ui_components import FormRow
-
class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing):
name = "CodeFormer"
order = 3000
def ui(self):
- with FormRow():
- codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, elem_id="extras_codeformer_visibility")
- codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight")
+ with ui_components.InputAccordion(False, label="CodeFormer") as enable:
+ with gr.Row():
+ codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_codeformer_visibility")
+ codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight")
return {
+ "enable": enable,
"codeformer_visibility": codeformer_visibility,
"codeformer_weight": codeformer_weight,
}
- def process(self, pp: scripts_postprocessing.PostprocessedImage, codeformer_visibility, codeformer_weight):
- if codeformer_visibility == 0:
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, codeformer_visibility, codeformer_weight):
+ if codeformer_visibility == 0 or not enable:
return
restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight)
diff --git a/scripts/postprocessing_create_flipped_copies.py b/scripts/postprocessing_create_flipped_copies.py
new file mode 100644
index 00000000..b673003b
--- /dev/null
+++ b/scripts/postprocessing_create_flipped_copies.py
@@ -0,0 +1,32 @@
+from PIL import ImageOps, Image
+
+from modules import scripts_postprocessing, ui_components
+import gradio as gr
+
+
+class ScriptPostprocessingCreateFlippedCopies(scripts_postprocessing.ScriptPostprocessing):
+ name = "Create flipped copies"
+ order = 4030
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Create flipped copies") as enable:
+ with gr.Row():
+ option = gr.CheckboxGroup(value=["Horizontal"], choices=["Horizontal", "Vertical", "Both"], show_label=False)
+
+ return {
+ "enable": enable,
+ "option": option,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option):
+ if not enable:
+ return
+
+ if "Horizontal" in option:
+ pp.extra_images.append(ImageOps.mirror(pp.image))
+
+ if "Vertical" in option:
+ pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM))
+
+ if "Both" in option:
+ pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM).transpose(Image.Transpose.FLIP_LEFT_RIGHT))
diff --git a/scripts/postprocessing_focal_crop.py b/scripts/postprocessing_focal_crop.py
new file mode 100644
index 00000000..cff1dbc5
--- /dev/null
+++ b/scripts/postprocessing_focal_crop.py
@@ -0,0 +1,54 @@
+
+from modules import scripts_postprocessing, ui_components, errors
+import gradio as gr
+
+from modules.textual_inversion import autocrop
+
+
+class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing):
+ name = "Auto focal point crop"
+ order = 4010
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Auto focal point crop") as enable:
+ face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_face_weight")
+ entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_entropy_weight")
+ edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_edges_weight")
+ debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
+
+ return {
+ "enable": enable,
+ "face_weight": face_weight,
+ "entropy_weight": entropy_weight,
+ "edges_weight": edges_weight,
+ "debug": debug,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, face_weight, entropy_weight, edges_weight, debug):
+ if not enable:
+ return
+
+ if not pp.shared.target_width or not pp.shared.target_height:
+ return
+
+ dnn_model_path = None
+ try:
+ dnn_model_path = autocrop.download_and_cache_models()
+ except Exception:
+ errors.report("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", exc_info=True)
+
+ autocrop_settings = autocrop.Settings(
+ crop_width=pp.shared.target_width,
+ crop_height=pp.shared.target_height,
+ face_points_weight=face_weight,
+ entropy_points_weight=entropy_weight,
+ corner_points_weight=edges_weight,
+ annotate_image=debug,
+ dnn_model_path=dnn_model_path,
+ )
+
+ result, *others = autocrop.crop_image(pp.image, autocrop_settings)
+
+ pp.image = result
+ pp.extra_images = [pp.create_copy(x, nametags=["focal-crop-debug"], disable_processing=True) for x in others]
+
diff --git a/scripts/postprocessing_gfpgan.py b/scripts/postprocessing_gfpgan.py
index d854f3f7..6e756605 100644
--- a/scripts/postprocessing_gfpgan.py
+++ b/scripts/postprocessing_gfpgan.py
@@ -1,26 +1,25 @@
from PIL import Image
import numpy as np
-from modules import scripts_postprocessing, gfpgan_model
+from modules import scripts_postprocessing, gfpgan_model, ui_components
import gradio as gr
-from modules.ui_components import FormRow
-
class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
name = "GFPGAN"
order = 2000
def ui(self):
- with FormRow():
- gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, elem_id="extras_gfpgan_visibility")
+ with ui_components.InputAccordion(False, label="GFPGAN") as enable:
+ gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_gfpgan_visibility")
return {
+ "enable": enable,
"gfpgan_visibility": gfpgan_visibility,
}
- def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility):
- if gfpgan_visibility == 0:
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, gfpgan_visibility):
+ if gfpgan_visibility == 0 or not enable:
return
restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8))
diff --git a/scripts/postprocessing_split_oversized.py b/scripts/postprocessing_split_oversized.py
new file mode 100644
index 00000000..c4a03160
--- /dev/null
+++ b/scripts/postprocessing_split_oversized.py
@@ -0,0 +1,71 @@
+import math
+
+from modules import scripts_postprocessing, ui_components
+import gradio as gr
+
+
+def split_pic(image, inverse_xy, width, height, overlap_ratio):
+ if inverse_xy:
+ from_w, from_h = image.height, image.width
+ to_w, to_h = height, width
+ else:
+ from_w, from_h = image.width, image.height
+ to_w, to_h = width, height
+ h = from_h * to_w // from_w
+ if inverse_xy:
+ image = image.resize((h, to_w))
+ else:
+ image = image.resize((to_w, h))
+
+ split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
+ y_step = (h - to_h) / (split_count - 1)
+ for i in range(split_count):
+ y = int(y_step * i)
+ if inverse_xy:
+ splitted = image.crop((y, 0, y + to_h, to_w))
+ else:
+ splitted = image.crop((0, y, to_w, y + to_h))
+ yield splitted
+
+
+class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostprocessing):
+ name = "Split oversized images"
+ order = 4000
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Split oversized images") as enable:
+ with gr.Row():
+ split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_split_threshold")
+ overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="postprocess_overlap_ratio")
+
+ return {
+ "enable": enable,
+ "split_threshold": split_threshold,
+ "overlap_ratio": overlap_ratio,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, split_threshold, overlap_ratio):
+ if not enable:
+ return
+
+ width = pp.shared.target_width
+ height = pp.shared.target_height
+
+ if not width or not height:
+ return
+
+ if pp.image.height > pp.image.width:
+ ratio = (pp.image.width * height) / (pp.image.height * width)
+ inverse_xy = False
+ else:
+ ratio = (pp.image.height * width) / (pp.image.width * height)
+ inverse_xy = True
+
+ if ratio >= 1.0 and ratio > split_threshold:
+ return
+
+ result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio)
+
+ pp.image = result
+ pp.extra_images = [pp.create_copy(x) for x in others]
+
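split_pic resizes the image so the non-split dimension matches the target, then cuts overlapping crops along the longer one; a small worked example of the arithmetic with made-up numbers:

    import math

    to_h, overlap_ratio, h = 512, 0.2, 1200  # crop height to_h, resized height h
    split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))  # 3
    y_step = (h - to_h) / (split_count - 1)  # 344.0
    starts = [int(y_step * i) for i in range(split_count)]  # [0, 344, 688]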
diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py
index edb70ac0..ed709688 100644
--- a/scripts/postprocessing_upscale.py
+++ b/scripts/postprocessing_upscale.py
@@ -29,7 +29,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
upscaling_resize_w = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="extras_upscaling_resize_w")
upscaling_resize_h = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="extras_upscaling_resize_h")
with gr.Column(elem_id="upscaling_dimensions_row", scale=1, elem_classes="dimensions-tools"):
- upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn")
+ upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn", tooltip="Switch width/height")
upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
with FormRow():
@@ -81,6 +81,14 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
return image
+ def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0):
+ if upscale_mode == 1:
+ pp.shared.target_width = upscale_to_width
+ pp.shared.target_height = upscale_to_height
+ else:
+ pp.shared.target_width = int(pp.image.width * upscale_by)
+ pp.shared.target_height = int(pp.image.height * upscale_by)
+
def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0):
if upscaler_1_name == "None":
upscaler_1_name = None
@@ -126,6 +134,10 @@ class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
"upscaler_name": upscaler_name,
}
+ def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
+ pp.shared.target_width = int(pp.image.width * upscale_by)
+ pp.shared.target_height = int(pp.image.height * upscale_by)
+
def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
if upscaler_name is None or upscaler_name == "None":
return
diff --git a/scripts/processing_autosized_crop.py b/scripts/processing_autosized_crop.py
new file mode 100644
index 00000000..7e674989
--- /dev/null
+++ b/scripts/processing_autosized_crop.py
@@ -0,0 +1,64 @@
+from PIL import Image
+
+from modules import scripts_postprocessing, ui_components
+import gradio as gr
+
+
+def center_crop(image: Image, w: int, h: int):
+ iw, ih = image.size
+ if ih / h < iw / w:
+ sw = w * ih / h
+ box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
+ else:
+ sh = h * iw / w
+ box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
+ return image.resize((w, h), Image.Resampling.LANCZOS, box)
+
+
+def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
+ iw, ih = image.size
+ err = lambda w, h: 1 - (lambda x: x if x < 1 else 1 / x)(iw / ih / (w / h))
+ wh = max(((w, h) for w in range(mindim, maxdim + 1, 64) for h in range(mindim, maxdim + 1, 64)
+ if minarea <= w * h <= maxarea and err(w, h) <= threshold),
+ key=lambda wh: (wh[0] * wh[1], -err(*wh))[::1 if objective == 'Maximize area' else -1],
+ default=None
+ )
+ return wh and center_crop(image, *wh)
+
+
+class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPostprocessing):
+ name = "Auto-sized crop"
+ order = 4020
+
+ def ui(self):
+ with ui_components.InputAccordion(False, label="Auto-sized crop") as enable:
+ gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
+ with gr.Row():
+ mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="postprocess_multicrop_mindim")
+ maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="postprocess_multicrop_maxdim")
+ with gr.Row():
+ minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id="postprocess_multicrop_minarea")
+ maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id="postprocess_multicrop_maxarea")
+ with gr.Row():
+ objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="postprocess_multicrop_objective")
+ threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="postprocess_multicrop_threshold")
+
+ return {
+ "enable": enable,
+ "mindim": mindim,
+ "maxdim": maxdim,
+ "minarea": minarea,
+ "maxarea": maxarea,
+ "objective": objective,
+ "threshold": threshold,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, mindim, maxdim, minarea, maxarea, objective, threshold):
+ if not enable:
+ return
+
+ cropped = multicrop_pic(pp.image, mindim, maxdim, minarea, maxarea, objective, threshold)
+ if cropped is not None:
+ pp.image = cropped
+ else:
+ print(f"skipped {pp.image.width}x{pp.image.height} image (can't find suitable size within error threshold)")
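multicrop_pic scans every (w, h) pair on a 64-pixel grid within the dimension and area bounds, keeps candidates whose aspect-ratio error stays under the threshold, and then picks either the largest area or the smallest error. A sketch of the error term for one candidate, with made-up numbers:

    iw, ih = 1000, 800   # source image
    w, h = 768, 640      # candidate crop size
    r = (iw / ih) / (w / h)              # ratio of aspect ratios, ~1.042
    err = 1 - (r if r < 1 else 1 / r)    # ~0.04, accepted under the default 0.1 threshold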
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 50320d55..a4a2f24d 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -5,11 +5,17 @@ import shlex
import modules.scripts as scripts
import gradio as gr
-from modules import sd_samplers, errors
+from modules import sd_samplers, errors, sd_models
from modules.processing import Processed, process_images
from modules.shared import state
+def process_model_tag(tag):
+ info = sd_models.get_closet_checkpoint_match(tag)
+ assert info is not None, f'Unknown checkpoint: {tag}'
+ return info.name
+
+
def process_string_tag(tag):
return tag
@@ -27,7 +33,7 @@ def process_boolean_tag(tag):
prompt_tags = {
- "sd_model": None,
+ "sd_model": process_model_tag,
"outpath_samples": process_string_tag,
"outpath_grids": process_string_tag,
"prompt_for_display": process_string_tag,
@@ -108,6 +114,7 @@ class Script(scripts.Script):
def ui(self, is_img2img):
checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate"))
checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch"))
+ prompt_position = gr.Radio(["start", "end"], label="Insert prompts at the", elem_id=self.elem_id("prompt_position"), value="start")
prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=self.elem_id("prompt_txt"))
file = gr.File(label="Upload prompt inputs", type='binary', elem_id=self.elem_id("file"))
@@ -118,9 +125,9 @@ class Script(scripts.Script):
# We don't shrink back to 1, because that causes the control to ignore [enter], and it may
# be unclear to the user that shift-enter is needed.
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt], show_progress=False)
- return [checkbox_iterate, checkbox_iterate_batch, prompt_txt]
+ return [checkbox_iterate, checkbox_iterate_batch, prompt_position, prompt_txt]
- def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str):
+ def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_position, prompt_txt: str):
lines = [x for x in (x.strip() for x in prompt_txt.splitlines()) if x]
p.do_not_save_grid = True
@@ -156,7 +163,22 @@ class Script(scripts.Script):
copy_p = copy.copy(p)
for k, v in args.items():
- setattr(copy_p, k, v)
+ if k == "sd_model":
+ copy_p.override_settings['sd_model_checkpoint'] = v
+ else:
+ setattr(copy_p, k, v)
+
+ if args.get("prompt") and p.prompt:
+ if prompt_position == "start":
+ copy_p.prompt = args.get("prompt") + " " + p.prompt
+ else:
+ copy_p.prompt = p.prompt + " " + args.get("prompt")
+
+ if args.get("negative_prompt") and p.negative_prompt:
+ if prompt_position == "start":
+ copy_p.negative_prompt = args.get("negative_prompt") + " " + p.negative_prompt
+ else:
+ copy_p.negative_prompt = p.negative_prompt + " " + args.get("negative_prompt")
proc = process_images(copy_p)
images += proc.images
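With the new sd_model tag, a single input line can switch checkpoints and contribute its own prompt; an illustrative line (the checkpoint name is made up, and the --key value spelling follows the script's tag syntax):

    --prompt "a castle on a hill" --negative_prompt "blurry" --sd_model v1-5-pruned-emaonly

The tag value is resolved via sd_models.get_closet_checkpoint_match and applied through override_settings['sd_model_checkpoint'], while the per-line prompt is placed before or after the main UI prompt according to the "Insert prompts at the" radio.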
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 4eb1b197..34267c2c 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -3,6 +3,7 @@ from copy import copy
from itertools import permutations, chain
import random
import csv
+import os.path
from io import StringIO
from PIL import Image
import numpy as np
@@ -10,7 +11,7 @@ import numpy as np
import modules.scripts as scripts
import gradio as gr
-from modules import images, sd_samplers, processing, sd_models, sd_vae, sd_samplers_kdiffusion
+from modules import images, sd_samplers, processing, sd_models, sd_vae, sd_samplers_kdiffusion, errors
from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
from modules.shared import opts, state
import modules.shared as shared
@@ -66,14 +67,6 @@ def apply_order(p, x, xs):
p.prompt = prompt_tmp + p.prompt
-def apply_sampler(p, x, xs):
- sampler_name = sd_samplers.samplers_map.get(x.lower(), None)
- if sampler_name is None:
- raise RuntimeError(f"Unknown sampler: {x}")
-
- p.sampler_name = sampler_name
-
-
def confirm_samplers(p, xs):
for x in xs:
if x.lower() not in sd_samplers.samplers_map:
@@ -93,6 +86,15 @@ def confirm_checkpoints(p, xs):
raise RuntimeError(f"Unknown checkpoint: {x}")
+def confirm_checkpoints_or_none(p, xs):
+ for x in xs:
+ if x in (None, "", "None", "none"):
+ continue
+
+ if modules.sd_models.get_closet_checkpoint_match(x) is None:
+ raise RuntimeError(f"Unknown checkpoint: {x}")
+
+
def apply_clip_skip(p, x, xs):
opts.data["CLIP_stop_at_last_layers"] = x
@@ -183,19 +185,34 @@ def format_nothing(p, opt, x):
return ""
+def format_remove_path(p, opt, x):
+ return os.path.basename(x)
+
+
def str_permutations(x):
"""dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
return x
+def list_to_csv_string(data_list):
+ with StringIO() as o:
+ csv.writer(o).writerow(data_list)
+ return o.getvalue().strip()
+
+
+def csv_string_to_list_strip(data_str):
+ return list(map(str.strip, chain.from_iterable(csv.reader(StringIO(data_str)))))
+
+
class AxisOption:
- def __init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None):
+ def __init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None, prepare=None):
self.label = label
self.type = type
self.apply = apply
self.format_value = format_value
self.confirm = confirm
self.cost = cost
+ self.prepare = prepare
self.choices = choices
@@ -204,6 +221,7 @@ class AxisOptionImg2Img(AxisOption):
super().__init__(*args, **kwargs)
self.is_img2img = True
+
class AxisOptionTxt2Img(AxisOption):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -221,9 +239,10 @@ axis_options = [
AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")),
AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value),
AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list),
- AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]),
- AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]),
- AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)),
+ AxisOptionTxt2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers if x.name not in opts.hide_samplers]),
+ AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img if x.name not in opts.hide_samplers]),
+ AxisOptionImg2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img if x.name not in opts.hide_samplers]),
+ AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_remove_path, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)),
AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")),
AxisOption("Sigma Churn", float, apply_field("s_churn")),
AxisOption("Sigma min", float, apply_field("s_tmin")),
@@ -236,6 +255,8 @@ axis_options = [
AxisOption("Eta", float, apply_field("eta")),
AxisOption("Clip skip", int, apply_clip_skip),
AxisOption("Denoising", float, apply_field("denoising_strength")),
+ AxisOption("Initial noise multiplier", float, apply_field("initial_noise_multiplier")),
+ AxisOption("Extra noise", float, apply_override("img2img_extra_noise")),
AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ['None'] + list(sd_vae.vae_dict)),
@@ -245,6 +266,11 @@ axis_options = [
AxisOption("Token merging ratio", float, apply_override('token_merging_ratio')),
AxisOption("Token merging ratio high-res", float, apply_override('token_merging_ratio_hr')),
AxisOption("Always discard next-to-last sigma", str, apply_override('always_discard_next_to_last_sigma', boolean=True), choices=boolean_choice(reverse=True)),
+ AxisOption("SGM noise multiplier", str, apply_override('sgm_noise_multiplier', boolean=True), choices=boolean_choice(reverse=True)),
+ AxisOption("Refiner checkpoint", str, apply_field('refiner_checkpoint'), format_value=format_remove_path, confirm=confirm_checkpoints_or_none, cost=1.0, choices=lambda: ['None'] + sorted(sd_models.checkpoints_list, key=str.casefold)),
+ AxisOption("Refiner switch at", float, apply_field('refiner_switch_at')),
+ AxisOption("RNG source", str, apply_override("randn_source"), choices=lambda: ["GPU", "CPU", "NV"]),
+ AxisOption("FP8 mode", str, apply_override("fp8_storage"), cost=0.9, choices=lambda: ["Disable", "Enable for SDXL", "Enable"]),
]
@@ -290,11 +316,10 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
cell_size = (processed_result.width, processed_result.height)
if processed_result.images[0] is not None:
cell_mode = processed_result.images[0].mode
- #This corrects size in case of batches:
+ # This corrects size in case of batches:
cell_size = processed_result.images[0].size
processed_result.images[idx] = Image.new(cell_mode, cell_size)
-
if first_axes_processed == 'x':
for ix, x in enumerate(xs):
if second_axes_processed == 'y':
@@ -352,9 +377,9 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
if draw_legend:
z_grid = images.draw_grid_annotations(z_grid, sub_grid_size[0], sub_grid_size[1], title_texts, [[images.GridAnnotation()]])
processed_result.images.insert(0, z_grid)
- #TODO: Deeper aspects of the program rely on grid info being misaligned between metadata arrays, which is not ideal.
- #processed_result.all_prompts.insert(0, processed_result.all_prompts[0])
- #processed_result.all_seeds.insert(0, processed_result.all_seeds[0])
+ # TODO: Deeper aspects of the program rely on grid info being misaligned between metadata arrays, which is not ideal.
+ # processed_result.all_prompts.insert(0, processed_result.all_prompts[0])
+ # processed_result.all_seeds.insert(0, processed_result.all_seeds[0])
processed_result.infotexts.insert(0, processed_result.infotexts[0])
return processed_result
@@ -378,8 +403,8 @@ class SharedSettingsStackHelper(object):
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
-re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
-re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
+re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*])?\s*")
+re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*])?\s*")
class Script(scripts.Script):
@@ -394,19 +419,19 @@ class Script(scripts.Script):
with gr.Row():
x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type"))
x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values"))
- x_values_dropdown = gr.Dropdown(label="X values",visible=False,multiselect=True,interactive=True)
+ x_values_dropdown = gr.Dropdown(label="X values", visible=False, multiselect=True, interactive=True)
fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False)
with gr.Row():
y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type"))
y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values"))
- y_values_dropdown = gr.Dropdown(label="Y values",visible=False,multiselect=True,interactive=True)
+ y_values_dropdown = gr.Dropdown(label="Y values", visible=False, multiselect=True, interactive=True)
fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False)
with gr.Row():
z_type = gr.Dropdown(label="Z type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type"))
z_values = gr.Textbox(label="Z values", lines=1, elem_id=self.elem_id("z_values"))
- z_values_dropdown = gr.Dropdown(label="Z values",visible=False,multiselect=True,interactive=True)
+ z_values_dropdown = gr.Dropdown(label="Z values", visible=False, multiselect=True, interactive=True)
fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False)
with gr.Row(variant="compact", elem_id="axis_options"):
@@ -422,6 +447,8 @@ class Script(scripts.Script):
vary_seeds_z = gr.Checkbox(label='Vary seed on Z axis', value=False, elem_id=self.elem_id("vary_seeds_z"))
with gr.Column():
margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size"))
+ with gr.Column():
+ csv_mode = gr.Checkbox(label='Use text inputs instead of dropdowns', value=False, elem_id=self.elem_id("csv_mode"))
with gr.Row(variant="compact", elem_id="swap_axes"):
swap_xy_axes_button = gr.Button(value="Swap X/Y axes", elem_id="xy_grid_swap_axes_button")
@@ -438,50 +465,75 @@ class Script(scripts.Script):
xz_swap_args = [x_type, x_values, x_values_dropdown, z_type, z_values, z_values_dropdown]
swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args)
- def fill(x_type):
- axis = self.current_axis_options[x_type]
- return axis.choices() if axis.choices else gr.update()
+ def fill(axis_type, csv_mode):
+ axis = self.current_axis_options[axis_type]
+ if axis.choices:
+ if csv_mode:
+ return list_to_csv_string(axis.choices()), gr.update()
+ else:
+ return gr.update(), axis.choices()
+ else:
+ return gr.update(), gr.update()
+
+ fill_x_button.click(fn=fill, inputs=[x_type, csv_mode], outputs=[x_values, x_values_dropdown])
+ fill_y_button.click(fn=fill, inputs=[y_type, csv_mode], outputs=[y_values, y_values_dropdown])
+ fill_z_button.click(fn=fill, inputs=[z_type, csv_mode], outputs=[z_values, z_values_dropdown])
- fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values_dropdown])
- fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values_dropdown])
- fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values_dropdown])
+ def select_axis(axis_type, axis_values, axis_values_dropdown, csv_mode):
+        axis_type = axis_type or 0  # if axis type is None, set to 0
- def select_axis(axis_type,axis_values_dropdown):
choices = self.current_axis_options[axis_type].choices
has_choices = choices is not None
- current_values = axis_values_dropdown
+
if has_choices:
choices = choices()
- if isinstance(current_values,str):
- current_values = current_values.split(",")
- current_values = list(filter(lambda x: x in choices, current_values))
- return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices if has_choices else None,visible=has_choices,value=current_values)
-
- x_type.change(fn=select_axis, inputs=[x_type,x_values_dropdown], outputs=[fill_x_button,x_values,x_values_dropdown])
- y_type.change(fn=select_axis, inputs=[y_type,y_values_dropdown], outputs=[fill_y_button,y_values,y_values_dropdown])
- z_type.change(fn=select_axis, inputs=[z_type,z_values_dropdown], outputs=[fill_z_button,z_values,z_values_dropdown])
-
- def get_dropdown_update_from_params(axis,params):
+ if csv_mode:
+ if axis_values_dropdown:
+ axis_values = list_to_csv_string(list(filter(lambda x: x in choices, axis_values_dropdown)))
+ axis_values_dropdown = []
+ else:
+ if axis_values:
+ axis_values_dropdown = list(filter(lambda x: x in choices, csv_string_to_list_strip(axis_values)))
+ axis_values = ""
+
+ return (gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices or csv_mode, value=axis_values),
+ gr.update(choices=choices if has_choices else None, visible=has_choices and not csv_mode, value=axis_values_dropdown))
+
+ x_type.change(fn=select_axis, inputs=[x_type, x_values, x_values_dropdown, csv_mode], outputs=[fill_x_button, x_values, x_values_dropdown])
+ y_type.change(fn=select_axis, inputs=[y_type, y_values, y_values_dropdown, csv_mode], outputs=[fill_y_button, y_values, y_values_dropdown])
+ z_type.change(fn=select_axis, inputs=[z_type, z_values, z_values_dropdown, csv_mode], outputs=[fill_z_button, z_values, z_values_dropdown])
+
+ def change_choice_mode(csv_mode, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown):
+ _fill_x_button, _x_values, _x_values_dropdown = select_axis(x_type, x_values, x_values_dropdown, csv_mode)
+ _fill_y_button, _y_values, _y_values_dropdown = select_axis(y_type, y_values, y_values_dropdown, csv_mode)
+ _fill_z_button, _z_values, _z_values_dropdown = select_axis(z_type, z_values, z_values_dropdown, csv_mode)
+ return _fill_x_button, _x_values, _x_values_dropdown, _fill_y_button, _y_values, _y_values_dropdown, _fill_z_button, _z_values, _z_values_dropdown
+
+ csv_mode.change(fn=change_choice_mode, inputs=[csv_mode, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown, fill_y_button, y_values, y_values_dropdown, fill_z_button, z_values, z_values_dropdown])
+
+ def get_dropdown_update_from_params(axis, params):
val_key = f"{axis} Values"
- vals = params.get(val_key,"")
- valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x]
- return gr.update(value = valslist)
+ vals = params.get(val_key, "")
+ valslist = csv_string_to_list_strip(vals)
+ return gr.update(value=valslist)
self.infotext_fields = (
(x_type, "X Type"),
(x_values, "X Values"),
- (x_values_dropdown, lambda params:get_dropdown_update_from_params("X",params)),
+ (x_values_dropdown, lambda params: get_dropdown_update_from_params("X", params)),
(y_type, "Y Type"),
(y_values, "Y Values"),
- (y_values_dropdown, lambda params:get_dropdown_update_from_params("Y",params)),
+ (y_values_dropdown, lambda params: get_dropdown_update_from_params("Y", params)),
(z_type, "Z Type"),
(z_values, "Z Values"),
- (z_values_dropdown, lambda params:get_dropdown_update_from_params("Z",params)),
+ (z_values_dropdown, lambda params: get_dropdown_update_from_params("Z", params)),
)
- return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size]
+ return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size, csv_mode]
+
+    def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size, csv_mode):
+        x_type, y_type, z_type = x_type or 0, y_type or 0, z_type or 0  # if axis type is None, set to 0
- def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size):
if not no_fixed_seeds:
modules.processing.fix_seed(p)
@@ -492,10 +544,12 @@ class Script(scripts.Script):
if opt.label == 'Nothing':
return [0]
- if opt.choices is not None:
+ if opt.choices is not None and not csv_mode:
valslist = vals_dropdown
+ elif opt.prepare is not None:
+ valslist = opt.prepare(vals)
else:
- valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x]
+ valslist = csv_string_to_list_strip(vals)
if opt.type == int:
valslist_ext = []
@@ -511,8 +565,8 @@ class Script(scripts.Script):
valslist_ext += list(range(start, end, step))
elif mc is not None:
start = int(mc.group(1))
- end = int(mc.group(2))
- num = int(mc.group(3)) if mc.group(3) is not None else 1
+ end = int(mc.group(2))
+ num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
else:
@@ -533,8 +587,8 @@ class Script(scripts.Script):
valslist_ext += np.arange(start, end + step, step).tolist()
elif mc is not None:
start = float(mc.group(1))
- end = float(mc.group(2))
- num = int(mc.group(3)) if mc.group(3) is not None else 1
+ end = float(mc.group(2))
+ num = int(mc.group(3)) if mc.group(3) is not None else 1
valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
else:
@@ -553,22 +607,22 @@ class Script(scripts.Script):
return valslist
x_opt = self.current_axis_options[x_type]
- if x_opt.choices is not None:
- x_values = ",".join(x_values_dropdown)
+ if x_opt.choices is not None and not csv_mode:
+ x_values = list_to_csv_string(x_values_dropdown)
xs = process_axis(x_opt, x_values, x_values_dropdown)
y_opt = self.current_axis_options[y_type]
- if y_opt.choices is not None:
- y_values = ",".join(y_values_dropdown)
+ if y_opt.choices is not None and not csv_mode:
+ y_values = list_to_csv_string(y_values_dropdown)
ys = process_axis(y_opt, y_values, y_values_dropdown)
z_opt = self.current_axis_options[z_type]
- if z_opt.choices is not None:
- z_values = ",".join(z_values_dropdown)
+ if z_opt.choices is not None and not csv_mode:
+ z_values = list_to_csv_string(z_values_dropdown)
zs = process_axis(z_opt, z_values, z_values_dropdown)
# this could be moved to common code, but unlikely to be ever triggered anywhere else
- Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes
+ Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes
grid_mp = round(len(xs) * len(ys) * len(zs) * p.width * p.height / 1000000)
assert grid_mp < opts.img_max_size_mp, f'Error: Resulting grid would be too large ({grid_mp} MPixels) (max configured size is {opts.img_max_size_mp} MPixels)'
@@ -662,7 +716,12 @@ class Script(scripts.Script):
if vary_seeds_z:
pc.seed += iz * xdim * ydim
- res = process_images(pc)
+ try:
+ res = process_images(pc)
+ except Exception as e:
+ errors.display(e, "generating image for xyz plot")
+
+ res = Processed(p, [], p.seed, "")
# Sets subgrid infotexts
subgrid_index = 1 + iz
@@ -733,9 +792,11 @@ class Script(scripts.Script):
# Auto-save main and sub-grids:
grid_count = z_count + 1 if z_count > 1 else 1
for g in range(grid_count):
- #TODO: See previous comment about intentional data misalignment.
+ # TODO: See previous comment about intentional data misalignment.
adj_g = g-1 if g > 0 else g
images.save_image(processed.images[g], p.outpath_grids, "xyz_grid", info=processed.infotexts[g], extension=opts.grid_format, prompt=processed.all_prompts[adj_g], seed=processed.all_seeds[adj_g], grid=True, p=processed)
+            if not include_sub_grids:  # without sub-grids, only the main grid needs saving, so stop after the first
+ break
if not include_sub_grids:
# Done with sub-grids, drop all related information:
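The xyz_grid hunks above lean on two CSV helpers, list_to_csv_string and csv_string_to_list_strip, presumably defined earlier in xyz_grid.py and not visible in the hunks shown here. A minimal sketch of what they need to do, assuming standard-library csv handling (illustrative only, not necessarily the exact upstream code):

    import csv
    from io import StringIO
    from itertools import chain

    def list_to_csv_string(data_list):
        # serialize dropdown selections into one CSV line for the text input
        with StringIO() as o:
            csv.writer(o).writerow(data_list)
            return o.getvalue().strip()

    def csv_string_to_list_strip(data_str):
        # parse the text input back into a list, stripping whitespace around each value
        return list(map(str.strip, chain.from_iterable(csv.reader(StringIO(data_str)))))

With these two in place, toggling "Use text inputs instead of dropdowns" simply round-trips the same values between the textbox and the multiselect dropdown, which is what select_axis and change_choice_mode above rely on.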
diff --git a/style.css b/style.css
index 6c92d6e7..ee39a57b 100644
--- a/style.css
+++ b/style.css
@@ -2,12 +2,21 @@
@import url('https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap');
+
+/* temporary fix to hide gradio crop tool until it's fixed https://github.com/gradio-app/gradio/issues/3810 */
+
+div.gradio-image button[aria-label="Edit"] {
+ display: none;
+}
+
+
/* general gradio fixes */
:root, .dark{
--checkbox-label-gap: 0.25em 0.1em;
--section-header-text-size: 12pt;
--block-background-fill: transparent;
+
}
.block.padded:not(.gradio-accordion) {
@@ -42,12 +51,15 @@ div.form{
.block.gradio-radio,
.block.gradio-checkboxgroup,
.block.gradio-number,
-.block.gradio-colorpicker
-{
+.block.gradio-colorpicker {
border-width: 0 !important;
box-shadow: none !important;
}
+div.gradio-group, div.styler{
+ border-width: 0 !important;
+ background: none;
+}
.gap.compact{
padding: 0;
gap: 0.2em 0;
@@ -71,8 +83,10 @@ div.compact{
white-space: nowrap;
}
-.gradio-dropdown ul.options li.item {
- padding: 0.05em 0;
+@media (pointer:fine) {
+ .gradio-dropdown ul.options li.item {
+ padding: 0.05em 0;
+ }
}
.gradio-dropdown ul.options li.item.selected {
@@ -133,6 +147,16 @@ a{
cursor: pointer;
}
+/* gradio 3.39 puts a lot of overflow: hidden all over the place for an unknown reason. */
+div.gradio-container, .block.gradio-textbox, div.gradio-group, div.gradio-dropdown{
+ overflow: visible !important;
+}
+
+/* align-items isn't enough and elements may overflow in Safari. */
+.unequal-height {
+ align-content: flex-start;
+}
+
/* general styled components */
@@ -157,16 +181,6 @@ a{
color: var(--button-secondary-text-color-hover);
}
-.checkboxes-row{
- margin-bottom: 0.5em;
- margin-left: 0em;
-}
-.checkboxes-row > div{
- flex: 0;
- white-space: nowrap;
- min-width: auto;
-}
-
button.custom-button{
border-radius: var(--button-large-radius);
padding: var(--button-large-padding);
@@ -183,6 +197,18 @@ button.custom-button{
text-align: center;
}
+div.block.gradio-accordion {
+ border: 1px solid var(--block-border-color) !important;
+ border-radius: 8px !important;
+ margin: 2px 0;
+ padding: 8px 8px;
+}
+
+input[type="checkbox"].input-accordion-checkbox{
+ vertical-align: sub;
+ margin-right: 0.5em;
+}
+
/* txt2img/img2img specific */
@@ -223,10 +249,14 @@ button.custom-button{
}
[id$=_subseed_show] label{
- margin-bottom: 0.5em;
+ margin-bottom: 0.65em;
align-self: end;
}
+[id$=_seed_extras] > div{
+ gap: 0.5em;
+}
+
.html-log .comments{
padding-top: 0.5em;
}
@@ -266,14 +296,21 @@ button.custom-button{
min-height: 4.5em;
}
+#txt2img_generate, #img2img_generate {
+ min-height: 4.5em;
+}
+.generate-box-compact #txt2img_generate, .generate-box-compact #img2img_generate {
+ min-height: 3em;
+}
+
@media screen and (min-width: 2500px) {
#txt2img_gallery, #img2img_gallery {
min-height: 768px;
}
}
-#txt2img_gallery img, #img2img_gallery img, #extras_gallery img{
- object-fit: scale-down;
+.gradio-gallery .thumbnails img {
+ object-fit: scale-down !important;
}
#txt2img_actions_column, #img2img_actions_column {
gap: 0.5em;
@@ -313,12 +350,6 @@ button.custom-button{
border-radius: 0 0.5rem 0.5rem 0;
}
-#txtimg_hr_finalres{
- min-height: 0 !important;
- padding: .625rem .75rem;
- margin-left: -0.75em
-}
-
#img2img_scale_resolution_preview.block{
display: flex;
align-items: end;
@@ -342,7 +373,7 @@ button.custom-button{
}
div.dimensions-tools{
- min-width: 0 !important;
+ min-width: 1.6em !important;
max-width: fit-content;
flex-direction: column;
place-content: center;
@@ -359,8 +390,8 @@ div#extras_scale_to_tab div.form{
z-index: 5;
}
-.image-buttons button{
- min-width: auto;
+.image-buttons > .form{
+ justify-content: center;
}
.infotext {
@@ -379,20 +410,32 @@ div#extras_scale_to_tab div.form{
min-width: 0.5em;
}
+div.toprow-compact-stylerow{
+ margin: 0.5em 0;
+}
+
+div.toprow-compact-tools{
+ min-width: fit-content !important;
+ max-width: fit-content;
+}
+
/* settings */
#quicksettings {
- width: fit-content;
align-items: end;
}
#quicksettings > div, #quicksettings > fieldset{
- max-width: 24em;
- min-width: 24em;
+ max-width: 36em;
+ width: fit-content;
+ flex: 0 1 fit-content;
padding: 0;
border: none;
box-shadow: none;
background: none;
}
+#quicksettings > div.gradio-dropdown{
+ min-width: 24em !important;
+}
#settings{
display: block;
@@ -401,6 +444,7 @@ div#extras_scale_to_tab div.form{
#settings > div{
border: none;
margin-left: 10em;
+ padding: 0 var(--spacing-xl);
}
#settings > div.tab-nav{
@@ -415,6 +459,16 @@ div#extras_scale_to_tab div.form{
border: none;
text-align: left;
white-space: initial;
+ padding: 4px;
+}
+
+#settings > div.tab-nav .settings-category{
+ display: block;
+ margin: 1em 0 0.25em 0;
+ font-weight: bold;
+ text-decoration: underline;
+ cursor: default;
+ user-select: none;
}
#settings_result{
@@ -482,14 +536,31 @@ table.popup-table .link{
font-size: 18pt;
}
+#settings .settings-info{
+ max-width: 48em;
+ border: 1px dotted #777;
+ margin: 0;
+ padding: 1em;
+}
+
/* live preview */
.progressDiv{
- position: relative;
+ position: absolute;
height: 20px;
background: #b4c0cc;
border-radius: 3px !important;
- margin-bottom: -3px;
+ top: -14px;
+ left: 0px;
+ width: 100%;
+}
+
+.progress-container{
+ position: relative;
+}
+
+[id$=_results].mobile{
+ margin-top: 28px;
}
.dark .progressDiv{
@@ -514,19 +585,16 @@ table.popup-table .link{
.livePreview{
position: absolute;
z-index: 300;
- background-color: white;
- margin: -4px;
-}
-
-.dark .livePreview{
- background-color: rgb(17 24 39 / var(--tw-bg-opacity));
+ background: var(--background-fill-primary);
+ width: 100%;
+ height: 100%;
}
.livePreview img{
position: absolute;
object-fit: contain;
width: 100%;
- height: 100%;
+ height: calc(100% - 60px); /* to match gradio's height */
}
/* fullscreen popup (ie in Lora's (i) button) */
@@ -548,7 +616,6 @@ table.popup-table .link{
width: 100%;
height: 100%;
overflow: auto;
- background-color: rgba(20, 20, 20, 0.95);
}
.global-popup *{
@@ -557,9 +624,6 @@ table.popup-table .link{
.global-popup-close:before {
content: "×";
-}
-
-.global-popup-close{
position: fixed;
right: 0.25em;
top: 0;
@@ -568,10 +632,22 @@ table.popup-table .link{
font-size: 32pt;
}
+.global-popup-close{
+ position: fixed;
+ left: 0;
+ top: 0;
+ width: 100%;
+ height: 100%;
+ background-color: rgba(20, 20, 20, 0.95);
+}
+
.global-popup-inner{
display: inline-block;
margin: auto;
padding: 2em;
+ z-index: 1001;
+ max-height: 90%;
+ max-width: 90%;
}
/* fullpage image viewer */
@@ -593,15 +669,24 @@ table.popup-table .link{
.modalControls {
display: flex;
+ position: absolute;
+ right: 0px;
+ left: 0px;
gap: 1em;
padding: 1em;
- background-color: rgba(0,0,0,0.2);
+ background-color:rgba(0,0,0,0);
+ z-index: 1;
+ transition: 0.2s ease background-color;
+}
+.modalControls:hover {
+ background-color:rgba(0,0,0,0.9);
}
.modalClose {
margin-left: auto;
}
.modalControls span{
color: white;
+ text-shadow: 0px 0px 0.25em black;
font-size: 35px;
font-weight: bold;
cursor: pointer;
@@ -766,10 +851,27 @@ footer {
/* extra networks UI */
+.extra-page > div.gap{
+ gap: 0;
+}
+
+.extra-page-prompts{
+ margin-bottom: 0;
+}
+
+.extra-page-prompts.extra-page-prompts-active{
+ margin-bottom: 1em;
+}
+
.extra-network-cards{
- height: 725px;
- overflow: scroll;
+ height: calc(100vh - 24rem);
+ overflow: clip scroll;
resize: vertical;
+ min-height: 52rem;
+}
+
+.extra-networks > div.tab-nav{
+ min-height: 3.4rem;
}
.extra-networks > div > [id *= '_extra_']{
@@ -784,10 +886,12 @@ footer {
margin: 0 0.15em;
}
.extra-networks .tab-nav .search,
-.extra-networks .tab-nav .sort{
- display: inline-block;
+.extra-networks .tab-nav .sort,
+.extra-networks .tab-nav .show-dirs
+{
margin: 0.3em;
align-self: center;
+ width: auto;
}
.extra-networks .tab-nav .search {
@@ -823,6 +927,7 @@ footer {
position: absolute;
color: white;
right: 0;
+ z-index: 1
}
.extra-network-cards .card:hover .button-row{
display: flex;
@@ -960,6 +1065,8 @@ div.block.gradio-box.edit-user-metadata {
.edit-user-metadata .file-metadata th, .edit-user-metadata .file-metadata td{
padding: 0.3em 1em;
+ overflow-wrap: anywhere;
+ word-break: break-word;
}
.edit-user-metadata .wrap.translucent{
@@ -972,3 +1079,81 @@ div.block.gradio-box.edit-user-metadata {
.edit-user-metadata-buttons{
margin-top: 1.5em;
}
+
+
+
+
+div.block.gradio-box.popup-dialog, .popup-dialog {
+ width: 56em;
+ background: var(--body-background-fill);
+ padding: 2em !important;
+}
+
+div.block.gradio-box.popup-dialog > div:last-child, .popup-dialog > div:last-child{
+ margin-top: 1em;
+}
+
+div.block.input-accordion{
+
+}
+
+.input-accordion-extra{
+ flex: 0 0 auto !important;
+ margin: 0 0.5em 0 auto;
+}
+
+div.accordions > div.input-accordion{
+ min-width: fit-content !important;
+}
+
+div.accordions > div.gradio-accordion .label-wrap span{
+ white-space: nowrap;
+ margin-right: 0.25em;
+}
+
+div.accordions{
+ gap: 0.5em;
+}
+
+div.accordions > div.input-accordion.input-accordion-open{
+ flex: 1 auto;
+ flex-flow: column;
+}
+
+
+/* sticky right hand columns */
+
+#img2img_results, #txt2img_results, #extras_results {
+ position: sticky;
+ top: 0.5em;
+}
+
+body.resizing {
+ cursor: col-resize !important;
+}
+
+body.resizing * {
+ pointer-events: none !important;
+}
+
+body.resizing .resize-handle {
+ pointer-events: initial !important;
+}
+
+.resize-handle {
+ position: relative;
+ cursor: col-resize;
+ grid-column: 2 / 3;
+ min-width: 16px !important;
+ max-width: 16px !important;
+ height: 100%;
+}
+
+.resize-handle::after {
+ content: '';
+ position: absolute;
+ top: 0;
+ bottom: 0;
+ left: 7.5px;
+ border-left: 1px dashed var(--border-color-primary);
+}
diff --git a/test/conftest.py b/test/conftest.py
index 0723f62a..e4fc5678 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,17 +1,36 @@
+import base64
import os
import pytest
-from PIL import Image
-from gradio.processing_utils import encode_pil_to_base64
test_files_path = os.path.dirname(__file__) + "/test_files"
+test_outputs_path = os.path.dirname(__file__) + "/test_outputs"
+
+
+def pytest_configure(config):
+ # We don't want to fail on Py.test command line arguments being
+ # parsed by webui:
+ os.environ.setdefault("IGNORE_CMD_ARGS_ERRORS", "1")
+
+
+def file_to_base64(filename):
+ with open(filename, "rb") as file:
+ data = file.read()
+
+ base64_str = str(base64.b64encode(data), "utf-8")
+ return "data:image/png;base64," + base64_str
@pytest.fixture(scope="session") # session so we don't read this over and over
def img2img_basic_image_base64() -> str:
- return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "img2img_basic.png")))
+ return file_to_base64(os.path.join(test_files_path, "img2img_basic.png"))
@pytest.fixture(scope="session") # session so we don't read this over and over
def mask_basic_image_base64() -> str:
- return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "mask_basic.png")))
+ return file_to_base64(os.path.join(test_files_path, "mask_basic.png"))
+
+
+@pytest.fixture(scope="session")
+def initialize() -> None:
+ import webui # noqa: F401
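The conftest helpers above now hand tests plain data-URI strings instead of PIL images, which is what the HTTP API consumes directly. A usage sketch, assuming the project's existing /sdapi/v1/img2img endpoint, the default local port, and the requests library (the endpoint, port and payload fields are assumptions, not part of this diff):

    import requests

    def test_img2img_simple(img2img_basic_image_base64):
        payload = {
            "init_images": [img2img_basic_image_base64],  # data:image/png;base64,... from the fixture
            "prompt": "example prompt",
            "steps": 1,
        }
        response = requests.post("http://127.0.0.1:7860/sdapi/v1/img2img", json=payload)
        assert response.status_code == 200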
diff --git a/test/test_face_restorers.py b/test/test_face_restorers.py
new file mode 100644
index 00000000..7760d51b
--- /dev/null
+++ b/test/test_face_restorers.py
@@ -0,0 +1,29 @@
+import os
+from test.conftest import test_files_path, test_outputs_path
+
+import numpy as np
+import pytest
+from PIL import Image
+
+
+@pytest.mark.usefixtures("initialize")
+@pytest.mark.parametrize("restorer_name", ["gfpgan", "codeformer"])
+def test_face_restorers(restorer_name):
+ from modules import shared
+
+ if restorer_name == "gfpgan":
+ from modules import gfpgan_model
+ gfpgan_model.setup_model(shared.cmd_opts.gfpgan_models_path)
+ restorer = gfpgan_model.gfpgan_fix_faces
+ elif restorer_name == "codeformer":
+ from modules import codeformer_model
+ codeformer_model.setup_model(shared.cmd_opts.codeformer_models_path)
+ restorer = codeformer_model.codeformer.restore
+ else:
+ raise NotImplementedError("...")
+ img = Image.open(os.path.join(test_files_path, "two-faces.jpg"))
+ np_img = np.array(img, dtype=np.uint8)
+ fixed_image = restorer(np_img)
+ assert fixed_image.shape == np_img.shape
+ assert not np.allclose(fixed_image, np_img) # should have visibly changed
+ Image.fromarray(fixed_image).save(os.path.join(test_outputs_path, f"{restorer_name}.png"))
diff --git a/test/test_files/two-faces.jpg b/test/test_files/two-faces.jpg
new file mode 100644
index 00000000..c9d1b010
--- /dev/null
+++ b/test/test_files/two-faces.jpg
Binary files differ
diff --git a/test/test_outputs/.gitkeep b/test/test_outputs/.gitkeep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/test_outputs/.gitkeep
diff --git a/test/test_torch_utils.py b/test/test_torch_utils.py
new file mode 100644
index 00000000..23ccb93a
--- /dev/null
+++ b/test/test_torch_utils.py
@@ -0,0 +1,19 @@
+import types
+
+import pytest
+import torch
+
+from modules import torch_utils
+
+
+@pytest.mark.parametrize("wrapped", [True, False])
+def test_get_param(wrapped):
+ mod = torch.nn.Linear(1, 1)
+ cpu = torch.device("cpu")
+ mod.to(dtype=torch.float16, device=cpu)
+ if wrapped:
+ # more or less how spandrel wraps a thing
+ mod = types.SimpleNamespace(model=mod)
+ p = torch_utils.get_param(mod)
+ assert p.dtype == torch.float16
+ assert p.device == cpu
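This test pins down what modules.torch_utils.get_param must do: unwrap a spandrel-style wrapper that keeps the real module under .model, then return the first parameter so callers can read its dtype and device. A sketch consistent with the test (the actual implementation lives in modules/torch_utils.py, outside this excerpt):

    import torch

    def get_param(model) -> torch.nn.Parameter:
        # unwrap wrappers that expose the underlying torch module as .model
        if hasattr(model, "model") and hasattr(model.model, "parameters"):
            model = model.model
        # return the first parameter found; the module's dtype/device follow from it
        for param in model.parameters():
            return param
        raise ValueError("no parameters found in model")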
diff --git a/webui-macos-env.sh b/webui-macos-env.sh
index 6354e73b..db7e8b1a 100644
--- a/webui-macos-env.sh
+++ b/webui-macos-env.sh
@@ -11,9 +11,7 @@ fi
export install_dir="$HOME"
export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate"
-export TORCH_COMMAND="pip install torch==2.0.1 torchvision==0.15.2"
-export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git"
-export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71"
+export TORCH_COMMAND="pip install torch==2.1.0 torchvision==0.16.0"
export PYTORCH_ENABLE_MPS_FALLBACK=1
####################################################################
diff --git a/webui.bat b/webui.bat
index 42e7d517..e2c9079d 100644
--- a/webui.bat
+++ b/webui.bat
@@ -1,6 +1,11 @@
@echo off
+if exist webui.settings.bat (
+ call webui.settings.bat
+)
+
if not defined PYTHON (set PYTHON=python)
+if defined GIT (set "GIT_PYTHON_GIT_EXECUTABLE=%GIT%")
if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv")
set SD_WEBUI_RESTART=tmp/restart
diff --git a/webui.py b/webui.py
index 6bf06854..2c417168 100644
--- a/webui.py
+++ b/webui.py
@@ -1,413 +1,91 @@
from __future__ import annotations
import os
-import sys
import time
-import importlib
-import signal
-import re
-import warnings
-import json
-from threading import Thread
-from typing import Iterable
-
-from fastapi import FastAPI
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.middleware.gzip import GZipMiddleware
-from packaging import version
-
-import logging
-
-# We can't use cmd_opts for this because it will not have been initialized at this point.
-log_level = os.environ.get("SD_WEBUI_LOG_LEVEL")
-if log_level:
- log_level = getattr(logging, log_level.upper(), None) or logging.INFO
- logging.basicConfig(
- level=log_level,
- format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S',
- )
-
-logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
-logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
from modules import timer
+from modules import initialize_util
+from modules import initialize
+
startup_timer = timer.startup_timer
startup_timer.record("launcher")
-import torch
-import pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
-warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
-warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
-startup_timer.record("import torch")
-
-import gradio # noqa: F401
-startup_timer.record("import gradio")
-
-from modules import paths, timer, import_hook, errors, devices # noqa: F401
-startup_timer.record("setup paths")
-
-import ldm.modules.encoders.modules # noqa: F401
-startup_timer.record("import ldm")
-
-from modules import extra_networks
-from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401
-
-# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
-if ".dev" in torch.__version__ or "+git" in torch.__version__:
- torch.__long_version__ = torch.__version__
- torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
-
-from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
-import modules.codeformer_model as codeformer
-import modules.face_restoration
-import modules.gfpgan_model as gfpgan
-import modules.img2img
-
-import modules.lowvram
-import modules.scripts
-import modules.sd_hijack
-import modules.sd_hijack_optimizations
-import modules.sd_models
-import modules.sd_vae
-import modules.sd_unet
-import modules.txt2img
-import modules.script_callbacks
-import modules.textual_inversion.textual_inversion
-import modules.progress
-
-import modules.ui
-from modules import modelloader
-from modules.shared import cmd_opts
-import modules.hypernetworks.hypernetwork
-
-startup_timer.record("other imports")
-
-
-if cmd_opts.server_name:
- server_name = cmd_opts.server_name
-else:
- server_name = "0.0.0.0" if cmd_opts.listen else None
-
-
-def fix_asyncio_event_loop_policy():
- """
- The default `asyncio` event loop policy only automatically creates
- event loops in the main threads. Other threads must create event
- loops explicitly or `asyncio.get_event_loop` (and therefore
- `.IOLoop.current`) will fail. Installing this policy allows event
- loops to be created automatically on any thread, matching the
- behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
- """
-
- import asyncio
-
- if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
- # "Any thread" and "selector" should be orthogonal, but there's not a clean
- # interface for composing policies so pick the right base.
- _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore
- else:
- _BasePolicy = asyncio.DefaultEventLoopPolicy
-
- class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore
- """Event loop policy that allows loop creation on any thread.
- Usage::
-
- asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
- """
-
- def get_event_loop(self) -> asyncio.AbstractEventLoop:
- try:
- return super().get_event_loop()
- except (RuntimeError, AssertionError):
- # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
- # and changed to a RuntimeError in 3.4.3.
- # "There is no current event loop in thread %r"
- loop = self.new_event_loop()
- self.set_event_loop(loop)
- return loop
-
- asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
-
-
-def check_versions():
- if shared.cmd_opts.skip_version_check:
- return
-
- expected_torch_version = "2.0.0"
-
- if version.parse(torch.__version__) < version.parse(expected_torch_version):
- errors.print_error_explanation(f"""
-You are running torch {torch.__version__}.
-The program is tested to work with torch {expected_torch_version}.
-To reinstall the desired version, run with commandline flag --reinstall-torch.
-Beware that this will cause a lot of large files to be downloaded, as well as
-there are reports of issues with training tab on the latest version.
-
-Use --skip-version-check commandline argument to disable this check.
- """.strip())
-
- expected_xformers_version = "0.0.20"
- if shared.xformers_available:
- import xformers
-
- if version.parse(xformers.__version__) < version.parse(expected_xformers_version):
- errors.print_error_explanation(f"""
-You are running xformers {xformers.__version__}.
-The program is tested to work with xformers {expected_xformers_version}.
-To reinstall the desired version, run with commandline flag --reinstall-xformers.
-
-Use --skip-version-check commandline argument to disable this check.
- """.strip())
-
-
-def restore_config_state_file():
- config_state_file = shared.opts.restore_config_state_file
- if config_state_file == "":
- return
-
- shared.opts.restore_config_state_file = ""
- shared.opts.save(shared.config_filename)
-
- if os.path.isfile(config_state_file):
- print(f"*** About to restore extension state from file: {config_state_file}")
- with open(config_state_file, "r", encoding="utf-8") as f:
- config_state = json.load(f)
- config_states.restore_extension_config(config_state)
- startup_timer.record("restore extension config")
- elif config_state_file:
- print(f"!!! Config state backup not found: {config_state_file}")
-
-
-def validate_tls_options():
- if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):
- return
-
- try:
- if not os.path.exists(cmd_opts.tls_keyfile):
- print("Invalid path to TLS keyfile given")
- if not os.path.exists(cmd_opts.tls_certfile):
- print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
- except TypeError:
- cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
- print("TLS setup invalid, running webui without TLS")
- else:
- print("Running with TLS")
- startup_timer.record("TLS")
-
-
-def get_gradio_auth_creds() -> Iterable[tuple[str, ...]]:
- """
- Convert the gradio_auth and gradio_auth_path commandline arguments into
- an iterable of (username, password) tuples.
- """
- def process_credential_line(s) -> tuple[str, ...] | None:
- s = s.strip()
- if not s:
- return None
- return tuple(s.split(':', 1))
-
- if cmd_opts.gradio_auth:
- for cred in cmd_opts.gradio_auth.split(','):
- cred = process_credential_line(cred)
- if cred:
- yield cred
-
- if cmd_opts.gradio_auth_path:
- with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
- for line in file.readlines():
- for cred in line.strip().split(','):
- cred = process_credential_line(cred)
- if cred:
- yield cred
-
-
-def configure_sigint_handler():
- # make the program just exit at ctrl+c without waiting for anything
- def sigint_handler(sig, frame):
- print(f'Interrupted with signal {sig} in {frame}')
- os._exit(0)
-
- if not os.environ.get("COVERAGE_RUN"):
- # Don't install the immediate-quit handler when running under coverage,
- # as then the coverage report won't be generated.
- signal.signal(signal.SIGINT, sigint_handler)
-
-
-def configure_opts_onchange():
- shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()), call=False)
- shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
- shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
- shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
- shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
- shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: modules.sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
- startup_timer.record("opts onchange")
-
-
-def initialize():
- fix_asyncio_event_loop_policy()
- validate_tls_options()
- configure_sigint_handler()
- check_versions()
- modelloader.cleanup_models()
- configure_opts_onchange()
-
- modules.sd_models.setup_model()
- startup_timer.record("setup SD model")
-
- codeformer.setup_model(cmd_opts.codeformer_models_path)
- startup_timer.record("setup codeformer")
-
- gfpgan.setup_model(cmd_opts.gfpgan_models_path)
- startup_timer.record("setup gfpgan")
-
- initialize_rest(reload_script_modules=False)
-
-
-def initialize_rest(*, reload_script_modules=False):
- """
- Called both from initialize() and when reloading the webui.
- """
- sd_samplers.set_samplers()
- extensions.list_extensions()
- startup_timer.record("list extensions")
-
- restore_config_state_file()
-
- if cmd_opts.ui_debug_mode:
- shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
- modules.scripts.load_scripts()
- return
-
- modules.sd_models.list_models()
- startup_timer.record("list SD models")
-
- localization.list_localizations(cmd_opts.localizations_dir)
-
- with startup_timer.subcategory("load scripts"):
- modules.scripts.load_scripts()
-
- if reload_script_modules:
- for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
- importlib.reload(module)
- startup_timer.record("reload script modules")
+initialize.imports()
- modelloader.load_upscalers()
- startup_timer.record("load upscalers")
-
- modules.sd_vae.refresh_vae_list()
- startup_timer.record("refresh VAE")
- modules.textual_inversion.textual_inversion.list_textual_inversion_templates()
- startup_timer.record("refresh textual inversion templates")
-
- modules.script_callbacks.on_list_optimizers(modules.sd_hijack_optimizations.list_optimizers)
- modules.sd_hijack.list_optimizers()
- startup_timer.record("scripts list_optimizers")
-
- modules.sd_unet.list_unets()
- startup_timer.record("scripts list_unets")
-
- def load_model():
- """
- Accesses shared.sd_model property to load model.
- After it's available, if it has been loaded before this access by some extension,
- its optimization may be None because the list of optimizaers has neet been filled
- by that time, so we apply optimization again.
- """
-
- shared.sd_model # noqa: B018
-
- if modules.sd_hijack.current_optimizer is None:
- modules.sd_hijack.apply_optimizations()
-
- Thread(target=load_model).start()
-
- Thread(target=devices.first_time_calculation).start()
-
- shared.reload_hypernetworks()
- startup_timer.record("reload hypernetworks")
-
- ui_extra_networks.initialize()
- ui_extra_networks.register_default_pages()
-
- extra_networks.initialize()
- extra_networks.register_default_extra_networks()
- startup_timer.record("initialize extra networks")
-
-
-def setup_middleware(app):
- app.middleware_stack = None # reset current middleware to allow modifying user provided list
- app.add_middleware(GZipMiddleware, minimum_size=1000)
- configure_cors_middleware(app)
- app.build_middleware_stack() # rebuild middleware stack on-the-fly
-
-
-def configure_cors_middleware(app):
- cors_options = {
- "allow_methods": ["*"],
- "allow_headers": ["*"],
- "allow_credentials": True,
- }
- if cmd_opts.cors_allow_origins:
- cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',')
- if cmd_opts.cors_allow_origins_regex:
- cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex
- app.add_middleware(CORSMiddleware, **cors_options)
+initialize.check_versions()
def create_api(app):
from modules.api.api import Api
+ from modules.call_queue import queue_lock
+
api = Api(app, queue_lock)
return api
def api_only():
- initialize()
+ from fastapi import FastAPI
+ from modules.shared_cmd_options import cmd_opts
+
+ initialize.initialize()
app = FastAPI()
- setup_middleware(app)
+ initialize_util.setup_middleware(app)
api = create_api(app)
- modules.script_callbacks.app_started_callback(None, app)
+ from modules import script_callbacks
+ script_callbacks.before_ui_callback()
+ script_callbacks.app_started_callback(None, app)
print(f"Startup time: {startup_timer.summary()}.")
api.launch(
- server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1",
+ server_name=initialize_util.gradio_server_name(),
port=cmd_opts.port if cmd_opts.port else 7861,
root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else ""
)
def webui():
+ from modules.shared_cmd_options import cmd_opts
+
launch_api = cmd_opts.api
- initialize()
+ initialize.initialize()
+
+ from modules import shared, ui_tempdir, script_callbacks, ui, progress, ui_extra_networks
while 1:
if shared.opts.clean_temp_dir_at_start:
ui_tempdir.cleanup_tmpdr()
startup_timer.record("cleanup temp dir")
- modules.script_callbacks.before_ui_callback()
+ script_callbacks.before_ui_callback()
startup_timer.record("scripts before_ui_callback")
- shared.demo = modules.ui.create_ui()
+ shared.demo = ui.create_ui()
startup_timer.record("create ui")
if not cmd_opts.no_gradio_queue:
shared.demo.queue(64)
- gradio_auth_creds = list(get_gradio_auth_creds()) or None
+ gradio_auth_creds = list(initialize_util.get_gradio_auth_creds()) or None
+
+ auto_launch_browser = False
+ if os.getenv('SD_WEBUI_RESTARTING') != '1':
+ if shared.opts.auto_launch_browser == "Remote" or cmd_opts.autolaunch:
+ auto_launch_browser = True
+ elif shared.opts.auto_launch_browser == "Local":
+ auto_launch_browser = not cmd_opts.webui_is_non_local
app, local_url, share_url = shared.demo.launch(
share=cmd_opts.share,
- server_name=server_name,
+ server_name=initialize_util.gradio_server_name(),
server_port=cmd_opts.port,
ssl_keyfile=cmd_opts.tls_keyfile,
ssl_certfile=cmd_opts.tls_certfile,
ssl_verify=cmd_opts.disable_tls_verify,
debug=cmd_opts.gradio_debug,
auth=gradio_auth_creds,
- inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING') != '1',
+ inbrowser=auto_launch_browser,
prevent_thread_lock=True,
allowed_paths=cmd_opts.gradio_allowed_path,
app_kwargs={
@@ -417,9 +95,6 @@ def webui():
root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else "",
)
- # after initial launch, disable --autolaunch for subsequent restarts
- cmd_opts.autolaunch = False
-
startup_timer.record("gradio launch")
# gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
@@ -428,10 +103,10 @@ def webui():
# running its code. We disable this here. Suggested by RyotaK.
app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
- setup_middleware(app)
+ initialize_util.setup_middleware(app)
- modules.progress.setup_progress_api(app)
- modules.ui.setup_ui_api(app)
+ progress.setup_progress_api(app)
+ ui.setup_ui_api(app)
if launch_api:
create_api(app)
@@ -441,7 +116,7 @@ def webui():
startup_timer.record("add APIs")
with startup_timer.subcategory("app_started_callback"):
- modules.script_callbacks.app_started_callback(shared.demo, app)
+ script_callbacks.app_started_callback(shared.demo, app)
timer.startup_record = startup_timer.dump()
print(f"Startup time: {startup_timer.summary()}.")
@@ -464,18 +139,23 @@ def webui():
shared.demo.close()
break
+ # disable auto launch webui in browser for subsequent UI Reload
+ os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
+
print('Restarting UI...')
shared.demo.close()
time.sleep(0.5)
startup_timer.reset()
- modules.script_callbacks.app_reload_callback()
+ script_callbacks.app_reload_callback()
startup_timer.record("app reload callback")
- modules.script_callbacks.script_unloaded_callback()
+ script_callbacks.script_unloaded_callback()
startup_timer.record("scripts unloaded callback")
- initialize_rest(reload_script_modules=True)
+ initialize.initialize_rest(reload_script_modules=True)
if __name__ == "__main__":
+ from modules.shared_cmd_options import cmd_opts
+
if cmd_opts.nowebui:
api_only()
else:
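After this refactor webui.py is only an orchestrator; the startup work it used to do inline is expected to live in modules/initialize.py and modules/initialize_util.py. Judging solely from the calls visible above, the new initialize module needs roughly this surface (an interface sketch inferred from the diff, not the actual implementation):

    # modules/initialize.py -- interface implied by the webui.py changes above
    def imports():
        """Import torch, gradio and the other heavy dependencies, recording startup timings."""

    def check_versions():
        """Warn when torch/xformers versions differ from the tested ones, unless the check is skipped."""

    def initialize():
        """One-time setup: models, face restorers, option callbacks, then initialize_rest()."""

    def initialize_rest(*, reload_script_modules=False):
        """Setup shared between first start and UI reload: extensions, scripts, upscalers, extra networks."""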
diff --git a/webui.sh b/webui.sh
index cb8b9d14..38258ef6 100755
--- a/webui.sh
+++ b/webui.sh
@@ -4,12 +4,6 @@
# change the variables in webui-user.sh instead #
#################################################
-
-use_venv=1
-if [[ $venv_dir == "-" ]]; then
- use_venv=0
-fi
-
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
@@ -28,6 +22,12 @@ then
source "$SCRIPT_DIR"/webui-user.sh
fi
+# If $venv_dir is "-", then disable venv support
+use_venv=1
+if [[ $venv_dir == "-" ]]; then
+ use_venv=0
+fi
+
# Set defaults
# Install directory without trailing slash
if [[ -z "${install_dir}" ]]
@@ -51,6 +51,8 @@ fi
if [[ -z "${GIT}" ]]
then
export GIT="git"
+else
+ export GIT_PYTHON_GIT_EXECUTABLE="${GIT}"
fi
# python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv)
@@ -87,7 +89,7 @@ delimiter="################################################################"
printf "\n%s\n" "${delimiter}"
printf "\e[1m\e[32mInstall script for stable-diffusion + Web UI\n"
-printf "\e[1m\e[34mTested on Debian 11 (Bullseye)\e[0m"
+printf "\e[1m\e[34mTested on Debian 11 (Bullseye), Fedora 34+ and openSUSE Leap 15.4 or newer.\e[0m"
printf "\n%s\n" "${delimiter}"
# Do not run as root
@@ -131,7 +133,7 @@ case "$gpu_info" in
if [[ $(bc <<< "$pyv <= 3.10") -eq 1 ]]
then
# Navi users will still use torch 1.13 because 2.0 does not seem to work.
- export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --index-url https://download.pytorch.org/whl/rocm5.2"
+ export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.6"
else
printf "\e[1m\e[31mERROR: RX 5000 series GPUs must be using at max python 3.10, aborting...\e[0m"
exit 1
@@ -141,8 +143,7 @@ case "$gpu_info" in
*"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0
;;
*"Navi 3"*) [[ -z "${TORCH_COMMAND}" ]] && \
- export TORCH_COMMAND="pip install --pre torch==2.1.0.dev-20230614+rocm5.5 torchvision==0.16.0.dev-20230614+rocm5.5 --index-url https://download.pytorch.org/whl/nightly/rocm5.5"
- # Navi 3 needs at least 5.5 which is only on the nightly chain
+ export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7"
;;
*"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0
printf "\n%s\n" "${delimiter}"
@@ -221,13 +222,30 @@ fi
# Try using TCMalloc on Linux
prepare_tcmalloc() {
if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then
- TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -Po "libtcmalloc(_minimal|)\.so\.\d" | head -n 1)"
- if [[ ! -z "${TCMALLOC}" ]]; then
- echo "Using TCMalloc: ${TCMALLOC}"
- export LD_PRELOAD="${TCMALLOC}"
- else
- printf "\e[1m\e[31mCannot locate TCMalloc (improves CPU memory usage)\e[0m\n"
- fi
+        # Define the TCMalloc library patterns to probe, in order of preference
+ TCMALLOC_LIBS=("libtcmalloc(_minimal|)\.so\.\d" "libtcmalloc\.so\.\d")
+
+        # Traverse the candidate patterns
+ for lib in "${TCMALLOC_LIBS[@]}"
+ do
+            # Determine which TCMalloc library variant this system provides
+ TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -P $lib | head -n 1)"
+ TC_INFO=(${TCMALLOC//=>/})
+ if [[ ! -z "${TC_INFO}" ]]; then
+ echo "Using TCMalloc: ${TC_INFO}"
+                # Determine whether the library is linked against libpthread, to avoid "undefined symbol: pthread_key_create"
+ if ldd ${TC_INFO[2]} | grep -q 'libpthread'; then
+ echo "$TC_INFO is linked with libpthread,execute LD_PRELOAD=${TC_INFO}"
+ export LD_PRELOAD="${TC_INFO}"
+ break
+ else
+ echo "$TC_INFO is not linked with libpthreadand will trigger undefined symbol: ptthread_Key_Create error"
+ fi
+ else
+ printf "\e[1m\e[31mCannot locate TCMalloc (improves CPU memory usage)\e[0m\n"
+ fi
+ done
+
fi
}
@@ -245,7 +263,7 @@ while [[ "$KEEP_GOING" -eq "1" ]]; do
printf "Launching launch.py..."
printf "\n%s\n" "${delimiter}"
prepare_tcmalloc
- "${python_cmd}" "${LAUNCH_SCRIPT}" "$@"
+ "${python_cmd}" -u "${LAUNCH_SCRIPT}" "$@"
fi
if [[ ! -f tmp/restart ]]; then