-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.yml | 29
-rw-r--r--  .github/ISSUE_TEMPLATE/feature_request.yml | 2
-rw-r--r--  .github/pull_request_template.md (renamed from .github/PULL_REQUEST_TEMPLATE/pull_request_template.md) | 4
-rw-r--r--  .github/workflows/on_pull_request.yaml | 13
-rw-r--r--  .github/workflows/run_tests.yaml | 10
-rw-r--r--  .gitignore | 1
-rw-r--r--  LICENSE.txt | 663
-rw-r--r--  README.md | 31
-rw-r--r--  artists.csv | 3041
-rw-r--r--  configs/alt-diffusion-inference.yaml | 72
-rw-r--r--  configs/instruct-pix2pix.yaml | 99
-rw-r--r--  configs/v1-inference.yaml (renamed from v1-inference.yaml) | 0
-rw-r--r--  configs/v1-inpainting-inference.yaml | 70
-rw-r--r--  extensions-builtin/LDSR/ldsr_model_arch.py | 3
-rw-r--r--  extensions-builtin/Lora/extra_networks_lora.py | 20
-rw-r--r--  extensions-builtin/Lora/lora.py | 207
-rw-r--r--  extensions-builtin/Lora/preload.py | 6
-rw-r--r--  extensions-builtin/Lora/scripts/lora_script.py | 35
-rw-r--r--  extensions-builtin/Lora/ui_extra_networks_lora.py | 36
-rw-r--r--  extensions-builtin/SwinIR/scripts/swinir_model.py | 8
-rw-r--r--  extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js | 45
-rw-r--r--  html/card-no-preview.png | bin 0 -> 84440 bytes
-rw-r--r--  html/extra-networks-card.html | 11
-rw-r--r--  html/extra-networks-no-cards.html | 8
-rw-r--r--  html/footer.html | 13
-rw-r--r--  html/image-update.svg | 7
-rw-r--r--  html/licenses.html | 419
-rw-r--r--  javascript/aspectRatioOverlay.js | 9
-rw-r--r--  javascript/dragdrop.js | 10
-rw-r--r--  javascript/edit-attention.js | 127
-rw-r--r--  javascript/extensions.js | 2
-rw-r--r--  javascript/extraNetworks.js | 69
-rw-r--r--  javascript/hints.js | 30
-rw-r--r--  javascript/hires_fix.js | 22
-rw-r--r--  javascript/imageviewer.js | 13
-rw-r--r--  javascript/localization.js | 6
-rw-r--r--  javascript/progressbar.js | 274
-rw-r--r--  javascript/textualInversion.js | 13
-rw-r--r--  javascript/ui.js | 157
-rw-r--r--  launch.py | 99
-rw-r--r--  modules/api/api.py | 242
-rw-r--r--  modules/api/models.py | 30
-rw-r--r--  modules/artists.py | 25
-rw-r--r--  modules/call_queue.py | 19
-rw-r--r--  modules/codeformer_model.py | 2
-rw-r--r--  modules/deepbooru_model.py | 4
-rw-r--r--  modules/devices.py | 82
-rw-r--r--  modules/errors.py | 37
-rw-r--r--  modules/extensions.py | 4
-rw-r--r--  modules/extra_networks.py | 147
-rw-r--r--  modules/extra_networks_hypernet.py | 21
-rw-r--r--  modules/extras.py | 408
-rw-r--r--  modules/generation_parameters_copypaste.py | 127
-rw-r--r--  modules/gfpgan_model.py | 5
-rw-r--r--  modules/hashes.py | 87
-rw-r--r--  modules/hypernetworks/hypernetwork.py | 301
-rw-r--r--  modules/hypernetworks/ui.py | 9
-rw-r--r--  modules/images.py | 43
-rw-r--r--  modules/img2img.py | 80
-rw-r--r--  modules/interrogate.py | 82
-rw-r--r--  modules/memmon.py | 3
-rw-r--r--  modules/modelloader.py | 24
-rw-r--r--  modules/models/diffusion/ddpm_edit.py | 1459
-rw-r--r--  modules/paths.py | 24
-rw-r--r--  modules/postprocessing.py | 103
-rw-r--r--  modules/processing.py | 329
-rw-r--r--  modules/progress.py | 99
-rw-r--r--  modules/prompt_parser.py | 11
-rw-r--r--  modules/realesrgan_model.py | 12
-rw-r--r--  modules/script_callbacks.py | 77
-rw-r--r--  modules/script_loading.py | 10
-rw-r--r--  modules/scripts.py | 73
-rw-r--r--  modules/scripts_auto_postprocessing.py | 42
-rw-r--r--  modules/scripts_postprocessing.py | 152
-rw-r--r--  modules/sd_disable_initialization.py | 90
-rw-r--r--  modules/sd_hijack.py | 66
-rw-r--r--  modules/sd_hijack_checkpoint.py | 38
-rw-r--r--  modules/sd_hijack_clip.py | 382
-rw-r--r--  modules/sd_hijack_clip_old.py | 81
-rw-r--r--  modules/sd_hijack_inpainting.py | 238
-rw-r--r--  modules/sd_hijack_ip2p.py | 13
-rw-r--r--  modules/sd_hijack_optimizations.py | 292
-rw-r--r--  modules/sd_hijack_unet.py | 32
-rw-r--r--  modules/sd_hijack_utils.py | 28
-rw-r--r--  modules/sd_hijack_xlmr.py | 34
-rw-r--r--  modules/sd_models.py | 357
-rw-r--r--  modules/sd_models_config.py | 71
-rw-r--r--  modules/sd_samplers.py | 39
-rw-r--r--  modules/sd_vae.py | 228
-rw-r--r--  modules/sd_vae_approx.py | 2
-rw-r--r--  modules/shared.py | 196
-rw-r--r--  modules/shared_items.py | 23
-rw-r--r--  modules/styles.py | 12
-rw-r--r--  modules/sub_quadratic_attention.py | 214
-rw-r--r--  modules/textual_inversion/dataset.py | 60
-rw-r--r--  modules/textual_inversion/image_embedding.py | 4
-rw-r--r--  modules/textual_inversion/learn_schedule.py | 11
-rw-r--r--  modules/textual_inversion/logging.py | 24
-rw-r--r--  modules/textual_inversion/preprocess.py | 51
-rw-r--r--  modules/textual_inversion/textual_inversion.py | 353
-rw-r--r--  modules/timer.py | 35
-rw-r--r--  modules/txt2img.py | 13
-rw-r--r--  modules/ui.py | 1397
-rw-r--r--  modules/ui_common.py | 202
-rw-r--r--  modules/ui_components.py | 58
-rw-r--r--  modules/ui_extensions.py | 46
-rw-r--r--  modules/ui_extra_networks.py | 179
-rw-r--r--  modules/ui_extra_networks_hypernets.py | 35
-rw-r--r--  modules/ui_extra_networks_textual_inversion.py | 33
-rw-r--r--  modules/ui_postprocessing.py | 57
-rw-r--r--  modules/ui_tempdir.py | 24
-rw-r--r--  modules/upscaler.py | 12
-rw-r--r--  modules/xlmr.py | 137
-rw-r--r--  requirements.txt | 9
-rw-r--r--  requirements_versions.txt | 9
-rw-r--r--  screenshot.png | bin 525075 -> 420577 bytes
-rw-r--r--  script.js | 22
-rw-r--r--  scripts/custom_code.py | 3
-rw-r--r--  scripts/img2imgalt.py | 22
-rw-r--r--  scripts/loopback.py | 7
-rw-r--r--  scripts/outpainting_mk_2.py | 10
-rw-r--r--  scripts/poor_mans_outpainting.py | 11
-rw-r--r--  scripts/postprocessing_codeformer.py | 36
-rw-r--r--  scripts/postprocessing_gfpgan.py | 33
-rw-r--r--  scripts/postprocessing_upscale.py | 131
-rw-r--r--  scripts/prompt_matrix.py | 6
-rw-r--r--  scripts/prompts_from_file.py | 16
-rw-r--r--  scripts/sd_upscale.py | 10
-rw-r--r--  scripts/xy_grid.py | 430
-rw-r--r--  scripts/xyz_grid.py | 588
-rw-r--r--  style.css | 657
-rw-r--r--  test/advanced_features/__init__.py | 0
-rw-r--r--  test/advanced_features/extras_test.py | 29
-rw-r--r--  test/advanced_features/txt2img_test.py | 47
-rw-r--r--  test/basic_features/extras_test.py | 54
-rw-r--r--  test/basic_features/img2img_test.py | 13
-rw-r--r--  test/basic_features/txt2img_test.py | 12
-rw-r--r--  test/basic_features/utils_test.py | 25
-rw-r--r--  test/server_poll.py | 2
-rw-r--r--  txt2img_Screenshot.png | bin 337094 -> 0 bytes
-rw-r--r--  webui-macos-env.sh | 2
-rw-r--r--  webui.bat | 35
-rw-r--r--  webui.py | 113
-rwxr-xr-x  webui.sh | 23
144 files changed, 10910 insertions, 6469 deletions
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index ed372f22..7d435297 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -37,20 +37,20 @@ body:
id: what-should
attributes:
label: What should have happened?
- description: tell what you think the normal behavior should be
+ description: Tell what you think the normal behavior should be
validations:
required: true
- type: input
id: commit
attributes:
label: Commit where the problem happens
- description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit hash** shown in the cmd/terminal when you launch the UI)
+ description: Which commit are you running? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)
validations:
required: true
- type: dropdown
id: platforms
attributes:
- label: What platforms do you use to access UI ?
+ label: What platforms do you use to access the UI?
multiple: true
options:
- Windows
@@ -74,10 +74,27 @@ body:
id: cmdargs
attributes:
label: Command Line Arguments
- description: Are you using any launching parameters/command line arguments (modified webui-user.py) ? If yes, please write them below
+ description: Are you using any launching parameters/command line arguments (modified webui-user.bat/.sh)? If yes, please write them below. Write "No" otherwise.
render: Shell
+ validations:
+ required: true
+ - type: textarea
+ id: extensions
+ attributes:
+ label: List of extensions
+ description: Are you using any extensions other than built-ins? If yes, provide a list; you can copy it from the "Extensions" tab. Write "No" otherwise.
+ validations:
+ required: true
+ - type: textarea
+ id: logs
+ attributes:
+ label: Console logs
+ description: Please provide **full** cmd/terminal logs from the moment you started the UI to the end of it, after your bug happened. If it's very long, provide a link to pastebin or a similar service.
+ render: Shell
+ validations:
+ required: true
- type: textarea
id: misc
attributes:
- label: Additional information, context and logs
- description: Please provide us with any relevant additional info, context or log output.
+ label: Additional information
+ description: Please provide us with any relevant additional info or context.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index 8ca6e21f..35a88740 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -1,7 +1,7 @@
name: Feature request
description: Suggest an idea for this project
title: "[Feature Request]: "
-labels: ["suggestion"]
+labels: ["enhancement"]
body:
- type: checkboxes
diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/pull_request_template.md
index 86009613..69056331 100644
--- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -18,8 +18,8 @@ More technical discussion about your changes go here, plus anything that a maint
List the environment you have developed / tested this on. As per the contributing page, changes should be able to work on Windows out of the box.
- OS: [e.g. Windows, Linux]
- - Browser [e.g. chrome, safari]
- - Graphics card [e.g. NVIDIA RTX 2080 8GB, AMD RX 6600 8GB]
+ - Browser: [e.g. chrome, safari]
+ - Graphics card: [e.g. NVIDIA RTX 2080 8GB, AMD RX 6600 8GB]
**Screenshots or videos of your changes**
diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml
index b097d180..a168be5b 100644
--- a/.github/workflows/on_pull_request.yaml
+++ b/.github/workflows/on_pull_request.yaml
@@ -19,22 +19,19 @@ jobs:
- name: Checkout Code
uses: actions/checkout@v3
- name: Set up Python 3.10
- uses: actions/setup-python@v3
+ uses: actions/setup-python@v4
with:
python-version: 3.10.6
- - uses: actions/cache@v2
- with:
- path: ~/.cache/pip
- key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
- restore-keys: |
- ${{ runner.os }}-pip-
+ cache: pip
+ cache-dependency-path: |
+ **/requirements*txt
- name: Install PyLint
run: |
python -m pip install --upgrade pip
pip install pylint
# This lets PyLint check to see if it can resolve imports
- name: Install dependencies
- run : |
+ run: |
export COMMANDLINE_ARGS="--skip-torch-cuda-test --exit"
python launch.py
- name: Analysing the code with pylint
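Note: the hunk above drops the standalone actions/cache step and switches to setup-python's built-in pip caching. As a reference, a sketch of what the resulting step should look like, assembled only from the context and added lines in this diff (indentation as in the workflow file):

    - name: Set up Python 3.10
      uses: actions/setup-python@v4
      with:
        python-version: 3.10.6
        # built-in pip caching replaces the removed actions/cache step;
        # the cache key is derived from the files matched below
        cache: pip
        cache-dependency-path: |
          **/requirements*txt

With cache: pip, setup-python v4 caches pip's download cache keyed on the matched requirements files, which covers what the removed actions/cache block did by hashing **/requirements.txt. The same consolidation is applied to run_tests.yaml below.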
diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml
index 49dc92bd..be7ffa23 100644
--- a/.github/workflows/run_tests.yaml
+++ b/.github/workflows/run_tests.yaml
@@ -14,13 +14,11 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: 3.10.6
- - uses: actions/cache@v3
- with:
- path: ~/.cache/pip
- key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
- restore-keys: ${{ runner.os }}-pip-
+ cache: pip
+ cache-dependency-path: |
+ **/requirements*txt
- name: Run tests
- run: python launch.py --tests basic_features --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test
+ run: python launch.py --tests --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test
- name: Upload main app stdout-stderr
uses: actions/upload-artifact@v3
if: always()
diff --git a/.gitignore b/.gitignore
index 21fa26a7..0b1d17ca 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,3 +32,4 @@ notification.mp3
/extensions
/test/stdout.txt
/test/stderr.txt
+/cache.json
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 00000000..14577543
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,663 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (c) 2023 AUTOMATIC1111
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/README.md b/README.md
index 556000fb..2149dcc5 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,7 @@
# Stable Diffusion web UI
A browser interface based on Gradio library for Stable Diffusion.
-![](txt2img_Screenshot.png)
-
-Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) wiki page for extra scripts developed by users.
+![](screenshot.png)
## Features
[Detailed feature showcase with images](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features):
@@ -19,7 +17,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- a man in a (tuxedo:1.21) - alternative syntax
- select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text (code contributed by anonymous user)
- Loopback, run img2img processing multiple times
-- X/Y plot, a way to draw a 2 dimensional plot of images with different parameters
+- X/Y/Z plot, a way to draw a 3 dimensional plot of images with different parameters
- Textual Inversion
- have as many embeddings as you want and use any names you like for them
- use multiple embeddings with different numbers of vectors per token
@@ -51,9 +49,9 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- Running arbitrary python code from UI (must run with --allow-code to enable)
- Mouseover hints for most UI elements
- Possible to change defaults/mix/max/step values for UI elements via text config
-- Random artist button
- Tiling support, a checkbox to create images that can be tiled like textures
- Progress bar and live image generation preview
+ - Can use a separate neural network to produce previews with almost no VRAM or compute requirement
- Negative prompt, an extra text field that allows you to list what you don't want to see in generated image
- Styles, a way to save part of prompt and easily apply them via dropdown later
- Variations, a way to generate same image but with tiny differences
@@ -78,13 +76,22 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- hypernetworks and embeddings options
- Preprocessing images: cropping, mirroring, autotagging using BLIP or deepdanbooru (for anime)
- Clip skip
-- Use Hypernetworks
-- Use VAEs
+- Hypernetworks
+- Loras (same as Hypernetworks but prettier)
+- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt.
+- Can select a different VAE to load from the settings screen
- Estimated completion time in progress bar
- API
- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML.
- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions
+- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
+- Now without any bad letters!
+- Load checkpoints in safetensors format
+- Eased resolution restriction: generated image's dimensions must be a multiple of 8 rather than 64
+- Now with a license!
+- Reorder elements in the UI from settings screen
+-
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
@@ -97,9 +104,8 @@ Alternatively, use online services (like Google Colab):
1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH"
2. Install [git](https://git-scm.com/download/win).
3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`.
-4. Place `model.ckpt` in the `models` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it).
-5. _*(Optional)*_ Place `GFPGANv1.4.pth` in the base directory, alongside `webui.py` (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it).
-6. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.
+4. Place stable diffusion checkpoint (`model.ckpt`) in the `models/Stable-diffusion` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it).
+5. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.
### Automatic Installation on Linux
1. Install the dependencies:
@@ -127,6 +133,8 @@ Here's how to add code to this repo: [Contributing](https://github.com/AUTOMATIC
The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki).
## Credits
+Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file.
+
- Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers
- k-diffusion - https://github.com/crowsonkb/k-diffusion.git
- GFPGAN - https://github.com/TencentARC/GFPGAN.git
@@ -139,6 +147,7 @@ The documentation was moved from this README over to the project's [wiki](https:
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
- Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
- Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
+- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san/diffusers/pull/1), Amin Rezaei (https://github.com/AminRezaei0x443/memory-efficient-attention)
- Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
@@ -146,6 +155,8 @@ The documentation was moved from this README over to the project's [wiki](https:
- Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch
- xformers - https://github.com/facebookresearch/xformers
- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
+- Sampling in float32 precision from a float16 UNet - marunine for the idea, Birch-san for the example Diffusers implementation (https://github.com/Birch-san/diffusers-play/tree/92feee6)
+- Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. Efros (no star) - https://github.com/timothybrooks/instruct-pix2pix
- Security advice - RyotaK
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
- (You)
diff --git a/artists.csv b/artists.csv
deleted file mode 100644
index 1a61ed88..00000000
--- a/artists.csv
+++ /dev/null
@@ -1,3041 +0,0 @@
-artist,score,category
-Peter Max,0.99715996,weird
-Roy Lichtenstein,0.98272276,cartoon
-Romero Britto,0.9498342,scribbles
-Keith Haring,0.9431302,weird
-Hiroshige,0.93995106,ukioe
-Joan Miró,0.9169429,scribbles
-Jean-Michel Basquiat,0.90080947,scribbles
-Katsushika Hokusai,0.8887236,ukioe
-Paul Klee,0.8868682,scribbles
-Marc Chagall,0.8868168,scribbles
-Karl Schmidt-Rottluff,0.88444495,scribbles
-Howard Hodgkin,0.8808578,scribbles
-Jean Metzinger,0.88056004,scribbles
-Alma Thomas,0.87658304,weird
-Rufino Tamayo,0.8749848,scribbles
-Utagawa Hiroshige,0.8728796,ukioe
-Chagall,0.8718535,scribbles
-Harumi Hironaka,0.86914605,scribbles
-Hans Hofmann,0.8686159,scribbles
-Kawanabe Kyōsai,0.86612236,ukioe
-Andy Warhol,0.8654825,scribbles
-Barbara Takenaga,0.86223894,scribbles
-Tatsuro Kiuchi,0.8597267,cartoon
-Vincent Van Gogh,0.85538065,scribbles
-Wassily Kandinsky,0.85490596,scribbles
-Georges Seurat,0.8534801,scribbles
-Karel Appel,0.8529153,scribbles
-Sonia Delaunay,0.8506156,scribbles
-Hokusai,0.85046995,ukioe
-Eduardo Kobra,0.85036755,weird
-Fra Angelico,0.84984255,fineart
-Milton Avery,0.849746,scribbles
-David Hockney,0.8496144,scribbles
-Hiroshi Nagai,0.847129,cartoon
-Aristarkh Lentulov,0.846537,scribbles
-Lyonel Feininger,0.84573764,scribbles
-Mary Blair,0.845709,scribbles
-Ellsworth Kelly,0.8455428,scribbles
-Jun Kaneko,0.8448367,scribbles
-Roz Chast,0.8432013,weird
-Ida Rentoul Outhwaite,0.84275174,scribbles
-Robert Motherwell,0.8409468,scribbles
-Garry Winogrand,0.83994275,black-white
-Andrei Rublev,0.83950496,fineart
-Alexander Calder,0.83832693,scribbles
-Tomokazu Matsuyama,0.8376121,scribbles
-August Macke,0.8362022,scribbles
-Kazimir Malevich,0.8356527,scribbles
-Richard Scarry,0.83554685,scribbles
-Victor Vasarely,0.8335438,scribbles
-Kitagawa Utamaro,0.83333457,ukioe
-Matt Bors,0.83252287,scribbles
-Emil Nolde,0.8323225,scribbles
-Patrick Caulfield,0.8322225,scribbles
-Charles Blackman,0.83200824,scribbles
-Peter Doig,0.83111644,scribbles
-Alexej von Jawlensky,0.8308932,scribbles
-Rumiko Takahashi,0.8301817,anime
-Eileen Agar,0.82945526,scribbles
-Ernst Ludwig Kirchner,0.82756275,scribbles
-Nicolas Delort,0.8261329,scribbles
-Marsden Hartley,0.8250993,scribbles
-Keith Negley,0.8212553,scribbles
-Jamini Roy,0.8212199,scribbles
-Quentin Blake,0.82115215,scribbles
-Andy Kehoe,0.82063186,cartoon
-George barbier,0.82046914,fineart
-Frans Masereel,0.81997275,scribbles
-Umberto Boccioni,0.81921184,scribbles
-Conrad Roset,0.8190752,cartoon
-Paul Ranson,0.81903255,scribbles
-Yayoi Kusama,0.81886625,weird
-Tomi Ungerer,0.81848705,scribbles
-Saul Steinberg,0.81778854,scribbles
-Jon Klassen,0.81773067,scribbles
-W.W. Denslow,0.81708044,fineart
-Helen Frankenthaler,0.81704986,scribbles
-Jean Jullien,0.816437,scribbles
-Brett Whiteley,0.81601924,scribbles
-Giotto Di Bondone,0.81427747,fineart
-Takashi Murakami,0.81338763,weird
-Howard Finster,0.81333554,scribbles
-Eduardo Paolozzi,0.81312317,scribbles
-Charles Rennie Mackintosh,0.81297064,scribbles
-Brandon Mably,0.8128239,weird
-Rebecca Louise Law,0.81214285,weird
-Victo Ngai,0.81195843,cartoon
-Hanabusa Itchō II,0.81187993,ukioe
-Edmund Dulac,0.81104875,scribbles
-Ben Shahn,0.8104582,scribbles
-Howard Arkley,0.8103746,scribbles
-Wilfredo Lam,0.8096211,scribbles
-Michael Deforge,0.8095954,scribbles
-John Hoyland,0.8094592,fineart
-Francesco Clemente,0.8090387,scribbles
-Leonetto Cappiello,0.8087691,scribbles
-Norman Ackroyd,0.80788493,scribbles
-Bhupen Khakhar,0.8077607,scribbles
-Jeremiah Ketner,0.8075384,cartoon
-Chris Ofili,0.8073793,scribbles
-Banksy,0.80695426,scribbles
-Tom Whalen,0.805867,scribbles
-Ernst Wilhelm Nay,0.805295,scribbles
-Henri Rousseau,0.8049866,scribbles
-Kunisada,0.80493814,ukioe
-Naoko Takeuchi,0.80482674,anime
-Kaethe Butcher,0.80406916,scribbles
-Hasui Kawase,0.8040483,ukioe
-Alvin Langdon Coburn,0.8035004,black-white
-Stanley Donwood,0.8033054,scribbles
-Agnes Martin,0.8028028,scribbles
-Osamu Tezuka,0.8005524,cartoon
-Frank Stella,0.80049455,scribbles
-Dale Chihuly,0.79982775,digipa-high-impact
-Evgeni Gordiets,0.79967916,scribbles
-Janek Sedlar,0.7993992,fineart
-Alasdair Gray,0.7992301,scribbles
-Yasuo Kuniyoshi,0.79870003,ukioe
-Edward Gorey,0.7984938,scribbles
-Johannes Itten,0.798481,scribbles
-Cuno Amiet,0.7979497,scribbles
-M.C. Escher,0.7976657,scribbles
-Albert Irvin,0.79688835,scribbles
-Jack Gaughan,0.79443675,scribbles
-Ravi Zupa,0.7939542,scribbles
-Kay Nielsen,0.79385525,scribbles
-Agnolo Gaddi,0.79369193,fineart
-Alessandro Gottardo,0.79321593,scribbles
-Paul Laffoley,0.79196846,scribbles
-Giovanni Battista Piranesi,0.79111177,fineart
-Adrian Tomine,0.79109013,scribbles
-Adolph Gottlieb,0.79061794,scribbles
-Milton Caniff,0.7905358,cartoon
-Philip Guston,0.78994095,scribbles
-Debbie Criswell,0.7895031,cartoon
-Alice Pasquini,0.78949904,cartoon
-Johannes Vermeer,0.78931487,fineart
-Lisa Frank,0.7892591,cartoon
-Patrick Heron,0.78889126,scribbles
-Mikhail Nesterov,0.78814346,fineart
-Cézanne,0.7879481,scribbles
-Tristan Eaton,0.787513,scribbles
-Jillian Tamaki,0.7868066,scribbles
-Takato Yamamoto,0.78460765,ukioe
-Martiros Saryan,0.7844924,scribbles
-Emil Orlik,0.7842625,scribbles
-Armand Guillaumin,0.7840431,scribbles
-Jane Newland,0.7837676,scribbles
-Paul Cézanne,0.78368753,scribbles
-Tove Jansson,0.78356475,scribbles
-Guido Crepax,0.7835321,cartoon
-OSGEMEOS,0.7829088,weird
-Albert Watson,0.48901254,digipa-med-impact
-Emory Douglas,0.78179604,scribbles
-Chris Van Allsburg,0.66413003,fineart
-Ohara Koson,0.78132576,ukioe
-Nicolas de Stael,0.7802779,scribbles
-Aubrey Beardsley,0.77970016,scribbles
-Hishikawa Moronobu,0.7794119,ukioe
-Alfred Wallis,0.77926695,scribbles
-Friedensreich Hundertwasser,0.7791805,scribbles
-Eyvind Earle,0.7788089,scribbles
-Giotto,0.7785216,fineart
-Simone Martini,0.77843,fineart
-Ivan Bilibin,0.77720606,fineart
-Karl Blossfeldt,0.77652574,black-white
-Duy Huynh,0.77634746,scribbles
-Giovanni da Udina,0.7763063,fineart
-Henri-Edmond Cross,0.7762994,fineart
-Barry McGee,0.77618384,scribbles
-William Kentridge,0.77615225,scribbles
-Alexander Archipenko,0.7759824,scribbles
-Jaume Plensa,0.7756799,weird
-Bill Jacklin,0.77504414,fineart
-Alberto Vargas,0.7747376,cartoon
-Jean Dubuffet,0.7744374,scribbles
-Eugène Grasset,0.7741958,fineart
-Arthur Rackham,0.77418125,fineart
-Yves Tanguy,0.77380997,scribbles
-Elsa Beskow,0.7736908,fineart
-Georgia O’Keeffe,0.77368987,scribbles
-Georgia O'Keeffe,0.77368987,scribbles
-Henri Cartier-Bresson,0.7735415,black-white
-Andrea del Verrocchio,0.77307427,fineart
-Mark Rothko,0.77294236,scribbles
-Bruce Gilden,0.7256681,black-white
-Gino Severini,0.77247965,scribbles
-Delphin Enjolras,0.5594248,fineart
-Alena Aenami,0.77210015,cartoon
-Ed Freeman,0.42526615,digipa-low-impact
-Apollonia Saintclair,0.7718383,anime
-László Moholy-Nagy,0.771497,scribbles
-Louis Glackens,0.7713224,fineart
-Fang Lijun,0.77097225,fineart
-Alfred Kubin,0.74409986,fineart
-David Wojnarowicz,0.7705802,scribbles
-Tara McPherson,0.77023256,scribbles
-Gustav Doré,0.7367536,fineart
-Patricia Polacco,0.7696109,scribbles
-Norman Bluhm,0.7692634,fineart
-Elizabeth Gadd,0.7691194,digipa-high-impact
-Gabriele Münter,0.7690926,scribbles
-David Inshaw,0.76905304,scribbles
-Maurice Sendak,0.7690118,cartoon
-Harry Clarke,0.7688428,cartoon
-Howardena Pindell,0.7686921,n
-Jamie Hewlett,0.7680373,scribbles
-Steve Ditko,0.76725733,scribbles
-Annie Soudain,0.7671485,scribbles
-Albert Gleizes,0.76658314,scribbles
-Henry Fuseli,0.69147265,fineart
-Alain Laboile,0.67634284,c
-Albrecht Altdorfer,0.7663378,fineart
-Jack Butler Yeats,0.7661406,fineart
-Yue Minjun,0.76583517,scribbles
-Art Spiegelman,0.7656343,scribbles
-Grete Stern,0.7656276,fineart
-Mordecai Ardon,0.7648692,scribbles
-Joel Sternfeld,0.76456416,digipa-high-impact
-Milton Glaser,0.7641823,scribbles
-Eishōsai Chōki,0.7639659,scribbles
-Domenico Ghirlandaio,0.76372653,fineart
-Alex Timmermans,0.64443207,digipa-high-impact
-Andreas Vesalius,0.763446,fineart
-Bruce McLean,0.76335883,scribbles
-Jacob Lawrence,0.76330304,scribbles
-Alex Katz,0.76317835,scribbles
-Henri de Toulouse-Lautrec,0.76268333,scribbles
-Franz Sedlacek,0.762062,scribbles
-Paul Lehr,0.70854837,cartoon
-Nicholas Roerich,0.76117516,scribbles
-Henri Matisse,0.76110923,scribbles
-Colin McCahon,0.76086944,scribbles
-Max Dupain,0.6661642,black-white
-Stephen Gammell,0.74001735,weird
-Alberto Giacometti,0.7596302,scribbles
-Goyō Hashiguchi,0.7595048,ukioe
-Gustave Doré,0.7018832,fineart
-Butcher Billy,0.7593378,cartoon
-Pieter de Hooch,0.75916564,fineart
-Gaetano Pesce,0.75906265,scribbles
-Winsor McCay,0.7589382,scribbles
-Claude Cahun,0.7588153,weird
-Roger Ballen,0.64683115,black-white
-Ellen Gallagher,0.758621,scribbles
-Anton Corbijn,0.5550669,digipa-high-impact
-Margaret Macdonald Mackintosh,0.75781375,fineart
-Franz Kline,0.7576461,scribbles
-Cimabue,0.75720495,fineart
-André Kertész,0.7319392,black-white
-Hans Hartung,0.75718236,scribbles
-J. J. Grandville,0.7321584,fineart
-David Octavius Hill,0.6333561,digipa-high-impact
-teamLab,0.7566472,digipa-high-impact
-Paul Gauguin,0.75635266,scribbles
-Etel Adnan,0.75631833,scribbles
-Barbara Kruger,0.7562784,scribbles
-Franz Marc,0.75538874,scribbles
-Saul Bass,0.75496316,scribbles
-El Lissitzky,0.7549487,scribbles
-Thomas Moran,0.6507399,fineart
-Claude Monet,0.7541377,fineart
-David Young Cameron,0.7541016,scribbles
-W. Heath Robinson,0.75374347,cartoon
-Yves Klein,0.7536262,fineart
-Albert Pinkham Ryder,0.7338848,fineart
-Elizabeth Shippen Green,0.7533686,fineart
-Robert Stivers,0.5516287,fineart
-Emily Kame Kngwarreye,0.7532016,weird
-Charline von Heyl,0.753142,scribbles
-Frida Kahlo,0.75303876,scribbles
-Amy Sillman,0.752921,scribbles
-Emperor Huizong of Song,0.7525214,ukioe
-Edward Burne-Jones,0.75220466,fineart
-Brett Weston,0.6891357,black-white
-Charles E. Burchfield,0.75174403,scribbles
-Hishida Shunsō,0.751617,fareast
-Elaine de Kooning,0.7514996,scribbles
-Gary Panter,0.7514598,scribbles
-Frederick Hammersley,0.7514268,scribbles
-Gustave Dore,0.6735896,fineart
-Ephraim Moses Lilien,0.7510494,fineart
-Hannah Hoch,0.7509496,scribbles
-Shepard Fairey,0.7508583,scribbles
-Richard Burlet,0.7506659,scribbles
-Bill Brandt,0.6833408,black-white
-Herbert List,0.68455493,black-white
-Joseph Cornell,0.75023884,nudity
-Nathan Wirth,0.6436741,black-white
-John Kenn Mortensen,0.74758303,anime
-Andre De Dienes,0.5683014,digipa-high-impact
-Albert Robida,0.7485741,cartoon
-Shintaro Kago,0.7484431,anime
-Sidney Nolan,0.74809414,scribbles
-Patrice Murciano,0.61973965,fineart
-Brian Stelfreeze,0.7478351,scribbles
-Francisco De Goya,0.6954584,fineart
-William Morris,0.7478111,fineart
-Honoré Daumier,0.74767774,scribbles
-Hubert Robert,0.6863421,fineart
-Marianne von Werefkin,0.7475825,fineart
-Edvard Munch,0.74719715,scribbles
-Victor Brauner,0.74719006,scribbles
-George Inness,0.7470588,fineart
-Naoki Urasawa,0.7469665,anime
-Kilian Eng,0.7468486,scribbles
-Bordalo II,0.7467364,digipa-high-impact
-Katsuhiro Otomo,0.746364,anime
-Maximilien Luce,0.74609685,fineart
-Amy Earles,0.74603415,fineart
-Jeanloup Sieff,0.7196009,black-white
-William Zorach,0.74574494,scribbles
-Pascale Campion,0.74516207,fineart
-Dorothy Lathrop,0.74418795,fineart
-Sofonisba Anguissola,0.74418664,fineart
-Natalia Goncharova,0.74414873,scribbles
-August Sander,0.6644566,black-white
-Jasper Johns,0.74395454,scribbles
-Arthur Dove,0.74383533,scribbles
-Darwyn Cooke,0.7435789,scribbles
-Leonardo Da Vinci,0.6825216,fineart
-Fra Filippo Lippi,0.7433891,fineart
-Pierre-Auguste Renoir,0.742464,fineart
-Jeff Lemire,0.7422893,scribbles
-Al Williamson,0.742113,cartoon
-Childe Hassam,0.7418015,fineart
-Francisco Goya,0.69522625,fineart
-Alphonse Mucha,0.74171394,special
-Cleon Peterson,0.74163914,scribbles
-J.M.W. Turner,0.65582645,fineart
-Walter Crane,0.74146044,fineart
-Brassaï,0.6361966,digipa-high-impact
-Virgil Finlay,0.74133486,fineart
-Fernando Botero,0.7412504,nudity
-Ben Nicholson,0.7411573,scribbles
-Robert Rauschenberg,0.7410054,fineart
-David Wiesner,0.7406237,scribbles
-Bartolome Esteban Murillo,0.6933951,fineart
-Jean Arp,0.7403873,scribbles
-Andre Kertesz,0.7228358,black-white
-Simeon Solomon,0.66441345,fineart
-Hugh Ferriss,0.72443527,black-white
-Agnes Lawrence Pelton,0.73960555,scribbles
-Charles Camoin,0.7395686,scribbles
-Paul Strand,0.7080332,black-white
-Charles Gwathmey,0.7394747,scribbles
-Bartolomé Esteban Murillo,0.7011274,fineart
-Oskar Kokoschka,0.7392038,scribbles
-Bruno Munari,0.73918355,weird
-Willem de Kooning,0.73916197,scribbles
-Hans Memling,0.7387886,fineart
-Chris Mars,0.5861489,digipa-high-impact
-Hiroshi Yoshida,0.73787534,ukioe
-Hundertwasser,0.7377672,fineart
-David Bowie,0.73773724,weird
-Ettore Sottsass,0.7376095,digipa-high-impact
-Antanas Sutkus,0.7369492,black-white
-Leonora Carrington,0.73726475,scribbles
-Hieronymus Bosch,0.7369955,scribbles
-A. J. Casson,0.73666203,scribbles
-Chaim Soutine,0.73662066,scribbles
-Artur Bordalo,0.7364549,weird
-Thomas Allom,0.68792284,fineart
-Louis Comfort Tiffany,0.7363504,fineart
-Philippe Druillet,0.7363382,cartoon
-Jan Van Eyck,0.7360621,fineart
-Sandro Botticelli,0.7359395,fineart
-Hieronim Bosch,0.7359308,scribbles
-Everett Shinn,0.7355817,fineart
-Camille Corot,0.7355603,fineart
-Nick Sharratt,0.73470485,scribbles
-Fernand Léger,0.7079839,scribbles
-Robert S. Duncanson,0.7346282,fineart
-Hieronymous Bosch,0.73453265,scribbles
-Charles Addams,0.7344034,scribbles
-Studio Ghibli,0.73439026,anime
-Archibald Motley,0.7343683,scribbles
-Anton Fadeev,0.73433846,cartoon
-Uemura Shoen,0.7342118,ukioe
-Ando Fuchs,0.73406494,black-white
-Jessie Willcox Smith,0.73398125,fineart
-Alex Garant,0.7333658,scribbles
-Lawren Harris,0.73331416,scribbles
-Anne Truitt,0.73297834,scribbles
-Richard Lindner,0.7328564,scribbles
-Sailor Moon,0.73281246,anime
-Bridget Bate Tichenor,0.73274165,scribbles
-Ralph Steadman,0.7325864,scribbles
-Annibale Carracci,0.73251307,fineart
-Dürer,0.7324789,fineart
-Abigail Larson,0.7319012,cartoon
-Bill Traylor,0.73189163,scribbles
-Louis Rhead,0.7318623,fineart
-David Burliuk,0.731803,scribbles
-Camille Pissarro,0.73172396,fineart
-Catrin Welz-Stein,0.73117495,scribbles
-William Etty,0.6497544,nudity
-Pierre Bonnard,0.7310132,scribbles
-Benoit B. Mandelbrot,0.5033001,digipa-med-impact
-Théodore Géricault,0.692039,fineart
-Andy Goldsworthy,0.7307565,digipa-high-impact
-Alfred Sisley,0.7306032,fineart
-Charles-Francois Daubigny,0.73057353,fineart
-Karel Thole,0.7305395,cartoon
-Andre Derain,0.73050404,scribbles
-Larry Poons,0.73023695,fineart
-Beauford Delaney,0.72999024,scribbles
-Ruth Bernhard,0.72990334,black-white
-David Alfaro Siqueiros,0.7297947,scribbles
-Gaugin,0.729636,fineart
-Carl Larsson,0.7296195,cartoon
-Albrecht Dürer,0.72946966,fineart
-Henri De Toulouse Lautrec,0.7294263,cartoon
-Shotaro Ishinomori,0.7292093,anime
-Hope Gangloff,0.729082,scribbles
-Vivian Maier,0.72897506,digipa-high-impact
-Alex Andreev,0.6442978,digipa-high-impact
-Julie Blackmon,0.72862685,c
-Arthur Melville,0.7286146,fineart
-Henri Michaux,0.599607,fineart
-William Steig,0.7283096,scribbles
-Octavio Ocampo,0.72814554,scribbles
-Cy Twombly,0.72814107,scribbles
-Guy Denning,0.67375445,fineart
-Maxfield Parrish,0.7280283,fineart
-Randolph Caldecott,0.7279564,fineart
-Duccio,0.72795,fineart
-Ray Donley,0.5837457,fineart
-Hiroshi Sugimoto,0.6497892,digipa-high-impact
-Daniela Uhlig,0.4691466,special
-Go Nagai,0.72770613,anime
-Carlo Crivelli,0.72764605,fineart
-Helmut Newton,0.44433144,digipa-low-impact
-Josef Albers,0.7061394,scribbles
-Henry Moret,0.7274567,fineart
-André Masson,0.727404,scribbles
-Henri Fantin Latour,0.72732764,fineart
-Theo van Rysselberghe,0.7272843,fineart
-John Wayne Gacy,0.72686327,scribbles
-Carlos Schwabe,0.7267612,fineart
-Herbert Bayer,0.7094297,scribbles
-Domenichino,0.72667265,fineart
-Liam Wong,0.7262276,special
-George Caleb Bingham,0.7262154,digipa-high-impact
-Gigadō Ashiyuki,0.7261864,fineart
-Chaïm Soutine,0.72603923,scribbles
-Ary Scheffer,0.64913243,fineart
-Rockwell Kent,0.7257272,scribbles
-Jean-Paul Riopelle,0.72570604,fineart
-Ed Mell,0.6637067,cartoon
-Ismail Inceoglu,0.72561014,special
-Edgar Degas,0.72538006,fineart
-Giorgione,0.7252798,fineart
-Charles-François Daubigny,0.7252482,fineart
-Arthur Lismer,0.7251765,scribbles
-Aaron Siskind,0.4852289,digipa-med-impact
-Arkhip Kuindzhi,0.7249981,fineart
-Joseph Mallord William Turner,0.6834406,fineart
-Dante Gabriel Rossetti,0.7244541,fineart
-Ernst Haeckel,0.6660129,fineart
-Rebecca Guay,0.72439146,cartoon
-Anthony Gerace,0.636678,digipa-high-impact
-Martin Kippenberger,0.72418386,scribbles
-Diego Giacometti,0.72415763,scribbles
-Dmitry Kustanovich,0.7241322,cartoon
-Dora Carrington,0.7239633,scribbles
-Shusei Nagaoko,0.7238965,anime
-Odilon Redon,0.72381747,scribbles
-Shohei Otomo,0.7132803,nudity
-Barnett Newman,0.7236389,scribbles
-Jean Fouquet,0.7235963,fineart
-Gustav Klimt,0.72356784,nudity
-Francisco Josè de Goya,0.6589663,fineart
-Bonnard Pierre,0.72309464,nudity
-Brooke Shaden,0.61281693,digipa-high-impact
-Mao Hamaguchi,0.7228292,scribbles
-Frederick Edwin Church,0.64416,fineart
-Asher Brown Durand,0.72264796,fineart
-George Baselitz,0.7223453,scribbles
-Sam Bosma,0.7223237,fineart
-Asaf Hanuka,0.72222745,scribbles
-David Teniers the Younger,0.7221168,fineart
-Nicola Samori,0.68747556,nudity
-Claude Lorrain,0.7217102,fineart
-Hermenegildo Anglada Camarasa,0.7214374,nudity
-Pablo Picasso,0.72142905,scribbles
-Howard Chaykin,0.7213998,cartoon
-Ferdinand Hodler,0.7213758,nudity
-Farel Dalrymple,0.7213298,fineart
-Lyubov Popova,0.7213024,scribbles
-Albin Egger-Lienz,0.72120845,fineart
-Geertgen tot Sint Jans,0.72107565,fineart
-Kate Greenaway,0.72069687,fineart
-Louise Bourgeois,0.7206516,fineart
-Miriam Schapiro,0.72026414,fineart
-Pieter Claesz,0.7200939,fineart
-George B. Bridgman,0.5592567,fineart
-Piet Mondrian,0.71990657,scribbles
-Michelangelo Merisi Da Caravaggio,0.7094674,fineart
-Marie Spartali Stillman,0.71986604,fineart
-Gertrude Abercrombie,0.7196962,scribbles
-Louis Icart,0.7195913,fineart
-David Driskell,0.719564,scribbles
-Paula Modersohn-Becker,0.7193769,scribbles
-George Hurrell,0.57496595,digipa-high-impact
-Andrea Mantegna,0.7190254,fineart
-Silvestro Lega,0.71891177,fineart
-Junji Ito,0.7188978,anime
-Jacob Hashimoto,0.7186867,digipa-high-impact
-Benjamin West,0.6642946,fineart
-David Teniers the Elder,0.7181293,fineart
-Roberto Matta,0.71808386,fineart
-Chiho Aoshima,0.71801454,anime
-Amedeo Modigliani,0.71788836,scribbles
-Raja Ravi Varma,0.71788085,fineart
-Roberto Ferri,0.538221,nudity
-Winslow Homer,0.7176876,fineart
-Horace Vernet,0.65729,fineart
-Lucas Cranach the Elder,0.71738195,fineart
-Godfried Schalcken,0.625893,fineart
-Affandi,0.7170285,nudity
-Diane Arbus,0.655138,digipa-high-impact
-Joseph Ducreux,0.65247905,digipa-high-impact
-Berthe Morisot,0.7165984,fineart
-Hilma af Klint,0.71643853,scribbles
-Filippino Lippi,0.7163017,fineart
-Leonid Afremov,0.7163005,fineart
-Chris Ware,0.71628594,scribbles
-Marius Borgeaud,0.7162446,scribbles
-M.W. Kaluta,0.71612585,cartoon
-Govert Flinck,0.68975246,fineart
-Charles Demuth,0.71605396,scribbles
-Coles Phillips,0.7158309,scribbles
-Oskar Fischinger,0.6721027,digipa-high-impact
-David Teniers III,0.71569765,fineart
-Jean Delville,0.7156771,fineart
-Antonio Saura,0.7155949,scribbles
-Bridget Riley,0.7155669,fineart
-Gordon Parks,0.5759978,digipa-high-impact
-Anselm Kiefer,0.71514887,scribbles
-Remedios Varo,0.7150927,weird
-Franz Hegi,0.71495223,scribbles
-Kati Horna,0.71486115,black-white
-Arshile Gorky,0.71459055,scribbles
-David LaChapelle,0.7144903,scribbles
-Fritz von Dardel,0.71446383,scribbles
-Edward Ruscha,0.71438885,fineart
-Blanche Hoschedé Monet,0.7143073,fineart
-Alexandre Calame,0.5735474,fineart
-Sean Scully,0.714154,fineart
-Alexandre Benois,0.7141515,fineart
-Sally Mann,0.6534312,black-white
-Thomas Eakins,0.7141104,fineart
-Arnold Böcklin,0.71407956,fineart
-Alfonse Mucha,0.7139052,special
-Damien Hirst,0.7136273,scribbles
-Lee Krasner,0.71362555,scribbles
-Dorothea Lange,0.71361613,black-white
-Juan Gris,0.7132987,scribbles
-Bernardo Bellotto,0.70720065,fineart
-John Martin,0.5376847,fineart
-Harriet Backer,0.7131594,fineart
-Arnold Newman,0.5736342,digipa-high-impact
-Gjon Mili,0.46520913,digipa-low-impact
-Asger Jorn,0.7129575,scribbles
-Chesley Bonestell,0.6063316,fineart
-Agostino Carracci,0.7128167,fineart
-Peter Wileman,0.71271706,cartoon
-Chen Hongshou,0.71268153,ukioe
-Catherine Hyde,0.71266896,scribbles
-Andrea Pozzo,0.626546,fineart
-Kitty Lange Kielland,0.7125735,fineart
-Cornelis Saftleven,0.6684047,fineart
-Félix Vallotton,0.71237606,fineart
-Albrecht Durer,0.7122327,fineart
-Jackson Pollock,0.71222305,scribbles
-John Bratby,0.7122171,scribbles
-Beksinski,0.71218586,fineart
-James Thomas Watts,0.5959548,fineart
-Konstantin Korovin,0.71188873,fineart
-Gustave Caillebotte,0.71181154,fineart
-Dean Ellis,0.50233585,fineart
-Friedrich von Amerling,0.6420181,fineart
-Christopher Balaskas,0.67935324,special
-Alexander Rodchenko,0.67415404,scribbles
-Alfred Cheney Johnston,0.6647291,fineart
-Mikalojus Konstantinas Ciurlionis,0.710677,scribbles
-Jean-Antoine Watteau,0.71061164,fineart
-Paul Delvaux,0.7105914,scribbles
-Francesco del Cossa,0.7104901,nudity
-Isaac Cordal,0.71046066,weird
-Hikari Shimoda,0.7104546,weird
-François Boucher,0.67153126,fineart
-Akos Major,0.7103802,digipa-high-impact
-Bernard Buffet,0.7103491,cartoon
-Brandon Woelfel,0.6727086,digipa-high-impact
-Edouard Manet,0.7101296,fineart
-Auguste Herbin,0.6866145,scribbles
-Eugene Delacroix,0.70995826,fineart
-L. Birge Harrison,0.70989627,fineart
-Howard Pyle,0.70979863,fineart
-Diane Dillon,0.70968723,scribbles
-Hans Erni,0.7096618,scribbles
-Richard Diebenkorn,0.7096184,scribbles
-Thomas Gainsborough,0.6759419,fineart
-Maria Sibylla Merian,0.7093275,fineart
-François Joseph Heim,0.6175854,fineart
-E. H. Shepard,0.7091189,cartoon
-Hsiao-Ron Cheng,0.7090618,scribbles
-Canaletto,0.7090392,fineart
-John Atkinson Grimshaw,0.7087531,fineart
-Giovanni Battista Tiepolo,0.6754107,fineart
-Cornelis van Poelenburgh,0.69821274,fineart
-Raina Telgemeier,0.70846486,scribbles
-Francesco Hayez,0.6960006,fineart
-Gilbert Stuart,0.659772,fineart
-Konstantin Yuon,0.7081486,fineart
-Antonello da Messina,0.70806944,fineart
-Austin Osman Spare,0.7079903,fineart
-James Ensor,0.70781446,scribbles
-Claude Bonin-Pissarro,0.70739406,fineart
-Mikhail Vrubel,0.70738363,fineart
-Angelica Kauffman,0.6748828,fineart
-Viktor Vasnetsov,0.7072422,fineart
-Alphonse Osbert,0.70724136,fineart
-Tsutomu Nihei,0.7070495,anime
-Harvey Quaytman,0.63613266,fineart
-Jamie Hawkesworth,0.706914,digipa-high-impact
-Francesco Guardi,0.70682615,fineart
-Jean-Honoré Fragonard,0.6518248,fineart
-Brice Marden,0.70673287,digipa-high-impact
-Charles-Amédée-Philippe van Loo,0.6725916,fineart
-Mati Klarwein,0.7066092,n
-Gerard ter Borch,0.706589,fineart
-Dan Hillier,0.48966256,digipa-med-impact
-Federico Barocci,0.682664,fineart
-Henri Le Sidaner,0.70637953,fineart
-Olivier Bonhomme,0.7063748,scribbles
-Edward Weston,0.7061382,black-white
-Giovanni Paolo Cavagna,0.6840265,fineart
-Germaine Krull,0.6621777,black-white
-Hans Holbein the Younger,0.70590156,fineart
-François Bocion,0.6272365,fineart
-Georg Baselitz,0.7053314,scribbles
-Caravaggio,0.7050303,fineart
-Anne Rothenstein,0.70502245,scribbles
-Wadim Kashin,0.43714935,digipa-low-impact
-Heinrich Lefler,0.7048054,fineart
-Jacob van Ruisdael,0.7047918,fineart
-Bartholomeus van Bassen,0.6676872,fineart
-Jeffrey Smith art,0.56750107,fineart
-Anne Packard,0.7046703,weird
-Jean-François Millet,0.7045456,fineart
-Andrey Remnev,0.7041204,digipa-high-impact
-Fujiwara Takanobu,0.70410216,ukioe
-Elliott Erwitt,0.69950557,black-white
-Fern Coppedge,0.7036215,fineart
-Bartholomeus van der Helst,0.66411966,fineart
-Rembrandt Van Rijn,0.6979987,fineart
-Rene Magritte,0.703457,scribbles
-Aelbert Cuyp,0.7033657,fineart
-Gerda Wegener,0.70319015,scribbles
-Graham Sutherland,0.7031714,scribbles
-Gerrit Dou,0.7029986,fineart
-August Friedrich Schenck,0.6801586,fineart
-George Herriman,0.7028568,scribbles
-Stanisław Szukalski,0.6903354,fineart
-Slim Aarons,0.70222545,digipa-high-impact
-Ernst Thoms,0.70221686,fineart
-Louis Wain,0.702186,fineart
-Artemisia Gentileschi,0.70198226,fineart
-Eugène Delacroix,0.70155394,fineart
-Peter Bagge,0.70127463,scribbles
-Jeffrey Catherine Jones,0.7012148,cartoon
-Eugène Carrière,0.65272695,fineart
-Alexander Millar,0.7011144,scribbles
-Nobuyoshi Araki,0.70108867,fareast
-Tintoretto,0.6702795,fineart
-André Derain,0.7009005,scribbles
-Charles Maurice Detmold,0.70079994,fineart
-Francisco de Zurbarán,0.7007234,fineart
-Laurie Greasley,0.70072114,cartoon
-Lynda Benglis,0.7006948,digipa-high-impact
-Cecil Beaton,0.66362655,black-white
-Gustaf Tenggren,0.7006041,cartoon
-Abdur Rahman Chughtai,0.7004994,ukioe
-Constantin Brancusi,0.7004367,scribbles
-Mikhail Larionov,0.7004066,fineart
-Jan van Kessel the Elder,0.70040506,fineart
-Chantal Joffe,0.70036674,scribbles
-Charles-André van Loo,0.6830367,fineart
-Reginald Marsh,0.6301042,fineart
-Elsa Bleda,0.70005083,digipa-high-impact
-Peter Paul Rubens,0.65745676,fineart
-Eugène Boudin,0.70001304,fineart
-Charles Willson Peale,0.66907954,fineart
-Brian Mashburn,0.63395154,digipa-high-impact
-Barkley L. Hendricks,0.69986427,n
-Yoshiyuki Tomino,0.6998095,anime
-Guido Reni,0.6416875,fineart
-Lynd Ward,0.69958556,fineart
-John Constable,0.6907788,fineart
-František Kupka,0.6993329,fineart
-Pieter Bruegel The Elder,0.6992879,scribbles
-Benjamin Gerritsz Cuyp,0.6992173,fineart
-Nicolas Mignard,0.6988214,fineart
-Augustus Edwin Mulready,0.6482165,fineart
-Andrea del Sarto,0.698532,fineart
-Edward Steichen,0.69837445,black-white
-James Abbott McNeill Whistler,0.69836813,fineart
-Alphonse Legros,0.6983243,fineart
-Ivan Aivazovsky,0.64588225,fineart
-Giovanni Francesco Barbieri,0.6981316,fineart
-Grace Cossington Smith,0.69811064,fineart
-Bert Stern,0.53411555,scribbles
-Mary Cassatt,0.6980135,fineart
-Jules Bastien-Lepage,0.69796044,fineart
-Max Ernst,0.69777006,fineart
-Kentaro Miura,0.697743,anime
-Georges Rouault,0.69758564,scribbles
-Josephine Wall,0.6973667,fineart
-Anne-Louis Girodet,0.58104825,nudity
-Bert Hardy,0.6972966,black-white
-Adriaen van de Velde,0.69716156,fineart
-Andreas Achenbach,0.61108655,fineart
-Hayv Kahraman,0.69705284,fineart
-Beatrix Potter,0.6969851,fineart
-Elmer Bischoff,0.6968948,fineart
-Cornelis de Heem,0.6968436,fineart
-Inio Asano,0.6965007,anime
-Alfred Henry Maurer,0.6964837,fineart
-Gottfried Helnwein,0.6962953,digipa-high-impact
-Paul Barson,0.54196984,digipa-high-impact
-Roger de La Fresnaye,0.69620967,fineart
-Abraham Mignon,0.60605425,fineart
-Albert Bloch,0.69573116,nudity
-Charles Dana Gibson,0.67155975,fineart
-Alexandre-Évariste Fragonard,0.6507174,fineart
-Ernst Fuchs,0.6953538,nudity
-Alfredo Jaar,0.6952965,digipa-high-impact
-Judy Chicago,0.6952246,weird
-Frans van Mieris the Younger,0.6951849,fineart
-Aertgen van Leyden,0.6951305,fineart
-Emily Carr,0.69512105,fineart
-Frances MacDonald,0.6950408,scribbles
-Hannah Höch,0.69495845,scribbles
-Gillis Rombouts,0.58770025,fineart
-Käthe Kollwitz,0.6947756,fineart
-Barbara Stauffacher Solomon,0.6920825,fineart
-Georges Lacombe,0.6944455,fineart
-Gwen John,0.6944161,fineart
-Terada Katsuya,0.6944026,cartoon
-James Gillray,0.6871335,fineart
-Robert Crumb,0.69420326,fineart
-Bruce Pennington,0.6545669,fineart
-David Firth,0.69400465,scribbles
-Arthur Boyd,0.69399726,fineart
-Antonin Artaud,0.67321455,fineart
-Giuseppe Arcimboldo,0.6937329,fineart
-Jim Mahfood,0.6936606,cartoon
-Ossip Zadkine,0.6494374,scribbles
-Atelier Olschinsky,0.69349927,fineart
-Carl Frederik von Breda,0.57274634,fineart
-Ken Sugimori,0.6932626,anime
-Chris Friel,0.5399168,fineart
-Andrew Macara,0.69307995,fineart
-Alexander Jansson,0.69298327,scribbles
-Anne Brigman,0.6865817,black-white
-George Ault,0.66756654,fineart
-Arkhyp Kuindzhi,0.6928072,digipa-high-impact
-Emiliano Ponzi,0.69278395,scribbles
-William Holman Hunt,0.6927663,fineart
-Tamara Lempicka,0.6386007,scribbles
-Mark Ryden,0.69259655,fineart
-Giovanni Paolo Pannini,0.6802902,fineart
-Carl Barks,0.6923666,cartoon
-Fritz Bultman,0.6318746,fineart
-Salomon van Ruysdael,0.690313,fineart
-Carrie Mae Weems,0.6645416,n
-Agostino Arrivabene,0.61166185,fineart
-Gustave Boulanger,0.655797,fineart
-Henry Justice Ford,0.51214355,fareast
-Bernardo Strozzi,0.63510317,fineart
-André Lhote,0.68718815,scribbles
-Paul Corfield,0.6915611,scribbles
-Gifford Beal,0.6914777,fineart
-Hirohiko Araki,0.6914078,anime
-Emil Carlsen,0.691326,fineart
-Frans van Mieris the Elder,0.6912799,fineart
-Simon Stalenhag,0.6912775,special
-Henry van de Velde,0.64838886,fineart
-Eleanor Fortescue-Brickdale,0.6909729,fineart
-Thomas W Schaller,0.69093937,special
-NHK Animation,0.6907677,cartoon
-Euan Uglow,0.69060403,scribbles
-Hendrick Goltzius,0.69058937,fineart
-William Blake,0.69038224,fineart
-Vito Acconci,0.58409876,digipa-high-impact
-Billy Childish,0.6902057,scribbles
-Ben Quilty,0.6875855,fineart
-Mark Briscoe,0.69010437,fineart
-Adriaen van de Venne,0.6899867,fineart
-Alasdair McLellan,0.6898454,digipa-high-impact
-Ed Paschke,0.68974686,scribbles
-Guy Rose,0.68960273,fineart
-Barbara Hepworth,0.68958247,fineart
-Edward Henry Potthast,0.6895703,fineart
-Francis Bacon,0.6895397,scribbles
-Pawel Kuczynski,0.6894536,fineart
-Bjarke Ingels,0.68933153,digipa-high-impact
-Henry Ossawa Tanner,0.68932164,fineart
-Alessandro Allori,0.6892961,fineart
-Abraham van Calraet,0.63841593,fineart
-Egon Schiele,0.6891415,scribbles
-Tim Doyle,0.5474768,digipa-high-impact
-Grandma Moses,0.6890782,fineart
-John Frederick Kensett,0.61981744,fineart
-Giacomo Balla,0.68893707,fineart
-Jamie Baldridge,0.6546651,digipa-high-impact
-Max Beckmann,0.6884731,scribbles
-Cornelis van Haarlem,0.6677613,fineart
-Edward Hopper,0.6884258,special
-Barkley Hendricks,0.6883637,n
-Patrick Dougherty,0.688321,digipa-high-impact
-Karol Bak,0.6367705,fineart
-Pierre Puvis de Chavannes,0.6880703,fineart
-Antoni Tàpies,0.685689,fineart
-Alexander Nasmyth,0.57695735,fineart
-Laurent Grasso,0.5793272,fineart
-Camille Walala,0.6076875,digipa-high-impact
-Fairfield Porter,0.68790644,fineart
-Alex Colville,0.68787855,fineart
-Herb Ritts,0.51471305,scribbles
-Gerhard Munthe,0.687658,fineart
-Susan Seddon Boulet,0.68762136,scribbles
-Liu Ye,0.68760437,fineart
-Robert Antoine Pinchon,0.68744636,fineart
-Fujiwara Nobuzane,0.6873439,fineart
-Frederick Carl Frieseke,0.6873361,fineart
-Aert van der Neer,0.6159286,fineart
-Allen Jones,0.6869935,scribbles
-Anja Millen,0.6064488,digipa-high-impact
-Esaias van de Velde,0.68673944,fineart
-Gyoshū Hayami,0.68665624,anime
-William Hogarth,0.6720842,fineart
-Frederic Church,0.6865637,fineart
-Cyril Rolando,0.68644965,cartoon
-Frederic Edwin Church,0.6863009,fineart
-Thomas Rowlandson,0.66726154,fineart
-Joachim Brohm,0.68601763,digipa-high-impact
-Cristofano Allori,0.6858083,fineart
-Adrianus Eversen,0.58259964,fineart
-Richard Dadd,0.68546164,fineart
-Ambrosius Bosschaert II,0.6854217,fineart
-Paolo Veronese,0.68422073,fineart
-Abraham van den Tempel,0.66463804,fineart
-Duncan Grant,0.6852565,scribbles
-Hendrick Cornelisz. van Vliet,0.6851691,fineart
-Geof Darrow,0.6851174,scribbles
-Émile Bernard,0.6850957,fineart
-Brian Bolland,0.68496394,scribbles
-James Gilleard,0.6849431,cartoon
-Anton Raphael Mengs,0.6689196,fineart
-Augustus Jansson,0.6845705,digipa-high-impact
-Hendrik Goltzius,0.6843367,fineart
-Domenico Quaglio the Younger,0.65769434,fineart
-Cicely Mary Barker,0.6841806,fineart
-William Eggleston,0.6840795,digipa-high-impact
-David Choe,0.6840449,scribbles
-Adam Elsheimer,0.6716068,fineart
-Heinrich Danioth,0.5390186,fineart
-Franz Stuck,0.6836468,fineart
-Bernie Wrightson,0.64101505,fineart
-Dorina Costras,0.6835419,fineart
-El Greco,0.68343943,fineart
-Gatōken Shunshi,0.6833314,anime
-Giovanni Bellini,0.67622876,fineart
-Aron Wiesenfeld,0.68331146,nudity
-Boris Kustodiev,0.68329334,fineart
-Alec Soth,0.5597321,digipa-high-impact
-Artus Scheiner,0.6313348,fineart
-Kelly Vivanco,0.6830933,scribbles
-Shaun Tan,0.6830649,fineart
-Anthony van Dyck,0.6577681,fineart
-Neil Welliver,0.68297863,nudity
-Robert McCall,0.68294585,fineart
-Sandra Chevrier,0.68284667,scribbles
-Yinka Shonibare,0.68256056,n
-Arthur Tress,0.6301861,digipa-high-impact
-Richard McGuire,0.6820089,scribbles
-Anni Albers,0.65708244,digipa-high-impact
-Aleksey Savrasov,0.65207493,fineart
-Wayne Barlowe,0.6537874,fineart
-Giorgio de Chirico,0.6815907,fineart
-Ernest Procter,0.6815795,fineart
-Adriaen Brouwer,0.6815058,fineart
-Ilya Glazunov,0.6813533,fineart
-Alison Bechdel,0.68096143,scribbles
-Carl Holsoe,0.68082225,fineart
-Alfred Edward Chalon,0.6464571,fineart
-Gerard David,0.68058,fineart
-Basil Blackshaw,0.6805679,fineart
-Gerrit Adriaenszoon Berckheyde,0.67340267,fineart
-George Hendrik Breitner,0.6804209,fineart
-Abraham Bloemaert,0.68036544,fineart
-Ferdinand Van Kessel,0.67742276,fineart
-Hugo Simberg,0.68031186,fineart
-Gaston Bussière,0.665221,fineart
-Shawn Coss,0.42407864,digipa-low-impact
-Hanabusa Itchō,0.68023074,ukioe
-Magnus Enckell,0.6801553,fineart
-Gary Larson,0.6801336,scribbles
-George Manson,0.68013126,digipa-high-impact
-Hayao Miyazaki,0.6800754,anime
-Carl Spitzweg,0.66581815,fineart
-Ambrosius Holbein,0.6798341,fineart
-Domenico Pozzi,0.6434162,fineart
-Dorothea Tanning,0.6797955,fineart
-Jeannette Guichard-Bunel,0.5251578,digipa-high-impact
-Victor Moscoso,0.62962687,fineart
-Francis Picabia,0.6795391,scribbles
-Charles W. Bartlett,0.67947805,fineart
-David A Hardy,0.5554935,fineart
-C. R. W. Nevinson,0.67946506,fineart
-Man Ray,0.6507145,scribbles
-Albert Bierstadt,0.67935765,fineart
-Charles Le Brun,0.6758479,fineart
-Lovis Corinth,0.67913896,fineart
-Herbert Abrams,0.5507507,digipa-high-impact
-Giorgio Morandi,0.6789025,fineart
-Agnolo Bronzino,0.6787985,fineart
-Abraham Pether,0.66922426,fineart
-John Bauer,0.6786695,fineart
-Arthur Stanley Wilkinson,0.67860866,fineart
-Arthur Wardle,0.5510789,fineart
-George Romney,0.62868094,fineart
-Laurie Lipton,0.5201844,fineart
-Mickalene Thomas,0.45433685,digipa-low-impact
-Alice Rahon,0.6777824,scribbles
-Gustave Van de Woestijne,0.6777346,scribbles
-Laurel Burch,0.67766285,fineart
-Hendrik Gerritsz Pot,0.67750573,fineart
-John William Waterhouse,0.677472,fineart
-Conor Harrington,0.5967809,fineart
-Gabriel Ba,0.6773366,cartoon
-Franz Xaver Winterhalter,0.62229514,fineart
-George Cruikshank,0.6473593,fineart
-Hyacinthe Rigaud,0.67717785,fineart
-Cornelis Claesz van Wieringen,0.6770269,fineart
-Adriaen van Outrecht,0.67682564,fineart
-Yaacov Agam,0.6767926,fineart
-Franz von Lenbach,0.61948,fineart
-Clyfford Still,0.67667866,fineart
-Alexander Roslin,0.66719526,fineart
-Barry Windsor Smith,0.6765375,cartoon
-Takeshi Obata,0.67643225,anime
-John Harris,0.47712502,fineart
-Bruce Davidson,0.6763525,digipa-high-impact
-Hendrik Willem Mesdag,0.6762745,fineart
-Makoto Shinkai,0.67610705,anime
-Andreas Gursky,0.67610145,digipa-high-impact
-Mike Winkelmann (Beeple),0.6510196,digipa-high-impact
-Gustave Moreau,0.67607844,fineart
-Frank Weston Benson,0.6760142,fineart
-Eduardo Kingman,0.6759026,fineart
-Benjamin Williams Leader,0.5611925,fineart
-Hervé Guibert,0.55973417,black-white
-Cornelis Dusart,0.6753622,fineart
-Amédée Guillemin,0.6752696,fineart
-Alessio Albi,0.6752633,digipa-high-impact
-Matthias Grünewald,0.6751779,fineart
-Fujishima Takeji,0.6751577,anime
-Georges Braque,0.67514753,scribbles
-John Salminen,0.67498183,fineart
-Atey Ghailan,0.674873,scribbles
-Giovanni Antonio Galli,0.657484,fineart
-Julie Mehretu,0.6748382,fineart
-Jean Auguste Dominique Ingres,0.6746286,fineart
-Francesco Albani,0.6621554,fineart
-Anato Finnstark,0.6744919,digipa-high-impact
-Giovanni Bernardino Mazzolini,0.64416045,fineart
-Antoine Le Nain,0.6233709,fineart
-Ford Madox Brown,0.6743224,fineart
-Gerhard Richter,0.67426133,fineart
-theCHAMBA,0.6742506,cartoon
-Edward Julius Detmold,0.67421955,fineart
-George Stubbs,0.6209227,fineart
-George Tooker,0.6740602,scribbles
-Faith Ringgold,0.6739976,scribbles
-Giambattista Pittoni,0.5792371,fineart
-George Bellows,0.6737008,fineart
-Aldus Manutius,0.67366326,fineart
-Ambrosius Bosschaert,0.67364097,digipa-high-impact
-Michael Parkes,0.6133628,fineart
-Hans Bellmer,0.6735973,nudity
-Sir James Guthrie,0.67359626,fineart
-Charles Spencelayh,0.67356884,fineart
-Ivan Shishkin,0.6734136,fineart
-Hans Holbein the Elder,0.6733856,fineart
-Filip Hodas,0.60053295,digipa-high-impact
-Herman Saftleven,0.6732188,digipa-high-impact
-Dirck de Quade van Ravesteyn,0.67309594,fineart
-Joe Fenton,0.6730916,scribbles
-Arnold Bocklin,0.6730706,fineart
-Baiōken Eishun,0.6730663,anime
-Giovanni Giacometti,0.6730505,fineart
-Giovanni Battista Gaulli,0.65036476,fineart
-William Stout,0.672887,fineart
-Gavin Hamilton,0.5982757,fineart
-John Stezaker,0.6726847,black-white
-Frederick McCubbin,0.67263377,fineart
-Christoph Ludwig Agricola,0.62750757,fineart
-Alice Neel,0.67255914,scribbles
-Giovanni Battista Venanzi,0.61996603,fineart
-Miho Hirano,0.6724092,anime
-Tom Thomson,0.6723876,fineart
-Alfred Munnings,0.6723851,fineart
-David Wilkie,0.6722781,fineart
-Adriaen van Ostade,0.67220736,fineart
-Alfred Eisenstaedt,0.67213774,black-white
-Leon Kossoff,0.67208946,fineart
-Georges de La Tour,0.6421979,fineart
-Chuck Close,0.6719756,digipa-high-impact
-Herbert MacNair,0.6719506,scribbles
-Edward Atkinson Hornel,0.6719265,fineart
-Becky Cloonan,0.67192084,cartoon
-Gian Lorenzo Bernini,0.58210254,fineart
-Hein Gorny,0.4982776,digipa-med-impact
-Joe Webb,0.6714884,fineart
-Cornelis Pietersz Bega,0.64423996,fineart
-Christian Krohg,0.6713641,fineart
-Cornelia Parker,0.6712246,fineart
-Anna Mary Robertson Moses,0.6709144,fineart
-Quentin Tarantino,0.6708354,digipa-high-impact
-Frederic Remington,0.67074275,fineart
-Barent Fabritius,0.6707407,fineart
-Oleg Oprisco,0.6707388,digipa-high-impact
-Hendrick van Streeck,0.670666,fineart
-Bakemono Zukushi,0.67051035,anime
-Lucy Madox Brown,0.67032814,fineart
-Paul Wonner,0.6700563,scribbles
-Guido Borelli Da Caluso,0.66966087,digipa-high-impact
-Emil Alzamora,0.5844039,nudity
-Heinrich Brocksieper,0.64469147,fineart
-Dan Smith,0.669563,digipa-high-impact
-Lois van Baarle,0.6695091,scribbles
-Arthur Garfield Dove,0.6694996,scribbles
-Matthias Jung,0.66936135,digipa-high-impact
-José Clemente Orozco,0.6693544,scribbles
-Don Bluth,0.6693046,cartoon
-Akseli Gallen-Kallela,0.66927314,fineart
-Alex Howitt,0.52858865,digipa-high-impact
-Giovanni Bernardino Asoleni,0.6635405,fineart
-Frederick Goodall,0.6690712,fineart
-Francesco Bartolozzi,0.63431,fineart
-Edmund Leighton,0.6689639,fineart
-Abraham Willaerts,0.5966594,fineart
-François Louis Thomas Francia,0.6207474,fineart
-Carel Fabritius,0.6688478,fineart
-Flora Macdonald Reid,0.6687404,fineart
-Bartholomeus Breenbergh,0.6163084,fineart
-Bernardino Mei,0.6486895,fineart
-Carel Weight,0.6684968,fineart
-Aristide Maillol,0.66843045,scribbles
-Chris Leib,0.60567486,fineart
-Giovanni Battista Piazzetta,0.65012705,fineart
-Daniel Maclise,0.6678073,fineart
-Giovanni Bernardino Azzolini,0.65774256,fineart
-Aaron Horkey,0.6676864,fineart
-Otto Dix,0.667294,scribbles
-Ferdinand Bol,0.6414797,fineart
-Adriaen Coorte,0.6670663,fineart
-William Gropper,0.6669881,scribbles
-Gerard de Lairesse,0.6639489,fineart
-Mab Graves,0.6668356,scribbles
-Fernando Amorsolo,0.66683346,fineart
-Pixar Concept Artists,0.6667752,cartoon
-Alfred Augustus Glendening,0.64009607,fineart
-Diego Velázquez,0.6666799,fineart
-Jerry Pinkney,0.6665478,fineart
-Antoine Wiertz,0.6143825,fineart
-Alberto Burri,0.6618252,scribbles
-Max Weber,0.6664029,fineart
-Hans Baluschek,0.66636246,fineart
-Annie Swynnerton,0.6663346,fineart
-Albert Dubois-Pillet,0.57526016,fineart
-Dora Maar,0.62862253,digipa-high-impact
-Kay Sage,0.5614823,fineart
-David A. Hardy,0.51376164,fineart
-Alberto Biasi,0.42917693,digipa-low-impact
-Fra Bartolomeo,0.6661105,fineart
-Hendrick van Balen,0.65754294,fineart
-Edwin Austin Abbey,0.66596496,fineart
-George Frederic Watts,0.66595024,fineart
-Alexei Kondratyevich Savrasov,0.6470352,fineart
-Anna Ancher,0.66581213,fineart
-Irma Stern,0.66580737,fineart
-Frédéric Bazille,0.6657115,fineart
-Awataguchi Takamitsu,0.6656272,anime
-Edward Sorel,0.6655388,fineart
-Edward Lear,0.6655078,fineart
-Gabriel Metsu,0.6654555,fineart
-Giovanni Battista Innocenzo Colombo,0.6653655,fineart
-Scott Naismith,0.6650656,fineart
-John Perceval,0.6650283,fineart
-Girolamo Muziano,0.64234406,fineart
-Cornelis de Man,0.66494393,fineart
-Cornelis Bisschop,0.64119905,digipa-high-impact
-Hans Leu the Elder,0.64770013,fineart
-Michael Hutter,0.62479556,fineart
-Cornelia MacIntyre Foley,0.6510235,fineart
-Todd McFarlane,0.6647763,cartoon
-John James Audubon,0.6279882,digipa-high-impact
-William Henry Hunt,0.57340264,fineart
-John Anster Fitzgerald,0.6644317,fineart
-Tomer Hanuka,0.6643152,cartoon
-Alex Prager,0.6641814,fineart
-Heinrich Kley,0.6641148,fineart
-Anne Redpath,0.66407835,scribbles
-Marianne North,0.6640104,fineart
-Daniel Merriam,0.6639365,fineart
-Bill Carman,0.66390574,fineart
-Méret Oppenheim,0.66387725,digipa-high-impact
-Erich Heckel,0.66384083,fineart
-Iryna Yermolova,0.663623,fineart
-Antoine Ignace Melling,0.61502695,fineart
-Akira Toriyama,0.6635002,anime
-Gregory Crewdson,0.59810174,digipa-high-impact
-Helene Schjerfbeck,0.66333634,fineart
-Antonio Mancini,0.6631618,fineart
-Zanele Muholi,0.58554715,n
-Balthasar van der Ast,0.66294503,fineart
-Toei Animations,0.6629127,anime
-Arthur Quartley,0.6628106,fineart
-Diego Rivera,0.6625808,fineart
-Hendrik van Steenwijk II,0.6623777,fineart
-James Tissot,0.6623415,fineart
-Kehinde Wiley,0.66218376,n
-Chiharu Shiota,0.6621249,digipa-high-impact
-George Grosz,0.6620224,fineart
-Peter De Seve,0.6616659,cartoon
-Ryan Hewett,0.6615638,fineart
-Hasegawa Tōhaku,0.66146004,anime
-Apollinary Vasnetsov,0.6613177,fineart
-Francis Cadell,0.66119456,fineart
-Henri Harpignies,0.6611012,fineart
-Henry Macbeth-Raeburn,0.6213787,fineart
-Christoffel van den Berghe,0.6609149,fineart
-Leiji Matsumoto,0.66089404,anime
-Adriaen van der Werff,0.638286,fineart
-Ramon Casas,0.6606529,fineart
-Arthur Hacker,0.66062653,fineart
-Edward Willis Redfield,0.66058433,fineart
-Carl Gustav Carus,0.65355223,fineart
-Francesca Woodman,0.60435605,digipa-high-impact
-Hans Makart,0.5881955,fineart
-Carne Griffiths,0.660091,weird
-Will Barnet,0.65995145,scribbles
-Fitz Henry Lane,0.659841,fineart
-Masaaki Sasamoto,0.6597158,anime
-Salvador Dali,0.6290813,scribbles
-Walt Kelly,0.6596993,digipa-high-impact
-Charlotte Nasmyth,0.56481636,fineart
-Ferdinand Knab,0.6596528,fineart
-Steve Lieber,0.6596117,scribbles
-Zhang Kechun,0.6595939,fareast
-Olivier Valsecchi,0.5324838,digipa-high-impact
-Joel Meyerowitz,0.65937585,digipa-high-impact
-Arthur Streeton,0.6592294,fineart
-Henriett Seth F.,0.6592273,fineart
-Genndy Tartakovsky,0.6591695,scribbles
-Otto Marseus van Schrieck,0.65890455,fineart
-Hanna-Barbera,0.6588123,cartoon
-Mary Anning,0.6588001,fineart
-Pamela Colman Smith,0.6587648,fineart
-Anton Mauve,0.6586873,fineart
-Hendrick Avercamp,0.65866685,fineart
-Max Pechstein,0.65860206,scribbles
-Franciszek Żmurko,0.56855476,fineart
-Felice Casorati,0.6584761,fineart
-Louis Janmot,0.65298057,fineart
-Thomas Cole,0.5408042,fineart
-Peter Mohrbacher,0.58273685,fineart
-Arnold Franz Brasz,0.65834284,nudity
-Christian Rohlfs,0.6582814,fineart
-Basil Gogos,0.658105,fineart
-Fitz Hugh Lane,0.657923,fineart
-Liubov Sergeevna Popova,0.62325525,fineart
-Elizabeth MacNicol,0.65773135,fineart
-Zinaida Serebriakova,0.6577016,fineart
-Ernest Lawson,0.6575238,fineart
-Bruno Catalano,0.6574354,fineart
-Albert Namatjira,0.6573372,fineart
-Fritz von Uhde,0.6572697,fineart
-Edwin Henry Landseer,0.62363374,fineart
-Naoto Hattori,0.621745,fareast
-Reylia Slaby,0.65709853,fineart
-Arthur Burdett Frost,0.6147318,fineart
-Frank Miller,0.65707314,digipa-high-impact
-Algernon Talmage,0.65702903,fineart
-Itō Jakuchū,0.6570199,digipa-high-impact
-Billie Waters,0.65684533,digipa-high-impact
-Ingrid Baars,0.58558,digipa-high-impact
-Pieter Jansz Saenredam,0.6566058,fineart
-Egbert van Heemskerck,0.6125889,fineart
-John French Sloan,0.6362145,fineart
-Craola,0.65639997,scribbles
-Benjamin Marra,0.61809736,nudity
-Anthony Thieme,0.65609205,fineart
-Satoshi Kon,0.65606606,anime
-Masamune Shirow,0.65592873,anime
-Alfred Stevens,0.6557321,fineart
-Hariton Pushwagner,0.6556745,anime
-Carlo Carrà,0.6556279,fineart
-Stuart Davis,0.6050534,digipa-high-impact
-David Shrigley,0.6553904,digipa-high-impact
-Albrecht Anker,0.65531695,fineart
-Anton Semenov,0.6552501,digipa-high-impact
-Fabio Hurtado,0.5955889,fineart
-Donald Judd,0.6552257,fineart
-Francisco de Burgos Mantilla,0.65516514,fineart
-Barthel Bruyn the Younger,0.6551433,fineart
-Abram Arkhipov,0.6550962,fineart
-Paulus Potter,0.65498203,fineart
-Edward Lamson Henry,0.6549521,fineart
-Audrey Kawasaki,0.654843,fineart
-George Catlin,0.6547183,fineart
-Adélaïde Labille-Guiard,0.6066263,fineart
-Sandy Skoglund,0.6546999,digipa-high-impact
-Hans Baldung,0.654431,fineart
-Ethan Van Sciver,0.65442884,cartoon
-Frans Hals,0.6542338,fineart
-Caspar David Friedrich,0.6542175,fineart
-Charles Conder,0.65420866,fineart
-Betty Churcher,0.65387225,fineart
-Claes Corneliszoon Moeyaert,0.65386075,fineart
-David Bomberg,0.6537477,fineart
-Abraham Bosschaert,0.6535562,fineart
-Giuseppe de Nittis,0.65354455,fineart
-John La Farge,0.65342575,fineart
-Frits Thaulow,0.65341854,fineart
-John Duncan,0.6532379,fineart
-Floris van Dyck,0.64900756,fineart
-Anton Pieck,0.65310377,fineart
-Roger Dean,0.6529647,nudity
-Maximilian Pirner,0.65280807,fineart
-Dorothy Johnstone,0.65267503,fineart
-Govert Dircksz Camphuysen,0.65258145,fineart
-Ryohei Hase,0.6168618,fineart
-Hans von Aachen,0.62437224,fineart
-Gustaf Munch-Petersen,0.6522485,fineart
-Earnst Haeckel,0.6344333,fineart
-Giovanni Battista Bracelli,0.62635326,fineart
-Hendrick Goudt,0.6521433,fineart
-Aneurin Jones,0.65191466,fineart
-Bryan Hitch,0.6518333,cartoon
-Coby Whitmore,0.6515695,fineart
-Barthélemy d'Eyck,0.65156406,fineart
-Quint Buchholz,0.65151155,fineart
-Adriaen Hanneman,0.6514815,fineart
-Tom Roberts,0.5855832,fineart
-Fernand Khnopff,0.6512954,nudity
-Charles Vess,0.6512271,cartoon
-Carlo Galli Bibiena,0.6511681,nudity
-Alexander Milne Calder,0.6081027,fineart
-Josan Gonzalez,0.6193469,cartoon
-Barthel Bruyn the Elder,0.6509954,fineart
-Jon Whitcomb,0.6046063,fineart
-Arcimboldo,0.6509897,fineart
-Hendrik van Steenwijk I,0.65086293,fineart
-Albert Joseph Pénot,0.65085316,fineart
-Edward Wadsworth,0.6308917,scribbles
-Andrew Wyeth,0.6507103,fineart
-Correggio,0.650689,fineart
-Frances Currey,0.65068,fineart
-Henryk Siemiradzki,0.56721973,fineart
-Worthington Whittredge,0.6504713,fineart
-Federico Zandomeneghi,0.65033823,fineart
-Isaac Levitan,0.6503356,fineart
-Russ Mills,0.65012795,fineart
-Edith Lawrence,0.65010095,fineart
-Gil Elvgren,0.5614284,digipa-high-impact
-Chris Foss,0.56495357,fineart
-Francesco Zuccarelli,0.612805,fineart
-Hendrick Bloemaert,0.64962655,fineart
-Egon von Vietinghoff,0.57180583,fineart
-Pixar,0.6495793,cartoon
-Daniel Clowes,0.6495775,fineart
-Friedrich Ritter von Friedländer-Malheim,0.6493772,fineart
-Rebecca Sugar,0.6492679,scribbles
-Chen Daofu,0.6492026,fineart
-Dustin Nguyen,0.64909416,cartoon
-Raymond Duchamp-Villon,0.6489605,nudity
-Daniel Garber,0.6489332,fineart
-Antonio Canova,0.58764786,fineart
-Algernon Blackwood,0.59256804,fineart
-Betye Saar,0.64877665,fineart
-William S. Burroughs,0.5505619,fineart
-Rodney Matthews,0.64844495,fineart
-Michelangelo Buonarroti,0.6484401,fineart
-Posuka Demizu,0.64843124,anime
-Joao Ruas,0.6484134,fineart
-Andy Fairhurst,0.6480388,special
-"Andries Stock, Dutch Baroque painter",0.6479797,fineart
-Antonio de la Gandara,0.6479292,fineart
-Bruce Timm,0.6477877,scribbles
-Harvey Kurtzman,0.64772683,cartoon
-Eiichiro Oda,0.64772165,anime
-Edwin Landseer,0.6166703,fineart
-Carl Heinrich Bloch,0.64755356,fineart
-Adriaen Isenbrant,0.6475428,fineart
-Santiago Caruso,0.6473954,fineart
-Alfred Guillou,0.6472603,fineart
-Clara Peeters,0.64725095,fineart
-Kim Jung Gi,0.6472225,cartoon
-Milo Manara,0.6471776,cartoon
-Phil Noto,0.6470769,anime
-Kaws,0.6470336,cartoon
-Desmond Morris,0.5951916,fineart
-Gediminas Pranckevicius,0.6467787,fineart
-Jack Kirby,0.6467424,cartoon
-Claes Jansz. Visscher,0.6466888,fineart
-Augustin Meinrad Bächtiger,0.6465789,fineart
-John Lavery,0.64643383,fineart
-Anne Bachelier,0.6464065,fineart
-Giuseppe Bernardino Bison,0.64633006,fineart
-E. T. A. Hoffmann,0.5887251,fineart
-Ambrosius Benson,0.6457839,fineart
-Cornelis Verbeeck,0.645782,fineart
-H. R. Giger,0.6456823,weird
-Adolph Menzel,0.6455246,fineart
-Aliza Razell,0.5863178,digipa-high-impact
-Gerard Seghers,0.6205679,fineart
-David Aja,0.62812066,scribbles
-Gustave Courbet,0.64476407,fineart
-Alexandre Cabanel,0.63849115,fineart
-Albert Marquet,0.64471006,fineart
-Harold Harvey,0.64464307,fineart
-William Wegman,0.6446265,scribbles
-Harold Gilman,0.6445966,fineart
-Jeremy Geddes,0.57839495,digipa-high-impact
-Abraham van Beijeren,0.6356113,fineart
-Eugène Isabey,0.6160607,fineart
-Jorge Jacinto,0.58618563,fineart
-Frederic Leighton,0.64383554,fineart
-Dave McKean,0.6438012,cartoon
-Hiromu Arakawa,0.64371413,anime
-Aaron Douglas,0.6437089,fineart
-Adolf Dietrich,0.590169,fineart
-Frederik de Moucheron,0.6435952,fineart
-Siya Oum,0.6435919,cartoon
-Alberto Morrocco,0.64352196,fineart
-Robert Vonnoh,0.6433115,fineart
-Tom Bagshaw,0.5322264,fineart
-Guerrilla Girls,0.64309967,digipa-high-impact
-Johann Wolfgang von Goethe,0.6429888,fineart
-Charles Le Roux,0.6426594,fineart
-Auguste Toulmouche,0.64261353,fineart
-Cindy Sherman,0.58666563,digipa-high-impact
-Federico Zuccari,0.6425021,fineart
-Mike Mignola,0.642346,cartoon
-Cecily Brown,0.6421981,fineart
-Brian K. Vaughan,0.64147836,cartoon
-RETNA (Marquis Lewis),0.47963,n
-Klaus Janson,0.64129144,cartoon
-Alessandro Galli Bibiena,0.6412889,fineart
-Jeremy Lipking,0.64123213,fineart
-Stephen Shore,0.64108944,digipa-high-impact
-Heinz Edelmann,0.51325977,digipa-med-impact
-Joaquín Sorolla,0.6409732,fineart
-Bella Kotak,0.6409608,digipa-high-impact
-Cornelis Engebrechtsz,0.64091057,fineart
-Bruce Munro,0.64084166,digipa-high-impact
-Marjane Satrapi,0.64076495,fineart
-Jeremy Mann,0.557744,digipa-high-impact
-Heinrich Maria Davringhausen,0.6403986,fineart
-Kengo Kuma,0.6402023,digipa-high-impact
-Alfred Manessier,0.640153,fineart
-Antonio Galli Bibiena,0.6399247,digipa-high-impact
-Eduard von Grützner,0.6397164,fineart
-Bunny Yeager,0.5455078,digipa-high-impact
-Adolphe Willette,0.6396935,fineart
-Wangechi Mutu,0.6394607,n
-Peter Milligan,0.6391612,digipa-high-impact
-Dalí,0.45400402,digipa-low-impact
-Élisabeth Vigée Le Brun,0.6388982,fineart
-Beth Conklin,0.6388204,digipa-high-impact
-Charles Alphonse du Fresnoy,0.63881266,fineart
-Thomas Benjamin Kennington,0.56668127,fineart
-Jim Woodring,0.5625168,fineart
-Francisco Oller,0.63846034,fineart
-Csaba Markus,0.6384506,fineart
-Botero,0.63843524,scribbles
-Bill Henson,0.5394536,digipa-high-impact
-Anna Bocek,0.6382304,scribbles
-Hugo van der Goes,0.63822484,fineart
-Robert William Hume,0.5433574,fineart
-Chip Zdarsky,0.6381826,cartoon
-Daniel Seghers,0.53494316,fineart
-Richard Doyle,0.6377541,fineart
-Hendrick Terbrugghen,0.63773805,fineart
-Joe Madureira,0.6377177,special
-Floris van Schooten,0.6376191,fineart
-Jeff Simpson,0.3959046,fineart
-Albert Joseph Moore,0.6374316,fineart
-Arthur Merric Boyd,0.6373228,fineart
-Amadeo de Souza Cardoso,0.5927926,fineart
-Os Gemeos,0.6368859,digipa-high-impact
-Giovanni Boldini,0.6368698,fineart
-Albert Goodwin,0.6368695,fineart
-Hans Eduard von Berlepsch-Valendas,0.61562145,fineart
-Edmond Xavier Kapp,0.5758474,fineart
-François Quesnel,0.6365935,fineart
-Nathan Coley,0.6365817,digipa-high-impact
-Jasmine Becket-Griffith,0.6365083,digipa-high-impact
-Raphaelle Peale,0.6364422,fineart
-Candido Portinari,0.63634276,fineart
-Edward Dugmore,0.63179636,fineart
-Anders Zorn,0.6361722,fineart
-Ed Emshwiller,0.63615763,fineart
-Francis Coates Jones,0.6361159,fineart
-Ernst Haas,0.6361123,digipa-high-impact
-Dirck van Baburen,0.6213001,fineart
-René Lalique,0.63594735,fineart
-Sydney Prior Hall,0.6359345,fineart
-Brad Kunkle,0.5659712,fineart
-Corneille,0.6356381,fineart
-Henry Lamb,0.63560975,fineart
-Dirck Hals,0.63559663,fineart
-Alex Grey,0.62908936,nudity
-Michael Heizer,0.63555753,fineart
-Yiannis Moralis,0.61731136,fineart
-Emily Murray Paterson,0.4392335,fineart
-Georg Friedrich Kersting,0.6256248,fineart
-Frances Hodgkins,0.6352128,fineart
-Charles Cundall,0.6349486,fineart
-Henry Wallis,0.63478243,fineart
-Goro Fujita,0.6346491,cartoon
-Jean-Léon Gérôme,0.5954844,fineart
-August von Pettenkofen,0.60910493,fineart
-Abbott Handerson Thayer,0.63428533,fineart
-Martin John Heade,0.5926603,fineart
-Ellen Jewett,0.63420236,digipa-high-impact
-Hidari Jingorō,0.63388014,fareast
-Taiyō Matsumoto,0.63372946,special
-Emanuel Leutze,0.6007246,fineart
-Adam Martinakis,0.48973057,digipa-med-impact
-Will Eisner,0.63349223,cartoon
-Alexander Stirling Calder,0.6331682,fineart
-Saturno Butto,0.6331184,nudity
-Cecilia Beaux,0.6330725,fineart
-Amandine Van Ray,0.6174208,digipa-high-impact
-Bob Eggleton,0.63277495,digipa-high-impact
-Sherree Valentine Daines,0.63274443,fineart
-Frederick Lord Leighton,0.6299176,fineart
-Daniel Ridgway Knight,0.63251615,fineart
-Gaetano Previati,0.61743724,fineart
-John Berkey,0.63226986,fineart
-Richard Misrach,0.63201725,digipa-high-impact
-Aaron Jasinski,0.57948315,fineart
-"Edward Otho Cresap Ord, II",0.6317712,fineart
-Evelyn De Morgan,0.6317376,fineart
-Noelle Stevenson,0.63159716,digipa-high-impact
-Edward Robert Hughes,0.6315573,fineart
-Allan Ramsay,0.63150716,fineart
-Balthus,0.6314323,scribbles
-Hendrick Cornelisz Vroom,0.63143134,digipa-high-impact
-Ilya Repin,0.6313043,fineart
-George Lambourn,0.6312267,fineart
-Arthur Hughes,0.6310194,fineart
-Antonio J. Manzanedo,0.53841716,fineart
-John Singleton Copley,0.6264835,fineart
-Dennis Miller Bunker,0.63078755,fineart
-Ernie Barnes,0.6307126,cartoon
-Alison Kinnaird,0.6306353,digipa-high-impact
-Alex Toth,0.6305541,digipa-high-impact
-Henry Raeburn,0.6155551,fineart
-Alice Bailly,0.6305177,fineart
-Brian Kesinger,0.63037646,scribbles
-Antoine Blanchard,0.63036835,fineart
-Ron Walotsky,0.63035095,fineart
-Kent Monkman,0.63027304,fineart
-Naomi Okubo,0.5782754,fareast
-Hercules Seghers,0.62957174,fineart
-August Querfurt,0.6295643,fineart
-Samuel Melton Fisher,0.6283333,fineart
-David Burdeny,0.62950236,digipa-high-impact
-George Bain,0.58519644,fineart
-Peter Holme III,0.62938106,fineart
-Grayson Perry,0.62928164,digipa-high-impact
-Chris Claremont,0.6292076,digipa-high-impact
-Dod Procter,0.6291759,fineart
-Huang Tingjian,0.6290358,fareast
-Dorothea Warren O'Hara,0.6290113,fineart
-Ivan Albright,0.6289551,fineart
-Hubert von Herkomer,0.6288955,fineart
-Barbara Nessim,0.60589516,digipa-high-impact
-Henry Scott Tuke,0.6286309,fineart
-Ditlev Blunck,0.6282925,fineart
-Sven Nordqvist,0.62828535,fineart
-Lee Madgwick,0.6281731,fineart
-Hubert van Eyck,0.6281529,fineart
-Edmond Bille,0.62339354,fineart
-Ejnar Nielsen,0.6280824,fineart
-Arturo Souto,0.6280583,fineart
-Jean Giraud,0.6279888,fineart
-Storm Thorgerson,0.6277394,digipa-high-impact
-Ed Benedict,0.62764007,digipa-high-impact
-Christoffer Wilhelm Eckersberg,0.6014842,fineart
-Clarence Holbrook Carter,0.5514105,fineart
-Dorothy Lockwood,0.6273235,fineart
-John Singer Sargent,0.6272487,fineart
-Brigid Derham,0.6270125,digipa-high-impact
-Henricus Hondius II,0.6268505,fineart
-Gertrude Harvey,0.5903887,fineart
-Grant Wood,0.6266253,fineart
-Fyodor Vasilyev,0.5234919,digipa-med-impact
-Cagnaccio di San Pietro,0.6261671,fineart
-Doris Boulton-Maude,0.62593174,fineart
-Adolf Hirémy-Hirschl,0.5946784,fineart
-Harold von Schmidt,0.6256755,fineart
-Martine Johanna,0.6256161,digipa-high-impact
-Gerald Kelly,0.5579602,digipa-high-impact
-Ub Iwerks,0.625396,cartoon
-Dirck van der Lisse,0.6253871,fineart
-Edouard Riou,0.6250113,fineart
-Ilya Yefimovich Repin,0.62491584,fineart
-Martin Johnson Heade,0.59421235,fineart
-Afarin Sajedi,0.62475824,scribbles
-Alfred Thompson Bricher,0.6247515,fineart
-Edwin G. Lucas,0.5553578,fineart
-Georges Emile Lebacq,0.56175387,fineart
-Francis Davis Millet,0.5988504,fineart
-Bill Sienkiewicz,0.6125557,digipa-high-impact
-Giocondo Albertolli,0.62441677,fineart
-Victor Nizovtsev,0.6242258,fineart
-Squeak Carnwath,0.62416434,digipa-high-impact
-Bill Viola,0.62409425,digipa-high-impact
-Annie Abernethie Pirie Quibell,0.6240767,fineart
-Jason Edmiston,0.62405366,fineart
-Al Capp,0.6239494,fineart
-Kobayashi Kiyochika,0.6239368,anime
-Albert Anker,0.62389827,fineart
-Iain Faulkner,0.62376785,fineart
-Todd Schorr,0.6237408,fineart
-Charles Ginner,0.62370133,fineart
-Emile Auguste Carolus-Duran,0.62353987,fineart
-John Philip Falter,0.623418,cartoon
-Chizuko Yoshida,0.6233001,fareast
-Anna Dittmann,0.62327325,cartoon
-Henry Snell Gamley,0.62319934,fineart
-Edmund Charles Tarbell,0.6230626,fineart
-Rob Gonsalves,0.62298363,fineart
-Gladys Dawson,0.6228511,fineart
-Tomma Abts,0.61153626,fineart
-Kate Beaton,0.53993124,digipa-high-impact
-Gustave Buchet,0.62243867,fineart
-Gareth Pugh,0.6223551,digipa-high-impact
-Caspar van Wittel,0.57871693,fineart
-Anton Otto Fischer,0.6222941,fineart
-Albert Guillaume,0.56529653,fineart
-Felix Octavius Carr Darley,0.62223387,fineart
-Bernard van Orley,0.62221646,fineart
-Edward John Poynter,0.60147405,fineart
-Walter Percy Day,0.62207425,fineart
-Franciszek Starowieyski,0.5709621,fineart
-Auguste Baud-Bovy,0.6219854,fineart
-Chris LaBrooy,0.45497298,digipa-low-impact
-Abraham de Vries,0.5859101,fineart
-Antoni Gaudi,0.62162614,fineart
-Joe Jusko,0.62156093,digipa-high-impact
-Lynda Barry,0.62154603,digipa-high-impact
-Michal Karcz,0.62154436,digipa-high-impact
-Raymond Briggs,0.62150294,fineart
-Herbert James Gunn,0.6210927,fineart
-Dwight William Tryon,0.620984,fineart
-Paul Henry,0.5752968,fineart
-Helio Oiticica,0.6203739,digipa-high-impact
-Sebastian Errazuriz,0.62036186,digipa-high-impact
-Lucian Freud,0.6203146,nudity
-Frank Auerbach,0.6201102,weird
-Andre-Charles Boulle,0.6200789,fineart
-Franz Fedier,0.5669752,fineart
-Austin Briggs,0.57675314,fineart
-Hugo Sánchez Bonilla,0.61978436,digipa-high-impact
-Caroline Chariot-Dayez,0.6195682,digipa-high-impact
-Bill Ward,0.61953044,digipa-high-impact
-Charles Bird King,0.6194487,fineart
-Adrian Ghenie,0.6193521,digipa-high-impact
-Agnes Cecile,0.6192814,digipa-high-impact
-Augustus John,0.6191995,fineart
-Jeffrey T. Larson,0.61913544,fineart
-Alexis Simon Belle,0.3190395,digipa-low-impact
-Jean-Baptiste Monge,0.5758537,fineart
-Adolf Bierbrauer,0.56129396,fineart
-Ayako Rokkaku,0.61891204,fareast
-Lisa Keene,0.54570895,digipa-high-impact
-Edmond Aman-Jean,0.57168096,fineart
-Marc Davis,0.61837333,cartoon
-Cerith Wyn Evans,0.61829346,digipa-high-impact
-George Wyllie,0.61829203,fineart
-George Luks,0.6182724,fineart
-William-Adolphe Bouguereau,0.618265,c
-Grigoriy Myasoyedov,0.61801606,fineart
-Hashimoto Gahō,0.61795104,fineart
-Charles Ragland Bunnell,0.61772746,fineart
-Ambrose McCarthy Patterson,0.61764514,fineart
-Bill Brauer,0.5824066,fineart
-Mikko Lagerstedt,0.591015,digipa-high-impact
-Koson Ohara,0.53635323,fineart
-Evaristo Baschenis,0.5857368,fineart
-Martin Ansin,0.5294119,fineart
-Cory Loftis,0.6168619,cartoon
-Joseph Stella,0.6166778,fineart
-André Pijet,0.5768274,fineart
-Jeff Wall,0.6162895,digipa-high-impact
-Eleanor Layfield Davis,0.6158844,fineart
-Saul Tepper,0.61579347,fineart
-Alex Hirsch,0.6157384,cartoon
-Alexandre Falguière,0.55011404,fineart
-Malcolm Liepke,0.6155646,fineart
-Georg Friedrich Schmidt,0.60364646,fineart
-Hendrik Kerstens,0.55099905,digipa-high-impact
-Félix Bódog Widder,0.6153954,fineart
-Marie Guillemine Benoist,0.61532974,fineart
-Kelly Mckernan,0.60047054,digipa-high-impact
-Ignacio Zuloaga,0.6151608,fineart
-Hubert van Ravesteyn,0.61489964,fineart
-Angus McKie,0.61487424,digipa-high-impact
-Colin Campbell Cooper,0.6147882,fineart
-Pieter Aertsen,0.61454165,fineart
-Jan Brett,0.6144608,fineart
-Kazuo Koike,0.61438507,fineart
-Edith Grace Wheatley,0.61428297,fineart
-Ogawa Kazumasa,0.61427975,fareast
-Giovanni Battista Cipriani,0.6022825,fineart
-André Bauchant,0.57124996,fineart
-George Abe,0.6140447,digipa-high-impact
-Georges Lemmen,0.6139967,scribbles
-Frank Leonard Brooks,0.6139327,fineart
-Gai Qi,0.613744,anime
-Frank Gehry,0.6136776,digipa-high-impact
-Anton Domenico Gabbiani,0.55471313,fineart
-Cassandra Austen,0.6135781,fineart
-Paul Gustav Fischer,0.613273,fineart
-Emiliano Di Cavalcanti,0.6131207,fineart
-Meryl McMaster,0.6129995,digipa-high-impact
-Domenico di Pace Beccafumi,0.6129922,fineart
-Ludwig Mies van der Rohe,0.6126692,fineart
-Étienne-Louis Boullée,0.6126158,fineart
-Dali,0.5928694,nudity
-Shinji Aramaki,0.61246127,anime
-Giovanni Fattori,0.59544694,fineart
-Bapu,0.6122084,c
-Raphael Lacoste,0.5539114,digipa-high-impact
-Scarlett Hooft Graafland,0.6119631,digipa-high-impact
-Rene Laloux,0.61190474,fineart
-Julius Horsthuis,0.59037095,fineart
-Gerald van Honthorst,0.6115939,fineart
-Dino Valls,0.611533,fineart
-Tony DiTerlizzi,0.6114657,cartoon
-Michael Cheval,0.61138546,anime
-Charles Schulz,0.6113759,digipa-high-impact
-Alvar Aalto,0.61122143,digipa-high-impact
-Gu Kaizhi,0.6110798,fareast
-Eugene von Guerard,0.6109776,fineart
-John Cassaday,0.610949,fineart
-Elizabeth Forbes,0.61092335,fineart
-Edmund Greacen,0.6109115,fineart
-Eugène Burnand,0.6107876,fineart
-Boris Grigoriev,0.6107853,scribbles
-Norman Rockwell,0.6107638,fineart
-Barthélemy Menn,0.61064315,fineart
-George Biddle,0.61058354,fineart
-Edgar Ainsworth,0.5525424,digipa-high-impact
-Alfred Leyman,0.5887217,fineart
-Tex Avery,0.6104007,cartoon
-Beatrice Ethel Lithiby,0.61030364,fineart
-Grace Pailthorpe,0.61026484,digipa-high-impact
-Brian Oldham,0.396231,digipa-low-impact
-Android Jones,0.61023116,fareast
-François Girardon,0.5830649,fineart
-Ib Eisner,0.61016303,digipa-high-impact
-Armand Point,0.610156,fineart
-Henri Alphonse Barnoin,0.59465057,fineart
-Jean Marc Nattier,0.60987425,fineart
-Francisco de Holanda,0.6091294,fineart
-Marco Mazzoni,0.60970783,fineart
-Esaias Boursse,0.6093308,fineart
-Alexander Deyneka,0.55000365,fineart
-John Totleben,0.60883725,fineart
-Al Feldstein,0.6087723,fineart
-Adam Hughes,0.60854626,anime
-Ernest Zobole,0.6085073,fineart
-Alex Gross,0.60837066,digipa-high-impact
-George Jamesone,0.6079673,fineart
-Frank Lloyd Wright,0.60793245,scribbles
-Brooke DiDonato,0.47680336,digipa-med-impact
-Hans Gude,0.60780364,fineart
-Ethel Schwabacher,0.60748273,fineart
-Gladys Kathleen Bell,0.60747695,fineart
-Adolf Fényes,0.54192233,fineart
-Carel Willink,0.58120143,fineart
-George Henry,0.6070727,digipa-high-impact
-Ronald Balfour,0.60697085,fineart
-Elsie Dalton Hewland,0.6067718,digipa-high-impact
-Alex Maleev,0.6067118,fineart
-Anish Kapoor,0.6067015,digipa-high-impact
-Aleksandr Ivanovich Laktionov,0.606544,fineart
-Kim Keever,0.6037775,digipa-high-impact
-Aleksi Briclot,0.46056762,fineart
-Raymond Leech,0.6062721,fineart
-Richard Eurich,0.6062664,fineart
-Phil Jimenez,0.60625625,cartoon
-Gao Cen,0.60618126,nudity
-Mike Deodato,0.6061201,cartoon
-Charles Haslewood Shannon,0.6060581,fineart
-Alexandre Jacovleff,0.3991747,digipa-low-impact
-André Beauneveu,0.584062,fineart
-Hiroshi Honda,0.60507596,digipa-high-impact
-Charles Joshua Chaplin,0.60498774,fineart
-Domenico Zampieri,0.6049726,fineart
-Gusukuma Seihō,0.60479784,fareast
-Nikolina Petolas,0.46318632,digipa-low-impact
-Casey Weldon,0.6047672,cartoon
-Elmyr de Hory,0.6046374,fineart
-Nan Goldin,0.6046119,digipa-high-impact
-Charles McAuley,0.6045995,fineart
-Archibald Skirving,0.6044234,fineart
-Elizabeth York Brunton,0.6043737,fineart
-Dugald Sutherland MacColl,0.6042907,fineart
-Titian,0.60426414,fineart
-Ignacy Witkiewicz,0.6042259,fineart
-Allie Brosh,0.6042061,digipa-high-impact
-H.P. Lovecraft,0.6039597,digipa-high-impact
-Andrée Ruellan,0.60395086,fineart
-Ralph McQuarrie,0.60380936,fineart
-Mead Schaeffer,0.6036558,fineart
-Henri-Julien Dumont,0.571257,fineart
-Kieron Gillen,0.6035093,fineart
-Maginel Wright Enright Barney,0.6034306,nudity
-Vincent Di Fate,0.6034131,fineart
-Briton Rivière,0.6032918,fineart
-Hajime Sorayama,0.60325956,nudity
-Béla Czóbel,0.6031023,fineart
-Edmund Blampied,0.603072,fineart
-E. Simms Campbell,0.6030443,fineart
-Hisui Sugiura,0.603034,fareast
-Alan Davis,0.6029676,fineart
-Glen Keane,0.60287905,cartoon
-Frank Holl,0.6027312,fineart
-Abbott Fuller Graves,0.6025608,fineart
-Albert Servaes,0.60250103,black-white
-Hovsep Pushman,0.5937487,fineart
-Brian M. Viveros,0.60233414,fineart
-Charles Fremont Conner,0.6023278,fineart
-Francesco Furini,0.6022654,digipa-high-impact
-Camille-Pierre Pambu Bodo,0.60191673,fineart
-Yasushi Nirasawa,0.6016714,nudity
-Charles Uzzell-Edwards,0.6014683,fineart
-Abram Efimovich Arkhipov,0.60128385,fineart
-Hedda Sterne,0.6011857,digipa-high-impact
-Ben Aronson,0.6011548,fineart
-Frank Frazetta,0.551121,nudity
-Elizabeth Durack,0.6010842,fineart
-Ian Miller,0.42153555,fareast
-Charlie Bowater,0.4410439,special
-Michael Carson,0.60039437,fineart
-Walter Langley,0.6002273,fineart
-Cornelis Anthonisz,0.6001956,fineart
-Dorothy Elizabeth Bradford,0.6001929,fineart
-J.C. Leyendecker,0.5791972,fineart
-Willem van Haecht,0.59990716,fineart
-Anna and Elena Balbusso,0.59955937,digipa-low-impact
-Harrison Fisher,0.59952044,fineart
-Bill Medcalf,0.59950054,fineart
-Edward Arthur Walton,0.59945667,fineart
-Alois Arnegger,0.5991994,fineart
-Ray Caesar,0.59902894,digipa-high-impact
-Karen Wallis,0.5990094,fineart
-Emmanuel Shiu,0.51082766,digipa-med-impact
-Thomas Struth,0.5988324,digipa-high-impact
-Barbara Longhi,0.5985706,fineart
-Richard Deacon,0.59851056,fineart
-Constantin Hansen,0.5984213,fineart
-Harold Shapinsky,0.5984175,fineart
-George Dionysus Ehret,0.5983857,fineart
-Doug Wildey,0.5983639,digipa-high-impact
-Fernand Toussaint,0.5982694,fineart
-Horatio Nelson Poole,0.5982614,fineart
-Caesar van Everdingen,0.5981566,fineart
-Eva Gonzalès,0.5981396,fineart
-Franz Vohwinkel,0.5448179,fineart
-Margaret Mee,0.5979592,fineart
-Francis Focer Brown,0.59779185,fineart
-Henry Moore,0.59767926,nudity
-Scott Listfield,0.58795893,fineart
-Nikolai Ge,0.5973643,fineart
-Jacek Yerka,0.58198756,fineart
-Margaret Brundage,0.5969077,fineart
-JC Leyendecker,0.5620243,fineart
-Ben Templesmith,0.5498991,digipa-high-impact
-Armin Hansen,0.59669334,anime
-Jean-Louis Prevost,0.5966897,fineart
-Daphne Allen,0.59666026,fineart
-Franz Karl Basler-Kopp,0.59663445,fineart
-"Henry Ives Cobb, Jr.",0.596385,fineart
-Michael Sowa,0.546285,fineart
-Anna Füssli,0.59600973,fineart
-György Rózsahegyi,0.59580946,fineart
-Luis Royo,0.59566617,fineart
-Émile Gallé,0.5955559,fineart
-Antonio Mora,0.5334297,digipa-high-impact
-Edward P. Beard Jr.,0.59543866,fineart
-Jessica Rossier,0.54958373,special
-André Thomkins,0.5343785,digipa-high-impact
-David Macbeth Sutherland,0.5949968,fineart
-Charles Liu,0.5949787,digipa-high-impact
-Edi Rama,0.5949226,digipa-high-impact
-Jacques Le Moyne,0.5948843,fineart
-Egbert van der Poel,0.59488285,fineart
-Georg Jensen,0.594782,digipa-high-impact
-Anne Sudworth,0.5947539,fineart
-Jan Pietersz Saenredam,0.59472525,fineart
-Henryk Stażewski,0.5945748,fineart
-André François,0.58402044,fineart
-Alexander Runciman,0.5944449,digipa-high-impact
-Thomas Kinkade,0.594391,fineart
-Robert Williams,0.5567989,digipa-high-impact
-George Gardner Symons,0.57431924,fineart
-D. Alexander Gregory,0.5334464,fineart
-Gerald Brom,0.52473724,fineart
-Robert Hagan,0.59406,fineart
-Ernest Crichlow,0.5940588,fineart
-Viviane Sassen,0.5939927,digipa-high-impact
-Enrique Simonet,0.5937546,fineart
-Esther Blaikie MacKinnon,0.593747,digipa-high-impact
-Jeff Kinney,0.59372896,scribbles
-Igor Morski,0.5936732,digipa-high-impact
-John Currin,0.5936216,fineart
-Bob Ringwood,0.5935273,digipa-high-impact
-Jordan Grimmer,0.44948143,digipa-low-impact
-François Barraud,0.5933471,fineart
-Helen Binyon,0.59331006,digipa-high-impact
-Brenda Chamberlain,0.5932333,fineart
-Candido Bido,0.59310603,fineart
-Abraham Storck,0.5929502,fineart
-Raphael,0.59278333,fineart
-Larry Sultan,0.59273386,digipa-high-impact
-Agostino Tassi,0.59265685,fineart
-Alexander V. Kuprin,0.5925917,fineart
-Frans Koppelaar,0.5658725,fineart
-Richard Corben,0.59251785,fineart
-David Gilmour Blythe,0.5924247,digipa-high-impact
-František Kaván,0.5924211,fineart
-Rob Liefeld,0.5921167,fineart
-Ernő Rubik,0.5920297,fineart
-Byeon Sang-byeok,0.59200096,fareast
-Johfra Bosschart,0.5919376,fineart
-Emil Lindenfeld,0.5761086,fineart
-Howard Mehring,0.5917471,fineart
-Gwenda Morgan,0.5915571,digipa-high-impact
-Henry Asencio,0.5915404,fineart
-"George Barret, Sr.",0.5914306,fineart
-Andrew Ferez,0.5911011,fineart
-Ed Brubaker,0.5910869,digipa-high-impact
-George Reid,0.59095883,digipa-high-impact
-Derek Gores,0.51769906,digipa-med-impact
-Charles Rollier,0.5539186,fineart
-Terry Oakes,0.590443,fineart
-Thomas Blackshear,0.5078616,fineart
-Albert Benois,0.5902705,nudity
-Krenz Cushart,0.59026587,special
-Jeff Koons,0.5902637,digipa-high-impact
-Akihiko Yoshida,0.5901294,special
-Anja Percival,0.45039332,digipa-low-impact
-Eduard von Steinle,0.59008586,fineart
-Alex Russell Flint,0.5900352,digipa-high-impact
-Edward Okuń,0.5897297,fineart
-Emma Lampert Cooper,0.5894849,fineart
-Stuart Haygarth,0.58132994,digipa-high-impact
-George French Angas,0.5434376,fineart
-Edmund F. Ward,0.5892848,fineart
-Eleanor Vere Boyle,0.58925456,digipa-high-impact
-Evelyn Cheston,0.58924586,fineart
-Edwin Dickinson,0.58921975,digipa-high-impact
-Christophe Vacher,0.47325426,fineart
-Anne Dewailly,0.58905107,fineart
-Gertrude Greene,0.5862596,digipa-high-impact
-Boris Groh,0.5888809,digipa-high-impact
-Douglas Smith,0.588804,digipa-high-impact
-Ian Hamilton Finlay,0.5887713,fineart
-Derek Jarman,0.5887292,digipa-high-impact
-Archibald Thorburn,0.5882001,fineart
-Gillis d'Hondecoeter,0.58813053,fineart
-I Ketut Soki,0.58801544,digipa-high-impact
-Alex Schomburg,0.46614102,digipa-low-impact
-Bastien L. Deharme,0.583349,special
-František Jakub Prokyš,0.58782333,fineart
-Jesper Ejsing,0.58782053,fineart
-Odd Nerdrum,0.53551745,digipa-high-impact
-Tom Lovell,0.5877577,fineart
-Ayami Kojima,0.5877416,fineart
-Peter Sculthorpe,0.5875696,fineart
-Bernard D’Andrea,0.5874042,fineart
-Denis Eden,0.58739066,digipa-high-impact
-Alfons Walde,0.58728385,fineart
-Jovana Rikalo,0.47006977,digipa-low-impact
-Franklin Booth,0.5870834,fineart
-Mat Collishaw,0.5870676,digipa-high-impact
-Joseph Lorusso,0.586858,fineart
-Helen Stevenson,0.454647,digipa-low-impact
-Delaunay,0.58657396,fineart
-H.R. Millar,0.58655745,fineart
-E. Charlton Fortune,0.586376,fineart
-Alson Skinner Clark,0.58631575,fineart
-Stan And Jan Berenstain,0.5862361,digipa-high-impact
-Howard Lyon,0.5862271,fineart
-John Blanche,0.586182,fineart
-Bernardo Cavallino,0.5858575,fineart
-Tomasz Alen Kopera,0.5216588,fineart
-Peter Gric,0.58583695,fineart
-Guo Pei,0.5857794,fareast
-James Turrell,0.5853901,digipa-high-impact
-Alexandr Averin,0.58533764,fineart
-Bertalan Székely,0.5548113,digipa-high-impact
-Brothers Hildebrandt,0.5850233,fineart
-Ed Roth,0.5849769,digipa-high-impact
-Enki Bilal,0.58492255,fineart
-Alan Lee,0.5848701,fineart
-Charles H. Woodbury,0.5848688,fineart
-André Charles Biéler,0.5847876,fineart
-Annie Rose Laing,0.5597829,fineart
-Matt Fraction,0.58463776,cartoon
-Charles Alston,0.58453286,fineart
-Frank Xavier Leyendecker,0.545465,fineart
-Alfred Richard Gurrey,0.584306,fineart
-Dan Mumford,0.5843051,cartoon
-Francisco Martín,0.5842005,fineart
-Alvaro Siza,0.58406967,digipa-high-impact
-Frank J. Girardin,0.5839858,fineart
-Henry Carr,0.58397424,digipa-high-impact
-Charles Furneaux,0.58394694,fineart
-Daniel F. Gerhartz,0.58389103,fineart
-Gilberto Soren Zaragoza,0.5448442,fineart
-Bart Sears,0.5838427,cartoon
-Allison Bechdel,0.58383805,digipa-high-impact
-Frank O'Meara,0.5837992,fineart
-Charles Codman,0.5836579,fineart
-Francisco Zúñiga,0.58359766,fineart
-Vladimir Kush,0.49075457,fineart
-Arnold Mesches,0.5834257,fineart
-Frank McKelvey,0.5831641,fineart
-Allen Butler Talcott,0.5830911,fineart
-Eric Zener,0.58300316,fineart
-Noah Bradley,0.44176096,digipa-low-impact
-Robert Childress,0.58289623,fineart
-Frances C. Fairman,0.5827239,fineart
-Kathryn Morris Trotter,0.465856,digipa-low-impact
-Everett Raymond Kinstler,0.5824819,fineart
-Edward Mitchell Bannister,0.5804899,fineart
-"George Barret, Jr.",0.5823128,fineart
-Greg Hildebrandt,0.4271311,fineart
-Anka Zhuravleva,0.5822078,digipa-high-impact
-Rolf Armstrong,0.58217514,fineart
-Eric Wallis,0.58191466,fineart
-Clemens Ascher,0.5480207,digipa-high-impact
-Hugo Kārlis Grotuss,0.5818766,fineart
-Albert Paris Gütersloh,0.5817827,fineart
-Hilda May Gordon,0.5817449,fineart
-Hendrik Martenszoon Sorgh,0.5817126,fineart
-Pipilotti Rist,0.5816868,digipa-high-impact
-Hiroyuki Tajima,0.5816242,fareast
-Igor Zenin,0.58159757,digipa-high-impact
-Genevieve Springston Lynch,0.4979099,digipa-med-impact
-Dan Witz,0.44476372,fineart
-David Roberts,0.5255326,fineart
-Frieke Janssens,0.5706969,digipa-high-impact
-Arnold Schoenberg,0.56520367,fineart
-Inoue Naohisa,0.5809933,fareast
-Elfriede Lohse-Wächtler,0.58097905,fineart
-Alex Ross,0.42460668,digipa-low-impact
-Robert Irwin,0.58078,c
-Charles Angrand,0.58077514,fineart
-Anne Nasmyth,0.54221964,fineart
-Henri Bellechose,0.5773891,fineart
-De Hirsh Margules,0.58059025,fineart
-Hiromitsu Takahashi,0.5805599,fareast
-Ilya Kuvshinov,0.5805521,special
-Cassius Marcellus Coolidge,0.5805516,c
-Dorothy Burroughes,0.5804835,fineart
-Emanuel de Witte,0.58027405,fineart
-George Herbert Baker,0.5799624,digipa-high-impact
-Cheng Zhengkui,0.57990086,fareast
-Bernard Fleetwood-Walker,0.57987773,digipa-high-impact
-Philippe Parreno,0.57985014,digipa-high-impact
-Thornton Oakley,0.57969713,fineart
-Greg Rutkowski,0.5203395,special
-Ike no Taiga,0.5795857,anime
-Eduardo Lefebvre Scovell,0.5795808,fineart
-Adolfo Müller-Ury,0.57944727,fineart
-Patrick Woodroffe,0.5228063,fineart
-Wim Crouwel,0.57933235,digipa-high-impact
-Colijn de Coter,0.5792779,fineart
-François Boquet,0.57924724,fineart
-Gerbrand van den Eeckhout,0.57897866,fineart
-Eugenio Granell,0.5392264,fineart
-Kuang Hong,0.5782304,digipa-high-impact
-Justin Gerard,0.46685404,fineart
-Tokujin Yoshioka,0.5779153,digipa-high-impact
-Alan Bean,0.57788515,fineart
-Ernest Biéler,0.5778079,fineart
-Martin Deschambault,0.44401115,digipa-low-impact
-Anna Boch,0.577735,fineart
-Jack Davis,0.5775291,fineart
-Félix Labisse,0.5775142,fineart
-Greg Simkins,0.5679761,fineart
-David Lynch,0.57751054,digipa-low-impact
-Eizō Katō,0.5774127,digipa-high-impact
-Grethe Jürgens,0.5773412,digipa-high-impact
-Heinrich Bichler,0.5770147,fineart
-Barbara Nasmyth,0.5446056,fineart
-Domenico Induno,0.5583946,fineart
-Gustave Baumann,0.5607866,fineart
-Mike Mayhew,0.5765857,cartoon
-Delmer J. Yoakum,0.576538,fineart
-Aykut Aydogdu,0.43111503,digipa-low-impact
-George Barker,0.5763551,fineart
-Ernő Grünbaum,0.57634187,fineart
-Eliseu Visconti,0.5763241,fineart
-Esao Andrews,0.5761547,fineart
-JennyBird Alcantara,0.49165845,digipa-med-impact
-Joan Tuset,0.5761051,fineart
-Angela Barrett,0.55976534,digipa-high-impact
-Syd Mead,0.5758396,fineart
-Ignacio Bazan-Lazcano,0.5757512,fineart
-Franciszek Kostrzewski,0.57570386,fineart
-Eero Järnefelt,0.57540673,fineart
-Loretta Lux,0.56217635,digipa-high-impact
-Gaudi,0.57519895,fineart
-Charles Gleyre,0.57490873,fineart
-Antoine Verney-Carron,0.56386137,fineart
-Albert Edelfelt,0.57466495,fineart
-Fabian Perez,0.57444525,fineart
-Kevin Sloan,0.5737548,fineart
-Stanislav Poltavsky,0.57434607,fineart
-Abraham Hondius,0.574326,fineart
-Tadao Ando,0.57429105,fareast
-Fyodor Slavyansky,0.49796474,digipa-med-impact
-David Brewster,0.57385933,digipa-high-impact
-Cliff Chiang,0.57375133,digipa-high-impact
-Drew Struzan,0.5317983,digipa-high-impact
-Henry O. Tanner,0.5736586,fineart
-Alberto Sughi,0.5736495,fineart
-Albert J. Welti,0.5736257,fineart
-Charles Mahoney,0.5735923,digipa-high-impact
-Exekias,0.5734506,fineart
-Felipe Seade,0.57342744,digipa-high-impact
-Henriette Wyeth,0.57330644,digipa-high-impact
-Harold Sandys Williamson,0.5443646,fineart
-Eddie Campbell,0.57329535,digipa-high-impact
-Gao Fenghan,0.5732926,fareast
-Cynthia Sheppard,0.51099646,fineart
-Henriette Grindat,0.573179,fineart
-Yasutomo Oka,0.5731342,fareast
-Celia Frances Bedford,0.57313216,fineart
-Les Edwards,0.42068473,fineart
-Edwin Deakin,0.5031717,fineart
-Eero Saarinen,0.5725142,digipa-high-impact
-Franciszek Smuglewicz,0.5722554,fineart
-Doris Blair,0.57221186,fineart
-Seb Mckinnon,0.51721895,digipa-med-impact
-Gregorio Lazzarini,0.57204294,fineart
-Gerard Sekoto,0.5719927,fineart
-Francis Ernest Jackson,0.5506009,fineart
-Simon Birch,0.57171595,digipa-high-impact
-Bayard Wu,0.57171166,fineart
-François Clouet,0.57162094,fineart
-Christopher Wren,0.5715372,fineart
-Evgeny Lushpin,0.5714827,special
-Art Green,0.5714495,digipa-high-impact
-Amy Judd,0.57142305,digipa-high-impact
-Art Brenner,0.42619684,digipa-low-impact
-Travis Louie,0.43916368,digipa-low-impact
-James Jean,0.5457318,digipa-high-impact
-Ewald Rübsamen,0.57083976,fineart
-Donato Giancola,0.57052535,fineart
-Carl Arnold Gonzenbach,0.5703996,fineart
-Bastien Lecouffe-Deharme,0.5201288,fineart
-Howard Chandler Christy,0.5702813,nudity
-Dean Cornwell,0.56977296,fineart
-Don Maitz,0.4743015,fineart
-James Montgomery Flagg,0.56974065,fineart
-Andreas Levers,0.42125136,digipa-low-impact
-Edgar Schofield Baum,0.56965977,fineart
-Alan Parry,0.5694952,digipa-high-impact
-An Zhengwen,0.56942475,fareast
-Alayna Lemmer,0.48293802,fineart
-Edward Marshall Boehm,0.5530143,fineart
-Henri Biva,0.54013556,nudity
-Fiona Rae,0.4646715,digipa-low-impact
-Elizabeth Jane Lloyd,0.5688463,digipa-high-impact
-Franklin Carmichael,0.5687844,digipa-high-impact
-Dionisius,0.56875896,fineart
-Edwin Georgi,0.56868523,fineart
-Jenny Saville,0.5686633,fineart
-Ernest Hébert,0.56859314,fineart
-Stephan Martiniere,0.56856346,digipa-high-impact
-Huang Binhong,0.56841767,fineart
-August Lemmer,0.5683548,fineart
-Camille Bouvagne,0.5678048,fineart
-Olga Skomorokhova,0.39401102,digipa-low-impact
-Sacha Goldberger,0.5675477,digipa-high-impact
-Hilda Annetta Walker,0.5675261,digipa-high-impact
-Harvey Pratt,0.51314723,digipa-med-impact
-Jean Bourdichon,0.5670543,fineart
-Noriyoshi Ohrai,0.56690073,fineart
-Kadir Nelson,0.5669006,n
-Ilya Ostroukhov,0.5668801,fineart
-Eugène Brands,0.56681967,fineart
-Achille Leonardi,0.56674325,fineart
-Franz Cižek,0.56670356,fineart
-George Paul Chalmers,0.5665988,digipa-high-impact
-Serge Marshennikov,0.5665971,digipa-high-impact
-Mike Worrall,0.56641084,fineart
-Dirck van Delen,0.5661764,fineart
-Peter Andrew Jones,0.5661655,fineart
-Rafael Albuquerque,0.56541103,fineart
-Daniel Buren,0.5654043,fineart
-Giuseppe Grisoni,0.5432699,fineart
-George Fiddes Watt,0.55861616,fineart
-Stan Lee,0.5651268,digipa-high-impact
-Dorning Rasbotham,0.56511617,fineart
-Albert Lynch,0.56497896,fineart
-Lorenz Hideyoshi,0.56494075,fineart
-Fenghua Zhong,0.56492203,fareast
-Caroline Lucy Scott,0.49190843,digipa-med-impact
-Victoria Crowe,0.5647996,digipa-high-impact
-Hasegawa Settan,0.5647092,fareast
-Dennis H. Farber,0.56453323,digipa-high-impact
-Dick Bickenbach,0.5644289,fineart
-Art Frahm,0.56439924,fineart
-Edith Edmonds,0.5643151,fineart
-Alfred Heber Hutty,0.56419206,fineart
-Henry Tonks,0.56410825,fineart
-Peter Howson,0.5640759,fineart
-Albert Dorne,0.56395364,fineart
-Arthur Adams,0.5639404,fineart
-Bernt Tunold,0.56383425,digipa-high-impact
-Gianluca Foli,0.5637317,digipa-high-impact
-Vittorio Matteo Corcos,0.5636767,fineart
-Béla Iványi-Grünwald,0.56355745,nudity
-Feng Zhu,0.5634973,fineart
-Sam Kieth,0.47251505,digipa-low-impact
-Charles Crodel,0.5633834,fineart
-Elsie Henderson,0.56310076,digipa-high-impact
-George Earl Ortman,0.56295705,fineart
-Tari Márk Dávid,0.562937,fineart
-Betty Merken,0.56281745,digipa-high-impact
-Cecile Walton,0.46672013,digipa-low-impact
-Bracha L. Ettinger,0.56237936,fineart
-Ken Fairclough,0.56230986,digipa-high-impact
-Phil Koch,0.56224954,digipa-high-impact
-George Pirie,0.56213045,digipa-high-impact
-Chad Knight,0.56194013,digipa-high-impact
-Béla Kondor,0.5427164,digipa-high-impact
-Barclay Shaw,0.53689134,digipa-high-impact
-Tim Hildebrandt,0.47194147,fineart
-Hermann Rüdisühli,0.56104004,digipa-high-impact
-Ian McQue,0.5342066,digipa-high-impact
-Yanjun Cheng,0.5607171,fineart
-Heinrich Hofmann,0.56060636,fineart
-Henry Raleigh,0.5605958,fineart
-Ernest Buckmaster,0.5605704,fineart
-Charles Ricketts,0.56055415,fineart
-Juergen Teller,0.56051147,digipa-high-impact
-Auguste Mambour,0.5604873,fineart
-Sean Yoro,0.5601486,digipa-high-impact
-Sheilah Beckett,0.55995446,digipa-high-impact
-Eugene Tertychnyi,0.5598978,fineart
-Dr. Seuss,0.5597466,c
-Adolf Wölfli,0.5372333,digipa-high-impact
-Enrique Tábara,0.559323,fineart
-Dionisio Baixeras Verdaguer,0.5590695,fineart
-Aleksander Gierymski,0.5590013,fineart
-Augustus Dunbier,0.55872476,fineart
-Adolf Born,0.55848217,fineart
-Chris Turnham,0.5584234,digipa-high-impact
-James C Christensen,0.55837405,fineart
-Daphne Fedarb,0.5582459,digipa-high-impact
-Andre Kohn,0.5581832,special
-Ron Mueck,0.5581811,nudity
-Glenn Fabry,0.55786383,fineart
-Elizabeth Polunin,0.5578102,digipa-high-impact
-Charles S. Kaelin,0.5577954,fineart
-Arthur Radebaugh,0.5577016,fineart
-Ai Yazawa,0.55768114,fareast
-Charles Roka,0.55762553,fineart
-Ai Weiwei,0.5576034,digipa-high-impact
-Dorothy Bradford,0.55760014,digipa-high-impact
-Alfred Leslie,0.557555,fineart
-Heinrich Herzig,0.5574423,fineart
-Eliot Hodgkin,0.55740607,digipa-high-impact
-Albert Kotin,0.55737317,fineart
-Carlo Carlone,0.55729353,fineart
-Chen Rong,0.5571221,fineart
-Ikuo Hirayama,0.5570225,digipa-high-impact
-Edward Corbett,0.55701995,nudity
-Eugeniusz Żak,0.556925,nudity
-Ettore Tito,0.556875,fineart
-Helene Knoop,0.5567731,fineart
-Amanda Sage,0.37731662,fareast
-Annick Bouvattier,0.54647046,fineart
-Harvey Dunn,0.55663586,fineart
-Hans Sandreuter,0.5562575,digipa-high-impact
-Ruan Jia,0.5398549,special
-Anton Räderscheidt,0.55618906,fineart
-Tyler Shields,0.4081434,digipa-low-impact
-Darek Zabrocki,0.49975997,digipa-med-impact
-Frank Montague Moore,0.5556432,fineart
-Greg Staples,0.5555332,fineart
-Endre Bálint,0.5553731,fineart
-Augustus Vincent Tack,0.5136602,fineart
-Marc Simonetti,0.48602036,fineart
-Carlo Randanini,0.55493265,digipa-high-impact
-Diego Dayer,0.5549119,fineart
-Kelly Freas,0.55476534,fineart
-Thomas Saliot,0.5139967,digipa-med-impact
-Gijsbert d'Hondecoeter,0.55455256,fineart
-Walter Kim,0.554521,digipa-high-impact
-Francesco Cozza,0.5155097,digipa-med-impact
-Bill Watterson,0.5542879,digipa-high-impact
-Mark Keathley,0.4824056,fineart
-Béni Ferenczy,0.55405354,digipa-high-impact
-Amadou Opa Bathily,0.5536976,n
-Giuseppe Antonio Petrini,0.55340284,fineart
-Enzo Cucchi,0.55331933,digipa-high-impact
-Adolf Schrödter,0.55316544,fineart
-George Benjamin Luks,0.548566,fineart
-Glenys Cour,0.55304,digipa-high-impact
-Andrew Robertson,0.5529603,digipa-high-impact
-Claude Rogers,0.55272067,digipa-high-impact
-Alexandre Antigna,0.5526737,fineart
-Aimé Barraud,0.55265915,digipa-high-impact
-György Vastagh,0.55258965,fineart
-Bruce Nauman,0.55257386,digipa-high-impact
-Benjamin Block,0.55251944,digipa-high-impact
-Gonzalo Endara Crow,0.552346,digipa-high-impact
-Dirck de Bray,0.55221736,fineart
-Gerald Kelley,0.5521059,digipa-high-impact
-Dave Gibbons,0.5520954,digipa-high-impact
-Béla Nagy Abodi,0.5520624,digipa-high-impact
-Faith 47,0.5517006,digipa-high-impact
-Anna Razumovskaya,0.5229187,digipa-med-impact
-Archibald Robertson,0.55129635,digipa-high-impact
-Louise Dahl-Wolfe,0.55120385,digipa-high-impact
-Simon Bisley,0.55119276,digipa-high-impact
-Eric Fischl,0.55107886,fineart
-Hu Zaobin,0.5510481,fareast
-Béla Pállik,0.5507963,digipa-high-impact
-Eugene J. Martin,0.55078864,fineart
-Friedrich Gauermann,0.55063415,fineart
-Fritz Baumann,0.5341434,fineart
-Michal Lisowski,0.5505639,fineart
-Paolo Roversi,0.5503342,digipa-high-impact
-Andrew Atroshenko,0.55009747,fineart
-Gyula Derkovits,0.5500315,fineart
-Hugh Adam Crawford,0.55000615,digipa-high-impact
-Béla Apáti Abkarovics,0.5499799,digipa-high-impact
-Paul Chadeisson,0.389151,digipa-low-impact
-Aurél Bernáth,0.54968774,fineart
-Albert Henry Krehbiel,0.54952574,fineart
-Piet Hein Eek,0.54918796,digipa-high-impact
-Yoshitaka Amano,0.5491855,fareast
-Antonio Rotta,0.54909515,fineart
-Józef Mehoffer,0.50760424,fineart
-Donald Sherwood,0.5490415,digipa-high-impact
-Catrin G Grosse,0.5489286,digipa-high-impact
-Arthur Webster Emerson,0.5478842,fineart
-Incarcerated Jerkfaces,0.5488423,digipa-high-impact
-Emanuel Büchel,0.5487217,fineart
-Andrew Loomis,0.54854584,fineart
-Charles Hopkinson,0.54853606,fineart
-Gabor Szikszai,0.5485203,digipa-high-impact
-Archibald Standish Hartrick,0.54850936,digipa-high-impact
-Aleksander Orłowski,0.546705,nudity
-Hans Hinterreiter,0.5483628,fineart
-Fred Williams,0.54544824,fineart
-Fred A. Precht,0.5481606,fineart
-Camille Souter,0.5213742,fineart
-Emil Fuchs,0.54807395,fineart
-Francesco Bonsignori,0.5478936,fineart
-H. R. (Hans Ruedi) Giger,0.547799,fineart
-Harriet Zeitlin,0.5477388,digipa-high-impact
-Christian Jane Fergusson,0.5396168,fineart
-Edward Kemble,0.5476892,fineart
-Bernard Aubertin,0.5475396,fineart
-Augustyn Mirys,0.5474162,fineart
-Alejandro Burdisio,0.47482288,special
-Erin Hanson,0.4343264,digipa-low-impact
-Amalia Lindegren,0.5471987,digipa-high-impact
-Alberto Seveso,0.47735062,fineart
-Bartholomeus Strobel,0.54703736,fineart
-Jim Davis,0.54703003,digipa-high-impact
-Antony Gormley,0.54696125,digipa-high-impact
-Charles Marion Russell,0.54696095,fineart
-George B. Sutherland,0.5467901,fineart
-Almada Negreiros,0.54670584,fineart
-Edward Armitage,0.54358315,fineart
-Bruno Walpoth,0.546167,digipa-high-impact
-Richard Hamilton,0.5461275,nudity
-Charles Harold Davis,0.5460415,digipa-high-impact
-Fernand Verhaegen,0.54601514,fineart
-Bernard Meninsky,0.5302034,digipa-high-impact
-Fede Galizia,0.5456873,digipa-high-impact
-Alfred Kelsner,0.5455753,nudity
-Fritz Puempin,0.5452847,fineart
-Alfred Charles Parker,0.54521024,fineart
-Ahmed Yacoubi,0.544767,digipa-high-impact
-Arthur B. Carles,0.54447794,fineart
-Alice Prin,0.54435575,digipa-high-impact
-Carl Gustaf Pilo,0.5443212,digipa-high-impact
-Ross Tran,0.5259248,special
-Hideyuki Kikuchi,0.544193,fareast
-Art Fitzpatrick,0.49847245,fineart
-Cherryl Fountain,0.5440454,fineart
-Skottie Young,0.5440119,cartoon
-NC Wyeth,0.54382974,digipa-high-impact
-Rudolf Freund,0.5437342,fineart
-Mort Kunstler,0.5433619,digipa-high-impact
-Ben Goossens,0.53002644,digipa-high-impact
-Andreas Rocha,0.49621177,special
-Gérard Ernest Schneider,0.5429964,fineart
-Francesco Filippini,0.5429598,digipa-high-impact
-Alejandro Jodorowsky,0.5429065,digipa-high-impact
-Friedrich Traffelet,0.5428817,fineart
-Honor C. Appleton,0.5428735,digipa-high-impact
-Jason A. Engle,0.542821,fineart
-Henry Otto Wix,0.54271996,fineart
-Gregory Manchess,0.54270375,fineart
-Ann Stookey,0.54269934,digipa-high-impact
-Henryk Rodakowski,0.542589,fineart
-Albert Welti,0.5425134,digipa-high-impact
-Gerard Houckgeest,0.5424413,digipa-high-impact
-Dorothy Hood,0.54226196,digipa-high-impact
-Frank Schoonover,0.51056194,fineart
-Erlund Hudson,0.5422107,digipa-high-impact
-Alexander Litovchenko,0.54210097,fineart
-Sakai Hōitsu,0.5420294,digipa-high-impact
-Benito Quinquela Martín,0.54194224,fineart
-David Watson Stevenson,0.54191554,fineart
-Ann Thetis Blacker,0.5416629,digipa-high-impact
-Frank DuMond,0.51004076,digipa-med-impact
-David Dougal Williams,0.5410126,digipa-high-impact
-Robert Mcginnis,0.54098356,fineart
-Ernest Briggs,0.5408636,fineart
-Ferenc Joachim,0.5408625,fineart
-Carlos Saenz de Tejada,0.47332364,digipa-low-impact
-David Burton-Richardson,0.49659324,digipa-med-impact
-Ernest Heber Thompson,0.54039246,digipa-high-impact
-Albert Bertelsen,0.54038215,nudity
-Giorgio Giulio Clovio,0.5403708,fineart
-Eugene Leroy,0.54019785,digipa-high-impact
-Anna Findlay,0.54018176,digipa-high-impact
-Roy Gjertson,0.54012,digipa-high-impact
-Charmion von Wiegand,0.5400893,fineart
-Arnold Bronckhorst,0.526247,fineart
-Boris Vallejo,0.487253,fineart
-Adélaïde Victoire Hall,0.539939,fineart
-Earl Norem,0.5398575,fineart
-Sanford Kossin,0.53977877,digipa-high-impact
-Aert de Gelder,0.519166,digipa-med-impact
-Carl Eugen Keel,0.539739,digipa-high-impact
-Francis Bourgeois,0.5397272,digipa-high-impact
-Bojan Jevtic,0.41141546,fineart
-Edward Avedisian,0.5393925,fineart
-Gao Xiang,0.5392419,fareast
-Charles Hinman,0.53911865,digipa-high-impact
-Frits Van den Berghe,0.53896487,fineart
-Carlo Martini,0.5384833,digipa-high-impact
-Elina Karimova,0.5384318,digipa-high-impact
-Anto Carte,0.4708289,digipa-low-impact
-Andrey Yefimovich Martynov,0.537721,fineart
-Frances Jetter,0.5376904,fineart
-Yuri Ivanovich Pimenov,0.5342793,fineart
-Gaston Anglade,0.537608,digipa-high-impact
-Albert Swinden,0.5375844,fineart
-Bob Byerley,0.5375774,fineart
-A.B. Frost,0.5375025,fineart
-Jaya Suberg,0.5372893,digipa-high-impact
-Josh Keyes,0.53654516,digipa-high-impact
-Juliana Huxtable,0.5364195,n
-Everett Warner,0.53641814,digipa-high-impact
-Hugh Kretschmer,0.45171157,digipa-low-impact
-Arnold Blanch,0.535774,fineart
-Ryan McGinley,0.53572595,digipa-high-impact
-Alfons Karpiński,0.53564656,fineart
-George Aleef,0.5355317,digipa-high-impact
-Hal Foster,0.5351446,fineart
-Stuart Immonen,0.53501946,digipa-high-impact
-Craig Thompson,0.5346844,digipa-high-impact
-Bartolomeo Vivarini,0.53465015,fineart
-Hermann Feierabend,0.5346168,digipa-high-impact
-Antonio Donghi,0.4610982,digipa-low-impact
-Adonna Khare,0.4858036,digipa-med-impact
-James Stokoe,0.5015107,digipa-med-impact
-Agustín Fernández,0.53403986,fineart
-Germán Londoño,0.5338712,fineart
-Emmanuelle Moureaux,0.5335641,digipa-high-impact
-Conrad Marca-Relli,0.5148334,digipa-med-impact
-Gyula Batthyány,0.5332407,fineart
-Francesco Raibolini,0.53314835,fineart
-Apelles,0.5166026,fineart
-Marat Latypov,0.45811993,fineart
-Andrei Markin,0.5328752,fineart
-Einar Hakonarson,0.5328311,digipa-high-impact
-Beatrice Huntington,0.5328165,digipa-high-impact
-Coppo di Marcovaldo,0.5327443,fineart
-Gregorio Prestopino,0.53250784,fineart
-A.D.M. Cooper,0.53244877,digipa-high-impact
-Horatio McCulloch,0.53244334,digipa-high-impact
-Wes Anderson,0.5318741,digipa-high-impact
-Moebius,0.53178746,digipa-high-impact
-Gerard Soest,0.53160626,fineart
-Charles Ellison,0.53152347,digipa-high-impact
-Wojciech Ostrycharz,0.5314213,fineart
-Doug Chiang,0.5313724,fineart
-Anne Savage,0.5310638,digipa-high-impact
-Cor Melchers,0.53099334,fineart
-Gordon Browne,0.5308195,digipa-high-impact
-Augustus Earle,0.49196815,fineart
-Carlos Francisco Chang Marín,0.5304734,fineart
-Larry Elmore,0.53032553,fineart
-Adolf Hölzel,0.5303149,fineart
-David Ligare,0.5301894,fineart
-Jan Luyken,0.52985555,fineart
-Earle Bergey,0.5298525,fineart
-David Ramsay Hay,0.52974963,digipa-high-impact
-Alfred East,0.5296565,digipa-high-impact
-A. R. Middleton Todd,0.50988734,fineart
-Giorgio De Vincenzi,0.5291678,fineart
-Hugh William Williams,0.5291014,digipa-high-impact
-Erwin Bowien,0.52895796,digipa-high-impact
-Victor Adame Minguez,0.5288686,fineart
-Yoji Shinkawa,0.5287015,anime
-Clara Weaver Parrish,0.5284487,digipa-high-impact
-Albert Eckhout,0.5284096,fineart
-Dorothy Coke,0.5282345,digipa-high-impact
-Jerzy Duda-Gracz,0.5279943,digipa-high-impact
-Byron Galvez,0.39178842,fareast
-Alson S. Clark,0.5278568,digipa-high-impact
-Adolf Ulric Wertmüller,0.5278296,digipa-high-impact
-Bruce Coville,0.5277226,digipa-high-impact
-Gong Kai,0.5276811,digipa-high-impact
-Andréi Arinouchkine,0.52763486,digipa-high-impact
-Florence Engelbach,0.5273161,digipa-high-impact
-Brian Froud,0.5270276,fineart
-Charles Thomson,0.5270127,digipa-high-impact
-Bessie Wheeler,0.5269164,digipa-high-impact
-Anton Lehmden,0.5268611,fineart
-Emilia Wilk,0.5264961,fineart
-Carl Eytel,0.52646196,digipa-high-impact
-Alfred Janes,0.5264481,digipa-high-impact
-Julie Bell,0.49962538,fineart
-Eugenio de Arriba,0.52613926,digipa-high-impact
-Samuel and Joseph Newsom,0.52595663,digipa-high-impact
-Hans Falk,0.52588874,digipa-high-impact
-Guillermo del Toro,0.52565175,digipa-high-impact
-Félix Arauz,0.52555984,digipa-high-impact
-Gyula Basch,0.52524436,digipa-high-impact
-Haroon Mirza,0.5252279,digipa-high-impact
-Du Jin,0.5249934,digipa-med-impact
-Harry Shoulberg,0.5249456,digipa-med-impact
-Arie Smit,0.5249027,fineart
-Ahmed Karahisari,0.4259451,digipa-low-impact
-Brian and Wendy Froud,0.5246335,fineart
-E. William Gollings,0.52461207,digipa-med-impact
-Bo Bartlett,0.51341593,digipa-med-impact
-Hans Burgkmair,0.52416867,digipa-med-impact
-David Macaulay,0.5241233,digipa-med-impact
-Benedetto Caliari,0.52370214,digipa-med-impact
-Eliott Lilly,0.5235398,digipa-med-impact
-Vincent Tanguay,0.48578292,digipa-med-impact
-Ada Hill Walker,0.52207166,fineart
-Christopher Wood,0.49360397,digipa-med-impact
-Kris Kuksi,0.43938053,digipa-low-impact
-Chen Yifei,0.5217867,fineart
-Margaux Valonia,0.5217782,digipa-med-impact
-Antoni Pitxot,0.40582713,digipa-low-impact
-Jhonen Vasquez,0.5216471,digipa-med-impact
-Emilio Grau Sala,0.52156484,fineart
-Henry B. Christian,0.52153796,fineart
-Jacques Nathan-Garamond,0.52144086,digipa-med-impact
-Eddie Mendoza,0.4949638,digipa-med-impact
-Grzegorz Rutkowski,0.48906532,special
-Beeple,0.40085253,digipa-low-impact
-Giorgio Cavallon,0.5209209,digipa-med-impact
-Godfrey Blow,0.52062386,digipa-med-impact
-Gabriel Dawe,0.5204431,fineart
-Emile Lahner,0.5202367,digipa-med-impact
-Steve Dillon,0.5201676,digipa-med-impact
-Lee Quinones,0.4626683,digipa-low-impact
-Hale Woodruff,0.52000225,digipa-med-impact
-Tom Hammick,0.5032626,digipa-med-impact
-Hamilton Sloan,0.5197798,digipa-med-impact
-Caesar Andrade Faini,0.51971483,digipa-med-impact
-Sam Spratt,0.48991,digipa-med-impact
-Chris Cold,0.4753577,fineart
-Alejandro Obregón,0.5190562,digipa-med-impact
-Dan Flavin,0.51901346,digipa-med-impact
-Arthur Sarnoff,0.5189428,fineart
-Elenore Abbott,0.5187141,digipa-med-impact
-Andrea Kowch,0.51822996,digipa-med-impact
-Demetrios Farmakopoulos,0.5181248,digipa-med-impact
-Alexis Grimou,0.41958088,digipa-low-impact
-Lesley Vance,0.5177536,digipa-med-impact
-Gyula Aggházy,0.517747,fineart
-Georgina Hunt,0.46105456,digipa-low-impact
-Christian W. Staudinger,0.4684662,digipa-low-impact
-Abraham Begeyn,0.5172538,digipa-med-impact
-Charles Mozley,0.5171356,digipa-med-impact
-Elias Ravanetti,0.38719344,digipa-low-impact
-Herman van Swanevelt,0.5168748,digipa-med-impact
-David Paton,0.4842217,digipa-med-impact
-Hans Werner Schmidt,0.51671976,digipa-med-impact
-Bob Ross,0.51628315,fineart
-Sou Fujimoto,0.5162528,fareast
-Balcomb Greene,0.5162045,digipa-med-impact
-Glen Angus,0.51609933,digipa-med-impact
-Buckminster Fuller,0.51607454,digipa-med-impact
-Andrei Ryabushkin,0.5158933,fineart
-Almeida Júnior,0.515856,digipa-med-impact
-Tim White,0.4182697,digipa-low-impact
-Hans Beat Wieland,0.51553553,digipa-med-impact
-Jakub Różalski,0.5154904,digipa-med-impact
-John Whitcomb,0.51523805,digipa-med-impact
-Dorothy King,0.5150925,digipa-med-impact
-Richard S. Johnson,0.51500344,fineart
-Aniello Falcone,0.51475304,digipa-med-impact
-Henning Jakob Henrik Lund,0.5147134,c
-Robert M Cunningham,0.5144858,digipa-med-impact
-Nick Knight,0.51447505,digipa-med-impact
-David Chipperfield,0.51424,digipa-med-impact
-Bartolomeo Cesi,0.5136737,digipa-med-impact
-Bettina Heinen-Ayech,0.51334465,digipa-med-impact
-Annabel Kidston,0.51327646,digipa-med-impact
-Charles Schridde,0.51308405,digipa-med-impact
-Samuel Earp,0.51305825,digipa-med-impact
-Eugene Montgomery,0.5128343,digipa-med-impact
-Alfred Parsons,0.5127445,digipa-med-impact
-Anton Möller,0.5127209,digipa-med-impact
-Craig Davison,0.499598,special
-Cricorps Grégoire,0.51267076,fineart
-Celia Fiennes,0.51266706,digipa-med-impact
-Raymond Swanland,0.41350424,fineart
-Howard Knotts,0.5122062,digipa-med-impact
-Helmut Federle,0.51201206,digipa-med-impact
-Tyler Edlin,0.44028252,digipa-high-impact
-Elwood H. Smith,0.5119027,digipa-med-impact
-Ralph Horsley,0.51142794,fineart
-Alexander Ivanov,0.4539051,digipa-low-impact
-Cedric Peyravernay,0.4200587,digipa-low-impact
-Annabel Eyres,0.51136214,digipa-med-impact
-Zack Snyder,0.51129746,digipa-med-impact
-Gentile Bellini,0.511102,digipa-med-impact
-Giovanni Pelliccioli,0.4868688,digipa-med-impact
-Fikret Muallâ Saygı,0.510694,digipa-med-impact
-Bauhaus,0.43454266,digipa-low-impact
-Charles Williams,0.510406,digipa-med-impact
-Georg Arnold-Graboné,0.5103381,digipa-med-impact
-Fedot Sychkov,0.47935224,digipa-med-impact
-Alberto Magnelli,0.5103212,digipa-med-impact
-Aloysius O'Kelly,0.5102891,digipa-med-impact
-Alexander McQueen,0.5101986,digipa-med-impact
-Cam Sykes,0.510071,digipa-med-impact
-George Lucas,0.510038,digipa-med-impact
-Eglon van der Neer,0.5099339,digipa-med-impact
-Christian August Lorentzen,0.50989646,digipa-med-impact
-Eleanor Best,0.50966686,digipa-med-impact
-Terry Redlin,0.474244,fineart
-Ken Kelly,0.4304738,fineart
-David Eugene Henry,0.48173362,fineart
-Shin Jeongho,0.5092497,fareast
-Flora Borsi,0.5091922,digipa-med-impact
-Berndnaut Smilde,0.50864,digipa-med-impact
-Art of Brom,0.45828784,fineart
-Ernő Tibor,0.50851977,digipa-med-impact
-Ancell Stronach,0.5084514,digipa-med-impact
-Helen Thomas Dranga,0.45412368,digipa-low-impact
-Anita Malfatti,0.5080986,digipa-med-impact
-Arnold Brügger,0.5080749,digipa-med-impact
-Edward Ben Avram,0.50778764,digipa-med-impact
-Antonio Ciseri,0.5073538,fineart
-Alyssa Monks,0.50734174,digipa-med-impact
-Chen Zhen,0.5071876,digipa-med-impact
-Francis Helps,0.50707847,digipa-med-impact
-Georg Karl Pfahler,0.50700235,digipa-med-impact
-Henry Woods,0.506811,digipa-med-impact
-Barbara Greg,0.50674164,digipa-med-impact
-Guan Daosheng,0.506712,fareast
-Guy Billout,0.5064906,digipa-med-impact
-Basuki Abdullah,0.50613165,digipa-med-impact
-Thomas Visscher,0.5059943,digipa-med-impact
-Edward Simmons,0.50598735,digipa-med-impact
-Arabella Rankin,0.50572735,digipa-med-impact
-Lady Pink,0.5056634,digipa-high-impact
-Christopher Williams,0.5052288,digipa-med-impact
-Fuyuko Matsui,0.5051116,fareast
-Edward Baird,0.5049874,digipa-med-impact
-Georges Stein,0.5049069,digipa-med-impact
-Alex Alemany,0.43974748,digipa-low-impact
-Emanuel Schongut,0.5047326,digipa-med-impact
-Hans Bol,0.5045265,digipa-med-impact
-Kurzgesagt,0.5043725,digipa-med-impact
-Harald Giersing,0.50410193,digipa-med-impact
-Antonín Slavíček,0.5040368,fineart
-Carl Rahl,0.5040115,digipa-med-impact
-Etienne Delessert,0.5037818,fineart
-Americo Makk,0.5034161,digipa-med-impact
-Fernand Pelez,0.5027561,digipa-med-impact
-Alexey Merinov,0.4469615,digipa-low-impact
-Caspar Netscher,0.5019529,digipa-med-impact
-Walt Disney,0.50178146,digipa-med-impact
-Qian Xuan,0.50150526,fareast
-Geoffrey Dyer,0.50120556,digipa-med-impact
-Andre Norton,0.5007602,digipa-med-impact
-Daphne McClure,0.5007391,digipa-med-impact
-Dieric Bouts,0.5005882,fineart
-Aguri Uchida,0.5005107,fareast
-Hugo Scheiber,0.50004864,digipa-med-impact
-Kenne Gregoire,0.46421963,digipa-low-impact
-Wolfgang Tillmans,0.4999767,fineart
-Carl-Henning Pedersen,0.4998986,digipa-med-impact
-Alison Debenham,0.4998683,digipa-med-impact
-Eppo Doeve,0.49975222,digipa-med-impact
-Christen Købke,0.49961317,digipa-med-impact
-Aron Demetz,0.49895018,digipa-med-impact
-Alesso Baldovinetti,0.49849576,digipa-med-impact
-Jimmy Lawlor,0.4475271,fineart
-Carl Walter Liner,0.49826378,fineart
-Gwenny Griffiths,0.45598924,digipa-low-impact
-David Cooke Gibson,0.4976222,digipa-med-impact
-Howard Butterworth,0.4974621,digipa-med-impact
-Bob Thompson,0.49743804,fineart
-Enguerrand Quarton,0.49711192,fineart
-Abdel Hadi Al Gazzar,0.49631482,digipa-med-impact
-Gu Zhengyi,0.49629828,digipa-med-impact
-Aleksander Kotsis,0.4953621,digipa-med-impact
-Alexander Sharpe Ross,0.49519226,digipa-med-impact
-Carlos Enríquez Gómez,0.49494863,digipa-med-impact
-Abed Abdi,0.4948855,digipa-med-impact
-Elaine Duillo,0.49474388,digipa-med-impact
-Anne Said,0.49473995,digipa-med-impact
-Istvan Banyai,0.4947369,digipa-med-impact
-Bouchta El Hayani,0.49455142,digipa-med-impact
-Chinwe Chukwuogo-Roy,0.49445248,n
-George Claessen,0.49412063,digipa-med-impact
-Axel Törneman,0.49401706,digipa-med-impact
-Avigdor Arikha,0.49384058,digipa-med-impact
-Gloria Stoll Karn,0.4937976,digipa-med-impact
-Alfredo Volpi,0.49367586,digipa-med-impact
-Raffaello Sanizo,0.49365884,digipa-med-impact
-Jeff Easley,0.49344411,digipa-med-impact
-Aileen Eagleton,0.49318358,digipa-med-impact
-Gaetano Sabatini,0.49307147,digipa-med-impact
-Bertalan Pór,0.4930132,digipa-med-impact
-Alfred Jensen,0.49291304,digipa-med-impact
-Huang Guangjian,0.49286693,fareast
-Emil Ferris,0.49282396,digipa-med-impact
-Derek Chittock,0.492694,digipa-med-impact
-Alonso Vázquez,0.49205148,digipa-med-impact
-Kelly Sue Deconnick,0.4919476,digipa-med-impact
-Clive Madgwick,0.4749857,fineart
-Edward George Handel Lucas,0.49166748,digipa-med-impact
-Dorothea Braby,0.49161923,digipa-med-impact
-Sangyeob Park,0.49150884,fareast
-Heinz Edelman,0.49140438,digipa-med-impact
-Mark Seliger,0.4912073,digipa-med-impact
-Camilo Egas,0.4586727,digipa-low-impact
-Craig Mullins,0.49085408,fineart
-Dong Kingman,0.49063343,digipa-med-impact
-Douglas Robertson Bisset,0.49031347,digipa-med-impact
-Blek Le Rat,0.49008566,digipa-med-impact
-Anton Ažbe,0.48984748,fineart
-Olafur Eliasson,0.48971075,digipa-med-impact
-Elinor Proby Adams,0.48967826,digipa-med-impact
-Cándido López,0.48915705,digipa-med-impact
-D. Howard Hitchcock,0.48902267,digipa-med-impact
-Cheng Jiasui,0.48889247,fareast
-Jean Nouvel,0.4888183,digipa-med-impact
-Bill Gekas,0.48848945,digipa-med-impact
-Hermione Hammond,0.48845994,digipa-med-impact
-Fernando Gerassi,0.48841453,digipa-med-impact
-Frank Barrington Craig,0.4883762,digipa-med-impact
-A. B. Jackson,0.4883623,digipa-med-impact
-Bernie D’Andrea,0.48813275,digipa-med-impact
-Clarice Beckett,0.487809,digipa-med-impact
-Dosso Dossi,0.48775777,digipa-med-impact
-Donald Roller Wilson,0.48767656,digipa-med-impact
-Ernest William Christmas,0.4876317,digipa-med-impact
-Aleksandr Gerasimov,0.48736423,digipa-med-impact
-Edward Clark,0.48703307,digipa-med-impact
-Georg Schrimpf,0.48697302,digipa-med-impact
-John Wilhelm,0.48696536,digipa-med-impact
-Aries Moross,0.4863676,digipa-med-impact
-Bill Lewis,0.48635158,digipa-med-impact
-Huang Ji,0.48611963,fareast
-F. Scott Hess,0.43634564,fineart
-Gao Qipei,0.4860631,fareast
-Albert Tucker,0.4854299,digipa-med-impact
-Barbara Balmer,0.48528513,fineart
-Anne Ryan,0.48511976,digipa-med-impact
-Helen Edwards,0.48484707,digipa-med-impact
-Alexander Bogen,0.48421195,digipa-med-impact
-David Annand,0.48418126,digipa-med-impact
-Du Qiong,0.48414314,fareast
-Fred Cress,0.4837878,digipa-med-impact
-David B. Mattingly,0.48370445,digipa-med-impact
-Hristofor Žefarović,0.4837008,digipa-med-impact
-Wim Wenders,0.44484183,digipa-low-impact
-Alexander Fedosav,0.48360944,digipa-med-impact
-Anne Rigney,0.48357943,digipa-med-impact
-Bertalan Karlovszky,0.48338628,digipa-med-impact
-George Frederick Harris,0.4833259,fineart
-Toshiharu Mizutani,0.48315164,fareast
-David McClellan,0.39739317,digipa-low-impact
-Eugeen Van Mieghem,0.48270774,digipa-med-impact
-Alexei Harlamoff,0.48255378,digipa-med-impact
-Jeff Legg,0.48249072,digipa-med-impact
-Elizabeth Murray,0.48227608,digipa-med-impact
-Hugo Heyrman,0.48213717,digipa-med-impact
-Adrian Paul Allinson,0.48211843,digipa-med-impact
-Altoon Sultan,0.4820177,digipa-med-impact
-Alice Mason,0.48188528,fareast
-Harriet Powers,0.48181778,digipa-med-impact
-Aaron Bohrod,0.48175076,digipa-med-impact
-Chris Saunders,0.41429797,digipa-low-impact
-Clara Miller Burd,0.47797233,digipa-med-impact
-David G. Sorensen,0.38101727,digipa-low-impact
-Iwan Baan,0.4806739,digipa-med-impact
-Anatoly Metlan,0.48020265,digipa-med-impact
-Alfons von Czibulka,0.4801954,digipa-med-impact
-Amedee Ozenfant,0.47950014,digipa-med-impact
-Valerie Hegarty,0.47947168,digipa-med-impact
-Hugo Anton Fisher,0.4793551,digipa-med-impact
-Antonio Roybal,0.4792729,digipa-med-impact
-Cui Zizhong,0.47902682,fareast
-F Scott Hess,0.42582104,fineart
-Julien Delval,0.47888556,digipa-med-impact
-Marcin Jakubowski,0.4788583,digipa-med-impact
-Anne Stokes,0.4786997,digipa-med-impact
-David Palumbo,0.47632077,fineart
-Hallsteinn Sigurðsson,0.47858906,digipa-med-impact
-Mike Campau,0.47850558,digipa-med-impact
-Giuseppe Avanzi,0.47846943,digipa-med-impact
-Harry Morley,0.47836518,digipa-med-impact
-Constance-Anne Parker,0.47832203,digipa-med-impact
-Albert Keller,0.47825447,digipa-med-impact
-Daniel Chodowiecki,0.47825167,digipa-med-impact
-Alasdair Grant Taylor,0.47802624,digipa-med-impact
-Maria Pascual Alberich,0.4779718,fineart
-Rebeca Saray,0.41697127,digipa-low-impact
-Ernő Bánk,0.47753686,digipa-med-impact
-Shaddy Safadi,0.47724134,digipa-med-impact
-André Castro,0.4771826,digipa-med-impact
-Amiet Cuno,0.41975892,digipa-low-impact
-Adi Granov,0.40670198,fineart
-Allen Williams,0.47675848,digipa-med-impact
-Anna Haifisch,0.47672725,digipa-med-impact
-Clovis Trouille,0.47669724,digipa-med-impact
-Jane Graverol,0.47655866,digipa-med-impact
-Conroy Maddox,0.47645602,digipa-med-impact
-Božidar Jakac,0.4763106,digipa-med-impact
-George Morrison,0.47533786,digipa-med-impact
-Douglas Bourgeois,0.47527707,digipa-med-impact
-Cao Zhibai,0.47476804,fareast
-Bradley Walker Tomlin,0.47462896,digipa-low-impact
-Dave Dorman,0.46852386,fineart
-Stevan Dohanos,0.47452107,fineart
-John Howe,0.44144905,fineart
-Fanny McIan,0.47406268,digipa-low-impact
-Bholekar Srihari,0.47387534,digipa-low-impact
-Giovanni Lanfranco,0.4737344,digipa-low-impact
-Fred Marcellino,0.47346023,digipa-low-impact
-Clyde Caldwell,0.47305286,fineart
-Haukur Halldórsson,0.47275954,digipa-low-impact
-Huang Gongwang,0.47269204,fareast
-Brothers Grimm,0.47249007,digipa-low-impact
-Ollie Hoff,0.47240657,digipa-low-impact
-RHADS,0.4722166,digipa-low-impact
-Constance Gordon-Cumming,0.47219282,digipa-low-impact
-Anne Mccaffrey,0.4719924,digipa-low-impact
-Henry Heerup,0.47190166,digipa-low-impact
-Adrian Smith,0.4716923,digipa-high-impact
-Harold Elliott,0.4714101,digipa-low-impact
-Eric Peterson,0.47106332,digipa-low-impact
-David Garner,0.47106326,digipa-low-impact
-Edward Hicks,0.4708863,digipa-low-impact
-Alfred Krupa,0.47052455,digipa-low-impact
-Breyten Breytenbach,0.4699338,digipa-low-impact
-Douglas Shuler,0.4695691,digipa-low-impact
-Elaine Hamilton,0.46941522,digipa-low-impact
-Kapwani Kiwanga,0.46917036,digipa-low-impact
-Dan Scott,0.46897763,digipa-low-impact
-Allan Brooks,0.46882123,digipa-low-impact
-Ian Fairweather,0.46878594,digipa-low-impact
-Arlington Nelson Lindenmuth,0.4683814,digipa-low-impact
-Russell Ayto,0.4681503,digipa-low-impact
-Allan Linder,0.46812692,digipa-low-impact
-Bohumil Kubista,0.4679809,digipa-low-impact
-Christopher Jin Baron,0.4677839,digipa-low-impact
-Eero Snellman,0.46777654,digipa-low-impact
-Christabel Dennison,0.4677633,digipa-low-impact
-Amelia Peláez,0.46764764,digipa-low-impact
-James Gurney,0.46740666,digipa-low-impact
-Carles Delclaux Is,0.46734855,digipa-low-impact
-George Papazov,0.42420334,digipa-low-impact
-Mark Brooks,0.4672415,fineart
-Anne Dunn,0.46722376,digipa-low-impact
-Klaus Wittmann,0.4670704,fineart
-Arvid Nyholm,0.46697336,digipa-low-impact
-Georg Scholz,0.46674117,digipa-low-impact
-David Spriggs,0.46671993,digipa-low-impact
-Ernest Morgan,0.4665036,digipa-low-impact
-Ella Guru,0.46619284,digipa-low-impact
-Helen Berman,0.46614346,digipa-low-impact
-Gen Paul,0.4658785,digipa-low-impact
-Auseklis Ozols,0.46569023,digipa-low-impact
-Amelia Robertson Hill,0.4654411,fineart
-Jim Lee,0.46544096,digipa-low-impact
-Anson Maddocks,0.46539295,digipa-low-impact
-Chen Hong,0.46516004,fareast
-Haddon Sundblom,0.46490777,digipa-low-impact
-Eva Švankmajerová,0.46454152,digipa-low-impact
-Antonio Cavallucci,0.4645282,digipa-low-impact
-Herve Groussin,0.40050638,digipa-low-impact
-Gwen Barnard,0.46400994,digipa-low-impact
-Grace English,0.4638674,digipa-low-impact
-Carl Critchlow,0.4636,digipa-low-impact
-Ayshia Taşkın,0.463412,digipa-low-impact
-Alison Watt,0.43141022,digipa-low-impact
-Andre de Krayewski,0.4628024,digipa-low-impact
-Hamish MacDonald,0.462645,digipa-low-impact
-Ni Chuanjing,0.46254826,fareast
-Frank Mason,0.46254665,digipa-low-impact
-Steve Henderson,0.43113405,fineart
-Eileen Aldridge,0.46210572,digipa-low-impact
-Brad Rigney,0.28446302,digipa-low-impact
-Ching Yeh,0.46177,fareast
-Bertram Brooker,0.46176457,digipa-low-impact
-Henry Bright,0.46150023,digipa-low-impact
-Claire Dalby,0.46117848,digipa-low-impact
-Brian Despain,0.41538632,digipa-low-impact
-Anna Maria Barbara Abesch,0.4611045,digipa-low-impact
-Bernardo Daddi,0.46088326,digipa-low-impact
-Abraham Mintchine,0.46088243,digipa-high-impact
-Alexander Carse,0.46078917,digipa-low-impact
-Doc Hammer,0.46075988,digipa-low-impact
-Yuumei,0.46072406,digipa-low-impact
-Teophilus Tetteh,0.46064255,n
-Bess Hamiti,0.46062252,digipa-low-impact
-Ceferí Olivé,0.46058378,digipa-low-impact
-Enrique Grau,0.46046937,digipa-low-impact
-Eleanor Hughes,0.46007007,digipa-low-impact
-Elizabeth Charleston,0.46001568,digipa-low-impact
-Félix Ziem,0.45987016,digipa-low-impact
-Eugeniusz Zak,0.45985222,digipa-low-impact
-Dain Yoon,0.45977795,fareast
-Gong Xian,0.4595083,digipa-low-impact
-Flavia Blois,0.45950204,digipa-low-impact
-Frederik Vermehren,0.45949826,digipa-low-impact
-Gang Se-hwang,0.45937777,digipa-low-impact
-Bjørn Wiinblad,0.45934483,digipa-low-impact
-Alex Horley-Orlandelli,0.42623433,digipa-low-impact
-Dr. Atl,0.459287,digipa-low-impact
-Hu Jieqing,0.45889485,fareast
-Amédée Ozenfant,0.4585215,digipa-low-impact
-Warren Ellis,0.4584044,digipa-low-impact
-Helen Dahm,0.45804346,digipa-low-impact
-Anne Geddes,0.45785287,digipa-low-impact
-Bikash Bhattacharjee,0.45775396,digipa-low-impact
-Phil Foglio,0.457582,digipa-low-impact
-Evelyn Abelson,0.4574563,digipa-low-impact
-Alan Moore,0.4573369,digipa-low-impact
-Josh Kao,0.45725146,fareast
-Bertil Nilsson,0.45724383,digipa-low-impact
-Hristofor Zhefarovich,0.457089,fineart
-Edward Bailey,0.45659882,digipa-low-impact
-Christopher Moeller,0.45648077,digipa-low-impact
-Dóra Keresztes,0.4558745,fineart
-Cory Arcangel,0.4558071,digipa-low-impact
-Aleksander Kobzdej,0.45552525,digipa-low-impact
-Tim Burton,0.45541722,digipa-high-impact
-Chen Jiru,0.4553378,fareast
-George Passantino,0.4552104,digipa-low-impact
-Fuller Potter,0.4552072,digipa-low-impact
-Warwick Globe,0.45516664,digipa-low-impact
-Heinz Anger,0.45466962,digipa-low-impact
-Elias Goldberg,0.45416242,digipa-low-impact
-tokyogenso,0.45406622,fareast
-Zeen Chin,0.45404464,digipa-low-impact
-Albert Koetsier,0.45385844,fineart
-Giuseppe Camuncoli,0.45377725,digipa-low-impact
-Elsie Vera Cole,0.45377362,digipa-low-impact
-Andreas Franke,0.4300047,digipa-low-impact
-Constantine Andreou,0.4533816,digipa-low-impact
-Elisabeth Collins,0.45337808,digipa-low-impact
-Ted Nasmith,0.45302224,fineart
-Antônio Parreiras,0.45269623,digipa-low-impact
-Gwilym Prichard,0.45256525,digipa-low-impact
-Fang Congyi,0.45240825,fareast
-Huang Ding,0.45233482,fareast
-Hans von Bartels,0.45200723,digipa-low-impact
-Peter Elson,0.4121406,fineart
-Fan Kuan,0.4513034,digipa-low-impact
-Dean Roger,0.45112592,digipa-low-impact
-Bernat Sanjuan,0.45074993,fareast
-Fletcher Martin,0.45055175,digipa-low-impact
-Gentile Tondino,0.45043385,digipa-low-impact
-Ei-Q,0.45038772,digipa-low-impact
-Chen Lin,0.45035738,fareast
-Ted Wallace,0.4500007,digipa-low-impact
-"Cornelisz Hendriksz Vroom, the Younger",0.4499252,digipa-low-impact
-Alpo Jaakola,0.44981295,digipa-low-impact
-Clark Voorhees,0.4495309,digipa-low-impact
-Cleve Gray,0.449188,digipa-low-impact
-Wolf Kahn,0.4489858,digipa-low-impact
-Choi Buk,0.44892842,fareast
-Frank Tinsley,0.4480373,digipa-low-impact
-George Bell,0.44779524,digipa-low-impact
-Fiona Stephenson,0.44761062,fineart
-Carlos Trillo Name,0.4470371,digipa-low-impact
-Jamie McKelvie,0.44696707,digipa-low-impact
-Dennis Flanders,0.44673377,digipa-low-impact
-Dulah Marie Evans,0.44662604,digipa-low-impact
-Hans Schwarz,0.4463275,digipa-low-impact
-Steve McCurry,0.44620228,digipa-low-impact
-Bedwyr Williams,0.44616276,digipa-low-impact
-Anton Graff,0.38569996,digipa-low-impact
-Leticia Gillett,0.44578317,digipa-low-impact
-Rafał Olbiński,0.44561762,digipa-low-impact
-Artgerm,0.44555497,fineart
-Adrienn Henczné Deák,0.445518,digipa-low-impact
-Gu Hongzhong,0.4454906,fareast
-Matt Groening,0.44518438,digipa-low-impact
-Sue Bryce,0.4447164,digipa-low-impact
-Armin Baumgarten,0.444061,digipa-low-impact
-Araceli Gilbert,0.44399196,digipa-low-impact
-Carey Morris,0.44388965,digipa-low-impact
-Ignat Bednarik,0.4438085,digipa-low-impact
-Frank Buchser,0.44373792,digipa-low-impact
-Ben Zoeller,0.44368798,digipa-low-impact
-Adam Szentpétery,0.4434548,fineart
-Gene Davis,0.44343877,digipa-low-impact
-Fei Danxu,0.4433627,fareast
-Andrei Kolkoutine,0.44328922,digipa-low-impact
-Bruce Onobrakpeya,0.42588046,n
-Christoph Amberger,0.38912287,digipa-low-impact
-"Fred Mitchell,",0.4432277,digipa-low-impact
-Klaus Burgle,0.44295216,digipa-low-impact
-Carl Hoppe,0.44270635,digipa-low-impact
-Caroline Gotch,0.44263047,digipa-low-impact
-Hans Mertens,0.44260004,digipa-low-impact
-Mandy Disher,0.44219893,fineart
-Sarah Lucas,0.4420507,digipa-low-impact
-Sydney Edmunds,0.44198513,digipa-low-impact
-Amos Ferguson,0.4418735,digipa-low-impact
-Alton Tobey,0.4416385,digipa-low-impact
-Clifford Ross,0.44139367,digipa-low-impact
-Henric Trenk,0.4412782,digipa-low-impact
-Claire Hummel,0.44119984,digipa-low-impact
-Norman Foster,0.4411899,digipa-low-impact
-Carmen Saldana,0.44076762,digipa-low-impact
-Michael Whelan,0.4372847,digipa-low-impact
-Carlos Berlanga,0.440354,digipa-low-impact
-Gilles Beloeil,0.43997732,digipa-low-impact
-Ashley Wood,0.4398396,digipa-low-impact
-David Allan,0.43969798,digipa-low-impact
-Mark Lovett,0.43922082,digipa-low-impact
-Jed Henry,0.43882954,digipa-low-impact
-Adam Bruce Thomson,0.43847767,digipa-low-impact
-Horst Antes,0.4384303,digipa-low-impact
-Fritz Glarner,0.43787453,digipa-low-impact
-Harold McCauley,0.43760818,digipa-low-impact
-Estuardo Maldonado,0.437594,digipa-low-impact
-Dai Jin,0.4375449,fareast
-Fabien Charuau,0.43688047,digipa-low-impact
-Chica Macnab,0.4365166,digipa-low-impact
-Jim Burns,0.3975072,digipa-low-impact
-Santiago Calatrava,0.43651623,digipa-low-impact
-Robert Maguire,0.40926617,digipa-low-impact
-Cliff Childs,0.43611953,digipa-low-impact
-Charles Martin,0.43582463,fareast
-Elbridge Ayer Burbank,0.43572164,digipa-low-impact
-Anita Kunz,0.4356005,digipa-low-impact
-Colin Geller,0.43559563,digipa-low-impact
-Allen Tupper True,0.43556124,digipa-low-impact
-Jef Wu,0.43555313,digipa-low-impact
-Jon McCoy,0.4147122,digipa-low-impact
-Cedric Seaut,0.43521535,digipa-low-impact
-Emily Shanks,0.43519047,digipa-low-impact
-Andrew Whem,0.43512022,digipa-low-impact
-Ibrahim Kodra,0.43471518,digipa-low-impact
-Harrington Mann,0.4345901,digipa-low-impact
-Jerry Siegel,0.43458986,digipa-low-impact
-Howard Kanovitz,0.4345178,digipa-low-impact
-Cicely Hey,0.43449926,digipa-low-impact
-Ben Thompson,0.43436068,digipa-low-impact
-Joe Bowler,0.43413073,digipa-low-impact
-Lori Earley,0.43389612,digipa-low-impact
-Arent Arentsz,0.43373522,digipa-low-impact
-David Bailly,0.43371305,digipa-low-impact
-Hans Arnold,0.4335214,digipa-low-impact
-Constance Copeman,0.4334836,digipa-low-impact
-Brent Heighton,0.4333118,fineart
-Eric Taylor,0.43312082,digipa-low-impact
-Aleksander Gine,0.4326849,digipa-low-impact
-Alexander Johnston,0.4326589,digipa-low-impact
-David Park,0.43235332,digipa-low-impact
-Balázs Diószegi,0.432244,digipa-low-impact
-Ed Binkley,0.43222216,digipa-low-impact
-Eric Dinyer,0.4321258,digipa-low-impact
-Susan Luo,0.43198025,fareast
-Cedric Seaut (Keos Masons),0.4317356,digipa-low-impact
-Lorena Alvarez Gómez,0.431683,digipa-low-impact
-Fred Ludekens,0.431662,digipa-low-impact
-David Begbie,0.4316218,digipa-low-impact
-Ai Xuan,0.43150818,fareast
-Felix-Kelly,0.43132153,digipa-low-impact
-Antonín Chittussi,0.431248,digipa-low-impact
-Ammi Phillips,0.43095884,digipa-low-impact
-Elke Vogelsang,0.43092483,digipa-low-impact
-Fathi Hassan,0.43090487,digipa-low-impact
-Angela Sung,0.391746,fareast
-Clément Serveau,0.43050706,digipa-low-impact
-Dong Yuan,0.4303865,fareast
-Hew Lorimer,0.43035403,digipa-low-impact
-David Finch,0.29487437,digipa-low-impact
-Bill Durgin,0.4300932,digipa-low-impact
-Alexander Robertson,0.4300743,digipa-low-impact
diff --git a/configs/alt-diffusion-inference.yaml b/configs/alt-diffusion-inference.yaml
new file mode 100644
index 00000000..cfbee72d
--- /dev/null
+++ b/configs/alt-diffusion-inference.yaml
@@ -0,0 +1,72 @@
+model:
+ base_learning_rate: 1.0e-04
+ target: ldm.models.diffusion.ddpm.LatentDiffusion
+ params:
+ linear_start: 0.00085
+ linear_end: 0.0120
+ num_timesteps_cond: 1
+ log_every_t: 200
+ timesteps: 1000
+ first_stage_key: "jpg"
+ cond_stage_key: "txt"
+ image_size: 64
+ channels: 4
+ cond_stage_trainable: false # Note: different from the one we trained before
+ conditioning_key: crossattn
+ monitor: val/loss_simple_ema
+ scale_factor: 0.18215
+ use_ema: False
+
+ scheduler_config: # 10000 warmup steps
+ target: ldm.lr_scheduler.LambdaLinearScheduler
+ params:
+ warm_up_steps: [ 10000 ]
+ cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+ f_start: [ 1.e-6 ]
+ f_max: [ 1. ]
+ f_min: [ 1. ]
+
+ unet_config:
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+ params:
+ image_size: 32 # unused
+ in_channels: 4
+ out_channels: 4
+ model_channels: 320
+ attention_resolutions: [ 4, 2, 1 ]
+ num_res_blocks: 2
+ channel_mult: [ 1, 2, 4, 4 ]
+ num_heads: 8
+ use_spatial_transformer: True
+ transformer_depth: 1
+ context_dim: 768
+ use_checkpoint: True
+ legacy: False
+
+ first_stage_config:
+ target: ldm.models.autoencoder.AutoencoderKL
+ params:
+ embed_dim: 4
+ monitor: val/rec_loss
+ ddconfig:
+ double_z: true
+ z_channels: 4
+ resolution: 256
+ in_channels: 3
+ out_ch: 3
+ ch: 128
+ ch_mult:
+ - 1
+ - 2
+ - 4
+ - 4
+ num_res_blocks: 2
+ attn_resolutions: []
+ dropout: 0.0
+ lossconfig:
+ target: torch.nn.Identity
+
+ cond_stage_config:
+ target: modules.xlmr.BertSeriesModelWithTransformation
+ params:
+ name: "XLMR-Large" \ No newline at end of file
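[Editor's note: a minimal sketch of how inference configs like the one above are typically consumed, assuming the CompVis ldm package and OmegaConf are available; the checkpoint path and variable names are illustrative only, not part of this commit.]

    import torch
    from omegaconf import OmegaConf
    from ldm.util import instantiate_from_config

    # Build the LatentDiffusion model described by the YAML's model.target/params.
    config = OmegaConf.load("configs/alt-diffusion-inference.yaml")
    model = instantiate_from_config(config.model)
    state = torch.load("model.ckpt", map_location="cpu")  # hypothetical checkpoint file
    model.load_state_dict(state.get("state_dict", state), strict=False)
    model.eval()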
diff --git a/configs/instruct-pix2pix.yaml b/configs/instruct-pix2pix.yaml
new file mode 100644
index 00000000..437ddcef
--- /dev/null
+++ b/configs/instruct-pix2pix.yaml
@@ -0,0 +1,99 @@
+# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
+# See more details in LICENSE.
+
+model:
+ base_learning_rate: 1.0e-04
+ target: modules.models.diffusion.ddpm_edit.LatentDiffusion
+ params:
+ linear_start: 0.00085
+ linear_end: 0.0120
+ num_timesteps_cond: 1
+ log_every_t: 200
+ timesteps: 1000
+ first_stage_key: edited
+ cond_stage_key: edit
+ # image_size: 64
+ # image_size: 32
+ image_size: 16
+ channels: 4
+ cond_stage_trainable: false # Note: different from the one we trained before
+ conditioning_key: hybrid
+ monitor: val/loss_simple_ema
+ scale_factor: 0.18215
+ use_ema: true
+ load_ema: true
+
+ scheduler_config: # 10000 warmup steps
+ target: ldm.lr_scheduler.LambdaLinearScheduler
+ params:
+ warm_up_steps: [ 0 ]
+ cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+ f_start: [ 1.e-6 ]
+ f_max: [ 1. ]
+ f_min: [ 1. ]
+
+ unet_config:
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+ params:
+ image_size: 32 # unused
+ in_channels: 8
+ out_channels: 4
+ model_channels: 320
+ attention_resolutions: [ 4, 2, 1 ]
+ num_res_blocks: 2
+ channel_mult: [ 1, 2, 4, 4 ]
+ num_heads: 8
+ use_spatial_transformer: True
+ transformer_depth: 1
+ context_dim: 768
+ use_checkpoint: True
+ legacy: False
+
+ first_stage_config:
+ target: ldm.models.autoencoder.AutoencoderKL
+ params:
+ embed_dim: 4
+ monitor: val/rec_loss
+ ddconfig:
+ double_z: true
+ z_channels: 4
+ resolution: 256
+ in_channels: 3
+ out_ch: 3
+ ch: 128
+ ch_mult:
+ - 1
+ - 2
+ - 4
+ - 4
+ num_res_blocks: 2
+ attn_resolutions: []
+ dropout: 0.0
+ lossconfig:
+ target: torch.nn.Identity
+
+ cond_stage_config:
+ target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+data:
+ target: main.DataModuleFromConfig
+ params:
+ batch_size: 128
+ num_workers: 1
+ wrap: false
+ validation:
+ target: edit_dataset.EditDataset
+ params:
+ path: data/clip-filtered-dataset
+ cache_dir: data/
+ cache_name: data_10k
+ split: val
+ min_text_sim: 0.2
+ min_image_sim: 0.75
+ min_direction_sim: 0.2
+ max_samples_per_prompt: 1
+ min_resize_res: 512
+ max_resize_res: 512
+ crop_res: 512
+ output_as_edit: False
+ real_input: True
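[Editor's note: in_channels is 8 in the UNet above because hybrid conditioning feeds it the 4-channel noisy latent concatenated with the 4-channel encoded source image. A minimal sketch of that concatenation with illustrative tensors; not part of this commit.]

    import torch

    noisy_latent = torch.randn(1, 4, 64, 64)  # x_t in latent space
    image_cond = torch.randn(1, 4, 64, 64)    # VAE-encoded input image
    unet_input = torch.cat([noisy_latent, image_cond], dim=1)
    assert unet_input.shape[1] == 8  # matches unet_config.params.in_channels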
diff --git a/v1-inference.yaml b/configs/v1-inference.yaml
index d4effe56..d4effe56 100644
--- a/v1-inference.yaml
+++ b/configs/v1-inference.yaml
diff --git a/configs/v1-inpainting-inference.yaml b/configs/v1-inpainting-inference.yaml
new file mode 100644
index 00000000..f9eec37d
--- /dev/null
+++ b/configs/v1-inpainting-inference.yaml
@@ -0,0 +1,70 @@
+model:
+ base_learning_rate: 7.5e-05
+ target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
+ params:
+ linear_start: 0.00085
+ linear_end: 0.0120
+ num_timesteps_cond: 1
+ log_every_t: 200
+ timesteps: 1000
+ first_stage_key: "jpg"
+ cond_stage_key: "txt"
+ image_size: 64
+ channels: 4
+ cond_stage_trainable: false # Note: different from the one we trained before
+ conditioning_key: hybrid # important
+ monitor: val/loss_simple_ema
+ scale_factor: 0.18215
+ finetune_keys: null
+
+ scheduler_config: # 10000 warmup steps
+ target: ldm.lr_scheduler.LambdaLinearScheduler
+ params:
+ warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
+ cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+ f_start: [ 1.e-6 ]
+ f_max: [ 1. ]
+ f_min: [ 1. ]
+
+ unet_config:
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+ params:
+ image_size: 32 # unused
+ in_channels: 9 # 4 data + 4 downscaled image + 1 mask
+ out_channels: 4
+ model_channels: 320
+ attention_resolutions: [ 4, 2, 1 ]
+ num_res_blocks: 2
+ channel_mult: [ 1, 2, 4, 4 ]
+ num_heads: 8
+ use_spatial_transformer: True
+ transformer_depth: 1
+ context_dim: 768
+ use_checkpoint: True
+ legacy: False
+
+ first_stage_config:
+ target: ldm.models.autoencoder.AutoencoderKL
+ params:
+ embed_dim: 4
+ monitor: val/rec_loss
+ ddconfig:
+ double_z: true
+ z_channels: 4
+ resolution: 256
+ in_channels: 3
+ out_ch: 3
+ ch: 128
+ ch_mult:
+ - 1
+ - 2
+ - 4
+ - 4
+ num_res_blocks: 2
+ attn_resolutions: []
+ dropout: 0.0
+ lossconfig:
+ target: torch.nn.Identity
+
+ cond_stage_config:
+ target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py
index 0ad49f4e..bc11cc6e 100644
--- a/extensions-builtin/LDSR/ldsr_model_arch.py
+++ b/extensions-builtin/LDSR/ldsr_model_arch.py
@@ -1,7 +1,6 @@
import os
import gc
import time
-import warnings
import numpy as np
import torch
@@ -15,8 +14,6 @@ from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config, ismap
from modules import shared, sd_hijack
-warnings.filterwarnings("ignore", category=UserWarning)
-
cached_ldsr_model: torch.nn.Module = None
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
new file mode 100644
index 00000000..8f2e753e
--- /dev/null
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -0,0 +1,20 @@
+from modules import extra_networks
+import lora
+
+class ExtraNetworkLora(extra_networks.ExtraNetwork):
+ def __init__(self):
+ super().__init__('lora')
+
+ def activate(self, p, params_list):
+ names = []
+ multipliers = []
+ for params in params_list:
+ assert len(params.items) > 0
+
+ names.append(params.items[0])
+ multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
+
+ lora.load_loras(names, multipliers)
+
+ def deactivate(self, p):
+ pass
diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
new file mode 100644
index 00000000..cb8f1d36
--- /dev/null
+++ b/extensions-builtin/Lora/lora.py
@@ -0,0 +1,207 @@
+import glob
+import os
+import re
+import torch
+
+from modules import shared, devices, sd_models
+
+re_digits = re.compile(r"\d+")
+re_unet_down_blocks = re.compile(r"lora_unet_down_blocks_(\d+)_attentions_(\d+)_(.+)")
+re_unet_mid_blocks = re.compile(r"lora_unet_mid_block_attentions_(\d+)_(.+)")
+re_unet_up_blocks = re.compile(r"lora_unet_up_blocks_(\d+)_attentions_(\d+)_(.+)")
+re_text_block = re.compile(r"lora_te_text_model_encoder_layers_(\d+)_(.+)")
+
+
+def convert_diffusers_name_to_compvis(key):
+ def match(match_list, regex):
+ r = re.match(regex, key)
+ if not r:
+ return False
+
+ match_list.clear()
+ match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()])
+ return True
+
+ m = []
+
+ if match(m, re_unet_down_blocks):
+ return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[1]}_1_{m[2]}"
+
+ if match(m, re_unet_mid_blocks):
+ return f"diffusion_model_middle_block_1_{m[1]}"
+
+ if match(m, re_unet_up_blocks):
+ return f"diffusion_model_output_blocks_{m[0] * 3 + m[1]}_1_{m[2]}"
+
+ if match(m, re_text_block):
+ return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"
+
+ return key
+
+
+class LoraOnDisk:
+ def __init__(self, name, filename):
+ self.name = name
+ self.filename = filename
+
+
+class LoraModule:
+ def __init__(self, name):
+ self.name = name
+ self.multiplier = 1.0
+ self.modules = {}
+ self.mtime = None
+
+
+class LoraUpDownModule:
+ def __init__(self):
+ self.up = None
+ self.down = None
+ self.alpha = None
+
+
+def assign_lora_names_to_compvis_modules(sd_model):
+ lora_layer_mapping = {}
+
+ for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules():
+ lora_name = name.replace(".", "_")
+ lora_layer_mapping[lora_name] = module
+ module.lora_layer_name = lora_name
+
+ for name, module in shared.sd_model.model.named_modules():
+ lora_name = name.replace(".", "_")
+ lora_layer_mapping[lora_name] = module
+ module.lora_layer_name = lora_name
+
+ sd_model.lora_layer_mapping = lora_layer_mapping
+
+
+def load_lora(name, filename):
+ lora = LoraModule(name)
+ lora.mtime = os.path.getmtime(filename)
+
+ sd = sd_models.read_state_dict(filename)
+
+ keys_failed_to_match = []
+
+ for key_diffusers, weight in sd.items():
+ fullkey = convert_diffusers_name_to_compvis(key_diffusers)
+ key, lora_key = fullkey.split(".", 1)
+
+ sd_module = shared.sd_model.lora_layer_mapping.get(key, None)
+ if sd_module is None:
+ keys_failed_to_match.append(key_diffusers)
+ continue
+
+ lora_module = lora.modules.get(key, None)
+ if lora_module is None:
+ lora_module = LoraUpDownModule()
+ lora.modules[key] = lora_module
+
+ if lora_key == "alpha":
+ lora_module.alpha = weight.item()
+ continue
+
+ if type(sd_module) == torch.nn.Linear:
+ module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
+ elif type(sd_module) == torch.nn.Conv2d:
+ module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
+ else:
+ assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
+
+ with torch.no_grad():
+ module.weight.copy_(weight)
+
+ module.to(device=devices.device, dtype=devices.dtype)
+
+ if lora_key == "lora_up.weight":
+ lora_module.up = module
+ elif lora_key == "lora_down.weight":
+ lora_module.down = module
+ else:
+ assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha'
+
+ if len(keys_failed_to_match) > 0:
+ print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")
+
+ return lora
+
+
+def load_loras(names, multipliers=None):
+ already_loaded = {}
+
+ for lora in loaded_loras:
+ if lora.name in names:
+ already_loaded[lora.name] = lora
+
+ loaded_loras.clear()
+
+ loras_on_disk = [available_loras.get(name, None) for name in names]
+ if any([x is None for x in loras_on_disk]):
+ list_available_loras()
+
+ loras_on_disk = [available_loras.get(name, None) for name in names]
+
+ for i, name in enumerate(names):
+ lora = already_loaded.get(name, None)
+
+ lora_on_disk = loras_on_disk[i]
+ if lora_on_disk is not None:
+ if lora is None or os.path.getmtime(lora_on_disk.filename) > lora.mtime:
+ lora = load_lora(name, lora_on_disk.filename)
+
+ if lora is None:
+ print(f"Couldn't find Lora with name {name}")
+ continue
+
+ lora.multiplier = multipliers[i] if multipliers else 1.0
+ loaded_loras.append(lora)
+
+
+def lora_forward(module, input, res):
+ if len(loaded_loras) == 0:
+ return res
+
+ lora_layer_name = getattr(module, 'lora_layer_name', None)
+ for lora in loaded_loras:
+ module = lora.modules.get(lora_layer_name, None)
+ if module is not None:
+ if shared.opts.lora_apply_to_outputs and res.shape == input.shape:
+ res = res + module.up(module.down(res)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
+ else:
+ res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
+
+ return res
+
+
+def lora_Linear_forward(self, input):
+ return lora_forward(self, input, torch.nn.Linear_forward_before_lora(self, input))
+
+
+def lora_Conv2d_forward(self, input):
+ return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora(self, input))
+
+
+def list_available_loras():
+ available_loras.clear()
+
+ os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
+
+ candidates = \
+ glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.pt'), recursive=True) + \
+ glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.safetensors'), recursive=True) + \
+ glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.ckpt'), recursive=True)
+
+ for filename in sorted(candidates):
+ if os.path.isdir(filename):
+ continue
+
+ name = os.path.splitext(os.path.basename(filename))[0]
+
+ available_loras[name] = LoraOnDisk(name, filename)
+
+
+available_loras = {}
+loaded_loras = []
+
+list_available_loras()
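[Editor's note: lora_forward above adds a low-rank update to each patched layer's output; the effective weight is W + multiplier * (alpha / rank) * (up @ down). A minimal numeric check of that identity with made-up shapes; not part of this commit.]

    import torch

    W = torch.randn(16, 16)      # base Linear weight (out=16, in=16)
    down = torch.randn(4, 16)    # lora_down.weight (rank=4)
    up = torch.randn(16, 4)      # lora_up.weight
    alpha, multiplier = 4.0, 0.8
    scale = multiplier * (alpha / up.shape[1])  # same scaling as lora_forward

    x = torch.randn(1, 16)
    res = x @ W.t()                                    # original layer output
    patched = res + (x @ down.t() @ up.t()) * scale    # what lora_forward computes
    merged = x @ (W + (up @ down) * scale).t()         # equivalent merged weight
    assert torch.allclose(patched, merged, atol=1e-5)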
diff --git a/extensions-builtin/Lora/preload.py b/extensions-builtin/Lora/preload.py
new file mode 100644
index 00000000..863dc5c0
--- /dev/null
+++ b/extensions-builtin/Lora/preload.py
@@ -0,0 +1,6 @@
+import os
+from modules import paths
+
+
+def preload(parser):
+ parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
new file mode 100644
index 00000000..544b228d
--- /dev/null
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -0,0 +1,35 @@
+import torch
+
+import lora
+import extra_networks_lora
+import ui_extra_networks_lora
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared
+
+
+def unload():
+ torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
+ torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_lora
+
+
+def before_ui():
+ ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
+ extra_networks.register_extra_network(extra_networks_lora.ExtraNetworkLora())
+
+
+if not hasattr(torch.nn, 'Linear_forward_before_lora'):
+ torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward
+
+if not hasattr(torch.nn, 'Conv2d_forward_before_lora'):
+ torch.nn.Conv2d_forward_before_lora = torch.nn.Conv2d.forward
+
+torch.nn.Linear.forward = lora.lora_Linear_forward
+torch.nn.Conv2d.forward = lora.lora_Conv2d_forward
+
+script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
+script_callbacks.on_script_unloaded(unload)
+script_callbacks.on_before_ui(before_ui)
+
+
+shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
+ "lora_apply_to_outputs": shared.OptionInfo(False, "Apply Lora to outputs rather than inputs when possible (experimental)"),
+}))
diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
new file mode 100644
index 00000000..54a80d36
--- /dev/null
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -0,0 +1,36 @@
+import json
+import os
+import lora
+
+from modules import shared, ui_extra_networks
+
+
+class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
+ def __init__(self):
+ super().__init__('Lora')
+
+ def refresh(self):
+ lora.list_available_loras()
+
+ def list_items(self):
+ for name, lora_on_disk in lora.available_loras.items():
+ path, ext = os.path.splitext(lora_on_disk.filename)
+ previews = [path + ".png", path + ".preview.png"]
+
+ preview = None
+ for file in previews:
+ if os.path.isfile(file):
+ preview = "./file=" + file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(file))
+ break
+
+ yield {
+ "name": name,
+ "filename": path,
+ "preview": preview,
+ "prompt": json.dumps(f"<lora:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
+ "local_preview": path + ".png",
+ }
+
+ def allowed_directories_for_previews(self):
+ return [shared.cmd_opts.lora_dir]
+
diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py
index 9a74b253..e8783bca 100644
--- a/extensions-builtin/SwinIR/scripts/swinir_model.py
+++ b/extensions-builtin/SwinIR/scripts/swinir_model.py
@@ -8,7 +8,7 @@ from basicsr.utils.download_util import load_file_from_url
from tqdm import tqdm
from modules import modelloader, devices, script_callbacks, shared
-from modules.shared import cmd_opts, opts
+from modules.shared import cmd_opts, opts, state
from swinir_model_arch import SwinIR as net
from swinir_model_arch_v2 import Swin2SR as net2
from modules.upscaler import Upscaler, UpscalerData
@@ -145,7 +145,13 @@ def inference(img, model, tile, tile_overlap, window_size, scale):
with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar:
for h_idx in h_idx_list:
+ if state.interrupted or state.skipped:
+ break
+
for w_idx in w_idx_list:
+ if state.interrupted or state.skipped:
+ break
+
in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
out_patch = model(in_patch)
out_patch_mask = torch.ones_like(out_patch)
diff --git a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
index eccfb0f9..4a85c8eb 100644
--- a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
+++ b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
@@ -4,16 +4,10 @@
// Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs.
// If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong.
-function checkBrackets(evt) {
- textArea = evt.target;
- tabName = evt.target.parentElement.parentElement.id.split("_")[0];
- counterElt = document.querySelector('gradio-app').shadowRoot.querySelector('#' + tabName + '_token_counter');
-
- promptName = evt.target.parentElement.parentElement.id.includes('neg') ? ' negative' : '';
-
- errorStringParen = '(' + tabName + promptName + ' prompt) - Different number of opening and closing parentheses detected.\n';
- errorStringSquare = '[' + tabName + promptName + ' prompt] - Different number of opening and closing square brackets detected.\n';
- errorStringCurly = '{' + tabName + promptName + ' prompt} - Different number of opening and closing curly brackets detected.\n';
+function checkBrackets(evt, textArea, counterElt) {
+ errorStringParen = '(...) - Different number of opening and closing parentheses detected.\n';
+ errorStringSquare = '[...] - Different number of opening and closing square brackets detected.\n';
+ errorStringCurly = '{...} - Different number of opening and closing curly brackets detected.\n';
openBracketRegExp = /\(/g;
closeBracketRegExp = /\)/g;
@@ -86,22 +80,31 @@ function checkBrackets(evt) {
}
if(counterElt.title != '') {
- counterElt.style = 'color: #FF5555;';
+ counterElt.classList.add('error');
} else {
- counterElt.style = '';
+ counterElt.classList.remove('error');
}
}
+function setupBracketChecking(id_prompt, id_counter){
+ var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
+ var counter = gradioApp().getElementById(id_counter)
+ textarea.addEventListener("input", function(evt){
+ checkBrackets(evt, textarea, counter)
+ });
+}
+
var shadowRootLoaded = setInterval(function() {
- var shadowTextArea = document.querySelector('gradio-app').shadowRoot.querySelectorAll('#txt2img_prompt > label > textarea');
- if(shadowTextArea.length < 1) {
- return false;
- }
+ var shadowRoot = document.querySelector('gradio-app').shadowRoot;
+ if(! shadowRoot) return false;
+
+ var shadowTextArea = shadowRoot.querySelectorAll('#txt2img_prompt > label > textarea');
+ if(shadowTextArea.length < 1) return false;
- clearInterval(shadowRootLoaded);
+ clearInterval(shadowRootLoaded);
- document.querySelector('gradio-app').shadowRoot.querySelector('#txt2img_prompt').onkeyup = checkBrackets;
- document.querySelector('gradio-app').shadowRoot.querySelector('#txt2img_neg_prompt').onkeyup = checkBrackets;
- document.querySelector('gradio-app').shadowRoot.querySelector('#img2img_prompt').onkeyup = checkBrackets;
- document.querySelector('gradio-app').shadowRoot.querySelector('#img2img_neg_prompt').onkeyup = checkBrackets;
+ setupBracketChecking('txt2img_prompt', 'txt2img_token_counter')
+ setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter')
+    setupBracketChecking('img2img_prompt', 'img2img_token_counter')
+ setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter')
}, 1000);
diff --git a/html/card-no-preview.png b/html/card-no-preview.png
new file mode 100644
index 00000000..e2beb269
--- /dev/null
+++ b/html/card-no-preview.png
Binary files differ
diff --git a/html/extra-networks-card.html b/html/extra-networks-card.html
new file mode 100644
index 00000000..aa9fca87
--- /dev/null
+++ b/html/extra-networks-card.html
@@ -0,0 +1,11 @@
+<div class='card' {preview_html} onclick={card_clicked}>
+ <div class='actions'>
+ <div class='additional'>
+ <ul>
+ <a href="#" title="replace preview image with currently selected in gallery" onclick={save_card_preview}>replace preview</a>
+ </ul>
+ </div>
+ <span class='name'>{name}</span>
+ </div>
+</div>
+
diff --git a/html/extra-networks-no-cards.html b/html/extra-networks-no-cards.html
new file mode 100644
index 00000000..389358d6
--- /dev/null
+++ b/html/extra-networks-no-cards.html
@@ -0,0 +1,8 @@
+<div class='nocards'>
+<h1>Nothing here. Add some content to the following directories:</h1>
+
+<ul>
+{dirs}
+</ul>
+</div>
+
diff --git a/html/footer.html b/html/footer.html
new file mode 100644
index 00000000..bad87ff6
--- /dev/null
+++ b/html/footer.html
@@ -0,0 +1,13 @@
+<div>
+ <a href="/docs">API</a>
+  • 
+ <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">Github</a>
+  • 
+ <a href="https://gradio.app">Gradio</a>
+  • 
+ <a href="/" onclick="javascript:gradioApp().getElementById('settings_restart_gradio').click(); return false">Reload UI</a>
+</div>
+<br />
+<div class="versions">
+{versions}
+</div>
diff --git a/html/image-update.svg b/html/image-update.svg
new file mode 100644
index 00000000..3abf12df
--- /dev/null
+++ b/html/image-update.svg
@@ -0,0 +1,7 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
+ <filter id='shadow' color-interpolation-filters="sRGB">
+ <feDropShadow flood-color="black" dx="0" dy="0" flood-opacity="0.9" stdDeviation="0.5"/>
+ <feDropShadow flood-color="black" dx="0" dy="0" flood-opacity="0.9" stdDeviation="0.5"/>
+ </filter>
+ <path style="filter:url(#shadow);" fill="#FFFFFF" d="M13.18 19C13.35 19.72 13.64 20.39 14.03 21H5C3.9 21 3 20.11 3 19V5C3 3.9 3.9 3 5 3H19C20.11 3 21 3.9 21 5V11.18C20.5 11.07 20 11 19.5 11C19.33 11 19.17 11 19 11.03V5H5V19H13.18M11.21 15.83L9.25 13.47L6.5 17H13.03C13.14 15.54 13.73 14.22 14.64 13.19L13.96 12.29L11.21 15.83M19 13.5V12L16.75 14.25L19 16.5V15C20.38 15 21.5 16.12 21.5 17.5C21.5 17.9 21.41 18.28 21.24 18.62L22.33 19.71C22.75 19.08 23 18.32 23 17.5C23 15.29 21.21 13.5 19 13.5M19 20C17.62 20 16.5 18.88 16.5 17.5C16.5 17.1 16.59 16.72 16.76 16.38L15.67 15.29C15.25 15.92 15 16.68 15 17.5C15 19.71 16.79 21.5 19 21.5V23L21.25 20.75L19 18.5V20Z" />
+</svg>
diff --git a/html/licenses.html b/html/licenses.html
new file mode 100644
index 00000000..570630eb
--- /dev/null
+++ b/html/licenses.html
@@ -0,0 +1,419 @@
+<style>
+ #licenses h2 {font-size: 1.2em; font-weight: bold; margin-bottom: 0.2em;}
+ #licenses small {font-size: 0.95em; opacity: 0.85;}
+ #licenses pre { margin: 1em 0 2em 0;}
+</style>
+
+<h2><a href="https://github.com/sczhou/CodeFormer/blob/master/LICENSE">CodeFormer</a></h2>
+<small>Parts of CodeFormer code had to be copied to be compatible with GFPGAN.</small>
+<pre>
+S-Lab License 1.0
+
+Copyright 2022 S-Lab
+
+Redistribution and use for non-commercial purpose in source and
+binary forms, with or without modification, are permitted provided
+that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+In the event that redistribution and/or use for commercial purpose in
+source or binary forms, with or without modification is required,
+please contact the contributor(s) of the work.
+</pre>
+
+
+<h2><a href="https://github.com/victorca25/iNNfer/blob/main/LICENSE">ESRGAN</a></h2>
+<small>Code for architecture and reading models copied.</small>
+<pre>
+MIT License
+
+Copyright (c) 2021 victorca25
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+</pre>
+
+<h2><a href="https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE">Real-ESRGAN</a></h2>
+<small>Some code is copied to support ESRGAN models.</small>
+<pre>
+BSD 3-Clause License
+
+Copyright (c) 2021, Xintao Wang
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+</pre>
+
+<h2><a href="https://github.com/invoke-ai/InvokeAI/blob/main/LICENSE">InvokeAI</a></h2>
+<small>Some code for compatibility with OSX is taken from lstein's repository.</small>
+<pre>
+MIT License
+
+Copyright (c) 2022 InvokeAI Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+</pre>
+
+<h2><a href="https://github.com/Hafiidz/latent-diffusion/blob/main/LICENSE">LDSR</a></h2>
+<small>Code added by contributors, most likely copied from this repository.</small>
+<pre>
+MIT License
+
+Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+</pre>
+
+<h2><a href="https://github.com/pharmapsychotic/clip-interrogator/blob/main/LICENSE">CLIP Interrogator</a></h2>
+<small>Some small amounts of code borrowed and reworked.</small>
+<pre>
+MIT License
+
+Copyright (c) 2022 pharmapsychotic
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+</pre>
+
+<h2><a href="https://github.com/JingyunLiang/SwinIR/blob/main/LICENSE">SwinIR</a></h2>
+<small>Code added by contributors, most likely copied from this repository.</small>
+
+<pre>
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2021] [SwinIR Authors]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+</pre>
+
+<h2><a href="https://github.com/AminRezaei0x443/memory-efficient-attention/blob/main/LICENSE">Memory Efficient Attention</a></h2>
+<small>The sub-quadratic cross attention optimization uses modified code from the Memory Efficient Attention package that Alex Birch optimized for 3D tensors. This license is updated to reflect that.</small>
+<pre>
+MIT License
+
+Copyright (c) 2023 Alex Birch
+Copyright (c) 2023 Amin Rezaei
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+</pre>
+
diff --git a/javascript/aspectRatioOverlay.js b/javascript/aspectRatioOverlay.js
index 66f26a22..0f164b82 100644
--- a/javascript/aspectRatioOverlay.js
+++ b/javascript/aspectRatioOverlay.js
@@ -21,11 +21,16 @@ function dimensionChange(e, is_width, is_height){
var targetElement = null;
var tabIndex = get_tab_index('mode_img2img')
- if(tabIndex == 0){
+ if(tabIndex == 0){ // img2img
targetElement = gradioApp().querySelector('div[data-testid=image] img');
- } else if(tabIndex == 1){
+ } else if(tabIndex == 1){ //Sketch
+ targetElement = gradioApp().querySelector('#img2img_sketch div[data-testid=image] img');
+ } else if(tabIndex == 2){ // Inpaint
targetElement = gradioApp().querySelector('#img2maskimg div[data-testid=image] img');
+ } else if(tabIndex == 3){ // Inpaint sketch
+ targetElement = gradioApp().querySelector('#inpaint_sketch div[data-testid=image] img');
}
+
if(targetElement){
diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js
index 3ed1cb3c..fe008924 100644
--- a/javascript/dragdrop.js
+++ b/javascript/dragdrop.js
@@ -9,11 +9,19 @@ function dropReplaceImage( imgWrap, files ) {
return;
}
+ const tmpFile = files[0];
+
imgWrap.querySelector('.modify-upload button + button, .touch-none + div button + button')?.click();
const callback = () => {
const fileInput = imgWrap.querySelector('input[type="file"]');
if ( fileInput ) {
- fileInput.files = files;
+ if ( files.length === 0 ) {
+ files = new DataTransfer();
+ files.items.add(tmpFile);
+ fileInput.files = files.files;
+ } else {
+ fileInput.files = files;
+ }
fileInput.dispatchEvent(new Event('change'));
}
};
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js
index b947cbec..619bb1fa 100644
--- a/javascript/edit-attention.js
+++ b/javascript/edit-attention.js
@@ -1,75 +1,96 @@
-addEventListener('keydown', (event) => {
+function keyupEditAttention(event){
let target = event.originalTarget || event.composedPath()[0];
- if (!target.matches("#toprow textarea.gr-text-input[placeholder]")) return;
+ if (!target.matches("[id*='_toprow'] textarea.gr-text-input[placeholder]")) return;
if (! (event.metaKey || event.ctrlKey)) return;
-
- let plus = "ArrowUp"
- let minus = "ArrowDown"
- if (event.key != plus && event.key != minus) return;
+ let isPlus = event.key == "ArrowUp"
+ let isMinus = event.key == "ArrowDown"
+ if (!isPlus && !isMinus) return;
let selectionStart = target.selectionStart;
let selectionEnd = target.selectionEnd;
- // If the user hasn't selected anything, let's select their current parenthesis block
- if (selectionStart === selectionEnd) {
+ let text = target.value;
+
+ function selectCurrentParenthesisBlock(OPEN, CLOSE){
+ if (selectionStart !== selectionEnd) return false;
+
// Find opening parenthesis around current cursor
- const before = target.value.substring(0, selectionStart);
- let beforeParen = before.lastIndexOf("(");
- if (beforeParen == -1) return;
- let beforeParenClose = before.lastIndexOf(")");
+ const before = text.substring(0, selectionStart);
+ let beforeParen = before.lastIndexOf(OPEN);
+ if (beforeParen == -1) return false;
+ let beforeParenClose = before.lastIndexOf(CLOSE);
while (beforeParenClose !== -1 && beforeParenClose > beforeParen) {
- beforeParen = before.lastIndexOf("(", beforeParen - 1);
- beforeParenClose = before.lastIndexOf(")", beforeParenClose - 1);
+ beforeParen = before.lastIndexOf(OPEN, beforeParen - 1);
+ beforeParenClose = before.lastIndexOf(CLOSE, beforeParenClose - 1);
}
// Find closing parenthesis around current cursor
- const after = target.value.substring(selectionStart);
- let afterParen = after.indexOf(")");
- if (afterParen == -1) return;
- let afterParenOpen = after.indexOf("(");
+ const after = text.substring(selectionStart);
+ let afterParen = after.indexOf(CLOSE);
+ if (afterParen == -1) return false;
+ let afterParenOpen = after.indexOf(OPEN);
while (afterParenOpen !== -1 && afterParen > afterParenOpen) {
- afterParen = after.indexOf(")", afterParen + 1);
- afterParenOpen = after.indexOf("(", afterParenOpen + 1);
+ afterParen = after.indexOf(CLOSE, afterParen + 1);
+ afterParenOpen = after.indexOf(OPEN, afterParenOpen + 1);
}
- if (beforeParen === -1 || afterParen === -1) return;
+ if (beforeParen === -1 || afterParen === -1) return false;
// Set the selection to the text between the parenthesis
- const parenContent = target.value.substring(beforeParen + 1, selectionStart + afterParen);
+ const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen);
const lastColon = parenContent.lastIndexOf(":");
selectionStart = beforeParen + 1;
selectionEnd = selectionStart + lastColon;
target.setSelectionRange(selectionStart, selectionEnd);
- }
+ return true;
+ }
+
+ // If the user hasn't selected anything, let's select their current parenthesis block
+ if(! selectCurrentParenthesisBlock('<', '>')){
+ selectCurrentParenthesisBlock('(', ')')
+ }
event.preventDefault();
- if (selectionStart == 0 || target.value[selectionStart - 1] != "(") {
- target.value = target.value.slice(0, selectionStart) +
- "(" + target.value.slice(selectionStart, selectionEnd) + ":1.0)" +
- target.value.slice(selectionEnd);
-
- target.focus();
- target.selectionStart = selectionStart + 1;
- target.selectionEnd = selectionEnd + 1;
-
- } else {
- end = target.value.slice(selectionEnd + 1).indexOf(")") + 1;
- weight = parseFloat(target.value.slice(selectionEnd + 1, selectionEnd + 1 + end));
- if (isNaN(weight)) return;
- if (event.key == minus) weight -= 0.1;
- if (event.key == plus) weight += 0.1;
-
- weight = parseFloat(weight.toPrecision(12));
-
- target.value = target.value.slice(0, selectionEnd + 1) +
- weight +
- target.value.slice(selectionEnd + 1 + end - 1);
-
- target.focus();
- target.selectionStart = selectionStart;
- target.selectionEnd = selectionEnd;
- }
- // Since we've modified a Gradio Textbox component manually, we need to simulate an `input` DOM event to ensure its
- // internal Svelte data binding remains in sync.
- target.dispatchEvent(new Event("input", { bubbles: true }));
-});
+ closeCharacter = ')'
+ delta = opts.keyedit_precision_attention
+
+ if (selectionStart > 0 && text[selectionStart - 1] == '<'){
+ closeCharacter = '>'
+ delta = opts.keyedit_precision_extra
+ } else if (selectionStart == 0 || text[selectionStart - 1] != "(") {
+
+ // do not include spaces at the end
+ while(selectionEnd > selectionStart && text[selectionEnd-1] == ' '){
+ selectionEnd -= 1;
+ }
+ if(selectionStart == selectionEnd){
+ return
+ }
+
+ text = text.slice(0, selectionStart) + "(" + text.slice(selectionStart, selectionEnd) + ":1.0)" + text.slice(selectionEnd);
+
+ selectionStart += 1;
+ selectionEnd += 1;
+ }
+
+ end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1;
+ weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + 1 + end));
+ if (isNaN(weight)) return;
+
+ weight += isPlus ? delta : -delta;
+ weight = parseFloat(weight.toPrecision(12));
+ if(String(weight).length == 1) weight += ".0"
+
+ text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + 1 + end - 1);
+
+ target.focus();
+ target.value = text;
+ target.selectionStart = selectionStart;
+ target.selectionEnd = selectionEnd;
+
+ updateInput(target)
+}
+
+addEventListener('keydown', (event) => {
+ keyupEditAttention(event);
+}); \ No newline at end of file
diff --git a/javascript/extensions.js b/javascript/extensions.js
index 59179ca6..ac6e35b9 100644
--- a/javascript/extensions.js
+++ b/javascript/extensions.js
@@ -29,7 +29,7 @@ function install_extension_from_index(button, url){
textarea = gradioApp().querySelector('#extension_to_install textarea')
textarea.value = url
- textarea.dispatchEvent(new Event("input", { bubbles: true }))
+ updateInput(textarea)
gradioApp().querySelector('#install_extension_button').click()
}
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
new file mode 100644
index 00000000..c5a9adb3
--- /dev/null
+++ b/javascript/extraNetworks.js
@@ -0,0 +1,69 @@
+
+function setupExtraNetworksForTab(tabname){
+ gradioApp().querySelector('#'+tabname+'_extra_tabs').classList.add('extra-networks')
+
+ var tabs = gradioApp().querySelector('#'+tabname+'_extra_tabs > div')
+ var search = gradioApp().querySelector('#'+tabname+'_extra_search textarea')
+ var refresh = gradioApp().getElementById(tabname+'_extra_refresh')
+ var close = gradioApp().getElementById(tabname+'_extra_close')
+
+ search.classList.add('search')
+ tabs.appendChild(search)
+ tabs.appendChild(refresh)
+ tabs.appendChild(close)
+
+ search.addEventListener("input", function(evt){
+ searchTerm = search.value.toLowerCase()
+
+ gradioApp().querySelectorAll('#'+tabname+'_extra_tabs div.card').forEach(function(elem){
+ text = elem.querySelector('.name').textContent.toLowerCase()
+ elem.style.display = text.indexOf(searchTerm) == -1 ? "none" : ""
+ })
+ });
+}
+
+var activePromptTextarea = {};
+
+function setupExtraNetworks(){
+ setupExtraNetworksForTab('txt2img')
+ setupExtraNetworksForTab('img2img')
+
+ function registerPrompt(tabname, id){
+ var textarea = gradioApp().querySelector("#" + id + " > label > textarea");
+
+ if (! activePromptTextarea[tabname]){
+ activePromptTextarea[tabname] = textarea
+ }
+
+ textarea.addEventListener("focus", function(){
+ activePromptTextarea[tabname] = textarea;
+ });
+ }
+
+ registerPrompt('txt2img', 'txt2img_prompt')
+ registerPrompt('txt2img', 'txt2img_neg_prompt')
+ registerPrompt('img2img', 'img2img_prompt')
+ registerPrompt('img2img', 'img2img_neg_prompt')
+}
+
+onUiLoaded(setupExtraNetworks)
+
+function cardClicked(tabname, textToAdd, allowNegativePrompt){
+ var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea")
+
+ textarea.value = textarea.value + " " + textToAdd
+ updateInput(textarea)
+}
+
+function saveCardPreview(event, tabname, filename){
+ var textarea = gradioApp().querySelector("#" + tabname + '_preview_filename > label > textarea')
+ var button = gradioApp().getElementById(tabname + '_save_preview')
+
+ textarea.value = filename
+ updateInput(textarea)
+
+ button.click()
+
+ event.stopPropagation()
+ event.preventDefault()
+}
diff --git a/javascript/hints.js b/javascript/hints.js
index 63e17e05..7b60b25e 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -4,7 +4,7 @@ titles = {
"Sampling steps": "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results",
"Sampling method": "Which algorithm to use to produce the image",
"GFPGAN": "Restore low quality faces using GFPGAN neural network",
- "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help",
+ "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps higher than 30-40 does not help",
"DDIM": "Denoising Diffusion Implicit Models - best at inpainting",
"DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution",
@@ -14,12 +14,14 @@ titles = {
"Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result",
"\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time",
"\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed",
- "\u{1f3a8}": "Add a random artist to the prompt.",
"\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
"\u{1f4c2}": "Open images output directory",
"\u{1f4be}": "Save style",
"\U0001F5D1": "Clear prompt",
"\u{1f4cb}": "Apply selected styles to current prompt",
+ "\u{1f4d2}": "Paste available values into the field",
+ "\u{1f3b4}": "Show extra networks",
+
"Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt",
"SD upscale": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back",
@@ -48,7 +50,7 @@ titles = {
"None": "Do not do anything special",
"Prompt matrix": "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)",
- "X/Y plot": "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows",
+ "X/Y/Z plot": "Create grid(s) where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows",
"Custom code": "Run Python code. Advanced user only. Must run program with --allow-code for this to work",
"Prompt S/R": "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others",
@@ -74,16 +76,13 @@ titles = {
"Style 1": "Style to apply; styles have components for both positive and negative prompts and apply to both",
"Style 2": "Style to apply; styles have components for both positive and negative prompts and apply to both",
"Apply style": "Insert selected styles into prompt fields",
- "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.",
+ "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style uses that as a placeholder for your prompt when you use the style in the future.",
"Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.",
"Inpainting conditioning mask strength": "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.",
"vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).",
- "Highres. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition",
- "Scale latent": "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.",
-
"Eta noise seed delta": "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.",
"Do not add watermark to images": "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.",
@@ -94,13 +93,24 @@ titles = {
"Weighted sum": "Result = A * (1 - M) + B * M",
"Add difference": "Result = A + (B - C) * M",
+ "No interpolation": "Result = A",
- "Learning rate": "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.",
+ "Initialization text": "If the number of tokens is more than the number of vectors, some may be skipped.\nLeave the textbox empty to start with zeroed out vectors",
+ "Learning rate": "How fast should training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.",
"Clip skip": "Early stopping parameter for CLIP model; 1 is stop at last layer as usual, 2 is stop at penultimate layer, etc.",
- "Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resoluton and lower quality.",
- "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resoluton and extremely low quality."
+ "Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resolution and lower quality.",
+ "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resolution and extremely low quality.",
+
+ "Hires. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition",
+ "Hires steps": "Number of sampling steps for upscaled picture. If 0, uses same as for original.",
+ "Upscale by": "Adjusts the size of the image by multiplying the original width and height by the selected value. Ignored if either Resize width to or Resize height to are non-zero.",
+ "Resize width to": "Resizes image to this width. If 0, width is inferred from either of two nearby sliders.",
+ "Resize height to": "Resizes image to this height. If 0, height is inferred from either of two nearby sliders.",
+ "Multiplier for extra networks": "When adding extra network such as Hypernetwork or Lora to prompt, use this multiplier for it.",
+ "Discard weights with matching name": "Regular expression; if weights's name matches it, the weights is not written to the resulting checkpoint. Use ^model_ema to discard EMA weights.",
+ "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order lsited."
}
diff --git a/javascript/hires_fix.js b/javascript/hires_fix.js
new file mode 100644
index 00000000..0629475f
--- /dev/null
+++ b/javascript/hires_fix.js
@@ -0,0 +1,22 @@
+
+function setInactive(elem, inactive){
+ if(inactive){
+ elem.classList.add('inactive')
+ } else{
+ elem.classList.remove('inactive')
+ }
+}
+
+function onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y){
+ hrUpscaleBy = gradioApp().getElementById('txt2img_hr_scale')
+ hrResizeX = gradioApp().getElementById('txt2img_hr_resize_x')
+ hrResizeY = gradioApp().getElementById('txt2img_hr_resize_y')
+
+ gradioApp().getElementById('txt2img_hires_fix_row2').style.display = opts.use_old_hires_fix_width_height ? "none" : ""
+
+ setInactive(hrUpscaleBy, opts.use_old_hires_fix_width_height || hr_resize_x > 0 || hr_resize_y > 0)
+ setInactive(hrResizeX, opts.use_old_hires_fix_width_height || hr_resize_x == 0)
+ setInactive(hrResizeY, opts.use_old_hires_fix_width_height || hr_resize_y == 0)
+
+ return [enable, width, height, hr_scale, hr_resize_x, hr_resize_y]
+}
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 0488bfb4..3f7b1289 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -148,9 +148,18 @@ function showGalleryImage() {
if(e && e.parentElement.tagName == 'DIV'){
e.style.cursor='pointer'
e.style.userSelect='none'
- e.addEventListener('click', function (evt) {
- if(!opts.js_modal_lightbox) return;
+
+ var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1
+
+ // On Firefox, listening for click first switches to the next image and then shows the lightbox.
+ // If you know how to fix this without switching to the mousedown event, please do.
+ // For other browsers the event stays click, to make it possible to drag the picture.
+ var event = isFirefox ? 'mousedown' : 'click'
+
+ e.addEventListener(event, function (evt) {
+ if(!opts.js_modal_lightbox || evt.button != 0) return;
modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
+ evt.preventDefault()
showModal(evt)
}, true);
}
diff --git a/javascript/localization.js b/javascript/localization.js
index f92d2d24..1a5a1dbb 100644
--- a/javascript/localization.js
+++ b/javascript/localization.js
@@ -10,10 +10,8 @@ ignore_ids_for_localization={
modelmerger_tertiary_model_name: 'OPTION',
train_embedding: 'OPTION',
train_hypernetwork: 'OPTION',
- txt2img_style_index: 'OPTION',
- txt2img_style2_index: 'OPTION',
- img2img_style_index: 'OPTION',
- img2img_style2_index: 'OPTION',
+ txt2img_styles: 'OPTION',
+ img2img_styles: 'OPTION',
setting_random_artist_categories: 'SPAN',
setting_face_restoration_model: 'SPAN',
setting_realesrgan_enabled_models: 'SPAN',
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index d6323ed9..ff6d757b 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -1,82 +1,25 @@
// code related to showing and updating progressbar shown as the image is being made
-global_progressbars = {}
-galleries = {}
-galleryObservers = {}
-
-// this tracks launches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running
-timeoutIds = {}
-
-function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip, id_interrupt, id_preview, id_gallery){
- // gradio 3.8's enlightened approach allows them to create two nested div elements inside each other with same id
- // every time you use gr.HTML(elem_id='xxx'), so we handle this here
- var progressbar = gradioApp().querySelector("#"+id_progressbar+" #"+id_progressbar)
- var progressbarParent
- if(progressbar){
- progressbarParent = gradioApp().querySelector("#"+id_progressbar)
- } else{
- progressbar = gradioApp().getElementById(id_progressbar)
- progressbarParent = null
- }
-
- var skip = id_skip ? gradioApp().getElementById(id_skip) : null
- var interrupt = gradioApp().getElementById(id_interrupt)
-
- if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){
- if(progressbar.innerText){
- let newtitle = '[' + progressbar.innerText.trim() + '] Stable Diffusion';
- if(document.title != newtitle){
- document.title = newtitle;
- }
- }else{
- let newtitle = 'Stable Diffusion'
- if(document.title != newtitle){
- document.title = newtitle;
- }
- }
- }
-
- if(progressbar!= null && progressbar != global_progressbars[id_progressbar]){
- global_progressbars[id_progressbar] = progressbar
- var mutationObserver = new MutationObserver(function(m){
- if(timeoutIds[id_part]) return;
- preview = gradioApp().getElementById(id_preview)
- gallery = gradioApp().getElementById(id_gallery)
+galleries = {}
+storedGallerySelections = {}
+galleryObservers = {}
- if(preview != null && gallery != null){
- preview.style.width = gallery.clientWidth + "px"
- preview.style.height = gallery.clientHeight + "px"
- if(progressbarParent) progressbar.style.width = progressbarParent.clientWidth + "px"
+function rememberGallerySelection(id_gallery){
+ storedGallerySelections[id_gallery] = getGallerySelectedIndex(id_gallery)
+}
- //only watch gallery if there is a generation process going on
- check_gallery(id_gallery);
+function getGallerySelectedIndex(id_gallery){
+ let galleryButtons = gradioApp().querySelectorAll('#'+id_gallery+' .gallery-item')
+ let galleryBtnSelected = gradioApp().querySelector('#'+id_gallery+' .gallery-item.\\!ring-2')
- var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0;
- if(progressDiv){
- timeoutIds[id_part] = window.setTimeout(function() {
- timeoutIds[id_part] = null
- requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt)
- }, 500)
- } else{
- if (skip) {
- skip.style.display = "none"
- }
- interrupt.style.display = "none"
+ let currentlySelectedIndex = -1
+ galleryButtons.forEach(function(v, i){ if(v==galleryBtnSelected) { currentlySelectedIndex = i } })
- //disconnect observer once generation finished, so user can close selected image if they want
- if (galleryObservers[id_gallery]) {
- galleryObservers[id_gallery].disconnect();
- galleries[id_gallery] = null;
- }
- }
- }
-
- });
- mutationObserver.observe( progressbar, { childList:true, subtree:true })
- }
+ return currentlySelectedIndex
}
+// this is a workaround for https://github.com/gradio-app/gradio/issues/2984
function check_gallery(id_gallery){
let gallery = gradioApp().getElementById(id_gallery)
// if gallery has no change, no need to setting up observer again.
@@ -85,10 +28,16 @@ function check_gallery(id_gallery){
if(galleryObservers[id_gallery]){
galleryObservers[id_gallery].disconnect();
}
- let prevSelectedIndex = selected_gallery_index();
+
+ storedGallerySelections[id_gallery] = -1
+
galleryObservers[id_gallery] = new MutationObserver(function (){
let galleryButtons = gradioApp().querySelectorAll('#'+id_gallery+' .gallery-item')
let galleryBtnSelected = gradioApp().querySelector('#'+id_gallery+' .gallery-item.\\!ring-2')
+ let currentlySelectedIndex = getGallerySelectedIndex(id_gallery)
+ prevSelectedIndex = storedGallerySelections[id_gallery]
+ storedGallerySelections[id_gallery] = -1
+
if (prevSelectedIndex !== -1 && galleryButtons.length>prevSelectedIndex && !galleryBtnSelected) {
// automatically re-open previously selected index (if exists)
activeElement = gradioApp().activeElement;
@@ -120,30 +69,175 @@ function check_gallery(id_gallery){
}
onUiUpdate(function(){
- check_progressbar('txt2img', 'txt2img_progressbar', 'txt2img_progress_span', 'txt2img_skip', 'txt2img_interrupt', 'txt2img_preview', 'txt2img_gallery')
- check_progressbar('img2img', 'img2img_progressbar', 'img2img_progress_span', 'img2img_skip', 'img2img_interrupt', 'img2img_preview', 'img2img_gallery')
- check_progressbar('ti', 'ti_progressbar', 'ti_progress_span', '', 'ti_interrupt', 'ti_preview', 'ti_gallery')
+ check_gallery('txt2img_gallery')
+ check_gallery('img2img_gallery')
})
-function requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt){
- btn = gradioApp().getElementById(id_part+"_check_progress");
- if(btn==null) return;
-
- btn.click();
- var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0;
- var skip = id_skip ? gradioApp().getElementById(id_skip) : null
- var interrupt = gradioApp().getElementById(id_interrupt)
- if(progressDiv && interrupt){
- if (skip) {
- skip.style.display = "block"
+function request(url, data, handler, errorHandler){
+ var xhr = new XMLHttpRequest();
+ var url = url;
+ xhr.open("POST", url, true);
+ xhr.setRequestHeader("Content-Type", "application/json");
+ xhr.onreadystatechange = function () {
+ if (xhr.readyState === 4) {
+ if (xhr.status === 200) {
+ try {
+ var js = JSON.parse(xhr.responseText);
+ handler(js)
+ } catch (error) {
+ console.error(error);
+ errorHandler()
+ }
+ } else{
+ errorHandler()
+ }
}
- interrupt.style.display = "block"
+ };
+ var js = JSON.stringify(data);
+ xhr.send(js);
+}
+
+function pad2(x){
+ return x<10 ? '0'+x : x
+}
+
+function formatTime(secs){
+ if(secs > 3600){
+ return pad2(Math.floor(secs/60/60)) + ":" + pad2(Math.floor(secs/60)%60) + ":" + pad2(Math.floor(secs)%60)
+ } else if(secs > 60){
+ return pad2(Math.floor(secs/60)) + ":" + pad2(Math.floor(secs)%60)
+ } else{
+ return Math.floor(secs) + "s"
+ }
+}
+
+function setTitle(progress){
+ var title = 'Stable Diffusion'
+
+ if(opts.show_progress_in_title && progress){
+ title = '[' + progress.trim() + '] ' + title;
}
+
+ if(document.title != title){
+ document.title = title;
+ }
+}
+
+
+function randomId(){
+ return "task(" + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7)+")"
}
-function requestProgress(id_part){
- btn = gradioApp().getElementById(id_part+"_check_progress_initial");
- if(btn==null) return;
+// starts sending progress requests to the "/internal/progress" URI, creating a progressbar above the progressbarContainer element and
+// a preview inside the gallery element. Cleans up everything it created when the task is over and calls atEnd.
+// calls onProgress every time there is a progress update
+function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgress){
+ var dateStart = new Date()
+ var wasEverActive = false
+ var parentProgressbar = progressbarContainer.parentNode
+ var parentGallery = gallery ? gallery.parentNode : null
+
+ var divProgress = document.createElement('div')
+ divProgress.className='progressDiv'
+ divProgress.style.display = opts.show_progressbar ? "" : "none"
+ var divInner = document.createElement('div')
+ divInner.className='progress'
+
+ divProgress.appendChild(divInner)
+ parentProgressbar.insertBefore(divProgress, progressbarContainer)
+
+ if(parentGallery){
+ var livePreview = document.createElement('div')
+ livePreview.className='livePreview'
+ parentGallery.insertBefore(livePreview, gallery)
+ }
+
+ var removeProgressBar = function(){
+ setTitle("")
+ parentProgressbar.removeChild(divProgress)
+ if(parentGallery) parentGallery.removeChild(livePreview)
+ atEnd()
+ }
+
+ var fun = function(id_task, id_live_preview){
+ request("./internal/progress", {"id_task": id_task, "id_live_preview": id_live_preview}, function(res){
+ if(res.completed){
+ removeProgressBar()
+ return
+ }
+
+ var rect = progressbarContainer.getBoundingClientRect()
+
+ if(rect.width){
+ divProgress.style.width = rect.width + "px";
+ }
+
+ progressText = ""
+
+ divInner.style.width = ((res.progress || 0) * 100.0) + '%'
+ divInner.style.background = res.progress ? "" : "transparent"
+
+ if(res.progress > 0){
+ progressText = ((res.progress || 0) * 100.0).toFixed(0) + '%'
+ }
+
+ if(res.eta){
+ progressText += " ETA: " + formatTime(res.eta)
+ }
+
+
+ setTitle(progressText)
+
+ if(res.textinfo && res.textinfo.indexOf("\n") == -1){
+ progressText = res.textinfo + " " + progressText
+ }
+
+ divInner.textContent = progressText
+
+ var elapsedFromStart = (new Date() - dateStart) / 1000
+
+ if(res.active) wasEverActive = true;
+
+ if(! res.active && wasEverActive){
+ removeProgressBar()
+ return
+ }
+
+ if(elapsedFromStart > 5 && !res.queued && !res.active){
+ removeProgressBar()
+ return
+ }
+
+
+ if(res.live_preview && gallery){
+ var rect = gallery.getBoundingClientRect()
+ if(rect.width){
+ livePreview.style.width = rect.width + "px"
+ livePreview.style.height = rect.height + "px"
+ }
+
+ var img = new Image();
+ img.onload = function() {
+ livePreview.appendChild(img)
+ if(livePreview.childElementCount > 2){
+ livePreview.removeChild(livePreview.firstElementChild)
+ }
+ }
+ img.src = res.live_preview;
+ }
+
+
+ if(onProgress){
+ onProgress(res)
+ }
+
+ setTimeout(() => {
+ fun(id_task, res.id_live_preview);
+ }, opts.live_preview_refresh_period || 500)
+ }, function(){
+ removeProgressBar()
+ })
+ }
- btn.click();
+ fun(id_task, 0)
}
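
For reference, the requestProgress helper added above polls the ./internal/progress endpoint with a JSON body of {"id_task": ..., "id_live_preview": ...} and reads fields such as completed, progress, eta, textinfo, active, queued, live_preview and id_live_preview from the response. Below is a rough Python sketch of the same polling loop; it is not part of this patch and assumes a local instance at http://127.0.0.1:7860 plus the requests package.

import time
import requests

def poll_progress(id_task, base_url="http://127.0.0.1:7860"):
    # mirrors the loop in javascript/progressbar.js: keep asking /internal/progress until the task reports completion
    id_live_preview = 0
    while True:
        res = requests.post(base_url + "/internal/progress",
                            json={"id_task": id_task, "id_live_preview": id_live_preview}).json()
        if res.get("completed"):
            break
        pct = (res.get("progress") or 0) * 100
        eta = res.get("eta")
        print(f"{pct:.0f}%" + (f" ETA: {eta:.0f}s" if eta else ""))
        # res.get("live_preview") holds the preview image source that the JS assigns to img.src
        id_live_preview = res.get("id_live_preview", id_live_preview)
        time.sleep(0.5)  # the JS uses opts.live_preview_refresh_period, defaulting to 500 ms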
diff --git a/javascript/textualInversion.js b/javascript/textualInversion.js
index 8061be08..0354b860 100644
--- a/javascript/textualInversion.js
+++ b/javascript/textualInversion.js
@@ -1,8 +1,17 @@
+
function start_training_textual_inversion(){
- requestProgress('ti')
gradioApp().querySelector('#ti_error').innerHTML=''
- return args_to_array(arguments)
+ var id = randomId()
+ requestProgress(id, gradioApp().getElementById('ti_output'), gradioApp().getElementById('ti_gallery'), function(){}, function(progress){
+ gradioApp().getElementById('ti_progress').innerHTML = progress.textinfo
+ })
+
+ var res = args_to_array(arguments)
+
+ res[0] = id
+
+ return res
}
diff --git a/javascript/ui.js b/javascript/ui.js
index 587dd782..ba72623c 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -1,4 +1,4 @@
-// various functions for interation with ui.py not large enough to warrant putting them in separate files
+// various functions for interaction with ui.py not large enough to warrant putting them in separate files
function set_theme(theme){
gradioURL = window.location.href
@@ -19,7 +19,7 @@ function selected_gallery_index(){
function extract_image_from_gallery(gallery){
if(gallery.length == 1){
- return gallery[0]
+ return [gallery[0]]
}
index = selected_gallery_index()
@@ -28,7 +28,7 @@ function extract_image_from_gallery(gallery){
return [null]
}
- return gallery[index];
+ return [gallery[index]];
}
function args_to_array(args){
@@ -45,16 +45,33 @@ function switch_to_txt2img(){
return args_to_array(arguments);
}
-function switch_to_img2img(){
+function switch_to_img2img_tab(no){
gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click();
- gradioApp().getElementById('mode_img2img').querySelectorAll('button')[0].click();
+ gradioApp().getElementById('mode_img2img').querySelectorAll('button')[no].click();
+}
+function switch_to_img2img(){
+ switch_to_img2img_tab(0);
+ return args_to_array(arguments);
+}
+
+function switch_to_sketch(){
+ switch_to_img2img_tab(1);
+ return args_to_array(arguments);
+}
+function switch_to_inpaint(){
+ switch_to_img2img_tab(2);
+ return args_to_array(arguments);
+}
+
+function switch_to_inpaint_sketch(){
+ switch_to_img2img_tab(3);
return args_to_array(arguments);
}
function switch_to_inpaint(){
gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click();
- gradioApp().getElementById('mode_img2img').querySelectorAll('button')[1].click();
+ gradioApp().getElementById('mode_img2img').querySelectorAll('button')[2].click();
return args_to_array(arguments);
}
@@ -87,9 +104,11 @@ function create_tab_index_args(tabId, args){
return res
}
-function get_extras_tab_index(){
- const [,,...args] = [...arguments]
- return [get_tab_index('mode_extras'), get_tab_index('extras_resize_mode'), ...args]
+function get_img2img_tab_index() {
+ let res = args_to_array(arguments)
+ res.splice(-2)
+ res[0] = get_tab_index('mode_img2img')
+ return res
}
function create_submit_args(args){
@@ -109,22 +128,54 @@ function create_submit_args(args){
return res
}
+function showSubmitButtons(tabname, show){
+ gradioApp().getElementById(tabname+'_interrupt').style.display = show ? "none" : "block"
+ gradioApp().getElementById(tabname+'_skip').style.display = show ? "none" : "block"
+}
+
function submit(){
- requestProgress('txt2img')
+ rememberGallerySelection('txt2img_gallery')
+ showSubmitButtons('txt2img', false)
+
+ var id = randomId()
+ requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function(){
+ showSubmitButtons('txt2img', true)
+
+ })
+
+ var res = create_submit_args(arguments)
+
+ res[0] = id
- return create_submit_args(arguments)
+ return res
}
function submit_img2img(){
- requestProgress('img2img')
+ rememberGallerySelection('img2img_gallery')
+ showSubmitButtons('img2img', false)
- res = create_submit_args(arguments)
+ var id = randomId()
+ requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function(){
+ showSubmitButtons('img2img', true)
+ })
- res[0] = get_tab_index('mode_img2img')
+ var res = create_submit_args(arguments)
+
+ res[0] = id
+ res[1] = get_tab_index('mode_img2img')
return res
}
+function modelmerger(){
+ var id = randomId()
+ requestProgress(id, gradioApp().getElementById('modelmerger_results_panel'), null, function(){})
+
+ var res = create_submit_args(arguments)
+ res[0] = id
+ return res
+}
+
function ask_for_style_name(_, prompt_text, negative_prompt_text) {
name_ = prompt('Style name:')
@@ -140,27 +191,17 @@ function confirm_clear_prompt(prompt, negative_prompt) {
return [prompt, negative_prompt]
}
-
-
opts = {}
-function apply_settings(jsdata){
- console.log(jsdata)
-
- opts = JSON.parse(jsdata)
-
- return jsdata
-}
-
onUiUpdate(function(){
if(Object.keys(opts).length != 0) return;
json_elem = gradioApp().getElementById('settings_json')
if(json_elem == null) return;
- textarea = json_elem.querySelector('textarea')
- jsdata = textarea.value
+ var textarea = json_elem.querySelector('textarea')
+ var jsdata = textarea.value
opts = JSON.parse(jsdata)
-
+ executeCallbacks(optionsChangedCallbacks);
Object.defineProperty(textarea, 'value', {
set: function(newValue) {
@@ -171,6 +212,8 @@ onUiUpdate(function(){
if (oldValue != newValue) {
opts = JSON.parse(textarea.value)
}
+
+ executeCallbacks(optionsChangedCallbacks);
},
get: function() {
var valueProp = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value');
@@ -180,13 +223,51 @@ onUiUpdate(function(){
json_elem.parentElement.style.display="none"
- if (!txt2img_textarea) {
- txt2img_textarea = gradioApp().querySelector("#txt2img_prompt > label > textarea");
- txt2img_textarea?.addEventListener("input", () => update_token_counter("txt2img_token_button"));
- }
- if (!img2img_textarea) {
- img2img_textarea = gradioApp().querySelector("#img2img_prompt > label > textarea");
- img2img_textarea?.addEventListener("input", () => update_token_counter("img2img_token_button"));
+ function registerTextarea(id, id_counter, id_button){
+ var prompt = gradioApp().getElementById(id)
+ var counter = gradioApp().getElementById(id_counter)
+ var textarea = gradioApp().querySelector("#" + id + " > label > textarea");
+
+ if(counter.parentElement == prompt.parentElement){
+ return
+ }
+
+
+ prompt.parentElement.insertBefore(counter, prompt)
+ counter.classList.add("token-counter")
+ prompt.parentElement.style.position = "relative"
+
+ textarea.addEventListener("input", function(){
+ update_token_counter(id_button);
+ });
+ }
+
+ registerTextarea('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button')
+ registerTextarea('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button')
+ registerTextarea('img2img_prompt', 'img2img_token_counter', 'img2img_token_button')
+ registerTextarea('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button')
+
+ show_all_pages = gradioApp().getElementById('settings_show_all_pages')
+ settings_tabs = gradioApp().querySelector('#settings div')
+ if(show_all_pages && settings_tabs){
+ settings_tabs.appendChild(show_all_pages)
+ show_all_pages.onclick = function(){
+ gradioApp().querySelectorAll('#settings > div').forEach(function(elem){
+ elem.style.display = "block";
+ })
+ }
+ }
+})
+
+onOptionsChanged(function(){
+ elem = gradioApp().getElementById('sd_checkpoint_hash')
+ sd_checkpoint_hash = opts.sd_checkpoint_hash || ""
+ shorthash = sd_checkpoint_hash.substr(0,10)
+
+ if(elem && elem.textContent != shorthash){
+ elem.textContent = shorthash
+ elem.title = sd_checkpoint_hash
+ elem.href = "https://google.com/search?q=" + sd_checkpoint_hash
}
})
@@ -220,3 +301,11 @@ function restart_reload(){
return []
}
+
+// Simulate an `input` DOM event for Gradio Textbox component. Needed after you edit its contents in JavaScript, otherwise your edits
+// will only be visible on the web page and will not be sent to Python.
+function updateInput(target){
+ let e = new Event("input", { bubbles: true })
+ Object.defineProperty(e, "target", {value: target})
+ target.dispatchEvent(e);
+}
diff --git a/launch.py b/launch.py
index af0d418b..370920de 100644
--- a/launch.py
+++ b/launch.py
@@ -13,6 +13,53 @@ dir_extensions = "extensions"
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
+stored_commit_hash = None
+skip_install = False
+
+
+def check_python_version():
+ is_windows = platform.system() == "Windows"
+ major = sys.version_info.major
+ minor = sys.version_info.minor
+ micro = sys.version_info.micro
+
+ if is_windows:
+ supported_minors = [10]
+ else:
+ supported_minors = [7, 8, 9, 10, 11]
+
+ if not (major == 3 and minor in supported_minors):
+ import modules.errors
+
+ modules.errors.print_error_explanation(f"""
+INCOMPATIBLE PYTHON VERSION
+
+This program is tested with Python 3.10.6, but you have {major}.{minor}.{micro}.
+If you encounter an error with the "RuntimeError: Couldn't install torch." message,
+or any other error regarding an unsuccessful package (library) installation,
+please downgrade (or upgrade) to the latest version of Python 3.10,
+and delete the current Python and "venv" folder in WebUI's directory.
+
+You can download Python 3.10 from here: https://www.python.org/downloads/release/python-3109/
+
+{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
+
+Use --skip-python-version-check to suppress this warning.
+""")
+
+
+def commit_hash():
+ global stored_commit_hash
+
+ if stored_commit_hash is not None:
+ return stored_commit_hash
+
+ try:
+ stored_commit_hash = run(f"{git} rev-parse HEAD").strip()
+ except Exception:
+ stored_commit_hash = "<none>"
+
+ return stored_commit_hash
def extract_arg(args, name):
@@ -32,10 +79,19 @@ def extract_opt(args, name):
return args, is_present, opt
-def run(command, desc=None, errdesc=None, custom_env=None):
+def run(command, desc=None, errdesc=None, custom_env=None, live=False):
if desc is not None:
print(desc)
+ if live:
+ result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env)
+ if result.returncode != 0:
+ raise RuntimeError(f"""{errdesc or 'Error running command'}.
+Command: {command}
+Error code: {result.returncode}""")
+
+ return ""
+
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
if result.returncode != 0:
@@ -74,6 +130,9 @@ def run_python(code, desc=None, errdesc=None):
def run_pip(args, desc=None):
+ if skip_install:
+ return
+
index_url_line = f' --index-url {index_url}' if index_url != '' else ''
return run(f'"{python}" -m pip {args} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
@@ -89,18 +148,18 @@ def git_clone(url, dir, name, commithash=None):
if commithash is None:
return
- current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
+ current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
if current_hash == commithash:
return
- run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
- run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
+ run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
+ run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
if commithash is not None:
- run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
+ run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
def version_check(commit):
@@ -158,7 +217,9 @@ def run_extensions_installers(settings_file):
def prepare_environment():
- torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
+ global skip_install
+
+ torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
@@ -166,8 +227,6 @@ def prepare_environment():
clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b")
- xformers_windows_package = os.environ.get('XFORMERS_WINDOWS_PACKAGE', 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl')
-
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
@@ -188,22 +247,25 @@ def prepare_environment():
sys.argv, _ = extract_arg(sys.argv, '-f')
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
+ sys.argv, skip_python_version_check = extract_arg(sys.argv, '--skip-python-version-check')
sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
+ sys.argv, reinstall_torch = extract_arg(sys.argv, '--reinstall-torch')
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
sys.argv, run_tests, test_dir = extract_opt(sys.argv, '--tests')
+ sys.argv, skip_install = extract_arg(sys.argv, '--skip-install')
xformers = '--xformers' in sys.argv
ngrok = '--ngrok' in sys.argv
- try:
- commit = run(f"{git} rev-parse HEAD").strip()
- except Exception:
- commit = "<none>"
+ if not skip_python_version_check:
+ check_python_version()
+
+ commit = commit_hash()
print(f"Python {sys.version}")
print(f"Commit hash: {commit}")
-
- if not is_installed("torch") or not is_installed("torchvision"):
- run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
+
+ if reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
+ run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
if not skip_torch_cuda_test:
run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
@@ -220,14 +282,14 @@ def prepare_environment():
if (not is_installed("xformers") or reinstall_xformers) and xformers:
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
- run_pip(f"install -U -I --no-deps {xformers_windows_package}", "xformers")
+ run_pip(f"install -U -I --no-deps xformers==0.0.16rc425", "xformers")
else:
print("Installation of xformers is not supported in this version of Python.")
print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
if not is_installed("xformers"):
exit(0)
elif platform.system() == "Linux":
- run_pip("install xformers", "xformers")
+ run_pip("install xformers==0.0.16rc425", "xformers")
if not is_installed("pyngrok") and ngrok:
run_pip("install pyngrok", "ngrok")
@@ -267,9 +329,12 @@ def tests(test_dir):
sys.argv.append("./test/test_files/empty.pt")
if "--skip-torch-cuda-test" not in sys.argv:
sys.argv.append("--skip-torch-cuda-test")
+ if "--disable-nan-check" not in sys.argv:
+ sys.argv.append("--disable-nan-check")
print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}")
+ os.environ['COMMANDLINE_ARGS'] = ""
with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr:
proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr)
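
A side note on the launch.py change that wraps {dir} in quotes for the git commands: run() hands the command string to the shell (shell=True), so an unquoted checkout directory containing a space would be split into separate arguments. A minimal illustration of the difference, with a made-up path that is not part of the patch:

import shlex

git = "git"
directory = "repositories/stable diffusion"  # hypothetical path containing a space

unquoted = f'"{git}" -C {directory} rev-parse HEAD'
quoted = f'"{git}" -C "{directory}" rev-parse HEAD'

# roughly how a POSIX shell would tokenize each command line
print(shlex.split(unquoted))  # ['git', '-C', 'repositories/stable', 'diffusion', 'rev-parse', 'HEAD'] - path gets split
print(shlex.split(quoted))    # ['git', '-C', 'repositories/stable diffusion', 'rev-parse', 'HEAD'] - path stays one argument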
diff --git a/modules/api/api.py b/modules/api/api.py
index 1ceba75d..eb7b1da5 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -1,34 +1,42 @@
import base64
import io
import time
+import datetime
import uvicorn
from threading import Lock
from io import BytesIO
from gradio.processing_utils import decode_base64_to_file
-from fastapi import APIRouter, Depends, FastAPI, HTTPException
+from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request, Response
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from secrets import compare_digest
import modules.shared as shared
-from modules import sd_samplers, deepbooru, sd_hijack
+from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.extras import run_extras, run_pnginfo
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image
from modules.sd_models import checkpoints_list
+from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
from typing import List
+import piexif
+import piexif.helper
def upscaler_to_index(name: str):
try:
return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
except:
- raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be on of these: {' , '.join([x.name for x in sd_upscalers])}")
+ raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}")
+def script_name_to_index(name, scripts):
+ try:
+ return [script.title().lower() for script in scripts].index(name.lower())
+ except:
+ raise HTTPException(status_code=422, detail=f"Script '{name}' not found")
def validate_sampler_name(name):
config = sd_samplers.all_samplers_map.get(name, None)
@@ -39,34 +47,69 @@ def validate_sampler_name(name):
def setUpscalers(req: dict):
reqDict = vars(req)
- reqDict['extras_upscaler_1'] = upscaler_to_index(req.upscaler_1)
- reqDict['extras_upscaler_2'] = upscaler_to_index(req.upscaler_2)
- reqDict.pop('upscaler_1')
- reqDict.pop('upscaler_2')
+ reqDict['extras_upscaler_1'] = reqDict.pop('upscaler_1', None)
+ reqDict['extras_upscaler_2'] = reqDict.pop('upscaler_2', None)
return reqDict
def decode_base64_to_image(encoding):
if encoding.startswith("data:image/"):
encoding = encoding.split(";")[1].split(",")[1]
- return Image.open(BytesIO(base64.b64decode(encoding)))
+ try:
+ image = Image.open(BytesIO(base64.b64decode(encoding)))
+ return image
+ except Exception as err:
+ raise HTTPException(status_code=500, detail="Invalid encoded image")
def encode_pil_to_base64(image):
with io.BytesIO() as output_bytes:
- # Copy any text-only metadata
- use_metadata = False
- metadata = PngImagePlugin.PngInfo()
- for key, value in image.info.items():
- if isinstance(key, str) and isinstance(value, str):
- metadata.add_text(key, value)
- use_metadata = True
+ if opts.samples_format.lower() == 'png':
+ use_metadata = False
+ metadata = PngImagePlugin.PngInfo()
+ for key, value in image.info.items():
+ if isinstance(key, str) and isinstance(value, str):
+ metadata.add_text(key, value)
+ use_metadata = True
+ image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality)
+
+ elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"):
+ parameters = image.info.get('parameters', None)
+ exif_bytes = piexif.dump({
+ "Exif": { piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(parameters or "", encoding="unicode") }
+ })
+ if opts.samples_format.lower() in ("jpg", "jpeg"):
+ image.save(output_bytes, format="JPEG", exif = exif_bytes, quality=opts.jpeg_quality)
+ else:
+ image.save(output_bytes, format="WEBP", exif = exif_bytes, quality=opts.jpeg_quality)
+
+ else:
+ raise HTTPException(status_code=500, detail="Invalid image format")
- image.save(
- output_bytes, "PNG", pnginfo=(metadata if use_metadata else None)
- )
bytes_data = output_bytes.getvalue()
+
return base64.b64encode(bytes_data)
+def api_middleware(app: FastAPI):
+ @app.middleware("http")
+ async def log_and_time(req: Request, call_next):
+ ts = time.time()
+ res: Response = await call_next(req)
+ duration = str(round(time.time() - ts, 4))
+ res.headers["X-Process-Time"] = duration
+ endpoint = req.scope.get('path', 'err')
+ if shared.cmd_opts.api_log and endpoint.startswith('/sdapi'):
+ print('API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}'.format(
+ t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
+ code = res.status_code,
+ ver = req.scope.get('http_version', '0.0'),
+ cli = req.scope.get('client', ('0:0.0.0', 0))[0],
+ prot = req.scope.get('scheme', 'err'),
+ method = req.scope.get('method', 'err'),
+ endpoint = endpoint,
+ duration = duration,
+ ))
+ return res
+
class Api:
def __init__(self, app: FastAPI, queue_lock: Lock):
@@ -79,6 +122,7 @@ class Api:
self.router = APIRouter()
self.app = app
self.queue_lock = queue_lock
+ api_middleware(self.app)
self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
@@ -98,14 +142,14 @@ class Api:
self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem])
- self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str])
- self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem])
+ self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse)
self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse)
self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse)
self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=PreprocessResponse)
self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse)
self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse)
+ self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse)
def add_api_route(self, path: str, endpoint, **kwargs):
if shared.cmd_opts.api_auth:
@@ -119,9 +163,22 @@ class Api:
raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"})
+ def get_script(self, script_name, script_runner):
+ if script_name is None:
+ return None, None
+
+ if not script_runner.scripts:
+ script_runner.initialize_scripts(False)
+ ui.create_ui()
+
+ script_idx = script_name_to_index(script_name, script_runner.selectable_scripts)
+ script = script_runner.selectable_scripts[script_idx]
+ return script, script_idx
+
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
+ script, script_idx = self.get_script(txt2imgreq.script_name, scripts.scripts_txt2img)
+
populate = txt2imgreq.copy(update={ # Override __init__ params
- "sd_model": shared.sd_model,
"sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
"do_not_save_samples": True,
"do_not_save_grid": True
@@ -129,15 +186,22 @@ class Api:
)
if populate.sampler_name:
populate.sampler_index = None # prevent a warning later on
- p = StableDiffusionProcessingTxt2Img(**vars(populate))
- # Override object param
- shared.state.begin()
+ args = vars(populate)
+ args.pop('script_name', None)
with self.queue_lock:
- processed = process_images(p)
+ p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)
- shared.state.end()
+ shared.state.begin()
+ if script is not None:
+ p.outpath_grids = opts.outdir_txt2img_grids
+ p.outpath_samples = opts.outdir_txt2img_samples
+ p.script_args = [script_idx + 1] + [None] * (script.args_from - 1) + p.script_args
+ processed = scripts.scripts_txt2img.run(p, *p.script_args)
+ else:
+ processed = process_images(p)
+ shared.state.end()
b64images = list(map(encode_pil_to_base64, processed.images))
@@ -148,12 +212,13 @@ class Api:
if init_images is None:
raise HTTPException(status_code=404, detail="Init image not found")
+ script, script_idx = self.get_script(img2imgreq.script_name, scripts.scripts_img2img)
+
mask = img2imgreq.mask
if mask:
mask = decode_base64_to_image(mask)
populate = img2imgreq.copy(update={ # Override __init__ params
- "sd_model": shared.sd_model,
"sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
"do_not_save_samples": True,
"do_not_save_grid": True,
@@ -165,16 +230,21 @@ class Api:
args = vars(populate)
args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine.
- p = StableDiffusionProcessingImg2Img(**args)
-
- p.init_images = [decode_base64_to_image(x) for x in init_images]
-
- shared.state.begin()
+ args.pop('script_name', None)
with self.queue_lock:
- processed = process_images(p)
+ p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)
+ p.init_images = [decode_base64_to_image(x) for x in init_images]
- shared.state.end()
+ shared.state.begin()
+ if script is not None:
+ p.outpath_grids = opts.outdir_img2img_grids
+ p.outpath_samples = opts.outdir_img2img_samples
+ p.script_args = [script_idx + 1] + [None] * (script.args_from - 1) + p.script_args
+ processed = scripts.scripts_img2img.run(p, *p.script_args)
+ else:
+ processed = process_images(p)
+ shared.state.end()
b64images = list(map(encode_pil_to_base64, processed.images))
@@ -190,7 +260,7 @@ class Api:
reqDict['image'] = decode_base64_to_image(reqDict['image'])
with self.queue_lock:
- result = run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict)
+ result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict)
return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])
@@ -206,7 +276,7 @@ class Api:
reqDict.pop('imageList')
with self.queue_lock:
- result = run_extras(extras_mode=1, image="", input_dir="", output_dir="", save_output=False, **reqDict)
+ result = postprocessing.run_extras(extras_mode=1, image="", input_dir="", output_dir="", save_output=False, **reqDict)
return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
@@ -214,15 +284,23 @@ class Api:
if(not req.image.strip()):
return PNGInfoResponse(info="")
- result = run_pnginfo(decode_base64_to_image(req.image.strip()))
+ image = decode_base64_to_image(req.image.strip())
+ if image is None:
+ return PNGInfoResponse(info="")
- return PNGInfoResponse(info=result[1])
+ geninfo, items = images.read_info_from_image(image)
+ if geninfo is None:
+ geninfo = ""
+
+ items = {**{'parameters': geninfo}, **items}
+
+ return PNGInfoResponse(info=geninfo, items=items)
def progressapi(self, req: ProgressRequest = Depends()):
# copy from check_progress_call of ui.py
if shared.state.job_count == 0:
- return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict())
+ return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)
# avoid dividing zero
progress = 0.01
@@ -244,7 +322,7 @@ class Api:
if shared.state.current_image and not req.skip_current_image:
current_image = encode_pil_to_base64(shared.state.current_image)
- return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image)
+ return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)
def interrogateapi(self, interrogatereq: InterrogateRequest):
image_b64 = interrogatereq.image
@@ -298,16 +376,19 @@ class Api:
return [{"name": sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers]
def get_upscalers(self):
- upscalers = []
-
- for upscaler in shared.sd_upscalers:
- u = upscaler.scaler
- upscalers.append({"name":u.name, "model_name":u.model_name, "model_path":u.model_path, "model_url":u.model_url})
-
- return upscalers
+ return [
+ {
+ "name": upscaler.name,
+ "model_name": upscaler.scaler.model_name,
+ "model_path": upscaler.data_path,
+ "model_url": None,
+ "scale": upscaler.scale,
+ }
+ for upscaler in shared.sd_upscalers
+ ]
def get_sd_models(self):
- return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": x.config} for x in checkpoints_list.values()]
+ return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
@@ -326,11 +407,25 @@ class Api:
return styleList
- def get_artists_categories(self):
- return shared.artist_db.cats
+ def get_embeddings(self):
+ db = sd_hijack.model_hijack.embedding_db
+
+ def convert_embedding(embedding):
+ return {
+ "step": embedding.step,
+ "sd_checkpoint": embedding.sd_checkpoint,
+ "sd_checkpoint_name": embedding.sd_checkpoint_name,
+ "shape": embedding.shape,
+ "vectors": embedding.vectors,
+ }
- def get_artists(self):
- return [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists]
+ def convert_embeddings(embeddings):
+ return {embedding.name: convert_embedding(embedding) for embedding in embeddings.values()}
+
+ return {
+ "loaded": convert_embeddings(db.word_embeddings),
+ "skipped": convert_embeddings(db.skipped_embeddings),
+ }
def refresh_checkpoints(self):
shared.refresh_checkpoints()
@@ -396,7 +491,7 @@ class Api:
def train_hypernetwork(self, args: dict):
try:
shared.state.begin()
- initial_hypernetwork = shared.loaded_hypernetwork
+ shared.loaded_hypernetworks = []
apply_optimizations = shared.opts.training_xattention_optimizations
error = None
filename = ''
@@ -407,16 +502,49 @@ class Api:
except Exception as e:
error = e
finally:
- shared.loaded_hypernetwork = initial_hypernetwork
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
if not apply_optimizations:
sd_hijack.apply_optimizations()
shared.state.end()
- return TrainResponse(info = "train embedding complete: filename: {filename} error: {error}".format(filename = filename, error = error))
+ return TrainResponse(info="train embedding complete: filename: {filename} error: {error}".format(filename=filename, error=error))
except AssertionError as msg:
shared.state.end()
- return TrainResponse(info = "train embedding error: {error}".format(error = error))
+ return TrainResponse(info="train embedding error: {error}".format(error=error))
+
+ def get_memory(self):
+ try:
+ import os, psutil
+ process = psutil.Process(os.getpid())
+ res = process.memory_info() # only rss is guaranteed cross-platform, so we don't rely on other values
+ ram_total = 100 * res.rss / process.memory_percent() # total memory is calculated this way because the actual value is not cross-platform safe
+ ram = { 'free': ram_total - res.rss, 'used': res.rss, 'total': ram_total }
+ except Exception as err:
+ ram = { 'error': f'{err}' }
+ try:
+ import torch
+ if torch.cuda.is_available():
+ s = torch.cuda.mem_get_info()
+ system = { 'free': s[0], 'used': s[1] - s[0], 'total': s[1] }
+ s = dict(torch.cuda.memory_stats(shared.device))
+ allocated = { 'current': s['allocated_bytes.all.current'], 'peak': s['allocated_bytes.all.peak'] }
+ reserved = { 'current': s['reserved_bytes.all.current'], 'peak': s['reserved_bytes.all.peak'] }
+ active = { 'current': s['active_bytes.all.current'], 'peak': s['active_bytes.all.peak'] }
+ inactive = { 'current': s['inactive_split_bytes.all.current'], 'peak': s['inactive_split_bytes.all.peak'] }
+ warnings = { 'retries': s['num_alloc_retries'], 'oom': s['num_ooms'] }
+ cuda = {
+ 'system': system,
+ 'active': active,
+ 'allocated': allocated,
+ 'reserved': reserved,
+ 'inactive': inactive,
+ 'events': warnings,
+ }
+ else:
+ cuda = { 'error': 'unavailable' }
+ except Exception as err:
+ cuda = { 'error': f'{err}' }
+ return MemoryResponse(ram = ram, cuda = cuda)
def launch(self, server_name, port):
self.app.include_router(self.router)
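
The API changes above add optional script_name/script_args fields to the txt2img and img2img requests, return base64-encoded images in whatever format opts.samples_format selects, and expose a new /sdapi/v1/memory route. A hedged client-side sketch follows; it is not part of the patch, assumes a local instance at http://127.0.0.1:7860 and the requests package, and the commented-out script name is purely hypothetical (it must match the title of an installed selectable script).

import base64
import requests

base_url = "http://127.0.0.1:7860"  # assumed local instance

payload = {
    "prompt": "a photo of a cat",
    "steps": 20,
    # new optional fields added in this change
    # "script_name": "some script",
    # "script_args": [],
}
r = requests.post(base_url + "/sdapi/v1/txt2img", json=payload).json()

# images come back base64-encoded (PNG, JPEG or WEBP depending on opts.samples_format)
with open("out.png", "wb") as f:
    f.write(base64.b64decode(r["images"][0]))

# new endpoint added in this change: RAM and CUDA memory statistics
print(requests.get(base_url + "/sdapi/v1/memory").json())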
diff --git a/modules/api/models.py b/modules/api/models.py
index c446ce7a..cba43d3b 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -100,13 +100,13 @@ class PydanticModelGenerator:
StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
"StableDiffusionProcessingTxt2Img",
StableDiffusionProcessingTxt2Img,
- [{"key": "sampler_index", "type": str, "default": "Euler"}]
+ [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}]
).generate_model()
StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
"StableDiffusionProcessingImg2Img",
StableDiffusionProcessingImg2Img,
- [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}]
+ [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}]
).generate_model()
class TextToImageResponse(BaseModel):
@@ -125,7 +125,7 @@ class ExtrasBaseRequest(BaseModel):
gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
- upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.")
+ upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.")
upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
@@ -157,7 +157,8 @@ class PNGInfoRequest(BaseModel):
image: str = Field(title="Image", description="The base64 encoded PNG image")
class PNGInfoResponse(BaseModel):
- info: str = Field(title="Image info", description="A string with all the info the image had")
+ info: str = Field(title="Image info", description="A string with the parameters used to generate the image")
+ items: dict = Field(title="Items", description="An object containing all the info the image had")
class ProgressRequest(BaseModel):
skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
@@ -167,6 +168,7 @@ class ProgressResponse(BaseModel):
eta_relative: float = Field(title="ETA in secs")
state: dict = Field(title="State", description="The current state snapshot")
current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
+ textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.")
class InterrogateRequest(BaseModel):
image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
@@ -218,13 +220,15 @@ class UpscalerItem(BaseModel):
model_name: Optional[str] = Field(title="Model Name")
model_path: Optional[str] = Field(title="Path")
model_url: Optional[str] = Field(title="URL")
+ scale: Optional[float] = Field(title="Scale")
class SDModelItem(BaseModel):
title: str = Field(title="Title")
model_name: str = Field(title="Model Name")
- hash: str = Field(title="Hash")
+ hash: Optional[str] = Field(title="Short hash")
+ sha256: Optional[str] = Field(title="sha256 hash")
filename: str = Field(title="Filename")
- config: str = Field(title="Config file")
+ config: Optional[str] = Field(title="Config file")
class HypernetworkItem(BaseModel):
name: str = Field(title="Name")
@@ -249,3 +253,17 @@ class ArtistItem(BaseModel):
score: float = Field(title="Score")
category: str = Field(title="Category")
+class EmbeddingItem(BaseModel):
+ step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available")
+ sd_checkpoint: Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available")
+ sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead")
+ shape: int = Field(title="Shape", description="The length of each individual vector in the embedding")
+ vectors: int = Field(title="Vectors", description="The number of vectors in the embedding")
+
+class EmbeddingsResponse(BaseModel):
+ loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
+ skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
+
+class MemoryResponse(BaseModel):
+ ram: dict = Field(title="RAM", description="System memory stats")
+ cuda: dict = Field(title="CUDA", description="NVIDIA CUDA memory stats")
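To illustrate the new script_name/script_args fields added to the txt2img and img2img models above, here is a minimal sketch of a request body that drives a script through the API; the endpoint path, script name, and arguments are assumptions for illustration, not taken from this diff:

payload = {
    "prompt": "a photo of a cat",
    "steps": 20,
    "sampler_index": "Euler",
    "script_name": "some_script",   # hypothetical name of a script installed in the UI
    "script_args": [],              # positional arguments that script expects, if any
}
# e.g. requests.post(f"{base_url}/sdapi/v1/txt2img", json=payload)  # path assumed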
diff --git a/modules/artists.py b/modules/artists.py
deleted file mode 100644
index 3612758b..00000000
--- a/modules/artists.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os.path
-import csv
-from collections import namedtuple
-
-Artist = namedtuple("Artist", ['name', 'weight', 'category'])
-
-
-class ArtistsDatabase:
- def __init__(self, filename):
- self.cats = set()
- self.artists = []
-
- if not os.path.exists(filename):
- return
-
- with open(filename, "r", newline='', encoding="utf8") as file:
- reader = csv.DictReader(file)
-
- for row in reader:
- artist = Artist(row["artist"], float(row["score"]), row["category"])
- self.artists.append(artist)
- self.cats.add(artist.category)
-
- def categories(self):
- return sorted(self.cats)
diff --git a/modules/call_queue.py b/modules/call_queue.py
index 4cd49533..92097c15 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -4,7 +4,7 @@ import threading
import traceback
import time
-from modules import shared
+from modules import shared, progress
queue_lock = threading.Lock()
@@ -22,12 +22,23 @@ def wrap_queued_call(func):
def wrap_gradio_gpu_call(func, extra_outputs=None):
def f(*args, **kwargs):
- shared.state.begin()
+ # if the first argument is a string that says "task(...)", it is treated as a job id
+ if len(args) > 0 and type(args[0]) == str and args[0][0:5] == "task(" and args[0][-1] == ")":
+ id_task = args[0]
+ progress.add_task_to_queue(id_task)
+ else:
+ id_task = None
with queue_lock:
- res = func(*args, **kwargs)
+ shared.state.begin()
+ progress.start_task(id_task)
+
+ try:
+ res = func(*args, **kwargs)
+ finally:
+ progress.finish_task(id_task)
- shared.state.end()
+ shared.state.end()
return res
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index ab40d842..01fb7bd8 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -8,7 +8,7 @@ import torch
import modules.face_restoration
import modules.shared
from modules import shared, devices, modelloader
-from modules.paths import script_path, models_path
+from modules.paths import models_path
# codeformer people made a choice to include modified basicsr library to their project which makes
# it utterly impossible to use it alongside with other libraries that also use basicsr, like GFPGAN.
diff --git a/modules/deepbooru_model.py b/modules/deepbooru_model.py
index edd40c81..83d2ff09 100644
--- a/modules/deepbooru_model.py
+++ b/modules/deepbooru_model.py
@@ -2,6 +2,8 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
+from modules import devices
+
# see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more
@@ -196,7 +198,7 @@ class DeepDanbooruModel(nn.Module):
t_358, = inputs
t_359 = t_358.permute(*[0, 3, 1, 2])
t_359_padded = F.pad(t_359, [2, 3, 2, 3], value=0)
- t_360 = self.n_Conv_0(t_359_padded)
+ t_360 = self.n_Conv_0(t_359_padded.to(self.n_Conv_0.bias.dtype) if devices.unet_needs_upcast else t_359_padded)
t_361 = F.relu(t_360)
t_361 = F.pad(t_361, [0, 1, 0, 1], value=float('-inf'))
t_362 = self.n_MaxPool_0(t_361)
diff --git a/modules/devices.py b/modules/devices.py
index 800510b7..4687944e 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -34,14 +34,18 @@ def get_cuda_device_string():
return "cuda"
-def get_optimal_device():
+def get_optimal_device_name():
if torch.cuda.is_available():
- return torch.device(get_cuda_device_string())
+ return get_cuda_device_string()
if has_mps():
- return torch.device("mps")
+ return "mps"
+
+ return "cpu"
- return cpu
+
+def get_optimal_device():
+ return torch.device(get_optimal_device_name())
def get_device_for(task):
@@ -79,6 +83,8 @@ cpu = torch.device("cpu")
device = device_interrogate = device_gfpgan = device_esrgan = device_codeformer = None
dtype = torch.float16
dtype_vae = torch.float16
+dtype_unet = torch.float16
+unet_needs_upcast = False
def randn(seed, shape):
@@ -106,6 +112,42 @@ def autocast(disable=False):
return torch.autocast("cuda")
+def without_autocast(disable=False):
+ return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext()
+
+
+class NansException(Exception):
+ pass
+
+
+def test_for_nans(x, where):
+ from modules import shared
+
+ if shared.cmd_opts.disable_nan_check:
+ return
+
+ if not torch.all(torch.isnan(x)).item():
+ return
+
+ if where == "unet":
+ message = "A tensor with all NaNs was produced in Unet."
+
+ if not shared.cmd_opts.no_half:
+ message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this."
+
+ elif where == "vae":
+ message = "A tensor with all NaNs was produced in VAE."
+
+ if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae:
+ message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this."
+ else:
+ message = "A tensor with all NaNs was produced."
+
+ message += " Use --disable-nan-check commandline argument to disable this check."
+
+ raise NansException(message)
+
+
# MPS workaround for https://github.com/pytorch/pytorch/issues/79383
orig_tensor_to = torch.Tensor.to
def tensor_to_fix(self, *args, **kwargs):
@@ -133,8 +175,30 @@ def numpy_fix(self, *args, **kwargs):
return orig_tensor_numpy(self, *args, **kwargs)
-# PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
-if has_mps() and version.parse(torch.__version__) < version.parse("1.13"):
- torch.Tensor.to = tensor_to_fix
- torch.nn.functional.layer_norm = layer_norm_fix
- torch.Tensor.numpy = numpy_fix
+# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
+orig_cumsum = torch.cumsum
+orig_Tensor_cumsum = torch.Tensor.cumsum
+def cumsum_fix(input, cumsum_func, *args, **kwargs):
+ if input.device.type == 'mps':
+ output_dtype = kwargs.get('dtype', input.dtype)
+ if output_dtype == torch.int64:
+ return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
+ elif cumsum_needs_bool_fix and output_dtype == torch.bool or cumsum_needs_int_fix and (output_dtype == torch.int8 or output_dtype == torch.int16):
+ return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64)
+ return cumsum_func(input, *args, **kwargs)
+
+
+if has_mps():
+ if version.parse(torch.__version__) < version.parse("1.13"):
+ # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
+ torch.Tensor.to = tensor_to_fix
+ torch.nn.functional.layer_norm = layer_norm_fix
+ torch.Tensor.numpy = numpy_fix
+ elif version.parse(torch.__version__) > version.parse("1.13.1"):
+ cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
+ cumsum_needs_bool_fix = not torch.BoolTensor([True,True]).to(device=torch.device("mps"), dtype=torch.int64).equal(torch.BoolTensor([True,False]).to(torch.device("mps")).cumsum(0))
+ torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) )
+ torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) )
+ orig_narrow = torch.narrow
+ torch.narrow = lambda *args, **kwargs: ( orig_narrow(*args, **kwargs).clone() )
+
diff --git a/modules/errors.py b/modules/errors.py
index 372dc51a..f6b80dbb 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -2,9 +2,42 @@ import sys
import traceback
+def print_error_explanation(message):
+ lines = message.strip().split("\n")
+ max_len = max([len(x) for x in lines])
+
+ print('=' * max_len, file=sys.stderr)
+ for line in lines:
+ print(line, file=sys.stderr)
+ print('=' * max_len, file=sys.stderr)
+
+
+def display(e: Exception, task):
+ print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+
+ message = str(e)
+ if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message:
+ print_error_explanation("""
+The most likely cause of this is you are trying to load Stable Diffusion 2.0 model without specifying its config file.
+See https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20 for how to solve this.
+ """)
+
+
+already_displayed = {}
+
+
+def display_once(e: Exception, task):
+ if task in already_displayed:
+ return
+
+ display(e, task)
+
+ already_displayed[task] = 1
+
+
def run(code, task):
try:
code()
except Exception as e:
- print(f"{task}: {type(e).__name__}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ display(e, task)
diff --git a/modules/extensions.py b/modules/extensions.py
index b522125c..5e12b1aa 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -7,9 +7,11 @@ import git
from modules import paths, shared
extensions = []
-extensions_dir = os.path.join(paths.script_path, "extensions")
+extensions_dir = os.path.join(paths.data_path, "extensions")
extensions_builtin_dir = os.path.join(paths.script_path, "extensions-builtin")
+if not os.path.exists(extensions_dir):
+ os.makedirs(extensions_dir)
def active():
return [x for x in extensions if x.enabled]
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
new file mode 100644
index 00000000..1978673d
--- /dev/null
+++ b/modules/extra_networks.py
@@ -0,0 +1,147 @@
+import re
+from collections import defaultdict
+
+from modules import errors
+
+extra_network_registry = {}
+
+
+def initialize():
+ extra_network_registry.clear()
+
+
+def register_extra_network(extra_network):
+ extra_network_registry[extra_network.name] = extra_network
+
+
+class ExtraNetworkParams:
+ def __init__(self, items=None):
+ self.items = items or []
+
+
+class ExtraNetwork:
+ def __init__(self, name):
+ self.name = name
+
+ def activate(self, p, params_list):
+ """
+ Called by processing on every run. Whatever the extra network is meant to do should be activated here.
+ Passes arguments related to this extra network in params_list.
+ The user passes arguments by specifying this in their prompt:
+
+ <name:arg1:arg2:arg3>
+
+ Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any number of text arguments
+ separated by colons.
+
+ Even if the user does not mention this ExtraNetwork in their prompt, the call will still be made, with an empty params_list -
+ in this case, all effects of this extra network should be disabled.
+
+ Can be called multiple times before deactivate() - each new call should override the previous call completely.
+
+ For example, if this ExtraNetwork's name is 'hypernet' and user's prompt is:
+
+ > "1girl, <hypernet:agm:1.1> <extrasupernet:master:12:13:14> <hypernet:ray>"
+
+ params_list will be:
+
+ [
+ ExtraNetworkParams(items=["agm", "1.1"]),
+ ExtraNetworkParams(items=["ray"])
+ ]
+
+ """
+ raise NotImplementedError
+
+ def deactivate(self, p):
+ """
+ Called at the end of processing for housekeeping. No need to do anything here.
+ """
+
+ raise NotImplementedError
+
+
+def activate(p, extra_network_data):
+ """call activate for extra networks in extra_network_data in specified order, then call
+ activate for all remaining registered networks with an empty argument list"""
+
+ for extra_network_name, extra_network_args in extra_network_data.items():
+ extra_network = extra_network_registry.get(extra_network_name, None)
+ if extra_network is None:
+ print(f"Skipping unknown extra network: {extra_network_name}")
+ continue
+
+ try:
+ extra_network.activate(p, extra_network_args)
+ except Exception as e:
+ errors.display(e, f"activating extra network {extra_network_name} with arguments {extra_network_args}")
+
+ for extra_network_name, extra_network in extra_network_registry.items():
+ args = extra_network_data.get(extra_network_name, None)
+ if args is not None:
+ continue
+
+ try:
+ extra_network.activate(p, [])
+ except Exception as e:
+ errors.display(e, f"activating extra network {extra_network_name}")
+
+
+def deactivate(p, extra_network_data):
+ """call deactivate for extra networks in extra_network_data in specified order, then call
+ deactivate for all remaining registered networks"""
+
+ for extra_network_name, extra_network_args in extra_network_data.items():
+ extra_network = extra_network_registry.get(extra_network_name, None)
+ if extra_network is None:
+ continue
+
+ try:
+ extra_network.deactivate(p)
+ except Exception as e:
+ errors.display(e, f"deactivating extra network {extra_network_name}")
+
+ for extra_network_name, extra_network in extra_network_registry.items():
+ args = extra_network_data.get(extra_network_name, None)
+ if args is not None:
+ continue
+
+ try:
+ extra_network.deactivate(p)
+ except Exception as e:
+ errors.display(e, f"deactivating unmentioned extra network {extra_network_name}")
+
+
+re_extra_net = re.compile(r"<(\w+):([^>]+)>")
+
+
+def parse_prompt(prompt):
+ res = defaultdict(list)
+
+ def found(m):
+ name = m.group(1)
+ args = m.group(2)
+
+ res[name].append(ExtraNetworkParams(items=args.split(":")))
+
+ return ""
+
+ prompt = re.sub(re_extra_net, found, prompt)
+
+ return prompt, res
+
+
+def parse_prompts(prompts):
+ res = []
+ extra_data = None
+
+ for prompt in prompts:
+ updated_prompt, parsed_extra_data = parse_prompt(prompt)
+
+ if extra_data is None:
+ extra_data = parsed_extra_data
+
+ res.append(updated_prompt)
+
+ return res, extra_data
+
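A quick usage sketch of the parser above, using the prompt from the activate() docstring (assuming the module is importable as modules.extra_networks):

from modules import extra_networks

prompt, data = extra_networks.parse_prompt("1girl, <hypernet:agm:1.1> <hypernet:ray>")
# prompt -> "1girl,  "   (the <...> tags are stripped from the text)
# data["hypernet"] -> [ExtraNetworkParams(items=["agm", "1.1"]),
#                      ExtraNetworkParams(items=["ray"])]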
diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py
new file mode 100644
index 00000000..ff279a1f
--- /dev/null
+++ b/modules/extra_networks_hypernet.py
@@ -0,0 +1,21 @@
+from modules import extra_networks
+from modules.hypernetworks import hypernetwork
+
+
+class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
+ def __init__(self):
+ super().__init__('hypernet')
+
+ def activate(self, p, params_list):
+ names = []
+ multipliers = []
+ for params in params_list:
+ assert len(params.items) > 0
+
+ names.append(params.items[0])
+ multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
+
+ hypernetwork.load_hypernetworks(names, multipliers)
+
+ def deactivate(self, p):
+ pass
diff --git a/modules/extras.py b/modules/extras.py
index 68939dea..d8ece955 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -1,222 +1,16 @@
-from __future__ import annotations
-import math
import os
-import sys
-import traceback
+import re
+import shutil
-import numpy as np
-from PIL import Image
import torch
import tqdm
-from typing import Callable, List, OrderedDict, Tuple
-from functools import partial
-from dataclasses import dataclass
-
-from modules import processing, shared, images, devices, sd_models, sd_samplers
-from modules.shared import opts
-import modules.gfpgan_model
-from modules.ui import plaintext_to_html
-import modules.codeformer_model
-import piexif
-import piexif.helper
+from modules import shared, images, sd_models, sd_vae, sd_models_config
+from modules.ui_common import plaintext_to_html
import gradio as gr
import safetensors.torch
-class LruCache(OrderedDict):
- @dataclass(frozen=True)
- class Key:
- image_hash: int
- info_hash: int
- args_hash: int
-
- @dataclass
- class Value:
- image: Image.Image
- info: str
-
- def __init__(self, max_size: int = 5, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self._max_size = max_size
-
- def get(self, key: LruCache.Key) -> LruCache.Value:
- ret = super().get(key)
- if ret is not None:
- self.move_to_end(key) # Move to end of eviction list
- return ret
-
- def put(self, key: LruCache.Key, value: LruCache.Value) -> None:
- self[key] = value
- while len(self) > self._max_size:
- self.popitem(last=False)
-
-
-cached_images: LruCache = LruCache(max_size=5)
-
-
-def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True):
- devices.torch_gc()
-
- imageArr = []
- # Also keep track of original file names
- imageNameArr = []
- outputs = []
-
- if extras_mode == 1:
- #convert file to pillow image
- for img in image_folder:
- image = Image.open(img)
- imageArr.append(image)
- imageNameArr.append(os.path.splitext(img.orig_name)[0])
- elif extras_mode == 2:
- assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled'
-
- if input_dir == '':
- return outputs, "Please select an input directory.", ''
- image_list = shared.listfiles(input_dir)
- for img in image_list:
- try:
- image = Image.open(img)
- except Exception:
- continue
- imageArr.append(image)
- imageNameArr.append(img)
- else:
- imageArr.append(image)
- imageNameArr.append(None)
-
- if extras_mode == 2 and output_dir != '':
- outpath = output_dir
- else:
- outpath = opts.outdir_samples or opts.outdir_extras_samples
-
- # Extra operation definitions
-
- def run_gfpgan(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
- restored_img = modules.gfpgan_model.gfpgan_fix_faces(np.array(image, dtype=np.uint8))
- res = Image.fromarray(restored_img)
-
- if gfpgan_visibility < 1.0:
- res = Image.blend(image, res, gfpgan_visibility)
-
- info += f"GFPGAN visibility:{round(gfpgan_visibility, 2)}\n"
- return (res, info)
-
- def run_codeformer(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
- restored_img = modules.codeformer_model.codeformer.restore(np.array(image, dtype=np.uint8), w=codeformer_weight)
- res = Image.fromarray(restored_img)
-
- if codeformer_visibility < 1.0:
- res = Image.blend(image, res, codeformer_visibility)
-
- info += f"CodeFormer w: {round(codeformer_weight, 2)}, CodeFormer visibility:{round(codeformer_visibility, 2)}\n"
- return (res, info)
-
- def upscale(image, scaler_index, resize, mode, resize_w, resize_h, crop):
- upscaler = shared.sd_upscalers[scaler_index]
- res = upscaler.scaler.upscale(image, resize, upscaler.data_path)
- if mode == 1 and crop:
- cropped = Image.new("RGB", (resize_w, resize_h))
- cropped.paste(res, box=(resize_w // 2 - res.width // 2, resize_h // 2 - res.height // 2))
- res = cropped
- return res
-
- def run_prepare_crop(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
- # Actual crop happens in run_upscalers_blend, this just sets upscaling_resize and adds info text
- nonlocal upscaling_resize
- if resize_mode == 1:
- upscaling_resize = max(upscaling_resize_w/image.width, upscaling_resize_h/image.height)
- crop_info = " (crop)" if upscaling_crop else ""
- info += f"Resize to: {upscaling_resize_w:g}x{upscaling_resize_h:g}{crop_info}\n"
- return (image, info)
-
- @dataclass
- class UpscaleParams:
- upscaler_idx: int
- blend_alpha: float
-
- def run_upscalers_blend(params: List[UpscaleParams], image: Image.Image, info: str) -> Tuple[Image.Image, str]:
- blended_result: Image.Image = None
- image_hash: str = hash(np.array(image.getdata()).tobytes())
- for upscaler in params:
- upscale_args = (upscaler.upscaler_idx, upscaling_resize, resize_mode,
- upscaling_resize_w, upscaling_resize_h, upscaling_crop)
- cache_key = LruCache.Key(image_hash=image_hash,
- info_hash=hash(info),
- args_hash=hash(upscale_args))
- cached_entry = cached_images.get(cache_key)
- if cached_entry is None:
- res = upscale(image, *upscale_args)
- info += f"Upscale: {round(upscaling_resize, 3)}, visibility: {upscaler.blend_alpha}, model:{shared.sd_upscalers[upscaler.upscaler_idx].name}\n"
- cached_images.put(cache_key, LruCache.Value(image=res, info=info))
- else:
- res, info = cached_entry.image, cached_entry.info
-
- if blended_result is None:
- blended_result = res
- else:
- blended_result = Image.blend(blended_result, res, upscaler.blend_alpha)
- return (blended_result, info)
-
- # Build a list of operations to run
- facefix_ops: List[Callable] = []
- facefix_ops += [run_gfpgan] if gfpgan_visibility > 0 else []
- facefix_ops += [run_codeformer] if codeformer_visibility > 0 else []
-
- upscale_ops: List[Callable] = []
- upscale_ops += [run_prepare_crop] if resize_mode == 1 else []
-
- if upscaling_resize != 0:
- step_params: List[UpscaleParams] = []
- step_params.append(UpscaleParams(upscaler_idx=extras_upscaler_1, blend_alpha=1.0))
- if extras_upscaler_2 != 0 and extras_upscaler_2_visibility > 0:
- step_params.append(UpscaleParams(upscaler_idx=extras_upscaler_2, blend_alpha=extras_upscaler_2_visibility))
-
- upscale_ops.append(partial(run_upscalers_blend, step_params))
-
- extras_ops: List[Callable] = (upscale_ops + facefix_ops) if upscale_first else (facefix_ops + upscale_ops)
-
- for image, image_name in zip(imageArr, imageNameArr):
- if image is None:
- return outputs, "Please select an input image.", ''
- existing_pnginfo = image.info or {}
-
- image = image.convert("RGB")
- info = ""
- # Run each operation on each image
- for op in extras_ops:
- image, info = op(image, info)
-
- if opts.use_original_name_batch and image_name is not None:
- basename = os.path.splitext(os.path.basename(image_name))[0]
- else:
- basename = ''
-
- if save_output:
- # Add upscaler name as a suffix.
- suffix = f"-{shared.sd_upscalers[extras_upscaler_1].name}" if shared.opts.use_upscaler_name_as_suffix else ""
- # Add second upscaler if applicable.
- if suffix and extras_upscaler_2 and extras_upscaler_2_visibility:
- suffix += f"-{shared.sd_upscalers[extras_upscaler_2].name}"
-
- images.save_image(image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=info, short_filename=True,
- no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None, suffix=suffix)
-
- if opts.enable_pnginfo:
- image.info = existing_pnginfo
- image.info["extras"] = info
-
- if extras_mode != 2 or show_extras_results :
- outputs.append(image)
-
- devices.torch_gc()
-
- return outputs, plaintext_to_html(info), ''
-
-def clear_cache():
- cached_images.clear()
-
def run_pnginfo(image):
if image is None:
@@ -241,7 +35,51 @@ def run_pnginfo(image):
return '', geninfo, info
-def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format):
+def create_config(ckpt_result, config_source, a, b, c):
+ def config(x):
+ res = sd_models_config.find_checkpoint_config_near_filename(x) if x else None
+ return res if res != shared.sd_default_config else None
+
+ if config_source == 0:
+ cfg = config(a) or config(b) or config(c)
+ elif config_source == 1:
+ cfg = config(b)
+ elif config_source == 2:
+ cfg = config(c)
+ else:
+ cfg = None
+
+ if cfg is None:
+ return
+
+ filename, _ = os.path.splitext(ckpt_result)
+ checkpoint_filename = filename + ".yaml"
+
+ print("Copying config:")
+ print(" from:", cfg)
+ print(" to:", checkpoint_filename)
+ shutil.copyfile(cfg, checkpoint_filename)
+
+
+checkpoint_dict_skip_on_merge = ["cond_stage_model.transformer.text_model.embeddings.position_ids"]
+
+
+def to_half(tensor, enable):
+ if enable and tensor.dtype == torch.float:
+ return tensor.half()
+
+ return tensor
+
+
+def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights):
+ shared.state.begin()
+ shared.state.job = 'model-merge'
+
+ def fail(message):
+ shared.state.textinfo = message
+ shared.state.end()
+ return [*[gr.update() for _ in range(4)], message]
+
def weighted_sum(theta0, theta1, alpha):
return ((1 - alpha) * theta0) + (alpha * theta1)
@@ -251,43 +89,94 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
def add_difference(theta0, theta1_2_diff, alpha):
return theta0 + (alpha * theta1_2_diff)
- primary_model_info = sd_models.checkpoints_list[primary_model_name]
- secondary_model_info = sd_models.checkpoints_list[secondary_model_name]
- tertiary_model_info = sd_models.checkpoints_list.get(tertiary_model_name, None)
- result_is_inpainting_model = False
+ def filename_weighted_sum():
+ a = primary_model_info.model_name
+ b = secondary_model_info.model_name
+ Ma = round(1 - multiplier, 2)
+ Mb = round(multiplier, 2)
+
+ return f"{Ma}({a}) + {Mb}({b})"
+
+ def filename_add_difference():
+ a = primary_model_info.model_name
+ b = secondary_model_info.model_name
+ c = tertiary_model_info.model_name
+ M = round(multiplier, 2)
+
+ return f"{a} + {M}({b} - {c})"
+
+ def filename_nothing():
+ return primary_model_info.model_name
theta_funcs = {
- "Weighted sum": (None, weighted_sum),
- "Add difference": (get_difference, add_difference),
+ "Weighted sum": (filename_weighted_sum, None, weighted_sum),
+ "Add difference": (filename_add_difference, get_difference, add_difference),
+ "No interpolation": (filename_nothing, None, None),
}
- theta_func1, theta_func2 = theta_funcs[interp_method]
+ filename_generator, theta_func1, theta_func2 = theta_funcs[interp_method]
+ shared.state.job_count = (1 if theta_func1 else 0) + (1 if theta_func2 else 0)
+
+ if not primary_model_name:
+ return fail("Failed: Merging requires a primary model.")
- if theta_func1 and not tertiary_model_info:
- return ["Failed: Interpolation method requires a tertiary model."] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)]
+ primary_model_info = sd_models.checkpoints_list[primary_model_name]
+
+ if theta_func2 and not secondary_model_name:
+ return fail("Failed: Merging requires a secondary model.")
+
+ secondary_model_info = sd_models.checkpoints_list[secondary_model_name] if theta_func2 else None
- print(f"Loading {secondary_model_info.filename}...")
- theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
+ if theta_func1 and not tertiary_model_name:
+ return fail(f"Failed: Interpolation method ({interp_method}) requires a tertiary model.")
+
+ tertiary_model_info = sd_models.checkpoints_list[tertiary_model_name] if theta_func1 else None
+
+ result_is_inpainting_model = False
+ result_is_instruct_pix2pix_model = False
+
+ if theta_func2:
+ shared.state.textinfo = f"Loading B"
+ print(f"Loading {secondary_model_info.filename}...")
+ theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
+ else:
+ theta_1 = None
if theta_func1:
+ shared.state.textinfo = f"Loading C"
print(f"Loading {tertiary_model_info.filename}...")
theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu')
+ shared.state.textinfo = 'Merging B and C'
+ shared.state.sampling_steps = len(theta_1.keys())
for key in tqdm.tqdm(theta_1.keys()):
+ if key in checkpoint_dict_skip_on_merge:
+ continue
+
if 'model' in key:
if key in theta_2:
t2 = theta_2.get(key, torch.zeros_like(theta_1[key]))
theta_1[key] = theta_func1(theta_1[key], t2)
else:
theta_1[key] = torch.zeros_like(theta_1[key])
+
+ shared.state.sampling_step += 1
del theta_2
+ shared.state.nextjob()
+
+ shared.state.textinfo = f"Loading {primary_model_info.filename}..."
print(f"Loading {primary_model_info.filename}...")
theta_0 = sd_models.read_state_dict(primary_model_info.filename, map_location='cpu')
print("Merging...")
-
+ shared.state.textinfo = 'Merging A and B'
+ shared.state.sampling_steps = len(theta_0.keys())
for key in tqdm.tqdm(theta_0.keys()):
- if 'model' in key and key in theta_1:
+ if theta_1 and 'model' in key and key in theta_1:
+
+ if key in checkpoint_dict_skip_on_merge:
+ continue
+
a = theta_0[key]
b = theta_1[key]
@@ -297,39 +186,59 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
if a.shape != b.shape and a.shape[0:1] + a.shape[2:] == b.shape[0:1] + b.shape[2:]:
if a.shape[1] == 4 and b.shape[1] == 9:
raise RuntimeError("When merging inpainting model with a normal one, A must be the inpainting model.")
+ if a.shape[1] == 4 and b.shape[1] == 8:
+ raise RuntimeError("When merging instruct-pix2pix model with a normal one, A must be the instruct-pix2pix model.")
- assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
-
- theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
- result_is_inpainting_model = True
+ if a.shape[1] == 8 and b.shape[1] == 4:  # if we have an Instruct-Pix2Pix model...
+ theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)  # merge only the vectors the models have in common, otherwise we get an error due to dimension mismatch
+ result_is_instruct_pix2pix_model = True
+ else:
+ assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
+ theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
+ result_is_inpainting_model = True
else:
theta_0[key] = theta_func2(a, b, multiplier)
+
+ theta_0[key] = to_half(theta_0[key], save_as_half)
- if save_as_half:
- theta_0[key] = theta_0[key].half()
+ shared.state.sampling_step += 1
- # I believe this part should be discarded, but I'll leave it for now until I am sure
- for key in theta_1.keys():
- if 'model' in key and key not in theta_0:
- theta_0[key] = theta_1[key]
- if save_as_half:
- theta_0[key] = theta_0[key].half()
del theta_1
- ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path
+ bake_in_vae_filename = sd_vae.vae_dict.get(bake_in_vae, None)
+ if bake_in_vae_filename is not None:
+ print(f"Baking in VAE from {bake_in_vae_filename}")
+ shared.state.textinfo = 'Baking in VAE'
+ vae_dict = sd_vae.load_vae_dict(bake_in_vae_filename, map_location='cpu')
+
+ for key in vae_dict.keys():
+ theta_0_key = 'first_stage_model.' + key
+ if theta_0_key in theta_0:
+ theta_0[theta_0_key] = to_half(vae_dict[key], save_as_half)
+
+ del vae_dict
+
+ if save_as_half and not theta_func2:
+ for key in theta_0.keys():
+ theta_0[key] = to_half(theta_0[key], save_as_half)
- filename = \
- primary_model_info.model_name + '_' + str(round(1-multiplier, 2)) + '-' + \
- secondary_model_info.model_name + '_' + str(round(multiplier, 2)) + '-' + \
- interp_method.replace(" ", "_") + \
- '-merged.' + \
- ("inpainting." if result_is_inpainting_model else "") + \
- checkpoint_format
+ if discard_weights:
+ regex = re.compile(discard_weights)
+ for key in list(theta_0):
+ if re.search(regex, key):
+ theta_0.pop(key, None)
- filename = filename if custom_name == '' else (custom_name + '.' + checkpoint_format)
+ ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path
+
+ filename = filename_generator() if custom_name == '' else custom_name
+ filename += ".inpainting" if result_is_inpainting_model else ""
+ filename += ".instruct-pix2pix" if result_is_instruct_pix2pix_model else ""
+ filename += "." + checkpoint_format
output_modelname = os.path.join(ckpt_dir, filename)
+ shared.state.nextjob()
+ shared.state.textinfo = "Saving"
print(f"Saving to {output_modelname}...")
_, extension = os.path.splitext(output_modelname)
@@ -340,5 +249,10 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
sd_models.list_models()
- print("Checkpoint saved.")
- return ["Checkpoint saved to " + output_modelname] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)]
+ create_config(output_modelname, config_source, primary_model_info, secondary_model_info, tertiary_model_info)
+
+ print(f"Checkpoint saved to {output_modelname}.")
+ shared.state.textinfo = "Checkpoint saved"
+ shared.state.end()
+
+ return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], "Checkpoint saved to " + output_modelname]
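For reference, a sketch of the filenames the generator closures above produce when custom_name is empty; the model names and multiplier are illustrative only:

multiplier = 0.3                         # illustrative
a, b, c = "modelA", "modelB", "modelC"   # illustrative model_name values
Ma, Mb = round(1 - multiplier, 2), round(multiplier, 2)
print(f"{Ma}({a}) + {Mb}({b})")          # Weighted sum   -> "0.7(modelA) + 0.3(modelB)"
print(f"{a} + {Mb}({b} - {c})")          # Add difference -> "modelA + 0.3(modelB - modelC)"
# ".inpainting" / ".instruct-pix2pix" and the checkpoint_format extension are then appended as needed.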
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index fbd91300..773c5c0e 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -1,12 +1,13 @@
import base64
import io
+import math
import os
import re
from pathlib import Path
import gradio as gr
-from modules.shared import script_path
-from modules import shared
+from modules.paths import data_path
+from modules import shared, ui_tempdir, script_callbacks
import tempfile
from PIL import Image
@@ -36,9 +37,15 @@ def quote(text):
def image_from_url_text(filedata):
- if type(filedata) == dict and filedata["is_file"]:
+ if filedata is None:
+ return None
+
+ if type(filedata) == list and len(filedata) > 0 and type(filedata[0]) == dict and filedata[0].get("is_file", False):
+ filedata = filedata[0]
+
+ if type(filedata) == dict and filedata.get("is_file", False):
filename = filedata["name"]
- is_in_right_dir = any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in shared.demo.temp_dirs)
+ is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename)
assert is_in_right_dir, 'trying to open image file outside of allowed directories'
return Image.open(filename)
@@ -72,8 +79,6 @@ def integrate_settings_paste_fields(component_dict):
from modules import ui
settings_map = {
- 'sd_hypernetwork': 'Hypernet',
- 'sd_hypernetwork_strength': 'Hypernet strength',
'CLIP_stop_at_last_layers': 'Clip skip',
'inpainting_mask_weight': 'Conditional mask weight',
'sd_model_checkpoint': 'Model hash',
@@ -93,7 +98,7 @@ def integrate_settings_paste_fields(component_dict):
def create_buttons(tabs_list):
buttons = {}
for tab in tabs_list:
- buttons[tab] = gr.Button(f"Send to {tab}")
+ buttons[tab] = gr.Button(f"Send to {tab}", elem_id=f"{tab}_tab")
return buttons
@@ -102,35 +107,57 @@ def bind_buttons(buttons, send_image, send_generate_info):
bind_list.append([buttons, send_image, send_generate_info])
+def send_image_and_dimensions(x):
+ if isinstance(x, Image.Image):
+ img = x
+ else:
+ img = image_from_url_text(x)
+
+ if shared.opts.send_size and isinstance(img, Image.Image):
+ w = img.width
+ h = img.height
+ else:
+ w = gr.update()
+ h = gr.update()
+
+ return img, w, h
+
+
def run_bind():
- for buttons, send_image, send_generate_info in bind_list:
+ for buttons, source_image_component, send_generate_info in bind_list:
for tab in buttons:
button = buttons[tab]
- if send_image and paste_fields[tab]["init_img"]:
- if type(send_image) == gr.Gallery:
- button.click(
- fn=lambda x: image_from_url_text(x),
- _js="extract_image_from_gallery",
- inputs=[send_image],
- outputs=[paste_fields[tab]["init_img"]],
- )
+ destination_image_component = paste_fields[tab]["init_img"]
+ fields = paste_fields[tab]["fields"]
+
+ destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None)
+ destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)
+
+ if source_image_component and destination_image_component:
+ if isinstance(source_image_component, gr.Gallery):
+ func = send_image_and_dimensions if destination_width_component else image_from_url_text
+ jsfunc = "extract_image_from_gallery"
else:
- button.click(
- fn=lambda x: x,
- inputs=[send_image],
- outputs=[paste_fields[tab]["init_img"]],
- )
+ func = send_image_and_dimensions if destination_width_component else lambda x: x
+ jsfunc = None
- if send_generate_info and paste_fields[tab]["fields"] is not None:
+ button.click(
+ fn=func,
+ _js=jsfunc,
+ inputs=[source_image_component],
+ outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
+ )
+
+ if send_generate_info and fields is not None:
if send_generate_info in paste_fields:
- paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (['Size-1', 'Size-2'] if shared.opts.send_size else []) + (["Seed"] if shared.opts.send_seed else [])
+ paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else [])
button.click(
fn=lambda *x: x,
inputs=[field for field, name in paste_fields[send_generate_info]["fields"] if name in paste_field_names],
- outputs=[field for field, name in paste_fields[tab]["fields"] if name in paste_field_names],
+ outputs=[field for field, name in fields if name in paste_field_names],
)
else:
- connect_paste(button, paste_fields[tab]["fields"], send_generate_info)
+ connect_paste(button, fields, send_generate_info)
button.click(
fn=None,
@@ -164,6 +191,39 @@ def find_hypernetwork_key(hypernet_name, hypernet_hash=None):
return None
+def restore_old_hires_fix_params(res):
+ """for infotexts that specify old First pass size parameter, convert it into
+ width, height, and hr scale"""
+
+ firstpass_width = res.get('First pass size-1', None)
+ firstpass_height = res.get('First pass size-2', None)
+
+ if shared.opts.use_old_hires_fix_width_height:
+ hires_width = int(res.get("Hires resize-1", 0))
+ hires_height = int(res.get("Hires resize-2", 0))
+
+ if hires_width and hires_height:
+ res['Size-1'] = hires_width
+ res['Size-2'] = hires_height
+ return
+
+ if firstpass_width is None or firstpass_height is None:
+ return
+
+ firstpass_width, firstpass_height = int(firstpass_width), int(firstpass_height)
+ width = int(res.get("Size-1", 512))
+ height = int(res.get("Size-2", 512))
+
+ if firstpass_width == 0 or firstpass_height == 0:
+ from modules import processing
+ firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height)
+
+ res['Size-1'] = firstpass_width
+ res['Size-2'] = firstpass_height
+ res['Hires resize-1'] = width
+ res['Hires resize-2'] = height
+
+
def parse_generation_parameters(x: str):
"""parses generation parameters string, the one you see in text field under the picture in UI:
```
@@ -213,13 +273,15 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "Clip skip" not in res:
res["Clip skip"] = "1"
- if "Hypernet strength" not in res:
- res["Hypernet strength"] = "1"
+ hypernet = res.get("Hypernet", None)
+ if hypernet is not None:
+ res["Prompt"] += f"""<hypernet:{hypernet}:{res.get("Hypernet strength", "1.0")}>"""
+
+ if "Hires resize-1" not in res:
+ res["Hires resize-1"] = 0
+ res["Hires resize-2"] = 0
- if "Hypernet" in res:
- hypernet_name = res["Hypernet"]
- hypernet_hash = res.get("Hypernet hash", None)
- res["Hypernet"] = find_hypernetwork_key(hypernet_name, hypernet_hash)
+ restore_old_hires_fix_params(res)
return res
@@ -227,12 +289,13 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
def connect_paste(button, paste_fields, input_comp, jsfunc=None):
def paste_func(prompt):
if not prompt and not shared.cmd_opts.hide_ui_dir_config:
- filename = os.path.join(script_path, "params.txt")
+ filename = os.path.join(data_path, "params.txt")
if os.path.exists(filename):
with open(filename, "r", encoding="utf8") as file:
prompt = file.read()
params = parse_generation_parameters(prompt)
+ script_callbacks.infotext_pasted_callback(prompt, params)
res = []
for output, key in paste_fields:
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index 1e2dbc32..fbe6215a 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -6,12 +6,11 @@ import facexlib
import gfpgan
import modules.face_restoration
-from modules import shared, devices, modelloader
-from modules.paths import models_path
+from modules import paths, shared, devices, modelloader
model_dir = "GFPGAN"
user_path = None
-model_path = os.path.join(models_path, model_dir)
+model_path = os.path.join(paths.models_path, model_dir)
model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
have_gfpgan = False
loaded_gfpgan_model = None
diff --git a/modules/hashes.py b/modules/hashes.py
new file mode 100644
index 00000000..819362a3
--- /dev/null
+++ b/modules/hashes.py
@@ -0,0 +1,87 @@
+import hashlib
+import json
+import os.path
+
+import filelock
+
+from modules.paths import data_path
+
+
+cache_filename = os.path.join(data_path, "cache.json")
+cache_data = None
+
+
+def dump_cache():
+ with filelock.FileLock(cache_filename+".lock"):
+ with open(cache_filename, "w", encoding="utf8") as file:
+ json.dump(cache_data, file, indent=4)
+
+
+def cache(subsection):
+ global cache_data
+
+ if cache_data is None:
+ with filelock.FileLock(cache_filename+".lock"):
+ if not os.path.isfile(cache_filename):
+ cache_data = {}
+ else:
+ with open(cache_filename, "r", encoding="utf8") as file:
+ cache_data = json.load(file)
+
+ s = cache_data.get(subsection, {})
+ cache_data[subsection] = s
+
+ return s
+
+
+def calculate_sha256(filename):
+ hash_sha256 = hashlib.sha256()
+ blksize = 1024 * 1024
+
+ with open(filename, "rb") as f:
+ for chunk in iter(lambda: f.read(blksize), b""):
+ hash_sha256.update(chunk)
+
+ return hash_sha256.hexdigest()
+
+
+def sha256_from_cache(filename, title):
+ hashes = cache("hashes")
+ ondisk_mtime = os.path.getmtime(filename)
+
+ if title not in hashes:
+ return None
+
+ cached_sha256 = hashes[title].get("sha256", None)
+ cached_mtime = hashes[title].get("mtime", 0)
+
+ if ondisk_mtime > cached_mtime or cached_sha256 is None:
+ return None
+
+ return cached_sha256
+
+
+def sha256(filename, title):
+ hashes = cache("hashes")
+
+ sha256_value = sha256_from_cache(filename, title)
+ if sha256_value is not None:
+ return sha256_value
+
+ print(f"Calculating sha256 for {filename}: ", end='')
+ sha256_value = calculate_sha256(filename)
+ print(f"{sha256_value}")
+
+ hashes[title] = {
+ "mtime": os.path.getmtime(filename),
+ "sha256": sha256_value,
+ }
+
+ dump_cache()
+
+ return sha256_value
+
+
+
+
+
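A short usage sketch of the cache above (the file path and title are illustrative; the "hypernet/<name>" title convention matches the shorthash() helper added to hypernetworks further below):

from modules import hashes

sha = hashes.sha256("models/hypernetworks/agm.pt", "hypernet/agm")
short = sha[0:10]   # 10-character short hash
# Subsequent calls return the cached digest until the file's mtime changes,
# at which point the sha256 is recomputed and cache.json is rewritten.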
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 109e8078..503534e2 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -12,8 +12,8 @@ import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
-from modules import devices, processing, sd_models, shared, sd_samplers
-from modules.textual_inversion import textual_inversion
+from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint
+from modules.textual_inversion import textual_inversion, logging
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
@@ -25,7 +25,6 @@ from statistics import stdev, mean
optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
class HypernetworkModule(torch.nn.Module):
- multiplier = 1.0
activation_dict = {
"linear": torch.nn.Identity,
"relu": torch.nn.ReLU,
@@ -38,9 +37,11 @@ class HypernetworkModule(torch.nn.Module):
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
- add_layer_norm=False, use_dropout=False, activate_output=False, last_layer_dropout=False):
+ add_layer_norm=False, activate_output=False, dropout_structure=None):
super().__init__()
+ self.multiplier = 1.0
+
assert layer_structure is not None, "layer_structure must not be None"
assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"
@@ -63,9 +64,12 @@ class HypernetworkModule(torch.nn.Module):
if add_layer_norm:
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
- # Add dropout except last layer
- if use_dropout and (i < len(layer_structure) - 3 or last_layer_dropout and i < len(layer_structure) - 2):
- linears.append(torch.nn.Dropout(p=0.3))
+ # Everything should be now parsed into dropout structure, and applied here.
+ # Since we only have dropouts after layers, dropout structure should start with 0 and end with 0.
+ if dropout_structure is not None and dropout_structure[i+1] > 0:
+ assert 0 < dropout_structure[i+1] < 1, "Dropout probability should be 0 or float between 0 and 1!"
+ linears.append(torch.nn.Dropout(p=dropout_structure[i+1]))
+ # For example: [1, 2, 1] has no inner layer, so no dropout is applied; [1, 2, 2, 1] -> [0, 0.3, 0, 0] when last_layer_dropout is False, and [0, 0.3, 0.3, 0] when it is True.
self.linear = torch.nn.Sequential(*linears)
@@ -112,7 +116,7 @@ class HypernetworkModule(torch.nn.Module):
state_dict[to] = x
def forward(self, x):
- return x + self.linear(x) * self.multiplier
+ return x + self.linear(x) * (self.multiplier if not self.training else 1)
def trainables(self):
layer_structure = []
@@ -122,8 +126,20 @@ class HypernetworkModule(torch.nn.Module):
return layer_structure
-def apply_strength(value=None):
- HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength
+# params: layer_structure - sequence used for its length; use_dropout - controlling boolean; last_layer_dropout - kept for compatibility checks.
+def parse_dropout_structure(layer_structure, use_dropout, last_layer_dropout):
+ if layer_structure is None:
+ layer_structure = [1, 2, 1]
+ if not use_dropout:
+ return [0] * len(layer_structure)
+ dropout_values = [0]
+ dropout_values.extend([0.3] * (len(layer_structure) - 3))
+ if last_layer_dropout:
+ dropout_values.append(0.3)
+ else:
+ dropout_values.append(0)
+ dropout_values.append(0)
+ return dropout_values
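A small sanity-check sketch of the mapping described above (assuming the function is importable from modules.hypernetworks.hypernetwork):

from modules.hypernetworks.hypernetwork import parse_dropout_structure

assert parse_dropout_structure([1, 2, 1], True, False) == [0, 0, 0]          # no inner layer to drop
assert parse_dropout_structure([1, 2, 2, 1], True, False) == [0, 0.3, 0, 0]
assert parse_dropout_structure([1, 2, 2, 1], True, True) == [0, 0.3, 0.3, 0]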
class Hypernetwork:
@@ -143,18 +159,22 @@ class Hypernetwork:
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
self.activate_output = activate_output
- self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True
+ self.last_layer_dropout = kwargs.get('last_layer_dropout', True)
+ self.dropout_structure = kwargs.get('dropout_structure', None)
+ if self.dropout_structure is None:
+ self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout)
self.optimizer_name = None
self.optimizer_state_dict = None
+ self.optional_info = None
for size in enable_sizes or []:
self.layers[size] = (
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
- self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
+ self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure),
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
- self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
+ self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure),
)
- self.eval_mode()
+ self.eval()
def weights(self):
res = []
@@ -163,14 +183,28 @@ class Hypernetwork:
res += layer.parameters()
return res
- def train_mode(self):
+ def train(self, mode=True):
for k, layers in self.layers.items():
for layer in layers:
- layer.train()
+ layer.train(mode=mode)
for param in layer.parameters():
- param.requires_grad = True
+ param.requires_grad = mode
+
+ def to(self, device):
+ for k, layers in self.layers.items():
+ for layer in layers:
+ layer.to(device)
+
+ return self
- def eval_mode(self):
+ def set_multiplier(self, multiplier):
+ for k, layers in self.layers.items():
+ for layer in layers:
+ layer.multiplier = multiplier
+
+ return self
+
+ def eval(self):
for k, layers in self.layers.items():
for layer in layers:
layer.eval()
@@ -190,18 +224,20 @@ class Hypernetwork:
state_dict['activation_func'] = self.activation_func
state_dict['is_layer_norm'] = self.add_layer_norm
state_dict['weight_initialization'] = self.weight_init
- state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
state_dict['activate_output'] = self.activate_output
- state_dict['last_layer_dropout'] = self.last_layer_dropout
+ state_dict['use_dropout'] = self.use_dropout
+ state_dict['dropout_structure'] = self.dropout_structure
+ state_dict['last_layer_dropout'] = (self.dropout_structure[-2] != 0) if self.dropout_structure is not None else self.last_layer_dropout
+ state_dict['optional_info'] = self.optional_info if self.optional_info else None
if self.optimizer_name is not None:
optimizer_saved_dict['optimizer_name'] = self.optimizer_name
torch.save(state_dict, filename)
if shared.opts.save_optimizer_state and self.optimizer_state_dict:
- optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
+ optimizer_saved_dict['hash'] = self.shorthash()
optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
torch.save(optimizer_saved_dict, filename + '.optim')
@@ -213,44 +249,65 @@ class Hypernetwork:
state_dict = torch.load(filename, map_location='cpu')
self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
- print(self.layer_structure)
+ self.optional_info = state_dict.get('optional_info', None)
self.activation_func = state_dict.get('activation_func', None)
- print(f"Activation function is {self.activation_func}")
self.weight_init = state_dict.get('weight_initialization', 'Normal')
- print(f"Weight initialization is {self.weight_init}")
self.add_layer_norm = state_dict.get('is_layer_norm', False)
- print(f"Layer norm is set to {self.add_layer_norm}")
- self.use_dropout = state_dict.get('use_dropout', False)
- print(f"Dropout usage is set to {self.use_dropout}" )
+ self.dropout_structure = state_dict.get('dropout_structure', None)
+ self.use_dropout = True if self.dropout_structure is not None and any(self.dropout_structure) else state_dict.get('use_dropout', False)
self.activate_output = state_dict.get('activate_output', True)
- print(f"Activate last layer is set to {self.activate_output}")
self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
+ # Dropout structure should have the same length as the layer structure; every entry should be in [0, 1), and the last entry must be 0.
+ if self.dropout_structure is None:
+ self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout)
+
+ if shared.opts.print_hypernet_extra:
+ if self.optional_info is not None:
+ print(f" INFO:\n {self.optional_info}\n")
+
+ print(f" Layer structure: {self.layer_structure}")
+ print(f" Activation function: {self.activation_func}")
+ print(f" Weight initialization: {self.weight_init}")
+ print(f" Layer norm: {self.add_layer_norm}")
+ print(f" Dropout usage: {self.use_dropout}" )
+ print(f" Activate last layer: {self.activate_output}")
+ print(f" Dropout structure: {self.dropout_structure}")
+
+ optimizer_saved_dict = torch.load(self.filename + '.optim', map_location='cpu') if os.path.exists(self.filename + '.optim') else {}
- optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
- self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
- print(f"Optimizer name is {self.optimizer_name}")
- if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
+ if self.shorthash() == optimizer_saved_dict.get('hash', None):
self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
else:
self.optimizer_state_dict = None
if self.optimizer_state_dict:
- print("Loaded existing optimizer from checkpoint")
+ self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
+ if shared.opts.print_hypernet_extra:
+ print("Loaded existing optimizer from checkpoint")
+ print(f"Optimizer name is {self.optimizer_name}")
else:
- print("No saved optimizer exists in checkpoint")
+ self.optimizer_name = "AdamW"
+ if shared.opts.print_hypernet_extra:
+ print("No saved optimizer exists in checkpoint")
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
- self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
+ self.add_layer_norm, self.activate_output, self.dropout_structure),
HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
- self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
+ self.add_layer_norm, self.activate_output, self.dropout_structure),
)
self.name = state_dict.get('name', self.name)
self.step = state_dict.get('step', 0)
self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
+ self.eval()
+
+ def shorthash(self):
+ sha256 = hashes.sha256(self.filename, f'hypernet/{self.name}')
+
+ return sha256[0:10]
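For orientation, a minimal sketch of what the parse_dropout_structure helper referenced above plausibly does, assuming only the constraint spelled out in the comment (one value per layer, each in [0, 1), last entry 0); the helper body and the 0.3 default rate below are assumptions, not taken from this diff:

def parse_dropout_structure_sketch(layer_structure, use_dropout, last_layer_dropout, rate=0.3):
    # Hypothetical stand-in for parse_dropout_structure; returns one dropout value per layer.
    if layer_structure is None:
        layer_structure = [1, 2, 1]
    if not use_dropout:
        return [0] * len(layer_structure)
    dropout = [0]                                      # input layer is never dropped
    dropout += [rate] * (len(layer_structure) - 3)     # hidden layers
    dropout.append(rate if last_layer_dropout else 0)  # layer just before the output
    dropout.append(0)                                  # output layer must stay 0
    return dropout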
def list_hypernetworks(path):
@@ -259,27 +316,47 @@ def list_hypernetworks(path):
name = os.path.splitext(os.path.basename(filename))[0]
# Prevent a hypothetical "None.pt" from being listed.
if name != "None":
- res[name + f"({sd_models.model_hash(filename)})"] = filename
+ res[name] = filename
return res
-def load_hypernetwork(filename):
- path = shared.hypernetworks.get(filename, None)
- # Prevent any file named "None.pt" from being loaded.
- if path is not None and filename != "None":
- print(f"Loading hypernetwork {filename}")
- try:
- shared.loaded_hypernetwork = Hypernetwork()
- shared.loaded_hypernetwork.load(path)
+def load_hypernetwork(name):
+ path = shared.hypernetworks.get(name, None)
- except Exception:
- print(f"Error loading hypernetwork {path}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
- else:
- if shared.loaded_hypernetwork is not None:
- print("Unloading hypernetwork")
+ if path is None:
+ return None
+
+ hypernetwork = Hypernetwork()
+
+ try:
+ hypernetwork.load(path)
+ except Exception:
+ print(f"Error loading hypernetwork {path}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ return None
+
+ return hypernetwork
+
+
+def load_hypernetworks(names, multipliers=None):
+ already_loaded = {}
- shared.loaded_hypernetwork = None
+ for hypernetwork in shared.loaded_hypernetworks:
+ if hypernetwork.name in names:
+ already_loaded[hypernetwork.name] = hypernetwork
+
+ shared.loaded_hypernetworks.clear()
+
+ for i, name in enumerate(names):
+ hypernetwork = already_loaded.get(name, None)
+ if hypernetwork is None:
+ hypernetwork = load_hypernetwork(name)
+
+ if hypernetwork is None:
+ continue
+
+ hypernetwork.set_multiplier(multipliers[i] if multipliers else 1.0)
+ shared.loaded_hypernetworks.append(hypernetwork)
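As a usage sketch (load_hypernetworks comes from this diff; the hypernetwork names and multiplier values below are made up), loading two hypernetworks at different strengths would look like:

load_hypernetworks(["style_a", "style_b"], multipliers=[1.0, 0.5])
# shared.loaded_hypernetworks now holds both; already-loaded networks are reused,
# and each one gets its own multiplier via set_multiplier().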
def find_closest_hypernetwork_name(search: str):
@@ -293,18 +370,27 @@ def find_closest_hypernetwork_name(search: str):
return applicable[0]
-def apply_hypernetwork(hypernetwork, context, layer=None):
- hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
+def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None):
+ hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None)
if hypernetwork_layers is None:
- return context, context
+ return context_k, context_v
if layer is not None:
layer.hyper_k = hypernetwork_layers[0]
layer.hyper_v = hypernetwork_layers[1]
- context_k = hypernetwork_layers[0](context)
- context_v = hypernetwork_layers[1](context)
+ context_k = hypernetwork_layers[0](context_k)
+ context_v = hypernetwork_layers[1](context_v)
+ return context_k, context_v
+
+
+def apply_hypernetworks(hypernetworks, context, layer=None):
+ context_k = context
+ context_v = context
+ for hypernetwork in hypernetworks:
+ context_k, context_v = apply_single_hypernetwork(hypernetwork, context_k, context_v, layer)
+
return context_k, context_v
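To illustrate the chaining that apply_hypernetworks introduces, here is a standalone sketch (not the webui API; the Linear pairs merely stand in for a hypernetwork's per-dimension key/value modules): each hypernetwork transforms the running key and value contexts in turn.

import torch

def chain_contexts(context, layer_pairs):
    # layer_pairs: one (to_k, to_v) module pair per loaded hypernetwork
    context_k, context_v = context, context
    for to_k, to_v in layer_pairs:
        context_k = to_k(context_k)
        context_v = to_v(context_v)
    return context_k, context_v

ctx = torch.randn(1, 77, 768)
pairs = [(torch.nn.Linear(768, 768), torch.nn.Linear(768, 768)) for _ in range(2)]
k, v = chain_contexts(ctx, pairs)
print(k.shape, v.shape)  # both torch.Size([1, 77, 768])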
@@ -314,7 +400,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None):
q = self.to_q(x)
context = default(context, x)
- context_k, context_v = apply_hypernetwork(shared.loaded_hypernetwork, context, self)
+ context_k, context_v = apply_hypernetworks(shared.loaded_hypernetworks, context, self)
k = self.to_k(context_k)
v = self.to_v(context_v)
@@ -378,9 +464,10 @@ def report_statistics(loss_info:dict):
print(e)
-def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
+def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
# Remove illegal characters from name.
name = "".join( x for x in name if (x.isalnum() or x in "._- "))
+ assert name, "Name cannot be empty!"
fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt")
if not overwrite_old:
@@ -389,6 +476,11 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
if type(layer_structure) == str:
layer_structure = [float(x.strip()) for x in layer_structure.split(",")]
+ if use_dropout and dropout_structure and type(dropout_structure) == str:
+ dropout_structure = [float(x.strip()) for x in dropout_structure.split(",")]
+ else:
+ dropout_structure = [0] * len(layer_structure)
+
hypernet = modules.hypernetworks.hypernetwork.Hypernetwork(
name=name,
enable_sizes=[int(x) for x in enable_sizes],
@@ -397,26 +489,29 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
weight_init=weight_init,
add_layer_norm=add_layer_norm,
use_dropout=use_dropout,
+ dropout_structure=dropout_structure
)
hypernet.save(fn)
shared.reload_hypernetworks()
- return fn
-
-def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
from modules import images
save_hypernetwork_every = save_hypernetwork_every or 0
create_image_every = create_image_every or 0
- textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
+ template_file = textual_inversion.textual_inversion_templates.get(template_filename, None)
+ textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
+ template_file = template_file.path
path = shared.hypernetworks.get(hypernetwork_name, None)
- shared.loaded_hypernetwork = Hypernetwork()
- shared.loaded_hypernetwork.load(path)
+ hypernetwork = Hypernetwork()
+ hypernetwork.load(path)
+ shared.loaded_hypernetworks = [hypernetwork]
+ shared.state.job = "train-hypernetwork"
shared.state.textinfo = "Initializing hypernetwork training..."
shared.state.job_count = steps
@@ -438,7 +533,6 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
else:
images_dir = None
- hypernetwork = shared.loaded_hypernetwork
checkpoint = sd_models.select_checkpoint()
initial_step = hypernetwork.step or 0
@@ -447,14 +541,28 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
+
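+ # clip_grad_value_ clamps each gradient element to [-v, v]; clip_grad_norm_ rescales gradients so their global norm is at most v. The threshold v follows its own LearnRateScheduler below.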
+ clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None
+ if clip_grad:
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
+
+ if shared.opts.training_enable_tensorboard:
+ tensorboard_writer = textual_inversion.tensorboard_setup(log_directory)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
pin_memory = shared.opts.pin_memory
- ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
-
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)
+
+ if shared.opts.save_training_settings_to_txt:
+ saved_params = dict(
+ model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds),
+ **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]}
+ )
+ logging.save_settings_to_file(log_directory, {**saved_params, **locals()})
+
latent_sampling_method = ds.latent_sampling_method
dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
@@ -465,9 +573,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
shared.parallel_processing_allowed = False
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
-
+
weights = hypernetwork.weights()
- hypernetwork.train_mode()
+ hypernetwork.train()
# Here we use the optimizer from the saved HN, or it can be specified as a UI option.
if hypernetwork.optimizer_name in optimizer_dict:
@@ -496,6 +604,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
_loss_step = 0 #internal
# size = len(ds.indexes)
# loss_dict = defaultdict(lambda : deque(maxlen = 1024))
+ loss_logging = deque(maxlen=len(ds) * 3) # this should be a configurable parameter; it covers 3 epochs (3 * dataset size)
# losses = torch.zeros((size,))
# previous_mean_losses = [0]
# previous_mean_loss = 0
@@ -509,6 +618,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
pbar = tqdm.tqdm(total=steps - initial_step)
try:
+ sd_hijack_checkpoint.add()
+
for i in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
@@ -524,6 +635,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
if shared.state.interrupted:
break
+ if clip_grad:
+ clip_grad_sched.step(hypernetwork.step)
+
with devices.autocast():
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
if tag_drop_out != 0 or shuffle_tags:
@@ -538,14 +652,14 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
_loss_step += loss.item()
scaler.scale(loss).backward()
+
# go back until we reach gradient accumulation steps
if (j + 1) % gradient_step != 0:
continue
- # print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.7f}")
- # scaler.unscale_(optimizer)
- # print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}")
- # torch.nn.utils.clip_grad_norm_(weights, max_norm=1.0)
- # print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}")
+ loss_logging.append(_loss_step)
+ if clip_grad:
+ clip_grad(weights, clip_grad_sched.learn_rate)
+
scaler.step(optimizer)
scaler.update()
hypernetwork.step += 1
@@ -559,7 +673,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
epoch_num = hypernetwork.step // steps_per_epoch
epoch_step = hypernetwork.step % steps_per_epoch
- pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
+ description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}"
+ pbar.set_description(description)
if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
# Before saving, change name to match current checkpoint.
hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
@@ -570,6 +685,14 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
+
+
+ if shared.opts.training_enable_tensorboard:
+ epoch_num = hypernetwork.step // len(ds)
+ epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1
+ mean_loss = sum(loss_logging) / len(loss_logging)
+ textual_inversion.tensorboard_add(tensorboard_writer, loss=mean_loss, global_step=hypernetwork.step, step=epoch_step, learn_rate=scheduler.learn_rate, epoch_num=epoch_num)
+
textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, {
"loss": f"{loss_step:.7f}",
"learn_rate": scheduler.learn_rate
@@ -578,7 +701,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{hypernetwork_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
- hypernetwork.eval_mode()
+ hypernetwork.eval()
+ rng_state = torch.get_rng_state()
+ cuda_rng_state = None
+ if torch.cuda.is_available():
+ cuda_rng_state = torch.cuda.get_rng_state_all()
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
@@ -588,6 +715,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
do_not_save_samples=True,
)
+ p.disable_extra_networks = True
+
if preview_from_txt2img:
p.prompt = preview_prompt
p.negative_prompt = preview_negative_prompt
@@ -611,9 +740,16 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
- hypernetwork.train_mode()
+ torch.set_rng_state(rng_state)
+ if torch.cuda.is_available():
+ torch.cuda.set_rng_state_all(cuda_rng_state)
+ hypernetwork.train()
if image is not None:
- shared.state.current_image = image
+ shared.state.assign_current_image(image)
+ if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
+ textual_inversion.tensorboard_add_image(tensorboard_writer,
+ f"Validation at epoch {epoch_num}", image,
+ hypernetwork.step)
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
@@ -633,8 +769,11 @@ Last saved image: {html.escape(last_saved_image)}<br/>
finally:
pbar.leave = False
pbar.close()
- hypernetwork.eval_mode()
+ hypernetwork.eval()
#report_statistics(loss_dict)
+ sd_hijack_checkpoint.remove()
+
+
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
hypernetwork.optimizer_name = optimizer_name
@@ -655,7 +794,7 @@ def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None
try:
- hypernetwork.sd_checkpoint = checkpoint.hash
+ hypernetwork.sd_checkpoint = checkpoint.shorthash
hypernetwork.sd_checkpoint_name = checkpoint.model_name
hypernetwork.name = hypernetwork_name
hypernetwork.save(filename)
diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py
index e7f9e593..76599f5a 100644
--- a/modules/hypernetworks/ui.py
+++ b/modules/hypernetworks/ui.py
@@ -9,15 +9,15 @@ from modules import devices, sd_hijack, shared
not_available = ["hardswish", "multiheadattention"]
keys = list(x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
-def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
- filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout)
+
+def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
+ filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)
return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {filename}", ""
def train_hypernetwork(*args):
-
- initial_hypernetwork = shared.loaded_hypernetwork
+ shared.loaded_hypernetworks = []
assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible'
@@ -34,7 +34,6 @@ Hypernetwork saved to {html.escape(filename)}
except Exception:
raise
finally:
- shared.loaded_hypernetwork = initial_hypernetwork
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
sd_hijack.apply_optimizations()
diff --git a/modules/images.py b/modules/images.py
index 31d4528d..0bc3d524 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -39,11 +39,14 @@ def image_grid(imgs, batch_size=1, rows=None):
cols = math.ceil(len(imgs) / rows)
+ params = script_callbacks.ImageGridLoopParams(imgs, cols, rows)
+ script_callbacks.image_grid_callback(params)
+
w, h = imgs[0].size
- grid = Image.new('RGB', size=(cols * w, rows * h), color='black')
+ grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black')
- for i, img in enumerate(imgs):
- grid.paste(img, box=(i % cols * w, i // cols * h))
+ for i, img in enumerate(params.imgs):
+ grid.paste(img, box=(i % params.cols * w, i // params.cols * h))
return grid
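A sketch of how an extension might use this new hook; ImageGridLoopParams and image_grid_callback come from this diff, while the on_image_grid registration helper is assumed to exist in script_callbacks, following the naming pattern of its other callbacks:

from modules import script_callbacks

def single_row_grid(params):
    # params is a script_callbacks.ImageGridLoopParams; mutate it to reshape the grid
    params.rows = 1
    params.cols = len(params.imgs)

script_callbacks.on_image_grid(single_row_grid)  # assumed registration helper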
@@ -192,7 +195,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in
ver_texts]
- pad_top = max(hor_text_heights) + line_spacing * 2
+ pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2
result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
result.paste(im, (pad_left, pad_top))
@@ -227,16 +230,32 @@ def draw_prompt_matrix(im, width, height, all_prompts):
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
-def resize_image(resize_mode, im, width, height):
+def resize_image(resize_mode, im, width, height, upscaler_name=None):
+ """
+ Resizes an image with the specified resize_mode, width, and height.
+
+ Args:
+ resize_mode: The mode to use when resizing the image.
+ 0: Resize the image to the specified width and height.
+ 1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
+ 2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling the empty space with data from the image.
+ im: The image to resize.
+ width: The width to resize the image to.
+ height: The height to resize the image to.
+ upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img.
+ """
+
+ upscaler_name = upscaler_name or opts.upscaler_for_img2img
+
def resize(im, w, h):
- if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L':
+ if upscaler_name is None or upscaler_name == "None" or im.mode == 'L':
return im.resize((w, h), resample=LANCZOS)
scale = max(w / im.width, h / im.height)
if scale > 1.0:
- upscalers = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img]
- assert len(upscalers) > 0, f"could not find upscaler named {opts.upscaler_for_img2img}"
+ upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name]
+ assert len(upscalers) > 0, f"could not find upscaler named {upscaler_name}"
upscaler = upscalers[0]
im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
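A small usage sketch of the new optional parameter, assuming the webui environment and that an upscaler named "Lanczos" is installed: callers can now pick an upscaler explicitly instead of inheriting opts.upscaler_for_img2img.

from PIL import Image
from modules.images import resize_image

img = Image.new("RGB", (512, 512))
# resize_mode 1 = fill and crop; upscaler_name overrides opts.upscaler_for_img2img
resized = resize_image(1, img, 768, 512, upscaler_name="Lanczos")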
@@ -525,6 +544,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
elif extension.lower() in (".jpg", ".jpeg", ".webp"):
+ if image_to_save.mode == 'RGBA':
+ image_to_save = image_to_save.convert("RGB")
+
image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None:
@@ -583,8 +605,9 @@ def read_info_from_image(image):
except ValueError:
exif_comment = exif_comment.decode('utf8', errors="ignore")
- items['exif comment'] = exif_comment
- geninfo = exif_comment
+ if exif_comment:
+ items['exif comment'] = exif_comment
+ geninfo = exif_comment
for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
'loop', 'background', 'timestamp', 'duration']:
diff --git a/modules/img2img.py b/modules/img2img.py
index 81da4b13..fe9447c7 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -16,11 +16,16 @@ import modules.images as images
import modules.scripts
-def process_batch(p, input_dir, output_dir, args):
+def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processing.fix_seed(p)
images = shared.listfiles(input_dir)
+ inpaint_masks = shared.listfiles(inpaint_mask_dir)
+ is_inpaint_batch = inpaint_mask_dir and len(inpaint_masks) > 0
+ if is_inpaint_batch:
+ print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
+
print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
save_normally = output_dir == ''
@@ -43,6 +48,15 @@ def process_batch(p, input_dir, output_dir, args):
img = ImageOps.exif_transpose(img)
p.init_images = [img] * p.batch_size
+ if is_inpaint_batch:
+ # try to find corresponding mask for an image using simple filename matching
+ mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
+ # if not found use first one ("same mask for all images" use-case)
+ if not mask_image_path in inpaint_masks:
+ mask_image_path = inpaint_masks[0]
+ mask_image = Image.open(mask_image_path)
+ p.image_mask = mask_image
+
proc = modules.scripts.scripts_img2img.run(p, *args)
if proc is None:
proc = process_images(p)
@@ -59,38 +73,34 @@ def process_batch(p, input_dir, output_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_with_mask_orig, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):
- is_inpaint = mode == 1
- is_batch = mode == 2
-
- if is_inpaint:
- # Drawn mask
- if mask_mode == 0:
- is_mask_sketch = isinstance(init_img_with_mask, dict)
- is_mask_paint = not is_mask_sketch
- if is_mask_sketch:
- # Sketch: mask iff. not transparent
- image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
- alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
- mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
- else:
- # Color-sketch: mask iff. painted over
- image = init_img_with_mask
- orig = init_img_with_mask_orig or init_img_with_mask
- pred = np.any(np.array(image) != np.array(orig), axis=-1)
- mask = Image.fromarray(pred.astype(np.uint8) * 255, "L")
- mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
- blur = ImageFilter.GaussianBlur(mask_blur)
- image = Image.composite(image.filter(blur), orig, mask.filter(blur))
-
- image = image.convert("RGB")
- # Uploaded mask
- else:
- image = init_img_inpaint
- mask = init_mask_inpaint
- # No mask
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, *args):
+ is_batch = mode == 5
+
+ if mode == 0: # img2img
+ image = init_img.convert("RGB")
+ mask = None
+ elif mode == 1: # img2img sketch
+ image = sketch.convert("RGB")
+ mask = None
+ elif mode == 2: # inpaint
+ image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
+ alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
+ mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
+ image = image.convert("RGB")
+ elif mode == 3: # inpaint sketch
+ image = inpaint_color_sketch
+ orig = inpaint_color_sketch_orig or inpaint_color_sketch
+ pred = np.any(np.array(image) != np.array(orig), axis=-1)
+ mask = Image.fromarray(pred.astype(np.uint8) * 255, "L")
+ mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
+ blur = ImageFilter.GaussianBlur(mask_blur)
+ image = Image.composite(image.filter(blur), orig, mask.filter(blur))
+ image = image.convert("RGB")
+ elif mode == 4: # inpaint upload mask
+ image = init_img_inpaint
+ mask = init_mask_inpaint
else:
- image = init_img
+ image = None
mask = None
# Use the EXIF orientation of photos taken by smartphones.
@@ -105,7 +115,7 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
prompt=prompt,
negative_prompt=negative_prompt,
- styles=[prompt_style, prompt_style2],
+ styles=prompt_styles,
seed=seed,
subseed=subseed,
subseed_strength=subseed_strength,
@@ -143,7 +153,7 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
if is_batch:
assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
- process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)
+ process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)
processed = Processed(p, [], p.seed, "")
else:
@@ -162,4 +172,4 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
if opts.do_not_show_images:
processed.images = []
- return processed.images, generation_info_js, plaintext_to_html(processed.info)
+ return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)
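The mask-matching rule used in process_batch above boils down to: prefer a mask whose filename matches the image's, otherwise reuse the first mask for every image. A minimal sketch of that rule in isolation (pick_mask_for is a hypothetical helper, not part of this diff):

import os

def pick_mask_for(image_path, inpaint_masks, inpaint_mask_dir):
    candidate = os.path.join(inpaint_mask_dir, os.path.basename(image_path))
    # fall back to the first mask: the "same mask for all images" use-case
    return candidate if candidate in inpaint_masks else inpaint_masks[0]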
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 46935210..cbb80683 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -2,15 +2,17 @@ import os
import sys
import traceback
from collections import namedtuple
+from pathlib import Path
import re
import torch
+import torch.hub
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import modules.shared as shared
-from modules import devices, paths, lowvram, modelloader
+from modules import devices, paths, shared, lowvram, modelloader, errors
blip_image_eval_size = 384
clip_model_name = 'ViT-L/14'
@@ -19,30 +21,76 @@ Category = namedtuple("Category", ["name", "topn", "items"])
re_topn = re.compile(r"\.top(\d+)\.")
+def category_types():
+ return [f.stem for f in Path(shared.interrogator.content_dir).glob('*.txt')]
+
+
+def download_default_clip_interrogate_categories(content_dir):
+ print("Downloading CLIP categories...")
+
+ tmpdir = content_dir + "_tmp"
+ category_types = ["artists", "flavors", "mediums", "movements"]
+
+ try:
+ os.makedirs(tmpdir)
+ for category_type in category_types:
+ torch.hub.download_url_to_file(f"https://raw.githubusercontent.com/pharmapsychotic/clip-interrogator/main/clip_interrogator/data/{category_type}.txt", os.path.join(tmpdir, f"{category_type}.txt"))
+ os.rename(tmpdir, content_dir)
+
+ except Exception as e:
+ errors.display(e, "downloading default CLIP interrogate categories")
+ finally:
+ if os.path.exists(tmpdir):
+ os.remove(tmpdir)
+
class InterrogateModels:
blip_model = None
clip_model = None
clip_preprocess = None
- categories = None
dtype = None
running_on_cpu = None
def __init__(self, content_dir):
- self.categories = []
+ self.loaded_categories = None
+ self.skip_categories = []
+ self.content_dir = content_dir
self.running_on_cpu = devices.device_interrogate == torch.device("cpu")
- if os.path.exists(content_dir):
- for filename in os.listdir(content_dir):
- m = re_topn.search(filename)
- topn = 1 if m is None else int(m.group(1))
+ def categories(self):
+ if not os.path.exists(self.content_dir):
+ download_default_clip_interrogate_categories(self.content_dir)
+
+ if self.loaded_categories is not None and self.skip_categories == shared.opts.interrogate_clip_skip_categories:
+ return self.loaded_categories
- with open(os.path.join(content_dir, filename), "r", encoding="utf8") as file:
+ self.loaded_categories = []
+
+ if os.path.exists(self.content_dir):
+ self.skip_categories = shared.opts.interrogate_clip_skip_categories
+ category_types = []
+ for filename in Path(self.content_dir).glob('*.txt'):
+ category_types.append(filename.stem)
+ if filename.stem in self.skip_categories:
+ continue
+ m = re_topn.search(filename.stem)
+ topn = 1 if m is None else int(m.group(1))
+ with open(filename, "r", encoding="utf8") as file:
lines = [x.strip() for x in file.readlines()]
- self.categories.append(Category(name=filename, topn=topn, items=lines))
+ self.loaded_categories.append(Category(name=filename.stem, topn=topn, items=lines))
+
+ return self.loaded_categories
+
+ def create_fake_fairscale(self):
+ class FakeFairscale:
+ def checkpoint_wrapper(self):
+ pass
+
+ sys.modules["fairscale.nn.checkpoint.checkpoint_activations"] = FakeFairscale
def load_blip_model(self):
+ self.create_fake_fairscale()
import models.blip
files = modelloader.load_models(
@@ -106,6 +154,8 @@ class InterrogateModels:
def rank(self, image_features, text_array, top_count=1):
import clip
+ devices.torch_gc()
+
if shared.opts.interrogate_clip_dict_limit != 0:
text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
@@ -135,10 +185,10 @@ class InterrogateModels:
return caption[0]
def interrogate(self, pil_image):
- res = None
-
+ res = ""
+ shared.state.begin()
+ shared.state.job = 'interrogate'
try:
-
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
devices.torch_gc()
@@ -158,12 +208,7 @@ class InterrogateModels:
image_features /= image_features.norm(dim=-1, keepdim=True)
- if shared.opts.interrogate_use_builtin_artists:
- artist = self.rank(image_features, ["by " + artist.name for artist in shared.artist_db.artists])[0]
-
- res += ", " + artist[0]
-
- for name, topn, items in self.categories:
+ for name, topn, items in self.categories():
matches = self.rank(image_features, items, top_count=topn)
for match, score in matches:
if shared.opts.interrogate_return_ranks:
@@ -177,5 +222,6 @@ class InterrogateModels:
res += "<error>"
self.unload()
+ shared.state.end()
return res
diff --git a/modules/memmon.py b/modules/memmon.py
index 9fb9b687..a7060f58 100644
--- a/modules/memmon.py
+++ b/modules/memmon.py
@@ -71,10 +71,13 @@ class MemUsageMonitor(threading.Thread):
def read(self):
if not self.disabled:
free, total = torch.cuda.mem_get_info()
+ self.data["free"] = free
self.data["total"] = total
torch_stats = torch.cuda.memory_stats(self.device)
+ self.data["active"] = torch_stats["active.all.current"]
self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
+ self.data["reserved"] = torch_stats["reserved_bytes.all.current"]
self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
self.data["system_peak"] = total - self.data["min_free"]
diff --git a/modules/modelloader.py b/modules/modelloader.py
index e647f6fa..e9aa514e 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -10,7 +10,7 @@ from modules.upscaler import Upscaler
from modules.paths import script_path, models_path
-def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None) -> list:
+def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list:
"""
A one-and-done loader to try finding the desired models in the specified directories.
@@ -45,6 +45,8 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
full_path = file
if os.path.isdir(full_path):
continue
+ if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]):
+ continue
if len(ext_filter) != 0:
model_name, extension = os.path.splitext(file)
if extension not in ext_filter:
@@ -123,6 +125,23 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None):
pass
+builtin_upscaler_classes = []
+forbidden_upscaler_classes = set()
+
+
+def list_builtin_upscalers():
+ load_upscalers()
+
+ builtin_upscaler_classes.clear()
+ builtin_upscaler_classes.extend(Upscaler.__subclasses__())
+
+
+def forbid_loaded_nonbuiltin_upscalers():
+ for cls in Upscaler.__subclasses__():
+ if cls not in builtin_upscaler_classes:
+ forbidden_upscaler_classes.add(cls)
+
+
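The hunk above only defines the helpers; a plausible call order (an assumption based on the function names, since the actual startup wiring lives elsewhere in the webui) is:

from modules import modelloader

modelloader.list_builtin_upscalers()              # snapshot the Upscaler subclasses shipped with the webui
# ... extensions load here and may define additional Upscaler subclasses ...
modelloader.forbid_loaded_nonbuiltin_upscalers()  # blacklist anything added since the snapshot
modelloader.load_upscalers()                      # instantiate every subclass that is not forbidden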
def load_upscalers():
# We can only do this 'magic' method to dynamically load upscalers if they are referenced,
# so we'll try to import any _model.py files before looking in __subclasses__
@@ -139,6 +158,9 @@ def load_upscalers():
datas = []
commandline_options = vars(shared.cmd_opts)
for cls in Upscaler.__subclasses__():
+ if cls in forbidden_upscaler_classes:
+ continue
+
name = cls.__name__
cmd_name = f"{name.lower().replace('upscaler', '')}_models_path"
scaler = cls(commandline_options.get(cmd_name, None))
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py
new file mode 100644
index 00000000..f3d49c44
--- /dev/null
+++ b/modules/models/diffusion/ddpm_edit.py
@@ -0,0 +1,1459 @@
+"""
+wild mixture of
+https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
+https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
+https://github.com/CompVis/taming-transformers
+-- merci
+"""
+
+# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
+# See more details in LICENSE.
+
+import torch
+import torch.nn as nn
+import numpy as np
+import pytorch_lightning as pl
+from torch.optim.lr_scheduler import LambdaLR
+from einops import rearrange, repeat
+from contextlib import contextmanager
+from functools import partial
+from tqdm import tqdm
+from torchvision.utils import make_grid
+from pytorch_lightning.utilities.distributed import rank_zero_only
+
+from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
+from ldm.modules.ema import LitEma
+from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
+from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
+from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
+from ldm.models.diffusion.ddim import DDIMSampler
+
+
+__conditioning_keys__ = {'concat': 'c_concat',
+ 'crossattn': 'c_crossattn',
+ 'adm': 'y'}
+
+
+def disabled_train(self, mode=True):
+ """Overwrite model.train with this function to make sure train/eval mode
+ does not change anymore."""
+ return self
+
+
+def uniform_on_device(r1, r2, shape, device):
+ return (r1 - r2) * torch.rand(*shape, device=device) + r2
+
+
+class DDPM(pl.LightningModule):
+ # classic DDPM with Gaussian diffusion, in image space
+ def __init__(self,
+ unet_config,
+ timesteps=1000,
+ beta_schedule="linear",
+ loss_type="l2",
+ ckpt_path=None,
+ ignore_keys=[],
+ load_only_unet=False,
+ monitor="val/loss",
+ use_ema=True,
+ first_stage_key="image",
+ image_size=256,
+ channels=3,
+ log_every_t=100,
+ clip_denoised=True,
+ linear_start=1e-4,
+ linear_end=2e-2,
+ cosine_s=8e-3,
+ given_betas=None,
+ original_elbo_weight=0.,
+ v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
+ l_simple_weight=1.,
+ conditioning_key=None,
+ parameterization="eps", # all assuming fixed variance schedules
+ scheduler_config=None,
+ use_positional_encodings=False,
+ learn_logvar=False,
+ logvar_init=0.,
+ load_ema=True,
+ ):
+ super().__init__()
+ assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
+ self.parameterization = parameterization
+ print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
+ self.cond_stage_model = None
+ self.clip_denoised = clip_denoised
+ self.log_every_t = log_every_t
+ self.first_stage_key = first_stage_key
+ self.image_size = image_size # try conv?
+ self.channels = channels
+ self.use_positional_encodings = use_positional_encodings
+ self.model = DiffusionWrapper(unet_config, conditioning_key)
+ count_params(self.model, verbose=True)
+ self.use_ema = use_ema
+
+ self.use_scheduler = scheduler_config is not None
+ if self.use_scheduler:
+ self.scheduler_config = scheduler_config
+
+ self.v_posterior = v_posterior
+ self.original_elbo_weight = original_elbo_weight
+ self.l_simple_weight = l_simple_weight
+
+ if monitor is not None:
+ self.monitor = monitor
+
+ if self.use_ema and load_ema:
+ self.model_ema = LitEma(self.model)
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+ if ckpt_path is not None:
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
+
+ # If initializing from an EMA-only checkpoint, create the EMA model after loading.
+ if self.use_ema and not load_ema:
+ self.model_ema = LitEma(self.model)
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+ self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
+ linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
+
+ self.loss_type = loss_type
+
+ self.learn_logvar = learn_logvar
+ self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
+ if self.learn_logvar:
+ self.logvar = nn.Parameter(self.logvar, requires_grad=True)
+
+
+ def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+ if exists(given_betas):
+ betas = given_betas
+ else:
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
+ cosine_s=cosine_s)
+ alphas = 1. - betas
+ alphas_cumprod = np.cumprod(alphas, axis=0)
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+
+ timesteps, = betas.shape
+ self.num_timesteps = int(timesteps)
+ self.linear_start = linear_start
+ self.linear_end = linear_end
+ assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
+
+ to_torch = partial(torch.tensor, dtype=torch.float32)
+
+ self.register_buffer('betas', to_torch(betas))
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
+
+ # calculations for diffusion q(x_t | x_{t-1}) and others
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
+
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
+ posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
+ 1. - alphas_cumprod) + self.v_posterior * betas
+ # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
+ self.register_buffer('posterior_variance', to_torch(posterior_variance))
+ # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
+ self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
+ self.register_buffer('posterior_mean_coef1', to_torch(
+ betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
+ self.register_buffer('posterior_mean_coef2', to_torch(
+ (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
+
+ if self.parameterization == "eps":
+ lvlb_weights = self.betas ** 2 / (
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
+ elif self.parameterization == "x0":
+ lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
+ else:
+ raise NotImplementedError("mu not supported")
+ # TODO how to choose this term
+ lvlb_weights[0] = lvlb_weights[1]
+ self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
+ assert not torch.isnan(self.lvlb_weights).all()
+
+ @contextmanager
+ def ema_scope(self, context=None):
+ if self.use_ema:
+ self.model_ema.store(self.model.parameters())
+ self.model_ema.copy_to(self.model)
+ if context is not None:
+ print(f"{context}: Switched to EMA weights")
+ try:
+ yield None
+ finally:
+ if self.use_ema:
+ self.model_ema.restore(self.model.parameters())
+ if context is not None:
+ print(f"{context}: Restored training weights")
+
+ def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
+ sd = torch.load(path, map_location="cpu")
+ if "state_dict" in list(sd.keys()):
+ sd = sd["state_dict"]
+ keys = list(sd.keys())
+
+ # Our model adds additional channels to the first layer to condition on an input image.
+ # For the first layer, copy existing channel weights and initialize new channel weights to zero.
+ input_keys = [
+ "model.diffusion_model.input_blocks.0.0.weight",
+ "model_ema.diffusion_modelinput_blocks00weight",
+ ]
+
+ self_sd = self.state_dict()
+ for input_key in input_keys:
+ if input_key not in sd or input_key not in self_sd:
+ continue
+
+ input_weight = self_sd[input_key]
+
+ if input_weight.size() != sd[input_key].size():
+ print(f"Manual init: {input_key}")
+ input_weight.zero_()
+ input_weight[:, :4, :, :].copy_(sd[input_key])
+ ignore_keys.append(input_key)
+
+ for k in keys:
+ for ik in ignore_keys:
+ if k.startswith(ik):
+ print("Deleting key {} from state_dict.".format(k))
+ del sd[k]
+ missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
+ sd, strict=False)
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
+ if len(missing) > 0:
+ print(f"Missing Keys: {missing}")
+ if len(unexpected) > 0:
+ print(f"Unexpected Keys: {unexpected}")
+
+ def q_mean_variance(self, x_start, t):
+ """
+ Get the distribution q(x_t | x_0).
+ :param x_start: the [N x C x ...] tensor of noiseless inputs.
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape.
+ """
+ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
+ variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
+ log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
+ return mean, variance, log_variance
+
+ def predict_start_from_noise(self, x_t, t, noise):
+ return (
+ extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
+ )
+
+ def q_posterior(self, x_start, x_t, t):
+ posterior_mean = (
+ extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
+ extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
+ )
+ posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
+ posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
+ return posterior_mean, posterior_variance, posterior_log_variance_clipped
+
+ def p_mean_variance(self, x, t, clip_denoised: bool):
+ model_out = self.model(x, t)
+ if self.parameterization == "eps":
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
+ elif self.parameterization == "x0":
+ x_recon = model_out
+ if clip_denoised:
+ x_recon.clamp_(-1., 1.)
+
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
+ return model_mean, posterior_variance, posterior_log_variance
+
+ @torch.no_grad()
+ def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
+ b, *_, device = *x.shape, x.device
+ model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
+ noise = noise_like(x.shape, device, repeat_noise)
+ # no noise when t == 0
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+
+ @torch.no_grad()
+ def p_sample_loop(self, shape, return_intermediates=False):
+ device = self.betas.device
+ b = shape[0]
+ img = torch.randn(shape, device=device)
+ intermediates = [img]
+ for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
+ img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
+ clip_denoised=self.clip_denoised)
+ if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
+ intermediates.append(img)
+ if return_intermediates:
+ return img, intermediates
+ return img
+
+ @torch.no_grad()
+ def sample(self, batch_size=16, return_intermediates=False):
+ image_size = self.image_size
+ channels = self.channels
+ return self.p_sample_loop((batch_size, channels, image_size, image_size),
+ return_intermediates=return_intermediates)
+
+ def q_sample(self, x_start, t, noise=None):
+ noise = default(noise, lambda: torch.randn_like(x_start))
+ return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
+
+ def get_loss(self, pred, target, mean=True):
+ if self.loss_type == 'l1':
+ loss = (target - pred).abs()
+ if mean:
+ loss = loss.mean()
+ elif self.loss_type == 'l2':
+ if mean:
+ loss = torch.nn.functional.mse_loss(target, pred)
+ else:
+ loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
+ else:
+ raise NotImplementedError("unknown loss type '{loss_type}'")
+
+ return loss
+
+ def p_losses(self, x_start, t, noise=None):
+ noise = default(noise, lambda: torch.randn_like(x_start))
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+ model_out = self.model(x_noisy, t)
+
+ loss_dict = {}
+ if self.parameterization == "eps":
+ target = noise
+ elif self.parameterization == "x0":
+ target = x_start
+ else:
+ raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
+
+ loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
+
+ log_prefix = 'train' if self.training else 'val'
+
+ loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
+ loss_simple = loss.mean() * self.l_simple_weight
+
+ loss_vlb = (self.lvlb_weights[t] * loss).mean()
+ loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
+
+ loss = loss_simple + self.original_elbo_weight * loss_vlb
+
+ loss_dict.update({f'{log_prefix}/loss': loss})
+
+ return loss, loss_dict
+
+ def forward(self, x, *args, **kwargs):
+ # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
+ # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
+ return self.p_losses(x, t, *args, **kwargs)
+
+ def get_input(self, batch, k):
+ return batch[k]
+
+ def shared_step(self, batch):
+ x = self.get_input(batch, self.first_stage_key)
+ loss, loss_dict = self(x)
+ return loss, loss_dict
+
+ def training_step(self, batch, batch_idx):
+ loss, loss_dict = self.shared_step(batch)
+
+ self.log_dict(loss_dict, prog_bar=True,
+ logger=True, on_step=True, on_epoch=True)
+
+ self.log("global_step", self.global_step,
+ prog_bar=True, logger=True, on_step=True, on_epoch=False)
+
+ if self.use_scheduler:
+ lr = self.optimizers().param_groups[0]['lr']
+ self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
+
+ return loss
+
+ @torch.no_grad()
+ def validation_step(self, batch, batch_idx):
+ _, loss_dict_no_ema = self.shared_step(batch)
+ with self.ema_scope():
+ _, loss_dict_ema = self.shared_step(batch)
+ loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
+ self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
+ self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
+
+ def on_train_batch_end(self, *args, **kwargs):
+ if self.use_ema:
+ self.model_ema(self.model)
+
+ def _get_rows_from_list(self, samples):
+ n_imgs_per_row = len(samples)
+ denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
+ return denoise_grid
+
+ @torch.no_grad()
+ def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
+ log = dict()
+ x = self.get_input(batch, self.first_stage_key)
+ N = min(x.shape[0], N)
+ n_row = min(x.shape[0], n_row)
+ x = x.to(self.device)[:N]
+ log["inputs"] = x
+
+ # get diffusion row
+ diffusion_row = list()
+ x_start = x[:n_row]
+
+ for t in range(self.num_timesteps):
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
+ t = t.to(self.device).long()
+ noise = torch.randn_like(x_start)
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+ diffusion_row.append(x_noisy)
+
+ log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
+
+ if sample:
+ # get denoise row
+ with self.ema_scope("Plotting"):
+ samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
+
+ log["samples"] = samples
+ log["denoise_row"] = self._get_rows_from_list(denoise_row)
+
+ if return_keys:
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
+ return log
+ else:
+ return {key: log[key] for key in return_keys}
+ return log
+
+ def configure_optimizers(self):
+ lr = self.learning_rate
+ params = list(self.model.parameters())
+ if self.learn_logvar:
+ params = params + [self.logvar]
+ opt = torch.optim.AdamW(params, lr=lr)
+ return opt
+
+
+class LatentDiffusion(DDPM):
+ """main class"""
+ def __init__(self,
+ first_stage_config,
+ cond_stage_config,
+ num_timesteps_cond=None,
+ cond_stage_key="image",
+ cond_stage_trainable=False,
+ concat_mode=True,
+ cond_stage_forward=None,
+ conditioning_key=None,
+ scale_factor=1.0,
+ scale_by_std=False,
+ load_ema=True,
+ *args, **kwargs):
+ self.num_timesteps_cond = default(num_timesteps_cond, 1)
+ self.scale_by_std = scale_by_std
+ assert self.num_timesteps_cond <= kwargs['timesteps']
+ # for backwards compatibility after implementation of DiffusionWrapper
+ if conditioning_key is None:
+ conditioning_key = 'concat' if concat_mode else 'crossattn'
+ if cond_stage_config == '__is_unconditional__':
+ conditioning_key = None
+ ckpt_path = kwargs.pop("ckpt_path", None)
+ ignore_keys = kwargs.pop("ignore_keys", [])
+ super().__init__(conditioning_key=conditioning_key, *args, load_ema=load_ema, **kwargs)
+ self.concat_mode = concat_mode
+ self.cond_stage_trainable = cond_stage_trainable
+ self.cond_stage_key = cond_stage_key
+ try:
+ self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
+ except:
+ self.num_downs = 0
+ if not scale_by_std:
+ self.scale_factor = scale_factor
+ else:
+ self.register_buffer('scale_factor', torch.tensor(scale_factor))
+ self.instantiate_first_stage(first_stage_config)
+ self.instantiate_cond_stage(cond_stage_config)
+ self.cond_stage_forward = cond_stage_forward
+ self.clip_denoised = False
+ self.bbox_tokenizer = None
+
+ self.restarted_from_ckpt = False
+ if ckpt_path is not None:
+ self.init_from_ckpt(ckpt_path, ignore_keys)
+ self.restarted_from_ckpt = True
+
+ if self.use_ema and not load_ema:
+ self.model_ema = LitEma(self.model)
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+ def make_cond_schedule(self, ):
+ self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
+ ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
+ self.cond_ids[:self.num_timesteps_cond] = ids
+
+ @rank_zero_only
+ @torch.no_grad()
+ def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
+ # only for very first batch
+ if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
+ assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
+ # set rescale weight to 1./std of encodings
+ print("### USING STD-RESCALING ###")
+ x = super().get_input(batch, self.first_stage_key)
+ x = x.to(self.device)
+ encoder_posterior = self.encode_first_stage(x)
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
+ del self.scale_factor
+ self.register_buffer('scale_factor', 1. / z.flatten().std())
+ print(f"setting self.scale_factor to {self.scale_factor}")
+ print("### USING STD-RESCALING ###")
+
+ def register_schedule(self,
+ given_betas=None, beta_schedule="linear", timesteps=1000,
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+ super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
+
+ self.shorten_cond_schedule = self.num_timesteps_cond > 1
+ if self.shorten_cond_schedule:
+ self.make_cond_schedule()
+
+ def instantiate_first_stage(self, config):
+ model = instantiate_from_config(config)
+ self.first_stage_model = model.eval()
+ self.first_stage_model.train = disabled_train
+ for param in self.first_stage_model.parameters():
+ param.requires_grad = False
+
+ def instantiate_cond_stage(self, config):
+ if not self.cond_stage_trainable:
+ if config == "__is_first_stage__":
+ print("Using first stage also as cond stage.")
+ self.cond_stage_model = self.first_stage_model
+ elif config == "__is_unconditional__":
+ print(f"Training {self.__class__.__name__} as an unconditional model.")
+ self.cond_stage_model = None
+ # self.be_unconditional = True
+ else:
+ model = instantiate_from_config(config)
+ self.cond_stage_model = model.eval()
+ self.cond_stage_model.train = disabled_train
+ for param in self.cond_stage_model.parameters():
+ param.requires_grad = False
+ else:
+ assert config != '__is_first_stage__'
+ assert config != '__is_unconditional__'
+ model = instantiate_from_config(config)
+ self.cond_stage_model = model
+
+ def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
+ denoise_row = []
+ for zd in tqdm(samples, desc=desc):
+ denoise_row.append(self.decode_first_stage(zd.to(self.device),
+ force_not_quantize=force_no_decoder_quantization))
+ n_imgs_per_row = len(denoise_row)
+ denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
+ denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
+ return denoise_grid
+
+ def get_first_stage_encoding(self, encoder_posterior):
+ if isinstance(encoder_posterior, DiagonalGaussianDistribution):
+ z = encoder_posterior.sample()
+ elif isinstance(encoder_posterior, torch.Tensor):
+ z = encoder_posterior
+ else:
+ raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
+ return self.scale_factor * z
+
+ def get_learned_conditioning(self, c):
+ if self.cond_stage_forward is None:
+ if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
+ c = self.cond_stage_model.encode(c)
+ if isinstance(c, DiagonalGaussianDistribution):
+ c = c.mode()
+ else:
+ c = self.cond_stage_model(c)
+ else:
+ assert hasattr(self.cond_stage_model, self.cond_stage_forward)
+ c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
+ return c
+
+ def meshgrid(self, h, w):
+ y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
+ x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
+
+ arr = torch.cat([y, x], dim=-1)
+ return arr
+
+ def delta_border(self, h, w):
+ """
+ :param h: height
+ :param w: width
+ :return: normalized distance to image border,
+ with min distance = 0 at border and max dist = 0.5 at image center
+ """
+ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
+ arr = self.meshgrid(h, w) / lower_right_corner
+ dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
+ dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
+ edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
+ return edge_dist
+
+ def get_weighting(self, h, w, Ly, Lx, device):
+ weighting = self.delta_border(h, w)
+ weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
+ self.split_input_params["clip_max_weight"], )
+ weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
+
+ if self.split_input_params["tie_braker"]:
+ L_weighting = self.delta_border(Ly, Lx)
+ L_weighting = torch.clip(L_weighting,
+ self.split_input_params["clip_min_tie_weight"],
+ self.split_input_params["clip_max_tie_weight"])
+
+ L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
+ weighting = weighting * L_weighting
+ return weighting
+
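+ # get_fold_unfold prepares the tiling used for patch-wise encode/decode/apply_model:
+ # Unfold cuts the input into Ly * Lx overlapping crops, Fold stitches them back, and the
+ # border-distance weighting (with fold(weighting) as the normalization) blends the overlaps.
+ # Illustrative numbers: h = w = 512 with ks = (128, 128) and stride = (64, 64) gives
+ # Ly = Lx = (512 - 128) // 64 + 1 = 7, i.e. 49 overlapping crops.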
+ def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
+ """
+ :param x: img of size (bs, c, h, w)
+ :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
+ """
+ bs, nc, h, w = x.shape
+
+ # number of crops in image
+ Ly = (h - kernel_size[0]) // stride[0] + 1
+ Lx = (w - kernel_size[1]) // stride[1] + 1
+
+ if uf == 1 and df == 1:
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
+ unfold = torch.nn.Unfold(**fold_params)
+
+ fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
+
+ weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
+ normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
+ weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
+
+ elif uf > 1 and df == 1:
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
+ unfold = torch.nn.Unfold(**fold_params)
+
+ fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[1] * uf),
+ dilation=1, padding=0,
+ stride=(stride[0] * uf, stride[1] * uf))
+ fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
+
+ weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
+ normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
+ weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
+
+ elif df > 1 and uf == 1:
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
+ unfold = torch.nn.Unfold(**fold_params)
+
+ fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[1] // df),
+ dilation=1, padding=0,
+ stride=(stride[0] // df, stride[1] // df))
+ fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
+
+ weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
+ normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
+ weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
+
+ else:
+ raise NotImplementedError
+
+ return fold, unfold, normalization, weighting
+
+ @torch.no_grad()
+ def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
+ cond_key=None, return_original_cond=False, bs=None, uncond=0.05):
+ x = super().get_input(batch, k)
+ if bs is not None:
+ x = x[:bs]
+ x = x.to(self.device)
+ encoder_posterior = self.encode_first_stage(x)
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
+ cond_key = cond_key or self.cond_stage_key
+ xc = super().get_input(batch, cond_key)
+ if bs is not None:
+ xc["c_crossattn"] = xc["c_crossattn"][:bs]
+ xc["c_concat"] = xc["c_concat"][:bs]
+ cond = {}
+
+ # To support classifier-free guidance, randomly drop only the text conditioning for 5% of samples, only the image conditioning for 5%, and both for 5%.
+ random = torch.rand(x.size(0), device=x.device)
+ prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1")
+ input_mask = 1 - rearrange((random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1")
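+ # With the default uncond = 0.05: random < 0.10 replaces the text embedding with the
+ # null prompt, and 0.05 <= random < 0.15 zeroes the image conditioning, so roughly 5% of
+ # samples drop only the text, 5% drop only the image, and 5% drop both.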
+
+ null_prompt = self.get_learned_conditioning([""])
+ cond["c_crossattn"] = [torch.where(prompt_mask, null_prompt, self.get_learned_conditioning(xc["c_crossattn"]).detach())]
+ cond["c_concat"] = [input_mask * self.encode_first_stage((xc["c_concat"].to(self.device))).mode().detach()]
+
+ out = [z, cond]
+ if return_first_stage_outputs:
+ xrec = self.decode_first_stage(z)
+ out.extend([x, xrec])
+ if return_original_cond:
+ out.append(xc)
+ return out
+
+ @torch.no_grad()
+ def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
+ if predict_cids:
+ if z.dim() == 4:
+ z = torch.argmax(z.exp(), dim=1).long()
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
+
+ z = 1. / self.scale_factor * z
+
+ if hasattr(self, "split_input_params"):
+ if self.split_input_params["patch_distributed_vq"]:
+ ks = self.split_input_params["ks"] # eg. (128, 128)
+ stride = self.split_input_params["stride"] # eg. (64, 64)
+ uf = self.split_input_params["vqf"]
+ bs, nc, h, w = z.shape
+ if ks[0] > h or ks[1] > w:
+ ks = (min(ks[0], h), min(ks[1], w))
+ print("reducing Kernel")
+
+ if stride[0] > h or stride[1] > w:
+ stride = (min(stride[0], h), min(stride[1], w))
+ print("reducing stride")
+
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
+
+ z = unfold(z) # (bn, nc * prod(**ks), L)
+ # 1. Reshape to img shape
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+
+ # 2. apply model loop over last dim
+ if isinstance(self.first_stage_model, VQModelInterface):
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
+ force_not_quantize=predict_cids or force_not_quantize)
+ for i in range(z.shape[-1])]
+ else:
+
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
+ for i in range(z.shape[-1])]
+
+ o = torch.stack(output_list, axis=-1) # (bn, nc, ks[0], ks[1], L)
+ o = o * weighting
+ # Reverse 1. reshape to img shape
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
+ # stitch crops together
+ decoded = fold(o)
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
+ return decoded
+ else:
+ if isinstance(self.first_stage_model, VQModelInterface):
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+ else:
+ return self.first_stage_model.decode(z)
+
+ else:
+ if isinstance(self.first_stage_model, VQModelInterface):
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+ else:
+ return self.first_stage_model.decode(z)
+
+ # same as above but without decorator
+ def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
+ if predict_cids:
+ if z.dim() == 4:
+ z = torch.argmax(z.exp(), dim=1).long()
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
+
+ z = 1. / self.scale_factor * z
+
+ if hasattr(self, "split_input_params"):
+ if self.split_input_params["patch_distributed_vq"]:
+ ks = self.split_input_params["ks"] # eg. (128, 128)
+ stride = self.split_input_params["stride"] # eg. (64, 64)
+ uf = self.split_input_params["vqf"]
+ bs, nc, h, w = z.shape
+ if ks[0] > h or ks[1] > w:
+ ks = (min(ks[0], h), min(ks[1], w))
+ print("reducing Kernel")
+
+ if stride[0] > h or stride[1] > w:
+ stride = (min(stride[0], h), min(stride[1], w))
+ print("reducing stride")
+
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
+
+ z = unfold(z) # (bn, nc * prod(**ks), L)
+ # 1. Reshape to img shape
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+
+ # 2. apply model loop over last dim
+ if isinstance(self.first_stage_model, VQModelInterface):
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
+ force_not_quantize=predict_cids or force_not_quantize)
+ for i in range(z.shape[-1])]
+ else:
+
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
+ for i in range(z.shape[-1])]
+
+ o = torch.stack(output_list, axis=-1) # (bn, nc, ks[0], ks[1], L)
+ o = o * weighting
+ # Reverse 1. reshape to img shape
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
+ # stitch crops together
+ decoded = fold(o)
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
+ return decoded
+ else:
+ if isinstance(self.first_stage_model, VQModelInterface):
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+ else:
+ return self.first_stage_model.decode(z)
+
+ else:
+ if isinstance(self.first_stage_model, VQModelInterface):
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
+ else:
+ return self.first_stage_model.decode(z)
+
+ @torch.no_grad()
+ def encode_first_stage(self, x):
+ if hasattr(self, "split_input_params"):
+ if self.split_input_params["patch_distributed_vq"]:
+ ks = self.split_input_params["ks"] # eg. (128, 128)
+ stride = self.split_input_params["stride"] # eg. (64, 64)
+ df = self.split_input_params["vqf"]
+ self.split_input_params['original_image_size'] = x.shape[-2:]
+ bs, nc, h, w = x.shape
+ if ks[0] > h or ks[1] > w:
+ ks = (min(ks[0], h), min(ks[1], w))
+ print("reducing Kernel")
+
+ if stride[0] > h or stride[1] > w:
+ stride = (min(stride[0], h), min(stride[1], w))
+ print("reducing stride")
+
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
+ z = unfold(x) # (bn, nc * prod(**ks), L)
+ # Reshape to img shape
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+
+ output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
+ for i in range(z.shape[-1])]
+
+ o = torch.stack(output_list, axis=-1)
+ o = o * weighting
+
+ # Reverse reshape to img shape
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
+ # stitch crops together
+ decoded = fold(o)
+ decoded = decoded / normalization
+ return decoded
+
+ else:
+ return self.first_stage_model.encode(x)
+ else:
+ return self.first_stage_model.encode(x)
+
+ def shared_step(self, batch, **kwargs):
+ x, c = self.get_input(batch, self.first_stage_key)
+ loss = self(x, c)
+ return loss
+
+ def forward(self, x, c, *args, **kwargs):
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
+ if self.model.conditioning_key is not None:
+ assert c is not None
+ if self.cond_stage_trainable:
+ c = self.get_learned_conditioning(c)
+ if self.shorten_cond_schedule: # TODO: drop this option
+ tc = self.cond_ids[t].to(self.device)
+ c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
+ return self.p_losses(x, c, t, *args, **kwargs)
+
+ def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
+ def rescale_bbox(bbox):
+ x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
+ y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
+ w = min(bbox[2] / crop_coordinates[2], 1 - x0)
+ h = min(bbox[3] / crop_coordinates[3], 1 - y0)
+ return x0, y0, w, h
+
+ return [rescale_bbox(b) for b in bboxes]
+
+ def apply_model(self, x_noisy, t, cond, return_ids=False):
+
+ if isinstance(cond, dict):
+ # hybrid case, cond is expected to be a dict
+ pass
+ else:
+ if not isinstance(cond, list):
+ cond = [cond]
+ key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
+ cond = {key: cond}
+
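+ # When split_input_params is set, the noisy latent is processed as overlapping crops
+ # via the fold/unfold machinery above, and the per-crop model outputs are re-stitched
+ # with the same weighting; otherwise the wrapped diffusion model sees the full latent.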
+ if hasattr(self, "split_input_params"):
+ assert len(cond) == 1 # todo can only deal with one conditioning atm
+ assert not return_ids
+ ks = self.split_input_params["ks"] # eg. (128, 128)
+ stride = self.split_input_params["stride"] # eg. (64, 64)
+
+ h, w = x_noisy.shape[-2:]
+
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
+
+ z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
+ # Reshape to img shape
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+ z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
+
+ if self.cond_stage_key in ["image", "LR_image", "segmentation",
+ 'bbox_img'] and self.model.conditioning_key: # todo check for completeness
+ c_key = next(iter(cond.keys())) # get key
+ c = next(iter(cond.values())) # get value
+ assert (len(c) == 1) # todo extend to list with more than one elem
+ c = c[0] # get element
+
+ c = unfold(c)
+ c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
+
+ cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
+
+ elif self.cond_stage_key == 'coordinates_bbox':
+ assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
+
+ # assuming padding of unfold is always 0 and its dilation is always 1
+ n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
+ full_img_h, full_img_w = self.split_input_params['original_image_size']
+ # as we are operating on latents, we need the factor from the original image size to the
+ # spatial latent size to properly rescale the crops for regenerating the bbox annotations
+ num_downs = self.first_stage_model.encoder.num_resolutions - 1
+ rescale_latent = 2 ** (num_downs)
+
+ # get the top-left positions of the patches as expected by the bbox tokenizer; therefore we
+ # need to rescale the top-left patch coordinates to lie in (0, 1)
+ tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
+ rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
+ for patch_nr in range(z.shape[-1])]
+
+ # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
+ patch_limits = [(x_tl, y_tl,
+ rescale_latent * ks[0] / full_img_w,
+ rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
+ # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
+
+ # tokenize crop coordinates for the bounding boxes of the respective patches
+ patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
+ for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
+ print(patch_limits_tknzd[0].shape)
+ # cut tknzd crop position from conditioning
+ assert isinstance(cond, dict), 'cond must be dict to be fed into model'
+ cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
+ print(cut_cond.shape)
+
+ adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
+ adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
+ print(adapted_cond.shape)
+ adapted_cond = self.get_learned_conditioning(adapted_cond)
+ print(adapted_cond.shape)
+ adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
+ print(adapted_cond.shape)
+
+ cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
+
+ else:
+ cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
+
+ # apply model by loop over crops
+ output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
+ assert not isinstance(output_list[0],
+ tuple) # todo: can't deal with multiple model outputs; check this never happens
+
+ o = torch.stack(output_list, axis=-1)
+ o = o * weighting
+ # Reverse reshape to img shape
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
+ # stitch crops together
+ x_recon = fold(o) / normalization
+
+ else:
+ x_recon = self.model(x_noisy, t, **cond)
+
+ if isinstance(x_recon, tuple) and not return_ids:
+ return x_recon[0]
+ else:
+ return x_recon
+
+ def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
+ return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
+
+ def _prior_bpd(self, x_start):
+ """
+ Get the prior KL term for the variational lower-bound, measured in
+ bits-per-dim.
+ This term can't be optimized, as it only depends on the encoder.
+ :param x_start: the [N x C x ...] tensor of inputs.
+ :return: a batch of [N] KL values (in bits), one per batch element.
+ """
+ batch_size = x_start.shape[0]
+ t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
+ qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
+ kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
+ return mean_flat(kl_prior) / np.log(2.0)
+
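+ # p_losses: the target is the injected noise for the "eps" parameterization or x_start
+ # for "x0"; loss_simple is a per-sample reconstruction loss (mean over C, H, W), reweighted
+ # by the per-timestep logvar, and an ELBO-weighted VLB term is added on top (it only
+ # contributes when original_elbo_weight is non-zero).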
+ def p_losses(self, x_start, cond, t, noise=None):
+ noise = default(noise, lambda: torch.randn_like(x_start))
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+ model_output = self.apply_model(x_noisy, t, cond)
+
+ loss_dict = {}
+ prefix = 'train' if self.training else 'val'
+
+ if self.parameterization == "x0":
+ target = x_start
+ elif self.parameterization == "eps":
+ target = noise
+ else:
+ raise NotImplementedError()
+
+ loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
+ loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
+
+ logvar_t = self.logvar[t].to(self.device)
+ loss = loss_simple / torch.exp(logvar_t) + logvar_t
+ # loss = loss_simple / torch.exp(self.logvar) + self.logvar
+ if self.learn_logvar:
+ loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
+ loss_dict.update({'logvar': self.logvar.data.mean()})
+
+ loss = self.l_simple_weight * loss.mean()
+
+ loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
+ loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
+ loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
+ loss += (self.original_elbo_weight * loss_vlb)
+ loss_dict.update({f'{prefix}/loss': loss})
+
+ return loss, loss_dict
+
+ def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
+ return_x0=False, score_corrector=None, corrector_kwargs=None):
+ t_in = t
+ model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
+
+ if score_corrector is not None:
+ assert self.parameterization == "eps"
+ model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
+
+ if return_codebook_ids:
+ model_out, logits = model_out
+
+ if self.parameterization == "eps":
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
+ elif self.parameterization == "x0":
+ x_recon = model_out
+ else:
+ raise NotImplementedError()
+
+ if clip_denoised:
+ x_recon.clamp_(-1., 1.)
+ if quantize_denoised:
+ x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
+ if return_codebook_ids:
+ return model_mean, posterior_variance, posterior_log_variance, logits
+ elif return_x0:
+ return model_mean, posterior_variance, posterior_log_variance, x_recon
+ else:
+ return model_mean, posterior_variance, posterior_log_variance
+
+ @torch.no_grad()
+ def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
+ return_codebook_ids=False, quantize_denoised=False, return_x0=False,
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
+ b, *_, device = *x.shape, x.device
+ outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
+ return_codebook_ids=return_codebook_ids,
+ quantize_denoised=quantize_denoised,
+ return_x0=return_x0,
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
+ if return_codebook_ids:
+ raise DeprecationWarning("Support dropped.")
+ model_mean, _, model_log_variance, logits = outputs
+ elif return_x0:
+ model_mean, _, model_log_variance, x0 = outputs
+ else:
+ model_mean, _, model_log_variance = outputs
+
+ noise = noise_like(x.shape, device, repeat_noise) * temperature
+ if noise_dropout > 0.:
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
+ # no noise when t == 0
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
+
+ if return_codebook_ids:
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
+ if return_x0:
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
+ else:
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+
+ @torch.no_grad()
+ def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
+ img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
+ score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
+ log_every_t=None):
+ if not log_every_t:
+ log_every_t = self.log_every_t
+ timesteps = self.num_timesteps
+ if batch_size is not None:
+ b = batch_size if batch_size is not None else shape[0]
+ shape = [batch_size] + list(shape)
+ else:
+ b = batch_size = shape[0]
+ if x_T is None:
+ img = torch.randn(shape, device=self.device)
+ else:
+ img = x_T
+ intermediates = []
+ if cond is not None:
+ if isinstance(cond, dict):
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
+ list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+ else:
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
+
+ if start_T is not None:
+ timesteps = min(timesteps, start_T)
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
+ total=timesteps) if verbose else reversed(
+ range(0, timesteps))
+ if type(temperature) == float:
+ temperature = [temperature] * timesteps
+
+ for i in iterator:
+ ts = torch.full((b,), i, device=self.device, dtype=torch.long)
+ if self.shorten_cond_schedule:
+ assert self.model.conditioning_key != 'hybrid'
+ tc = self.cond_ids[ts].to(cond.device)
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
+
+ img, x0_partial = self.p_sample(img, cond, ts,
+ clip_denoised=self.clip_denoised,
+ quantize_denoised=quantize_denoised, return_x0=True,
+ temperature=temperature[i], noise_dropout=noise_dropout,
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
+ if mask is not None:
+ assert x0 is not None
+ img_orig = self.q_sample(x0, ts)
+ img = img_orig * mask + (1. - mask) * img
+
+ if i % log_every_t == 0 or i == timesteps - 1:
+ intermediates.append(x0_partial)
+ if callback: callback(i)
+ if img_callback: img_callback(img, i)
+ return img, intermediates
+
+ @torch.no_grad()
+ def p_sample_loop(self, cond, shape, return_intermediates=False,
+ x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
+ mask=None, x0=None, img_callback=None, start_T=None,
+ log_every_t=None):
+
+ if not log_every_t:
+ log_every_t = self.log_every_t
+ device = self.betas.device
+ b = shape[0]
+ if x_T is None:
+ img = torch.randn(shape, device=device)
+ else:
+ img = x_T
+
+ intermediates = [img]
+ if timesteps is None:
+ timesteps = self.num_timesteps
+
+ if start_T is not None:
+ timesteps = min(timesteps, start_T)
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
+ range(0, timesteps))
+
+ if mask is not None:
+ assert x0 is not None
+ assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
+
+ for i in iterator:
+ ts = torch.full((b,), i, device=device, dtype=torch.long)
+ if self.shorten_cond_schedule:
+ assert self.model.conditioning_key != 'hybrid'
+ tc = self.cond_ids[ts].to(cond.device)
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
+
+ img = self.p_sample(img, cond, ts,
+ clip_denoised=self.clip_denoised,
+ quantize_denoised=quantize_denoised)
+ if mask is not None:
+ img_orig = self.q_sample(x0, ts)
+ img = img_orig * mask + (1. - mask) * img
+
+ if i % log_every_t == 0 or i == timesteps - 1:
+ intermediates.append(img)
+ if callback: callback(i)
+ if img_callback: img_callback(img, i)
+
+ if return_intermediates:
+ return img, intermediates
+ return img
+
+ @torch.no_grad()
+ def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
+ verbose=True, timesteps=None, quantize_denoised=False,
+ mask=None, x0=None, shape=None,**kwargs):
+ if shape is None:
+ shape = (batch_size, self.channels, self.image_size, self.image_size)
+ if cond is not None:
+ if isinstance(cond, dict):
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
+ list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+ else:
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
+ return self.p_sample_loop(cond,
+ shape,
+ return_intermediates=return_intermediates, x_T=x_T,
+ verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
+ mask=mask, x0=x0)
+
+ @torch.no_grad()
+ def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
+
+ if ddim:
+ ddim_sampler = DDIMSampler(self)
+ shape = (self.channels, self.image_size, self.image_size)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
+ shape, cond, verbose=False, **kwargs)
+
+ else:
+ samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
+ return_intermediates=True, **kwargs)
+
+ return samples, intermediates
+
+
+ @torch.no_grad()
+ def log_images(self, batch, N=4, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
+ quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False,
+ plot_diffusion_rows=False, **kwargs):
+
+ use_ddim = False
+
+ log = dict()
+ z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
+ return_first_stage_outputs=True,
+ force_c_encode=True,
+ return_original_cond=True,
+ bs=N, uncond=0)
+ N = min(x.shape[0], N)
+ n_row = min(x.shape[0], n_row)
+ log["inputs"] = x
+ log["reals"] = xc["c_concat"]
+ log["reconstruction"] = xrec
+ if self.model.conditioning_key is not None:
+ if hasattr(self.cond_stage_model, "decode"):
+ xc = self.cond_stage_model.decode(c)
+ log["conditioning"] = xc
+ elif self.cond_stage_key in ["caption"]:
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
+ log["conditioning"] = xc
+ elif self.cond_stage_key == 'class_label':
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
+ log['conditioning'] = xc
+ elif isimage(xc):
+ log["conditioning"] = xc
+ if ismap(xc):
+ log["original_conditioning"] = self.to_rgb(xc)
+
+ if plot_diffusion_rows:
+ # get diffusion row
+ diffusion_row = list()
+ z_start = z[:n_row]
+ for t in range(self.num_timesteps):
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
+ t = t.to(self.device).long()
+ noise = torch.randn_like(z_start)
+ z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
+ diffusion_row.append(self.decode_first_stage(z_noisy))
+
+ diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
+ diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
+ diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
+ diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
+ log["diffusion_row"] = diffusion_grid
+
+ if sample:
+ # get denoise row
+ with self.ema_scope("Plotting"):
+ samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
+ ddim_steps=ddim_steps,eta=ddim_eta)
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
+ x_samples = self.decode_first_stage(samples)
+ log["samples"] = x_samples
+ if plot_denoise_rows:
+ denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
+ log["denoise_row"] = denoise_grid
+
+ if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
+ self.first_stage_model, IdentityFirstStage):
+ # also display when quantizing x0 while sampling
+ with self.ema_scope("Plotting Quantized Denoised"):
+ samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
+ ddim_steps=ddim_steps,eta=ddim_eta,
+ quantize_denoised=True)
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
+ # quantize_denoised=True)
+ x_samples = self.decode_first_stage(samples.to(self.device))
+ log["samples_x0_quantized"] = x_samples
+
+ if inpaint:
+ # make a simple center square
+ b, h, w = z.shape[0], z.shape[2], z.shape[3]
+ mask = torch.ones(N, h, w).to(self.device)
+ # zeros will be filled in
+ mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
+ mask = mask[:, None, ...]
+ with self.ema_scope("Plotting Inpaint"):
+
+ samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
+ x_samples = self.decode_first_stage(samples.to(self.device))
+ log["samples_inpainting"] = x_samples
+ log["mask"] = mask
+
+ # outpaint
+ with self.ema_scope("Plotting Outpaint"):
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
+ x_samples = self.decode_first_stage(samples.to(self.device))
+ log["samples_outpainting"] = x_samples
+
+ if plot_progressive_rows:
+ with self.ema_scope("Plotting Progressives"):
+ img, progressives = self.progressive_denoising(c,
+ shape=(self.channels, self.image_size, self.image_size),
+ batch_size=N)
+ prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
+ log["progressive_row"] = prog_row
+
+ if return_keys:
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
+ return log
+ else:
+ return {key: log[key] for key in return_keys}
+ return log
+
+ def configure_optimizers(self):
+ lr = self.learning_rate
+ params = list(self.model.parameters())
+ if self.cond_stage_trainable:
+ print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
+ params = params + list(self.cond_stage_model.parameters())
+ if self.learn_logvar:
+ print('Diffusion model optimizing logvar')
+ params.append(self.logvar)
+ opt = torch.optim.AdamW(params, lr=lr)
+ if self.use_scheduler:
+ assert 'target' in self.scheduler_config
+ scheduler = instantiate_from_config(self.scheduler_config)
+
+ print("Setting up LambdaLR scheduler...")
+ scheduler = [
+ {
+ 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
+ 'interval': 'step',
+ 'frequency': 1
+ }]
+ return [opt], scheduler
+ return opt
+
+ @torch.no_grad()
+ def to_rgb(self, x):
+ x = x.float()
+ if not hasattr(self, "colorize"):
+ self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
+ x = nn.functional.conv2d(x, weight=self.colorize)
+ x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
+ return x
+
+
+class DiffusionWrapper(pl.LightningModule):
+ def __init__(self, diff_model_config, conditioning_key):
+ super().__init__()
+ self.diffusion_model = instantiate_from_config(diff_model_config)
+ self.conditioning_key = conditioning_key
+ assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
+
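+ # Routing of the conditioning into the UNet, selected by conditioning_key:
+ # None -> unconditional; 'concat' -> conditioning channels concatenated to the latent input;
+ # 'crossattn' -> context tensors fed to cross-attention; 'hybrid' -> both at once (the edit
+ # conditioning assembled in LatentDiffusion.get_input above provides both c_concat and
+ # c_crossattn); 'adm' -> a vector/class conditioning passed as y.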
+ def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
+ if self.conditioning_key is None:
+ out = self.diffusion_model(x, t)
+ elif self.conditioning_key == 'concat':
+ xc = torch.cat([x] + c_concat, dim=1)
+ out = self.diffusion_model(xc, t)
+ elif self.conditioning_key == 'crossattn':
+ cc = torch.cat(c_crossattn, 1)
+ out = self.diffusion_model(x, t, context=cc)
+ elif self.conditioning_key == 'hybrid':
+ xc = torch.cat([x] + c_concat, dim=1)
+ cc = torch.cat(c_crossattn, 1)
+ out = self.diffusion_model(xc, t, context=cc)
+ elif self.conditioning_key == 'adm':
+ cc = c_crossattn[0]
+ out = self.diffusion_model(x, t, y=cc)
+ else:
+ raise NotImplementedError()
+
+ return out
+
+
+class Layout2ImgDiffusion(LatentDiffusion):
+ # TODO: move all layout-specific hacks to this class
+ def __init__(self, cond_stage_key, *args, **kwargs):
+ assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
+ super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
+
+ def log_images(self, batch, N=8, *args, **kwargs):
+ logs = super().log_images(batch=batch, N=N, *args, **kwargs)
+
+ key = 'train' if self.training else 'validation'
+ dset = self.trainer.datamodule.datasets[key]
+ mapper = dset.conditional_builders[self.cond_stage_key]
+
+ bbox_imgs = []
+ map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
+ for tknzd_bbox in batch[self.cond_stage_key][:N]:
+ bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
+ bbox_imgs.append(bboximg)
+
+ cond_img = torch.stack(bbox_imgs, dim=0)
+ logs['bbox_image'] = cond_img
+ return logs
diff --git a/modules/paths.py b/modules/paths.py
index 4dd03a35..d991cc71 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -4,7 +4,15 @@ import sys
import modules.safe
script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-models_path = os.path.join(script_path, "models")
+
+# Parse the --data-dir flag first so we can use it as a base for our other argument default values
+parser = argparse.ArgumentParser(add_help=False)
+parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored",)
+cmd_opts_pre = parser.parse_known_args()[0]
+data_path = cmd_opts_pre.data_dir
+models_path = os.path.join(data_path, "models")
+
+# data_path = cmd_opts_pre.data
sys.path.insert(0, script_path)
# search for directory of stable diffusion in following places
@@ -38,3 +46,17 @@ for d, must_exist, what, options in path_dirs:
else:
sys.path.append(d)
paths[what] = d
+
+
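+# Prioritize is a small context manager: inside a `with Prioritize(name):` block the repository
+# path registered under that name is pushed to the front of sys.path, and the previous
+# sys.path is restored on exit.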
+class Prioritize:
+ def __init__(self, name):
+ self.name = name
+ self.path = None
+
+ def __enter__(self):
+ self.path = sys.path.copy()
+ sys.path = [paths[self.name]] + sys.path
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ sys.path = self.path
+ self.path = None
diff --git a/modules/postprocessing.py b/modules/postprocessing.py
new file mode 100644
index 00000000..09d8e605
--- /dev/null
+++ b/modules/postprocessing.py
@@ -0,0 +1,103 @@
+import os
+
+from PIL import Image
+
+from modules import shared, images, devices, scripts, scripts_postprocessing, ui_common, generation_parameters_copypaste
+from modules.shared import opts
+
+
+def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True):
+ devices.torch_gc()
+
+ shared.state.begin()
+ shared.state.job = 'extras'
+
+ image_data = []
+ image_names = []
+ outputs = []
+
+ if extras_mode == 1:
+ for img in image_folder:
+ image = Image.open(img)
+ image_data.append(image)
+ image_names.append(os.path.splitext(img.orig_name)[0])
+ elif extras_mode == 2:
+ assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled'
+ assert input_dir, 'input directory not selected'
+
+ image_list = shared.listfiles(input_dir)
+ for filename in image_list:
+ try:
+ image = Image.open(filename)
+ except Exception:
+ continue
+ image_data.append(image)
+ image_names.append(filename)
+ else:
+ assert image, 'image not selected'
+
+ image_data.append(image)
+ image_names.append(None)
+
+ if extras_mode == 2 and output_dir != '':
+ outpath = output_dir
+ else:
+ outpath = opts.outdir_samples or opts.outdir_extras_samples
+
+ infotext = ''
+
+ for image, name in zip(image_data, image_names):
+ shared.state.textinfo = name
+
+ existing_pnginfo = image.info or {}
+
+ pp = scripts_postprocessing.PostprocessedImage(image.convert("RGB"))
+
+ scripts.scripts_postproc.run(pp, args)
+
+ if opts.use_original_name_batch and name is not None:
+ basename = os.path.splitext(os.path.basename(name))[0]
+ else:
+ basename = ''
+
+ infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None])
+
+ if opts.enable_pnginfo:
+ pp.image.info = existing_pnginfo
+ pp.image.info["postprocessing"] = infotext
+
+ if save_output:
+ images.save_image(pp.image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None)
+
+ if extras_mode != 2 or show_extras_results:
+ outputs.append(pp.image)
+
+ devices.torch_gc()
+
+ return outputs, ui_common.plaintext_to_html(infotext), ''
+
+
+def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True):
+ """old handler for API"""
+
+ args = scripts.scripts_postproc.create_args_for_run({
+ "Upscale": {
+ "upscale_mode": resize_mode,
+ "upscale_by": upscaling_resize,
+ "upscale_to_width": upscaling_resize_w,
+ "upscale_to_height": upscaling_resize_h,
+ "upscale_crop": upscaling_crop,
+ "upscaler_1_name": extras_upscaler_1,
+ "upscaler_2_name": extras_upscaler_2,
+ "upscaler_2_visibility": extras_upscaler_2_visibility,
+ },
+ "GFPGAN": {
+ "gfpgan_visibility": gfpgan_visibility,
+ },
+ "CodeFormer": {
+ "codeformer_visibility": codeformer_visibility,
+ "codeformer_weight": codeformer_weight,
+ },
+ })
+
+ return run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output=save_output)
diff --git a/modules/processing.py b/modules/processing.py
index 4a406084..5072fc40 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -13,10 +13,11 @@ from skimage import exposure
from typing import Any, Dict, List, Optional
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
+import modules.paths as paths
import modules.face_restoration
import modules.images as images
import modules.styles
@@ -50,9 +51,9 @@ def apply_color_correction(correction, original_image):
correction,
channel_axis=2
), cv2.COLOR_LAB2RGB).astype("uint8"))
-
+
image = blendLayers(image, original_image, BlendType.LUMINOSITY)
-
+
return image
@@ -76,15 +77,32 @@ def apply_overlay(image, paste_loc, index, overlays):
return image
-class StableDiffusionProcessing():
+def txt2img_image_conditioning(sd_model, x, width, height):
+ if sd_model.model.conditioning_key not in {'hybrid', 'concat'}:
+ # Dummy zero conditioning if we're not using inpainting model.
+ # Still takes up a bit of memory, but no encoder call.
+ # Pretty sure we can just make this a 1x1 image since it's not going to be used except for its batch size.
+ return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
+
+ # The "masked-image" in this case will just be all zeros since the entire image is masked.
+ image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
+ image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))
+
+ # Add the fake full 1s mask to the first dimension.
+ image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
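+ # The pad prepends a single all-ones "keep everything" mask channel to the latent of the
+ # fully masked (all-zero) source image; for the usual 4-channel latent this gives the
+ # 5-channel c_concat layout that matches the dummy zero conditioning returned above.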
+ image_conditioning = image_conditioning.to(x.dtype)
+
+ return image_conditioning
+
+
+class StableDiffusionProcessing:
"""
The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
"""
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
if sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
- self.sd_model = sd_model
self.outpath_samples: str = outpath_samples
self.outpath_grids: str = outpath_grids
self.prompt: str = prompt
@@ -123,6 +141,7 @@ class StableDiffusionProcessing():
self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
self.override_settings_restore_afterwards = override_settings_restore_afterwards
self.is_using_inpainting_conditioning = False
+ self.disable_extra_networks = False
if not seed_enable_extras:
self.subseed = -1
@@ -131,33 +150,21 @@ class StableDiffusionProcessing():
self.seed_resize_from_w = 0
self.scripts = None
- self.script_args = None
+ self.script_args = script_args
self.all_prompts = None
self.all_negative_prompts = None
self.all_seeds = None
self.all_subseeds = None
+ self.iteration = 0
- def txt2img_image_conditioning(self, x, width=None, height=None):
- if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
- # Dummy zero conditioning if we're not using inpainting model.
- # Still takes up a bit of memory, but no encoder call.
- # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
- return x.new_zeros(x.shape[0], 5, 1, 1)
+ @property
+ def sd_model(self):
+ return shared.sd_model
- self.is_using_inpainting_conditioning = True
-
- height = height or self.height
- width = width or self.width
-
- # The "masked-image" in this case will just be all zeros since the entire image is masked.
- image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
- image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning))
-
- # Add the fake full 1s mask to the first dimension.
- image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
- image_conditioning = image_conditioning.to(x.dtype)
+ def txt2img_image_conditioning(self, x, width=None, height=None):
+ self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}
- return image_conditioning
+ return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height)
def depth2img_image_conditioning(self, source_image):
# Use the AddMiDaS helper to Format our source image to suit the MiDaS model
@@ -166,7 +173,8 @@ class StableDiffusionProcessing():
midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)
- conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
+ conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image.to(devices.dtype_vae) if devices.unet_needs_upcast else source_image))
+ conditioning_image = conditioning_image.float() if devices.unet_needs_upcast else conditioning_image
conditioning = torch.nn.functional.interpolate(
self.sd_model.depth_model(midas_in),
size=conditioning_image.shape[2:],
@@ -178,7 +186,12 @@ class StableDiffusionProcessing():
conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
return conditioning
- def inpainting_image_conditioning(self, source_image, latent_image, image_mask = None):
+ def edit_image_conditioning(self, source_image):
+ conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
+
+ return conditioning_image
+
+ def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
self.is_using_inpainting_conditioning = True
# Handle the different mask inputs
@@ -197,7 +210,7 @@ class StableDiffusionProcessing():
# Create another latent image, this time with a masked version of the original input.
# Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
- conditioning_mask = conditioning_mask.to(source_image.device).to(source_image.dtype)
+ conditioning_mask = conditioning_mask.to(device=source_image.device, dtype=source_image.dtype)
conditioning_image = torch.lerp(
source_image,
source_image * (1.0 - conditioning_mask),
@@ -205,7 +218,7 @@ class StableDiffusionProcessing():
)
# Encode the new masked image using first stage of network.
- conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))
+ conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image.to(devices.dtype_vae) if devices.unet_needs_upcast else conditioning_image))
# Create the concatenated conditioning tensor to be fed to `c_concat`
conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
@@ -219,10 +232,13 @@ class StableDiffusionProcessing():
# HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
# identify itself with a field common to all models. The conditioning_key is also hybrid.
if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
- return self.depth2img_image_conditioning(source_image)
+ return self.depth2img_image_conditioning(source_image.float() if devices.unet_needs_upcast else source_image)
+
+ if self.sd_model.cond_stage_key == "edit":
+ return self.edit_image_conditioning(source_image)
if self.sampler.conditioning_key in {'hybrid', 'concat'}:
- return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+ return self.inpainting_image_conditioning(source_image.float() if devices.unet_needs_upcast else source_image, latent_image, image_mask=image_mask)
# Dummy zero conditioning if we're not using inpainting or depth model.
return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
@@ -234,12 +250,11 @@ class StableDiffusionProcessing():
raise NotImplementedError()
def close(self):
- self.sd_model = None
self.sampler = None
class Processed:
- def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
+ def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
self.images = images_list
self.prompt = p.prompt
self.negative_prompt = p.negative_prompt
@@ -247,6 +262,7 @@ class Processed:
self.subseed = subseed
self.subseed_strength = p.subseed_strength
self.info = info
+ self.comments = comments
self.width = p.width
self.height = p.height
self.sampler_name = p.sampler_name
@@ -338,13 +354,14 @@ def slerp(val, low, high):
def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
+ eta_noise_seed_delta = opts.eta_noise_seed_delta or 0
xs = []
# if we have multiple seeds, this means we are working with batch size>1; this then
# enables the generation of additional tensors with noise that the sampler will use during its processing.
# Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
# produce the same images as with two batches [100], [101].
- if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or opts.eta_noise_seed_delta > 0):
+ if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0):
sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
else:
sampler_noises = None
@@ -384,8 +401,8 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
if sampler_noises is not None:
cnt = p.sampler.number_of_needed_noises(p)
- if opts.eta_noise_seed_delta > 0:
- torch.manual_seed(seed + opts.eta_noise_seed_delta)
+ if eta_noise_seed_delta > 0:
+ torch.manual_seed(seed + eta_noise_seed_delta)
for j in range(cnt):
sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))
@@ -401,7 +418,7 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
def decode_first_stage(model, x):
with devices.autocast(disable=x.dtype == devices.dtype_vae):
- x = model.decode_first_stage(x)
+ x = model.decode_first_stage(x.to(devices.dtype_vae) if devices.unet_needs_upcast else x)
return x
@@ -418,7 +435,7 @@ def fix_seed(p):
p.subseed = get_fixed_seed(p.subseed)
-def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
@@ -432,9 +449,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
- "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name),
- "Hypernet hash": (None if shared.loaded_hypernetwork is None else sd_models.model_hash(shared.loaded_hypernetwork.filename)),
- "Hypernet strength": (None if shared.loaded_hypernetwork is None or shared.opts.sd_hypernetwork_strength >= 1 else shared.opts.sd_hypernetwork_strength),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
@@ -462,9 +476,12 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
try:
for k, v in p.override_settings.items():
setattr(opts, k, v)
- if k == 'sd_hypernetwork': shared.reload_hypernetworks() # make onchange call for changing hypernet
- if k == 'sd_model_checkpoint': sd_models.reload_model_weights() # make onchange call for changing SD model
- if k == 'sd_vae': sd_vae.reload_vae_weights() # make onchange call for changing VAE
+
+ if k == 'sd_model_checkpoint':
+ sd_models.reload_model_weights()
+
+ if k == 'sd_vae':
+ sd_vae.reload_vae_weights()
res = process_images_inner(p)
@@ -473,9 +490,11 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if p.override_settings_restore_afterwards:
for k, v in stored_opts.items():
setattr(opts, k, v)
- if k == 'sd_hypernetwork': shared.reload_hypernetworks()
- if k == 'sd_model_checkpoint': sd_models.reload_model_weights()
- if k == 'sd_vae': sd_vae.reload_vae_weights()
+ if k == 'sd_model_checkpoint':
+ sd_models.reload_model_weights()
+
+ if k == 'sd_vae':
+ sd_vae.reload_vae_weights()
return res
@@ -521,27 +540,61 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
def infotext(iteration=0, position_in_batch=0):
return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch)
- with open(os.path.join(shared.script_path, "params.txt"), "w", encoding="utf8") as file:
- processed = Processed(p, [], p.seed, "")
- file.write(processed.infotext(p, 0))
-
if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
model_hijack.embedding_db.load_textual_inversion_embeddings()
+ _, extra_network_data = extra_networks.parse_prompts(p.all_prompts[0:1])
+
if p.scripts is not None:
p.scripts.process(p)
infotexts = []
output_images = []
+ cached_uc = [None, None]
+ cached_c = [None, None]
+
+ def get_conds_with_caching(function, required_prompts, steps, cache):
+ """
+ Returns the result of calling function(shared.sd_model, required_prompts, steps)
+ using a cache to store the result if the same arguments have been used before.
+
+ cache is an array containing two elements. The first element is a tuple
+ representing the previously used arguments, or None if no arguments
+ have been used before. The second element is where the previously
+ computed result is stored.
+ """
+
+ if cache[0] is not None and (required_prompts, steps) == cache[0]:
+ return cache[1]
+
+ with devices.autocast():
+ cache[1] = function(shared.sd_model, required_prompts, steps)
+
+ cache[0] = (required_prompts, steps)
+ return cache[1]
+
with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
+ # for OSX, loading the model during sampling changes the generated picture, so it is loaded here
+ if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
+ sd_vae_approx.model()
+
+ if not p.disable_extra_networks:
+ extra_networks.activate(p, extra_network_data)
+
+ with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
+ processed = Processed(p, [], p.seed, "")
+ file.write(processed.infotext(p, 0))
+
if state.job_count == -1:
state.job_count = p.n_iter
for n in range(p.n_iter):
+ p.iteration = n
+
if state.skipped:
state.skipped = False
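The two-slot cache used by get_conds_with_caching above is a simple argument-keyed memo: slot 0 remembers the last (prompts, steps) pair, slot 1 the last result. A minimal sketch of the same pattern, with compute_conditioning as a hypothetical stand-in for the real conditioning functions:

```
# sketch of the two-slot caching pattern; compute_conditioning is a stand-in, not webui code
def compute_conditioning(model, prompts, steps):
    return (model, tuple(prompts), steps)  # pretend this is expensive

def get_with_caching(function, model, required_prompts, steps, cache):
    # cache[0] holds the arguments of the previous call, cache[1] the previous result
    if cache[0] is not None and (required_prompts, steps) == cache[0]:
        return cache[1]

    cache[1] = function(model, required_prompts, steps)
    cache[0] = (required_prompts, steps)
    return cache[1]

cached_c = [None, None]
first = get_with_caching(compute_conditioning, "sd_model", ["a cat"], 20, cached_c)   # computed
second = get_with_caching(compute_conditioning, "sd_model", ["a cat"], 20, cached_c)  # reused
assert first is second
```

This is why a run that repeats the same prompt and negative prompt across iterations only pays for conditioning once.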
@@ -556,12 +609,13 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if len(prompts) == 0:
break
+ prompts, _ = extra_networks.parse_prompts(prompts)
+
if p.scripts is not None:
p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)
- with devices.autocast():
- uc = prompt_parser.get_learned_conditioning(shared.sd_model, negative_prompts, p.steps)
- c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
+ uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc)
+ c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c)
if len(model_hijack.comments) > 0:
for comment in model_hijack.comments:
@@ -570,10 +624,13 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
- with devices.autocast():
+ with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))]
+ for x in x_samples_ddim:
+ devices.test_for_nans(x, "vae")
+
x_samples_ddim = torch.stack(x_samples_ddim).float()
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
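The test_for_nans call above checks the decoded VAE output before it is turned into an image. A generic sketch of such a check with plain torch (the message wording and exception type here are assumptions, not the webui's devices helper):

```
import torch

def check_for_nans(tensor, where):
    # raise a descriptive error if a stage produced NaNs, which usually points at a
    # precision problem (for example running the VAE in fp16 on some hardware)
    if torch.isnan(tensor).any():
        raise RuntimeError(f"NaNs detected in output of {where}; try running that stage in full precision")

try:
    check_for_nans(torch.tensor([0.0, 1.0, float("nan")]), "vae")
except RuntimeError as err:
    print(err)
```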
@@ -602,6 +659,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
image = Image.fromarray(x_sample)
+ if p.scripts is not None:
+ pp = scripts.PostprocessImageArgs(image)
+ p.scripts.postprocess_image(p, pp)
+ image = pp.image
+
if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
@@ -643,9 +705,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
+ if not p.disable_extra_networks:
+ extra_networks.deactivate(p, extra_network_data)
+
devices.torch_gc()
- res = Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts)
+ res = Processed(p, output_images, p.all_seeds[0], infotext(), comments="".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts)
if p.scripts is not None:
p.scripts.postprocess(p, res)
@@ -653,79 +718,138 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
return res
+def old_hires_fix_first_pass_dimensions(width, height):
+ """old algorithm for auto-calculating first pass size"""
+
+ desired_pixel_count = 512 * 512
+ actual_pixel_count = width * height
+ scale = math.sqrt(desired_pixel_count / actual_pixel_count)
+ width = math.ceil(scale * width / 64) * 64
+ height = math.ceil(scale * height / 64) * 64
+
+ return width, height
+
+
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
sampler = None
- def __init__(self, enable_hr: bool=False, denoising_strength: float=0.75, firstphase_width: int=0, firstphase_height: int=0, **kwargs):
+ def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, **kwargs):
super().__init__(**kwargs)
self.enable_hr = enable_hr
self.denoising_strength = denoising_strength
- self.firstphase_width = firstphase_width
- self.firstphase_height = firstphase_height
+ self.hr_scale = hr_scale
+ self.hr_upscaler = hr_upscaler
+ self.hr_second_pass_steps = hr_second_pass_steps
+ self.hr_resize_x = hr_resize_x
+ self.hr_resize_y = hr_resize_y
+ self.hr_upscale_to_x = hr_resize_x
+ self.hr_upscale_to_y = hr_resize_y
+
+ if firstphase_width != 0 or firstphase_height != 0:
+ self.hr_upscale_to_x = self.width
+ self.hr_upscale_to_y = self.height
+ self.width = firstphase_width
+ self.height = firstphase_height
+
self.truncate_x = 0
self.truncate_y = 0
+ self.applied_old_hires_behavior_to = None
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
- if state.job_count == -1:
- state.job_count = self.n_iter * 2
+ if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
+ self.hr_resize_x = self.width
+ self.hr_resize_y = self.height
+ self.hr_upscale_to_x = self.width
+ self.hr_upscale_to_y = self.height
+
+ self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
+ self.applied_old_hires_behavior_to = (self.width, self.height)
+
+ if self.hr_resize_x == 0 and self.hr_resize_y == 0:
+ self.extra_generation_params["Hires upscale"] = self.hr_scale
+ self.hr_upscale_to_x = int(self.width * self.hr_scale)
+ self.hr_upscale_to_y = int(self.height * self.hr_scale)
else:
- state.job_count = state.job_count * 2
-
- self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}"
+ self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"
+
+ if self.hr_resize_y == 0:
+ self.hr_upscale_to_x = self.hr_resize_x
+ self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+ elif self.hr_resize_x == 0:
+ self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+ self.hr_upscale_to_y = self.hr_resize_y
+ else:
+ target_w = self.hr_resize_x
+ target_h = self.hr_resize_y
+ src_ratio = self.width / self.height
+ dst_ratio = self.hr_resize_x / self.hr_resize_y
+
+ if src_ratio < dst_ratio:
+ self.hr_upscale_to_x = self.hr_resize_x
+ self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+ else:
+ self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+ self.hr_upscale_to_y = self.hr_resize_y
+
+ self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
+ self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
+
+ # special case: the user has chosen to do nothing
+ if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height:
+ self.enable_hr = False
+ self.denoising_strength = None
+ self.extra_generation_params.pop("Hires upscale", None)
+ self.extra_generation_params.pop("Hires resize", None)
+ return
- if self.firstphase_width == 0 or self.firstphase_height == 0:
- desired_pixel_count = 512 * 512
- actual_pixel_count = self.width * self.height
- scale = math.sqrt(desired_pixel_count / actual_pixel_count)
- self.firstphase_width = math.ceil(scale * self.width / 64) * 64
- self.firstphase_height = math.ceil(scale * self.height / 64) * 64
- firstphase_width_truncated = int(scale * self.width)
- firstphase_height_truncated = int(scale * self.height)
+ if not state.processing_has_refined_job_count:
+ if state.job_count == -1:
+ state.job_count = self.n_iter
- else:
-
- width_ratio = self.width / self.firstphase_width
- height_ratio = self.height / self.firstphase_height
+ shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
+ state.job_count = state.job_count * 2
+ state.processing_has_refined_job_count = True
- if width_ratio > height_ratio:
- firstphase_width_truncated = self.firstphase_width
- firstphase_height_truncated = self.firstphase_width * self.height / self.width
- else:
- firstphase_width_truncated = self.firstphase_height * self.width / self.height
- firstphase_height_truncated = self.firstphase_height
+ if self.hr_second_pass_steps:
+ self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps
- self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
- self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
+ if self.hr_upscaler is not None:
+ self.extra_generation_params["Hires upscaler"] = self.hr_upscaler
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
+ latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
+ if self.enable_hr and latent_scale_mode is None:
+ assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}"
+
+ x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+ samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
+
if not self.enable_hr:
- x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
- samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
return samples
- x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
- samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x, self.firstphase_width, self.firstphase_height))
-
- samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
+ target_width = self.hr_upscale_to_x
+ target_height = self.hr_upscale_to_y
- """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""
def save_intermediate(image, index):
+ """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""
+
if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
return
if not isinstance(image, Image.Image):
- image = sd_samplers.sample_to_image(image, index)
+ image = sd_samplers.sample_to_image(image, index, approximation=0)
- images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
+ info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
+ images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix")
- if opts.use_scale_latent_for_hires_fix:
+ if latent_scale_mode is not None:
for i in range(samples.shape[0]):
save_intermediate(samples, i)
- samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+ samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
# Avoid making the inpainting conditioning unless necessary as
# this does need some extra compute to decode / encode the image again.
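The new sizing logic in init() above resolves hr_scale and hr_resize_x/hr_resize_y into hr_upscale_to_x/hr_upscale_to_y, and stores any excess (when both resize targets are given) as a truncation in latent units. A small worked sketch of the same arithmetic as plain functions, with opt_f assumed to be 8 as in the webui:

```
import math

OPT_F = 8  # latent downsampling factor (opt_f in the webui)

def hires_target(width, height, hr_scale, hr_resize_x, hr_resize_y):
    # returns (upscale_to_x, upscale_to_y, truncate_x, truncate_y) following init()
    if hr_resize_x == 0 and hr_resize_y == 0:
        return int(width * hr_scale), int(height * hr_scale), 0, 0

    if hr_resize_y == 0:
        return hr_resize_x, hr_resize_x * height // width, 0, 0
    if hr_resize_x == 0:
        return hr_resize_y * width // height, hr_resize_y, 0, 0

    # both sizes given: upscale enough to cover the target, truncate the excess in latent space
    src_ratio = width / height
    dst_ratio = hr_resize_x / hr_resize_y
    if src_ratio < dst_ratio:
        up_x, up_y = hr_resize_x, hr_resize_x * height // width
    else:
        up_x, up_y = hr_resize_y * width // height, hr_resize_y

    return up_x, up_y, (up_x - hr_resize_x) // OPT_F, (up_y - hr_resize_y) // OPT_F

# 512x512 first pass with "resize to" 768x1024: upscale to 1024x1024, truncate 256px horizontally
print(hires_target(512, 512, 2.0, 768, 1024))  # (1024, 1024, 32, 0)
```

For comparison, the restored old behaviour computes the first pass size instead of the target: old_hires_fix_first_pass_dimensions(1024, 768) returns (640, 448), since sqrt(512*512 / (1024*768)) ≈ 0.577 and both sides are rounded up to multiples of 64.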
@@ -745,7 +869,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
save_intermediate(image, i)
- image = images.resize_image(0, image, self.width, self.height)
+ image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler)
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
batch_images.append(image)
@@ -760,15 +884,18 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
shared.state.nextjob()
- self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
+ img2img_sampler_name = self.sampler_name if self.sampler_name != 'PLMS' else 'DDIM' # PLMS does not support img2img so we just silently switch to DDIM
+ self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
+
+ samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
- noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+ noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
- samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=image_conditioning)
+ samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
return samples
@@ -880,7 +1007,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image = torch.from_numpy(batch_images)
image = 2. * image - 1.
- image = image.to(shared.device)
+ image = image.to(device=shared.device, dtype=devices.dtype_vae if devices.unet_needs_upcast else None)
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
diff --git a/modules/progress.py b/modules/progress.py
new file mode 100644
index 00000000..c69ecf3d
--- /dev/null
+++ b/modules/progress.py
@@ -0,0 +1,99 @@
+import base64
+import io
+import time
+
+import gradio as gr
+from pydantic import BaseModel, Field
+
+from modules.shared import opts
+
+import modules.shared as shared
+
+
+current_task = None
+pending_tasks = {}
+finished_tasks = []
+
+
+def start_task(id_task):
+ global current_task
+
+ current_task = id_task
+ pending_tasks.pop(id_task, None)
+
+
+def finish_task(id_task):
+ global current_task
+
+ if current_task == id_task:
+ current_task = None
+
+ finished_tasks.append(id_task)
+ if len(finished_tasks) > 16:
+ finished_tasks.pop(0)
+
+
+def add_task_to_queue(id_job):
+ pending_tasks[id_job] = time.time()
+
+
+class ProgressRequest(BaseModel):
+ id_task: str = Field(default=None, title="Task ID", description="id of the task to get progress for")
+ id_live_preview: int = Field(default=-1, title="Live preview image ID", description="id of the last received preview image")
+
+
+class ProgressResponse(BaseModel):
+ active: bool = Field(title="Whether the task is being worked on right now")
+ queued: bool = Field(title="Whether the task is in queue")
+ completed: bool = Field(title="Whether the task has already finished")
+ progress: float = Field(default=None, title="Progress", description="The progress with a range of 0 to 1")
+ eta: float = Field(default=None, title="ETA in secs")
+ live_preview: str = Field(default=None, title="Live preview image", description="Current live preview; a data: uri")
+ id_live_preview: int = Field(default=None, title="Live preview image ID", description="Send this together with the next request to prevent receiving the same image")
+ textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.")
+
+
+def setup_progress_api(app):
+ return app.add_api_route("/internal/progress", progressapi, methods=["POST"], response_model=ProgressResponse)
+
+
+def progressapi(req: ProgressRequest):
+ active = req.id_task == current_task
+ queued = req.id_task in pending_tasks
+ completed = req.id_task in finished_tasks
+
+ if not active:
+ return ProgressResponse(active=active, queued=queued, completed=completed, id_live_preview=-1, textinfo="In queue..." if queued else "Waiting...")
+
+ progress = 0
+
+ job_count, job_no = shared.state.job_count, shared.state.job_no
+ sampling_steps, sampling_step = shared.state.sampling_steps, shared.state.sampling_step
+
+ if job_count > 0:
+ progress += job_no / job_count
+ if sampling_steps > 0 and job_count > 0:
+ progress += 1 / job_count * sampling_step / sampling_steps
+
+ progress = min(progress, 1)
+
+ elapsed_since_start = time.time() - shared.state.time_start
+ predicted_duration = elapsed_since_start / progress if progress > 0 else None
+ eta = predicted_duration - elapsed_since_start if predicted_duration is not None else None
+
+ id_live_preview = req.id_live_preview
+ shared.state.set_current_image()
+ if opts.live_previews_enable and shared.state.id_live_preview != req.id_live_preview:
+ image = shared.state.current_image
+ if image is not None:
+ buffered = io.BytesIO()
+ image.save(buffered, format="png")
+ live_preview = 'data:image/png;base64,' + base64.b64encode(buffered.getvalue()).decode("ascii")
+ id_live_preview = shared.state.id_live_preview
+ else:
+ live_preview = None
+ else:
+ live_preview = None
+
+ return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo)
+
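The new endpoint can be polled from outside the UI as well. A minimal client sketch, assuming the webui is reachable at http://127.0.0.1:7860 and that the requests package is available; id_task is whatever task id the caller attached to the generation request:

```
import time
import requests

URL = "http://127.0.0.1:7860/internal/progress"  # assumed local webui address

def poll_progress(id_task, id_live_preview=-1):
    # mirrors ProgressRequest: send the task id and the id of the last preview we already received
    resp = requests.post(URL, json={"id_task": id_task, "id_live_preview": id_live_preview})
    resp.raise_for_status()
    return resp.json()

def wait_for_task(id_task):
    id_live_preview = -1
    while True:
        data = poll_progress(id_task, id_live_preview)
        if data.get("completed"):
            return
        print(f"progress={data.get('progress')} eta={data.get('eta')} text={data.get('textinfo')}")
        id_live_preview = data.get("id_live_preview", id_live_preview)
        time.sleep(1)
```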
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index f70872c4..69665372 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -49,6 +49,8 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
[[5, 'a c'], [10, 'a {b|d{ c']]
>>> g("((a][:b:c [d:3]")
[[3, '((a][:b:c '], [10, '((a][:b:c d']]
+ >>> g("[a|(b:1.1)]")
+ [[1, 'a'], [2, '(b:1.1)'], [3, 'a'], [4, '(b:1.1)'], [5, 'a'], [6, '(b:1.1)'], [7, 'a'], [8, '(b:1.1)'], [9, 'a'], [10, '(b:1.1)']]
"""
def collect_steps(steps, tree):
@@ -84,7 +86,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
yield args[0].value
def __default__(self, data, children, meta):
for child in children:
- yield from child
+ yield child
return AtStep().transform(tree)
def get_schedule(prompt):
@@ -272,6 +274,7 @@ re_attention = re.compile(r"""
:
""", re.X)
+re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
def parse_prompt_attention(text):
"""
@@ -337,7 +340,11 @@ def parse_prompt_attention(text):
elif text == ']' and len(square_brackets) > 0:
multiply_range(square_brackets.pop(), square_bracket_multiplier)
else:
- res.append([text, 1.0])
+ parts = re.split(re_break, text)
+ for i, part in enumerate(parts):
+ if i > 0:
+ res.append(["BREAK", -1])
+ res.append([part, 1.0])
for pos in round_brackets:
multiply_range(pos, round_bracket_multiplier)
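The BREAK handling rests on re_break consuming the keyword and its surrounding whitespace, after which parse_prompt_attention inserts a ["BREAK", -1] marker between the split parts. The split itself can be checked in isolation:

```
import re

re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)

print(re.split(re_break, "a red dress BREAK blue sky"))
# ['a red dress', 'blue sky'] - the keyword and the spaces around it are consumed

print(re.split(re_break, "BREAKFAST on the beach"))
# ['BREAKFAST on the beach'] - \b requires the word to stand alone
```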
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index 3ac0b97a..47f70251 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -38,13 +38,13 @@ class UpscalerRealESRGAN(Upscaler):
return img
info = self.load_model(path)
- if not os.path.exists(info.data_path):
+ if not os.path.exists(info.local_data_path):
print("Unable to load RealESRGAN model: %s" % info.name)
return img
upsampler = RealESRGANer(
scale=info.scale,
- model_path=info.data_path,
+ model_path=info.local_data_path,
model=info.model(),
half=not cmd_opts.no_half,
tile=opts.ESRGAN_tile,
@@ -58,17 +58,13 @@ class UpscalerRealESRGAN(Upscaler):
def load_model(self, path):
try:
- info = None
- for scaler in self.scalers:
- if scaler.data_path == path:
- info = scaler
+ info = next(iter([scaler for scaler in self.scalers if scaler.data_path == path]), None)
if info is None:
print(f"Unable to find model info: {path}")
return None
- model_file = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True)
- info.data_path = model_file
+ info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True)
return info
except Exception as e:
print(f"Error making Real-ESRGAN models list: {e}", file=sys.stderr)
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 8e22f875..4bb45ec7 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -2,7 +2,7 @@ import sys
import traceback
from collections import namedtuple
import inspect
-from typing import Optional
+from typing import Optional, Dict, Any
from fastapi import FastAPI
from gradio import Blocks
@@ -51,6 +51,13 @@ class UiTrainTabParams:
self.txt2img_preview_params = txt2img_preview_params
+class ImageGridLoopParams:
+ def __init__(self, imgs, cols, rows):
+ self.imgs = imgs
+ self.cols = cols
+ self.rows = rows
+
+
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
callback_map = dict(
callbacks_app_started=[],
@@ -63,6 +70,10 @@ callback_map = dict(
callbacks_cfg_denoiser=[],
callbacks_before_component=[],
callbacks_after_component=[],
+ callbacks_image_grid=[],
+ callbacks_infotext_pasted=[],
+ callbacks_script_unloaded=[],
+ callbacks_before_ui=[],
)
@@ -155,6 +166,38 @@ def after_component_callback(component, **kwargs):
report_exception(c, 'after_component_callback')
+def image_grid_callback(params: ImageGridLoopParams):
+ for c in callback_map['callbacks_image_grid']:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'image_grid')
+
+
+def infotext_pasted_callback(infotext: str, params: Dict[str, Any]):
+ for c in callback_map['callbacks_infotext_pasted']:
+ try:
+ c.callback(infotext, params)
+ except Exception:
+ report_exception(c, 'infotext_pasted')
+
+
+def script_unloaded_callback():
+ for c in reversed(callback_map['callbacks_script_unloaded']):
+ try:
+ c.callback()
+ except Exception:
+ report_exception(c, 'script_unloaded')
+
+
+def before_ui_callback():
+ for c in reversed(callback_map['callbacks_before_ui']):
+ try:
+ c.callback()
+ except Exception:
+ report_exception(c, 'before_ui')
+
+
def add_callback(callbacks, fun):
stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if len(stack) > 0 else 'unknown file'
@@ -186,7 +229,7 @@ def on_app_started(callback):
def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is
- passed as an argument"""
+ passed as an argument; this function is also called when the script is reloaded. """
add_callback(callback_map['callbacks_model_loaded'], callback)
@@ -255,3 +298,33 @@ def on_before_component(callback):
def on_after_component(callback):
"""register a function to be called after a component is created. See on_before_component for more."""
add_callback(callback_map['callbacks_after_component'], callback)
+
+
+def on_image_grid(callback):
+ """register a function to be called before making an image grid.
+ The callback is called with one argument:
+ - params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified.
+ """
+ add_callback(callback_map['callbacks_image_grid'], callback)
+
+
+def on_infotext_pasted(callback):
+ """register a function to be called before applying an infotext.
+ The callback is called with two arguments:
+ - infotext: str - raw infotext.
+ - result: Dict[str, any] - parsed infotext parameters.
+ """
+ add_callback(callback_map['callbacks_infotext_pasted'], callback)
+
+
+def on_script_unloaded(callback):
+ """register a function to be called before the script is unloaded. Any hooks/hijacks/monkeying about that
+ the script did should be reverted here"""
+
+ add_callback(callback_map['callbacks_script_unloaded'], callback)
+
+
+def on_before_ui(callback):
+ """register a function to be called before the UI is created."""
+
+ add_callback(callback_map['callbacks_before_ui'], callback)
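An extension script would register these callbacks at import time. A short sketch of what that might look like; the file name and the specific adjustments are illustrative, while the registration functions and parameter shapes follow the docstrings above:

```
# scripts/my_extension.py - hypothetical extension using the new callbacks
from modules import script_callbacks

def on_grid(params: script_callbacks.ImageGridLoopParams):
    # params can be modified in place before the grid is assembled
    params.cols = len(params.imgs)
    params.rows = 1

def on_pasted(infotext, params):
    # params is the parsed infotext dictionary; entries can be added or adjusted before they are applied
    params.setdefault("CFG scale", 7)

def on_unloaded():
    # revert any hooks/hijacks the script installed
    pass

script_callbacks.on_image_grid(on_grid)
script_callbacks.on_infotext_pasted(on_pasted)
script_callbacks.on_script_unloaded(on_unloaded)
```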
diff --git a/modules/script_loading.py b/modules/script_loading.py
index f93f0951..a7d2203f 100644
--- a/modules/script_loading.py
+++ b/modules/script_loading.py
@@ -1,16 +1,14 @@
import os
import sys
import traceback
+import importlib.util
from types import ModuleType
def load_module(path):
- with open(path, "r", encoding="utf8") as file:
- text = file.read()
-
- compiled = compile(text, path, 'exec')
- module = ModuleType(os.path.basename(path))
- exec(compiled, module.__dict__)
+ module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path)
+ module = importlib.util.module_from_spec(module_spec)
+ module_spec.loader.exec_module(module)
return module
diff --git a/modules/scripts.py b/modules/scripts.py
index 722f8685..6e9dc0c0 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -1,16 +1,21 @@
import os
+import re
import sys
import traceback
from collections import namedtuple
import gradio as gr
-from modules.processing import StableDiffusionProcessing
-from modules import shared, paths, script_callbacks, extensions, script_loading
+from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing
AlwaysVisible = object()
+class PostprocessImageArgs:
+ def __init__(self, image):
+ self.image = image
+
+
class Script:
filename = None
args_from = None
@@ -64,7 +69,7 @@ class Script:
args contains all values returned by components from ui()
"""
- raise NotImplementedError()
+ pass
def process(self, p, *args):
"""
@@ -99,6 +104,13 @@ class Script:
pass
+ def postprocess_image(self, p, pp: PostprocessImageArgs, *args):
+ """
+ Called for every image after it has been generated.
+ """
+
+ pass
+
def postprocess(self, p, processed, *args):
"""
This function is called after processing ends for AlwaysVisible scripts.
@@ -128,6 +140,15 @@ class Script:
"""unused"""
return ""
+ def elem_id(self, item_id):
+ """helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id"""
+
+ need_tabname = self.show(True) == self.show(False)
+ tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else ""
+ title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower()))
+
+ return f'script_{tabname}{title}_{item_id}'
+
current_basedir = paths.script_path
@@ -140,9 +161,11 @@ def basedir():
return current_basedir
-scripts_data = []
ScriptFile = namedtuple("ScriptFile", ["basedir", "filename", "path"])
-ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir"])
+
+scripts_data = []
+postprocessing_scripts_data = []
+ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"])
def list_scripts(scriptdirname, extension):
@@ -180,23 +203,31 @@ def list_files_with_name(filename):
def load_scripts():
global current_basedir
scripts_data.clear()
+ postprocessing_scripts_data.clear()
script_callbacks.clear_callbacks()
scripts_list = list_scripts("scripts", ".py")
syspath = sys.path
+ def register_scripts_from_module(module):
+ for key, script_class in module.__dict__.items():
+ if type(script_class) != type:
+ continue
+
+ if issubclass(script_class, Script):
+ scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))
+ elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing):
+ postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))
+
for scriptfile in sorted(scripts_list):
try:
if scriptfile.basedir != paths.script_path:
sys.path = [scriptfile.basedir] + sys.path
current_basedir = scriptfile.basedir
- module = script_loading.load_module(scriptfile.path)
-
- for key, script_class in module.__dict__.items():
- if type(script_class) == type and issubclass(script_class, Script):
- scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir))
+ script_module = script_loading.load_module(scriptfile.path)
+ register_scripts_from_module(script_module)
except Exception:
print(f"Error loading script: {scriptfile.filename}", file=sys.stderr)
@@ -227,11 +258,15 @@ class ScriptRunner:
self.infotext_fields = []
def initialize_scripts(self, is_img2img):
+ from modules import scripts_auto_postprocessing
+
self.scripts.clear()
self.alwayson_scripts.clear()
self.selectable_scripts.clear()
- for script_class, path, basedir in scripts_data:
+ auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data()
+
+ for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data:
script = script_class()
script.filename = path
script.is_txt2img = not is_img2img
@@ -280,7 +315,6 @@ class ScriptRunner:
script.group = group
dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index")
- dropdown.save_to_config = True
inputs[0] = dropdown
for script in self.selectable_scripts:
@@ -313,7 +347,7 @@ class ScriptRunner:
return inputs
- def run(self, p: StableDiffusionProcessing, *args):
+ def run(self, p, *args):
script_index = args[0]
if script_index == 0:
@@ -367,6 +401,15 @@ class ScriptRunner:
print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
+ def postprocess_image(self, p, pp: PostprocessImageArgs):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.postprocess_image(p, pp, *script_args)
+ except Exception:
+ print(f"Error running postprocess_image: {script.filename}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+
def before_component(self, component, **kwargs):
for script in self.scripts:
try:
@@ -404,6 +447,7 @@ class ScriptRunner:
scripts_txt2img = ScriptRunner()
scripts_img2img = ScriptRunner()
+scripts_postproc = scripts_postprocessing.ScriptPostprocessingRunner()
scripts_current: ScriptRunner = None
@@ -414,12 +458,13 @@ def reload_script_body_only():
def reload_scripts():
- global scripts_txt2img, scripts_img2img
+ global scripts_txt2img, scripts_img2img, scripts_postproc
load_scripts()
scripts_txt2img = ScriptRunner()
scripts_img2img = ScriptRunner()
+ scripts_postproc = scripts_postprocessing.ScriptPostprocessingRunner()
def IOComponent_init(self, *args, **kwargs):
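The new per-image hook is available to always-on scripts through postprocess_image, and elem_id gives their controls stable HTML ids. A sketch of a minimal always-on script using both; the stamp text and checkbox label are invented for illustration:

```
# scripts/stamp.py - hypothetical always-on script using postprocess_image and elem_id
import gradio as gr
from PIL import ImageDraw

from modules import scripts

class StampScript(scripts.Script):
    def title(self):
        return "Stamp"

    def show(self, is_img2img):
        return scripts.AlwaysVisible  # always-on, so postprocess_image runs for every image

    def ui(self, is_img2img):
        enabled = gr.Checkbox(label="Stamp images", value=False, elem_id=self.elem_id("enabled"))
        return [enabled]

    def postprocess_image(self, p, pp: scripts.PostprocessImageArgs, enabled):
        # called once per generated image; pp.image can be edited in place or replaced
        if not enabled:
            return
        ImageDraw.Draw(pp.image).text((8, 8), "stamped", fill="white")
```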
diff --git a/modules/scripts_auto_postprocessing.py b/modules/scripts_auto_postprocessing.py
new file mode 100644
index 00000000..30d6d658
--- /dev/null
+++ b/modules/scripts_auto_postprocessing.py
@@ -0,0 +1,42 @@
+from modules import scripts, scripts_postprocessing, shared
+
+
+class ScriptPostprocessingForMainUI(scripts.Script):
+ def __init__(self, script_postproc):
+ self.script: scripts_postprocessing.ScriptPostprocessing = script_postproc
+ self.postprocessing_controls = None
+
+ def title(self):
+ return self.script.name
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def ui(self, is_img2img):
+ self.postprocessing_controls = self.script.ui()
+ return self.postprocessing_controls.values()
+
+ def postprocess_image(self, p, script_pp, *args):
+ args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)}
+
+ pp = scripts_postprocessing.PostprocessedImage(script_pp.image)
+ pp.info = {}
+ self.script.process(pp, **args_dict)
+ p.extra_generation_params.update(pp.info)
+ script_pp.image = pp.image
+
+
+def create_auto_preprocessing_script_data():
+ from modules import scripts
+
+ res = []
+
+ for name in shared.opts.postprocessing_enable_in_main_ui:
+ script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None)
+ if script is None:
+ continue
+
+ constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class())
+ res.append(scripts.ScriptClassData(script_class=constructor, path=script.path, basedir=script.basedir, module=script.module))
+
+ return res
diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py
new file mode 100644
index 00000000..ce0ebb61
--- /dev/null
+++ b/modules/scripts_postprocessing.py
@@ -0,0 +1,152 @@
+import os
+import gradio as gr
+
+from modules import errors, shared
+
+
+class PostprocessedImage:
+ def __init__(self, image):
+ self.image = image
+ self.info = {}
+
+
+class ScriptPostprocessing:
+ filename = None
+ controls = None
+ args_from = None
+ args_to = None
+
+ order = 1000
+ """scripts will be ordered by this value in postprocessing UI"""
+
+ name = None
+ """the name of the script, shown in the postprocessing UI"""
+
+ group = None
+ """A gr.Group component that has all script's UI inside it"""
+
+ def ui(self):
+ """
+ This function should create gradio UI elements. See https://gradio.app/docs/#components
+ The return value should be a dictionary that maps parameter names to components used in processing.
+ Values of those components will be passed to process() function.
+ """
+
+ pass
+
+ def process(self, pp: PostprocessedImage, **args):
+ """
+ This function is called to postprocess the image.
+ args contains a dictionary with all values returned by components from ui()
+ """
+
+ pass
+
+ def image_changed(self):
+ pass
+
+
+
+
+def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
+ try:
+ res = func(*args, **kwargs)
+ return res
+ except Exception as e:
+ errors.display(e, f"calling {filename}/{funcname}")
+
+ return default
+
+
+class ScriptPostprocessingRunner:
+ def __init__(self):
+ self.scripts = None
+ self.ui_created = False
+
+ def initialize_scripts(self, scripts_data):
+ self.scripts = []
+
+ for script_class, path, basedir, script_module in scripts_data:
+ script: ScriptPostprocessing = script_class()
+ script.filename = path
+
+ if script.name == "Simple Upscale":
+ continue
+
+ self.scripts.append(script)
+
+ def create_script_ui(self, script, inputs):
+ script.args_from = len(inputs)
+ script.args_to = len(inputs)
+
+ script.controls = wrap_call(script.ui, script.filename, "ui")
+
+ for control in script.controls.values():
+ control.custom_script_source = os.path.basename(script.filename)
+
+ inputs += list(script.controls.values())
+ script.args_to = len(inputs)
+
+ def scripts_in_preferred_order(self):
+ if self.scripts is None:
+ import modules.scripts
+ self.initialize_scripts(modules.scripts.postprocessing_scripts_data)
+
+ scripts_order = shared.opts.postprocessing_operation_order
+
+ def script_score(name):
+ for i, possible_match in enumerate(scripts_order):
+ if possible_match == name:
+ return i
+
+ return len(self.scripts)
+
+ script_scores = {script.name: (script_score(script.name), script.order, script.name, original_index) for original_index, script in enumerate(self.scripts)}
+
+ return sorted(self.scripts, key=lambda x: script_scores[x.name])
+
+ def setup_ui(self):
+ inputs = []
+
+ for script in self.scripts_in_preferred_order():
+ with gr.Box() as group:
+ self.create_script_ui(script, inputs)
+
+ script.group = group
+
+ self.ui_created = True
+ return inputs
+
+ def run(self, pp: PostprocessedImage, args):
+ for script in self.scripts_in_preferred_order():
+ shared.state.job = script.name
+
+ script_args = args[script.args_from:script.args_to]
+
+ process_args = {}
+ for (name, component), value in zip(script.controls.items(), script_args):
+ process_args[name] = value
+
+ script.process(pp, **process_args)
+
+ def create_args_for_run(self, scripts_args):
+ if not self.ui_created:
+ with gr.Blocks(analytics_enabled=False):
+ self.setup_ui()
+
+ scripts = self.scripts_in_preferred_order()
+ args = [None] * max([x.args_to for x in scripts])
+
+ for script in scripts:
+ script_args_dict = scripts_args.get(script.name, None)
+ if script_args_dict is not None:
+
+ for i, name in enumerate(script.controls):
+ args[script.args_from + i] = script_args_dict.get(name, None)
+
+ return args
+
+ def image_changed(self):
+ for script in self.scripts_in_preferred_order():
+ script.image_changed()
+
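A postprocessing script built on this base class only has to provide a name, its controls, and a process() implementation. A minimal sketch; the operation and the slider are illustrative:

```
# hypothetical postprocessing script built on ScriptPostprocessing
import gradio as gr

from modules import scripts_postprocessing

class ScriptPostprocessingShrink(scripts_postprocessing.ScriptPostprocessing):
    name = "Shrink"
    order = 2000  # sort after the built-in operations

    def ui(self):
        # the keys of this dictionary become keyword arguments of process()
        factor = gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Shrink factor")
        return {"factor": factor}

    def process(self, pp: scripts_postprocessing.PostprocessedImage, factor):
        if factor <= 1:
            return
        w, h = pp.image.size
        pp.image = pp.image.resize((w // factor, h // factor))
        pp.info["Shrink factor"] = factor  # ends up in the image's generation parameters
```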
diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
new file mode 100644
index 00000000..e90aa9fe
--- /dev/null
+++ b/modules/sd_disable_initialization.py
@@ -0,0 +1,90 @@
+import ldm.modules.encoders.modules
+import open_clip
+import torch
+import transformers.utils.hub
+
+
+class DisableInitialization:
+ """
+ When an object of this class enters a `with` block, it starts:
+ - preventing torch's layer initialization functions from working
+ - changes CLIP and OpenCLIP to not download model weights
+ - changing CLIP and OpenCLIP so that they do not download model weights
+ - changing CLIP so that it does not make requests to check whether there is a new version of a file you already have
+ When it leaves the block, it reverts everything to how it was before.
+
+ Use it like this:
+ ```
+ with DisableInitialization():
+ do_things()
+ ```
+ """
+
+ def __init__(self):
+ self.replaced = []
+
+ def replace(self, obj, field, func):
+ original = getattr(obj, field, None)
+ if original is None:
+ return None
+
+ self.replaced.append((obj, field, original))
+ setattr(obj, field, func)
+
+ return original
+
+ def __enter__(self):
+ def do_nothing(*args, **kwargs):
+ pass
+
+ def create_model_and_transforms_without_pretrained(*args, pretrained=None, **kwargs):
+ return self.create_model_and_transforms(*args, pretrained=None, **kwargs)
+
+ def CLIPTextModel_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs):
+ res = self.CLIPTextModel_from_pretrained(None, *model_args, config=pretrained_model_name_or_path, state_dict={}, **kwargs)
+ res.name_or_path = pretrained_model_name_or_path
+ return res
+
+ def transformers_modeling_utils_load_pretrained_model(*args, **kwargs):
+ args = args[0:3] + ('/', ) + args[4:] # resolved_archive_file; must set it to something to prevent what seems to be a bug
+ return self.transformers_modeling_utils_load_pretrained_model(*args, **kwargs)
+
+ def transformers_utils_hub_get_file_from_cache(original, url, *args, **kwargs):
+
+ # this file is always 404, prevent making request
+ if url == 'https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json':
+ return None
+
+ try:
+ res = original(url, *args, local_files_only=True, **kwargs)
+ if res is None:
+ res = original(url, *args, local_files_only=False, **kwargs)
+ return res
+ except Exception as e:
+ return original(url, *args, local_files_only=False, **kwargs)
+
+ def transformers_utils_hub_get_from_cache(url, *args, local_files_only=False, **kwargs):
+ return transformers_utils_hub_get_file_from_cache(self.transformers_utils_hub_get_from_cache, url, *args, **kwargs)
+
+ def transformers_tokenization_utils_base_cached_file(url, *args, local_files_only=False, **kwargs):
+ return transformers_utils_hub_get_file_from_cache(self.transformers_tokenization_utils_base_cached_file, url, *args, **kwargs)
+
+ def transformers_configuration_utils_cached_file(url, *args, local_files_only=False, **kwargs):
+ return transformers_utils_hub_get_file_from_cache(self.transformers_configuration_utils_cached_file, url, *args, **kwargs)
+
+ self.replace(torch.nn.init, 'kaiming_uniform_', do_nothing)
+ self.replace(torch.nn.init, '_no_grad_normal_', do_nothing)
+ self.replace(torch.nn.init, '_no_grad_uniform_', do_nothing)
+ self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained)
+ self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained)
+ self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model)
+ self.transformers_tokenization_utils_base_cached_file = self.replace(transformers.tokenization_utils_base, 'cached_file', transformers_tokenization_utils_base_cached_file)
+ self.transformers_configuration_utils_cached_file = self.replace(transformers.configuration_utils, 'cached_file', transformers_configuration_utils_cached_file)
+ self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ for obj, field, original in self.replaced:
+ setattr(obj, field, original)
+
+ self.replaced.clear()
+
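The replace/restore mechanism in DisableInitialization is a general pattern: swap attributes on entry, remember the originals, and put them back on exit. A stripped-down sketch of the same idea, independent of torch and transformers:

```
class Patched:
    """temporarily replaces attributes on objects, restoring them on exit"""

    def __init__(self, patches):
        self.patches = patches   # list of (obj, field, replacement)
        self.replaced = []

    def __enter__(self):
        for obj, field, func in self.patches:
            original = getattr(obj, field, None)
            if original is None:
                continue
            self.replaced.append((obj, field, original))
            setattr(obj, field, func)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        for obj, field, original in self.replaced:
            setattr(obj, field, original)
        self.replaced.clear()


import math

with Patched([(math, "sqrt", lambda x: 0.0)]):
    print(math.sqrt(4))  # 0.0 while the patch is active
print(math.sqrt(4))      # 2.0 again after the block exits
```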
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 690a9ec2..f9652d21 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -5,9 +5,7 @@ import modules.textual_inversion.textual_inversion
from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
-from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet
-
-from modules.sd_hijack_optimizations import invokeAI_mps_available
+from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
@@ -35,26 +33,34 @@ def apply_optimizations():
ldm.modules.diffusionmodules.model.nonlinearity = silu
ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th
+
+ optimization_method = None
if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
print("Applying xformers cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
+ optimization_method = 'xformers'
+ elif cmd_opts.opt_sub_quad_attention:
+ print("Applying sub-quadratic cross attention optimization.")
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.sub_quad_attention_forward
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sub_quad_attnblock_forward
+ optimization_method = 'sub-quadratic'
elif cmd_opts.opt_split_attention_v1:
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
- elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()):
- if not invokeAI_mps_available and shared.device.type == 'mps':
- print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.")
- print("Applying v1 cross attention optimization.")
- ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
- else:
- print("Applying cross attention optimization (InvokeAI).")
- ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
+ optimization_method = 'V1'
+ elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not cmd_opts.opt_split_attention and not torch.cuda.is_available()):
+ print("Applying cross attention optimization (InvokeAI).")
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
+ optimization_method = 'InvokeAI'
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
print("Applying cross attention optimization (Doggettx).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
+ optimization_method = 'Doggettx'
+
+ return optimization_method
def undo_optimizations():
@@ -64,9 +70,11 @@ def undo_optimizations():
def fix_checkpoint():
- ldm.modules.attention.BasicTransformerBlock.forward = sd_hijack_checkpoint.BasicTransformerBlock_forward
- ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = sd_hijack_checkpoint.ResBlock_forward
- ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = sd_hijack_checkpoint.AttentionBlock_forward
+ """checkpoints are now added and removed in embedding/hypernet code, since torch doesn't want
+ checkpoints to be added when not training (there's a warning)"""
+
+ pass
+
class StableDiffusionModelHijack:
fixes = None
@@ -74,22 +82,31 @@ class StableDiffusionModelHijack:
layers = None
circular_enabled = False
clip = None
+ optimization_method = None
+
+ embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase()
- embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir)
+ def __init__(self):
+ self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
def hijack(self, m):
- if type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder:
+ if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
+ model_embeddings = m.cond_stage_model.roberta.embeddings
+ model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self)
+ m.cond_stage_model = sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords(m.cond_stage_model, self)
+
+ elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder:
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
+
elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder:
m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self)
m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
- self.clip = m.cond_stage_model
+ self.optimization_method = apply_optimizations()
- apply_optimizations()
- fix_checkpoint()
+ self.clip = m.cond_stage_model
def flatten(el):
flattened = [flatten(children) for children in el.children()]
@@ -101,7 +118,10 @@ class StableDiffusionModelHijack:
self.layers = flatten(m)
def undo_hijack(self, m):
- if type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
+ if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
+ m.cond_stage_model = m.cond_stage_model.wrapped
+
+ elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
m.cond_stage_model = m.cond_stage_model.wrapped
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
@@ -127,10 +147,10 @@ class StableDiffusionModelHijack:
def clear_comments(self):
self.comments = []
- def tokenize(self, text):
- _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
- return remade_batch_tokens[0], token_count, sd_hijack_clip.get_target_prompt_token_count(token_count)
+ def get_prompt_lengths(self, text):
+ _, token_count = self.clip.process_texts([text])
+ return token_count, self.clip.get_target_prompt_token_count(token_count)
class EmbeddingsWithFixes(torch.nn.Module):
diff --git a/modules/sd_hijack_checkpoint.py b/modules/sd_hijack_checkpoint.py
index 5712972f..2604d969 100644
--- a/modules/sd_hijack_checkpoint.py
+++ b/modules/sd_hijack_checkpoint.py
@@ -1,10 +1,46 @@
from torch.utils.checkpoint import checkpoint
+import ldm.modules.attention
+import ldm.modules.diffusionmodules.openaimodel
+
+
def BasicTransformerBlock_forward(self, x, context=None):
return checkpoint(self._forward, x, context)
+
def AttentionBlock_forward(self, x):
return checkpoint(self._forward, x)
+
def ResBlock_forward(self, x, emb):
- return checkpoint(self._forward, x, emb) \ No newline at end of file
+ return checkpoint(self._forward, x, emb)
+
+
+stored = []
+
+
+def add():
+ if len(stored) != 0:
+ return
+
+ stored.extend([
+ ldm.modules.attention.BasicTransformerBlock.forward,
+ ldm.modules.diffusionmodules.openaimodel.ResBlock.forward,
+ ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward
+ ])
+
+ ldm.modules.attention.BasicTransformerBlock.forward = BasicTransformerBlock_forward
+ ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = ResBlock_forward
+ ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = AttentionBlock_forward
+
+
+def remove():
+ if len(stored) == 0:
+ return
+
+ ldm.modules.attention.BasicTransformerBlock.forward = stored[0]
+ ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = stored[1]
+ ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = stored[2]
+
+ stored.clear()
+
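The stored-forward add()/remove() pair is meant to bracket training, since torch warns when gradient checkpointing is active outside of it. A hedged sketch of how a training loop might wrap the pair; train_one_step is a placeholder, not webui code:

```
# sketch: bracketing a hypothetical training loop with the checkpoint hijack
from modules import sd_hijack_checkpoint

def train(steps, train_one_step):
    sd_hijack_checkpoint.add()          # swap in the checkpointed forward functions
    try:
        for step in range(steps):
            train_one_step(step)        # placeholder for the embedding/hypernetwork step
    finally:
        sd_hijack_checkpoint.remove()   # restore the original forward functions
```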
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index b451d1cf..9fa5c5c5 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -1,31 +1,89 @@
import math
+from collections import namedtuple
import torch
-from modules import prompt_parser, devices
+from modules import prompt_parser, devices, sd_hijack
from modules.shared import opts
-def get_target_prompt_token_count(token_count):
- return math.ceil(max(token_count, 1) / 75) * 75
+class PromptChunk:
+ """
+ This object contains token ids, weights (multipliers such as the 1.4 in (word:1.4)) and textual inversion embedding info for a chunk of prompt.
+ If a prompt is short, it is represented by one PromptChunk; otherwise, multiple are necessary.
+ Each PromptChunk contains an exact number of tokens - 77, which includes one each for the start and end token,
+ so just 75 tokens from the prompt.
+ """
+
+ def __init__(self):
+ self.tokens = []
+ self.multipliers = []
+ self.fixes = []
+
+
+PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding'])
+"""An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt
+chunk. Thos objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally
+are applied by sd_hijack.EmbeddingsWithFixes's forward function."""
class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
+ """A pytorch module that is a wrapper for the FrozenCLIPEmbedder module. It enhances FrozenCLIPEmbedder, making it possible to
+ have unlimited prompt length and to assign weights to tokens in the prompt.
+ """
+
def __init__(self, wrapped, hijack):
super().__init__()
+
self.wrapped = wrapped
- self.hijack = hijack
+ """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
+ depending on model."""
+
+ self.hijack: sd_hijack.StableDiffusionModelHijack = hijack
+ self.chunk_length = 75
+
+ def empty_chunk(self):
+ """creates an empty PromptChunk and returns it"""
+
+ chunk = PromptChunk()
+ chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
+ chunk.multipliers = [1.0] * (self.chunk_length + 2)
+ return chunk
+
+ def get_target_prompt_token_count(self, token_count):
+ """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented"""
+
+ return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
def tokenize(self, texts):
+ """Converts a batch of texts into a batch of token ids"""
+
raise NotImplementedError
def encode_with_transformers(self, tokens):
+ """
+ converts a batch of token ids (in python lists) into a single tensor with the numeric representation of those tokens;
+ All python lists with tokens are assumed to have the same length, usually 77.
+ If the input is a list with B elements and each element has T tokens, the expected output shape is (B, T, C), where C depends on
+ the model - it can be 768 or 1024.
+ Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None).
+ """
+
raise NotImplementedError
def encode_embedding_init_text(self, init_text, nvpt):
+ """Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through
+ transformers. nvpt is used as a maximum length in tokens. If the text produces fewer tokens than nvpt, only that many are returned."""
+
raise NotImplementedError
- def tokenize_line(self, line, used_custom_terms, hijack_comments):
+ def tokenize_line(self, line):
+ """
+ this transforms a single prompt into a list of PromptChunk objects - as many as needed to
+ represent the prompt.
+ Returns the list and the total number of tokens in the prompt.
+ """
+
if opts.enable_emphasis:
parsed = prompt_parser.parse_prompt_attention(line)
else:
@@ -33,205 +91,161 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
tokenized = self.tokenize([text for text, _ in parsed])
- fixes = []
- remade_tokens = []
- multipliers = []
+ chunks = []
+ chunk = PromptChunk()
+ token_count = 0
last_comma = -1
+ def next_chunk(is_last=False):
+ """puts the current chunk into the list of results and produces the next one - empty;
+ if is_last is true, the <end-of-text> tokens at the end won't add to token_count"""
+ nonlocal token_count
+ nonlocal last_comma
+ nonlocal chunk
+
+ if is_last:
+ token_count += len(chunk.tokens)
+ else:
+ token_count += self.chunk_length
+
+ to_add = self.chunk_length - len(chunk.tokens)
+ if to_add > 0:
+ chunk.tokens += [self.id_end] * to_add
+ chunk.multipliers += [1.0] * to_add
+
+ chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
+ chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
+
+ last_comma = -1
+ chunks.append(chunk)
+ chunk = PromptChunk()
+
for tokens, (text, weight) in zip(tokenized, parsed):
- i = 0
- while i < len(tokens):
- token = tokens[i]
+ if text == 'BREAK' and weight == -1:
+ next_chunk()
+ continue
- embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
+ position = 0
+ while position < len(tokens):
+ token = tokens[position]
if token == self.comma_token:
- last_comma = len(remade_tokens)
- elif opts.comma_padding_backtrack != 0 and max(len(remade_tokens), 1) % 75 == 0 and last_comma != -1 and len(remade_tokens) - last_comma <= opts.comma_padding_backtrack:
- last_comma += 1
- reloc_tokens = remade_tokens[last_comma:]
- reloc_mults = multipliers[last_comma:]
+ last_comma = len(chunk.tokens)
+
+ # this is when we are at the end of the allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
+ # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
+ elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack:
+ break_location = last_comma + 1
+
+ reloc_tokens = chunk.tokens[break_location:]
+ reloc_mults = chunk.multipliers[break_location:]
- remade_tokens = remade_tokens[:last_comma]
- length = len(remade_tokens)
+ chunk.tokens = chunk.tokens[:break_location]
+ chunk.multipliers = chunk.multipliers[:break_location]
- rem = int(math.ceil(length / 75)) * 75 - length
- remade_tokens += [self.id_end] * rem + reloc_tokens
- multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults
+ next_chunk()
+ chunk.tokens = reloc_tokens
+ chunk.multipliers = reloc_mults
+ if len(chunk.tokens) == self.chunk_length:
+ next_chunk()
+
+ embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, position)
if embedding is None:
- remade_tokens.append(token)
- multipliers.append(weight)
- i += 1
- else:
- emb_len = int(embedding.vec.shape[0])
- iteration = len(remade_tokens) // 75
- if (len(remade_tokens) + emb_len) // 75 != iteration:
- rem = (75 * (iteration + 1) - len(remade_tokens))
- remade_tokens += [self.id_end] * rem
- multipliers += [1.0] * rem
- iteration += 1
- fixes.append((iteration, (len(remade_tokens) % 75, embedding)))
- remade_tokens += [0] * emb_len
- multipliers += [weight] * emb_len
- used_custom_terms.append((embedding.name, embedding.checksum()))
- i += embedding_length_in_tokens
-
- token_count = len(remade_tokens)
- prompt_target_length = get_target_prompt_token_count(token_count)
- tokens_to_add = prompt_target_length - len(remade_tokens)
-
- remade_tokens = remade_tokens + [self.id_end] * tokens_to_add
- multipliers = multipliers + [1.0] * tokens_to_add
-
- return remade_tokens, fixes, multipliers, token_count
-
- def process_text(self, texts):
- used_custom_terms = []
- remade_batch_tokens = []
- hijack_comments = []
- hijack_fixes = []
+ chunk.tokens.append(token)
+ chunk.multipliers.append(weight)
+ position += 1
+ continue
+
+ emb_len = int(embedding.vec.shape[0])
+ if len(chunk.tokens) + emb_len > self.chunk_length:
+ next_chunk()
+
+ chunk.fixes.append(PromptChunkFix(len(chunk.tokens), embedding))
+
+ chunk.tokens += [0] * emb_len
+ chunk.multipliers += [weight] * emb_len
+ position += embedding_length_in_tokens
+
+ if len(chunk.tokens) > 0 or len(chunks) == 0:
+ next_chunk(is_last=True)
+
+ return chunks, token_count
+
+ def process_texts(self, texts):
+ """
+        Accepts a list of texts and calls tokenize_line() on each, with caching. Returns the list of results and the maximum
+        length, in tokens, of all texts.
+ """
+
token_count = 0
cache = {}
- batch_multipliers = []
+ batch_chunks = []
for line in texts:
if line in cache:
- remade_tokens, fixes, multipliers = cache[line]
+ chunks = cache[line]
else:
- remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
+ chunks, current_token_count = self.tokenize_line(line)
token_count = max(current_token_count, token_count)
- cache[line] = (remade_tokens, fixes, multipliers)
+ cache[line] = chunks
- remade_batch_tokens.append(remade_tokens)
- hijack_fixes.append(fixes)
- batch_multipliers.append(multipliers)
+ batch_chunks.append(chunks)
- return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
+ return batch_chunks, token_count
- def process_text_old(self, texts):
- id_start = self.id_start
- id_end = self.id_end
- maxlen = self.wrapped.max_length # you get to stay at 77
- used_custom_terms = []
- remade_batch_tokens = []
- hijack_comments = []
- hijack_fixes = []
- token_count = 0
+ def forward(self, texts):
+ """
+        Accepts an array of texts; passes texts through the transformers network to create a tensor with a numerical representation of those texts.
+        Returns a tensor with a shape of (B, T, C), where B is the length of the array; T is the length, in tokens, of the texts (including padding) - T will
+        be a multiple of 77; and C is the dimensionality of each token - for SD1 it's 768, and for SD2 it's 1024.
+        An example shape returned by this function can be: (2, 77, 768).
+        Webui usually sends just one text at a time through this function - the only time texts is an array with more than one element
+        is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
+ """
- cache = {}
- batch_tokens = self.tokenize(texts)
- batch_multipliers = []
- for tokens in batch_tokens:
- tuple_tokens = tuple(tokens)
+ if opts.use_old_emphasis_implementation:
+ import modules.sd_hijack_clip_old
+ return modules.sd_hijack_clip_old.forward_old(self, texts)
- if tuple_tokens in cache:
- remade_tokens, fixes, multipliers = cache[tuple_tokens]
- else:
- fixes = []
- remade_tokens = []
- multipliers = []
- mult = 1.0
-
- i = 0
- while i < len(tokens):
- token = tokens[i]
-
- embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
-
- mult_change = self.token_mults.get(token) if opts.enable_emphasis else None
- if mult_change is not None:
- mult *= mult_change
- i += 1
- elif embedding is None:
- remade_tokens.append(token)
- multipliers.append(mult)
- i += 1
- else:
- emb_len = int(embedding.vec.shape[0])
- fixes.append((len(remade_tokens), embedding))
- remade_tokens += [0] * emb_len
- multipliers += [mult] * emb_len
- used_custom_terms.append((embedding.name, embedding.checksum()))
- i += embedding_length_in_tokens
-
- if len(remade_tokens) > maxlen - 2:
- vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
- ovf = remade_tokens[maxlen - 2:]
- overflowing_words = [vocab.get(int(x), "") for x in ovf]
- overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
- hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
-
- token_count = len(remade_tokens)
- remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
- remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
- cache[tuple_tokens] = (remade_tokens, fixes, multipliers)
-
- multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
- multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
-
- remade_batch_tokens.append(remade_tokens)
- hijack_fixes.append(fixes)
- batch_multipliers.append(multipliers)
- return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
-
- def forward(self, text):
- use_old = opts.use_old_emphasis_implementation
- if use_old:
- batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text)
- else:
- batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
-
- self.hijack.comments += hijack_comments
-
- if len(used_custom_terms) > 0:
- self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
-
- if use_old:
- self.hijack.fixes = hijack_fixes
- return self.process_tokens(remade_batch_tokens, batch_multipliers)
-
- z = None
- i = 0
- while max(map(len, remade_batch_tokens)) != 0:
- rem_tokens = [x[75:] for x in remade_batch_tokens]
- rem_multipliers = [x[75:] for x in batch_multipliers]
-
- self.hijack.fixes = []
- for unfiltered in hijack_fixes:
- fixes = []
- for fix in unfiltered:
- if fix[0] == i:
- fixes.append(fix[1])
- self.hijack.fixes.append(fixes)
-
- tokens = []
- multipliers = []
- for j in range(len(remade_batch_tokens)):
- if len(remade_batch_tokens[j]) > 0:
- tokens.append(remade_batch_tokens[j][:75])
- multipliers.append(batch_multipliers[j][:75])
- else:
- tokens.append([self.id_end] * 75)
- multipliers.append([1.0] * 75)
-
- z1 = self.process_tokens(tokens, multipliers)
- z = z1 if z is None else torch.cat((z, z1), axis=-2)
-
- remade_batch_tokens = rem_tokens
- batch_multipliers = rem_multipliers
- i += 1
+ batch_chunks, token_count = self.process_texts(texts)
- return z
+ used_embeddings = {}
+ chunk_count = max([len(x) for x in batch_chunks])
- def process_tokens(self, remade_batch_tokens, batch_multipliers):
- if not opts.use_old_emphasis_implementation:
- remade_batch_tokens = [[self.id_start] + x[:75] + [self.id_end] for x in remade_batch_tokens]
- batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers]
+ zs = []
+ for i in range(chunk_count):
+ batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks]
+
+ tokens = [x.tokens for x in batch_chunk]
+ multipliers = [x.multipliers for x in batch_chunk]
+ self.hijack.fixes = [x.fixes for x in batch_chunk]
+
+ for fixes in self.hijack.fixes:
+ for position, embedding in fixes:
+ used_embeddings[embedding.name] = embedding
+
+ z = self.process_tokens(tokens, multipliers)
+ zs.append(z)
+ if len(used_embeddings) > 0:
+ embeddings_list = ", ".join([f'{name} [{embedding.checksum()}]' for name, embedding in used_embeddings.items()])
+ self.hijack.comments.append(f"Used embeddings: {embeddings_list}")
+
+ return torch.hstack(zs)
+
+ def process_tokens(self, remade_batch_tokens, batch_multipliers):
+ """
+        Sends one single prompt chunk to be encoded by the transformers neural network.
+        remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually
+        there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens.
+        Multipliers are used to give more or less weight to the outputs of the transformers network. Each multiplier
+        corresponds to one token.
+ """
tokens = torch.asarray(remade_batch_tokens).to(devices.device)
+ # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
if self.id_end != self.id_pad:
for batch_pos in range(len(remade_batch_tokens)):
index = remade_batch_tokens[batch_pos].index(self.id_end)
@@ -240,12 +254,11 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
z = self.encode_with_transformers(tokens)
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
- batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers]
- batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(devices.device)
+ batch_multipliers = torch.asarray(batch_multipliers).to(devices.device)
original_mean = z.mean()
- z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
+ z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
new_mean = z.mean()
- z *= original_mean / new_mean
+ z = z * (original_mean / new_mean)
return z
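The mean-restoring rescale above is easier to see in isolation. A minimal standalone sketch with random data and an invented multiplier pattern (not the webui code path itself):

```python
import torch

z = torch.randn(1, 77, 768)                     # fake per-token embeddings
multipliers = torch.ones(1, 77)
multipliers[0, 5:10] = 1.4                      # emphasized tokens get a larger weight

original_mean = z.mean()
z = z * multipliers.unsqueeze(-1).expand(z.shape)
z = z * (original_mean / z.mean())              # rescale so the overall mean is unchanged

print(torch.isclose(z.mean(), original_mean))   # tensor(True)
```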
@@ -254,10 +267,13 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
def __init__(self, wrapped, hijack):
super().__init__(wrapped, hijack)
self.tokenizer = wrapped.tokenizer
- self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ',</w>'][0]
+
+ vocab = self.tokenizer.get_vocab()
+
+ self.comma_token = vocab.get(',</w>', None)
self.token_mults = {}
- tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k]
+ tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k]
for text, ident in tokens_with_parens:
mult = 1.0
for c in text:
@@ -296,6 +312,6 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
def encode_embedding_init_text(self, init_text, nvpt):
embedding_layer = self.wrapped.transformer.text_model.embeddings
ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
- embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
+ embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0)
return embedded
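The chunking performed by tokenize_line above reduces, in the simplest case, to splitting the prompt into 75-token pieces and wrapping each one in start/end tokens. A minimal standalone sketch that ignores embeddings, emphasis weights, BREAK and comma backtracking; the CLIP special token ids are assumed values:

```python
ID_START, ID_END, CHUNK_LEN = 49406, 49407, 75   # CLIP start/end token ids (assumed) and chunk size

def split_into_chunks(token_ids):
    """Split prompt tokens into 75-token chunks, each padded and wrapped to 77 tokens."""
    chunks = []
    for i in range(0, max(len(token_ids), 1), CHUNK_LEN):
        body = token_ids[i:i + CHUNK_LEN]
        body = body + [ID_END] * (CHUNK_LEN - len(body))   # pad the last chunk to 75 tokens
        chunks.append([ID_START] + body + [ID_END])        # 77 tokens total
    return chunks

chunks = split_into_chunks(list(range(100)))    # a 100-token prompt
print(len(chunks), [len(c) for c in chunks])    # 2 [77, 77]
```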
diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py
new file mode 100644
index 00000000..6d9fbbe6
--- /dev/null
+++ b/modules/sd_hijack_clip_old.py
@@ -0,0 +1,81 @@
+from modules import sd_hijack_clip
+from modules import shared
+
+
+def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
+ id_start = self.id_start
+ id_end = self.id_end
+ maxlen = self.wrapped.max_length # you get to stay at 77
+ used_custom_terms = []
+ remade_batch_tokens = []
+ hijack_comments = []
+ hijack_fixes = []
+ token_count = 0
+
+ cache = {}
+ batch_tokens = self.tokenize(texts)
+ batch_multipliers = []
+ for tokens in batch_tokens:
+ tuple_tokens = tuple(tokens)
+
+ if tuple_tokens in cache:
+ remade_tokens, fixes, multipliers = cache[tuple_tokens]
+ else:
+ fixes = []
+ remade_tokens = []
+ multipliers = []
+ mult = 1.0
+
+ i = 0
+ while i < len(tokens):
+ token = tokens[i]
+
+ embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
+
+ mult_change = self.token_mults.get(token) if shared.opts.enable_emphasis else None
+ if mult_change is not None:
+ mult *= mult_change
+ i += 1
+ elif embedding is None:
+ remade_tokens.append(token)
+ multipliers.append(mult)
+ i += 1
+ else:
+ emb_len = int(embedding.vec.shape[0])
+ fixes.append((len(remade_tokens), embedding))
+ remade_tokens += [0] * emb_len
+ multipliers += [mult] * emb_len
+ used_custom_terms.append((embedding.name, embedding.checksum()))
+ i += embedding_length_in_tokens
+
+ if len(remade_tokens) > maxlen - 2:
+ vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
+ ovf = remade_tokens[maxlen - 2:]
+ overflowing_words = [vocab.get(int(x), "") for x in ovf]
+ overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
+ hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
+
+ token_count = len(remade_tokens)
+ remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
+ remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
+ cache[tuple_tokens] = (remade_tokens, fixes, multipliers)
+
+ multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
+ multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
+
+ remade_batch_tokens.append(remade_tokens)
+ hijack_fixes.append(fixes)
+ batch_multipliers.append(multipliers)
+ return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
+
+
+def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
+ batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = process_text_old(self, texts)
+
+ self.hijack.comments += hijack_comments
+
+ if len(used_custom_terms) > 0:
+ self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
+
+ self.hijack.fixes = hijack_fixes
+ return self.process_tokens(remade_batch_tokens, batch_multipliers)
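The bracket handling in process_text_old only adjusts a running multiplier; bracket tokens themselves are never emitted. A standalone sketch of that bookkeeping - the 1.1 scaling factor is an assumption here, since the token_mults table that supplies it is built elsewhere:

```python
def old_emphasis(tokens):
    """Drop bracket tokens and track the multiplier they imply for the remaining tokens."""
    mult, out_tokens, out_mults = 1.0, [], []
    for tok in tokens:
        if tok == '(':
            mult *= 1.1      # assumed factor per opening parenthesis
        elif tok == ')':
            mult /= 1.1
        elif tok == '[':
            mult /= 1.1
        elif tok == ']':
            mult *= 1.1
        else:
            out_tokens.append(tok)
            out_mults.append(round(mult, 3))
    return out_tokens, out_mults

print(old_emphasis(['a', '(', 'red', ')', 'cat']))   # (['a', 'red', 'cat'], [1.0, 1.1, 1.0])
```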
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py
index bb5499b3..478cd499 100644
--- a/modules/sd_hijack_inpainting.py
+++ b/modules/sd_hijack_inpainting.py
@@ -12,191 +12,6 @@ from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ddim import DDIMSampler, noise_like
-# =================================================================================================
-# Monkey patch DDIMSampler methods from RunwayML repo directly.
-# Adapted from:
-# https://github.com/runwayml/stable-diffusion/blob/main/ldm/models/diffusion/ddim.py
-# =================================================================================================
-@torch.no_grad()
-def sample_ddim(self,
- S,
- batch_size,
- shape,
- conditioning=None,
- callback=None,
- normals_sequence=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None,
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- **kwargs
- ):
- if conditioning is not None:
- if isinstance(conditioning, dict):
- ctmp = conditioning[list(conditioning.keys())[0]]
- while isinstance(ctmp, list):
- ctmp = ctmp[0]
- cbs = ctmp.shape[0]
- if cbs != batch_size:
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
- else:
- if conditioning.shape[0] != batch_size:
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
- # sampling
- C, H, W = shape
- size = (batch_size, C, H, W)
- print(f'Data shape for DDIM sampling is {size}, eta {eta}')
-
- samples, intermediates = self.ddim_sampling(conditioning, size,
- callback=callback,
- img_callback=img_callback,
- quantize_denoised=quantize_x0,
- mask=mask, x0=x0,
- ddim_use_original_steps=False,
- noise_dropout=noise_dropout,
- temperature=temperature,
- score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- x_T=x_T,
- log_every_t=log_every_t,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- )
- return samples, intermediates
-
-@torch.no_grad()
-def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None):
- b, *_, device = *x.shape, x.device
-
- if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
- e_t = self.model.apply_model(x, t, c)
- else:
- x_in = torch.cat([x] * 2)
- t_in = torch.cat([t] * 2)
- if isinstance(c, dict):
- assert isinstance(unconditional_conditioning, dict)
- c_in = dict()
- for k in c:
- if isinstance(c[k], list):
- c_in[k] = [
- torch.cat([unconditional_conditioning[k][i], c[k][i]])
- for i in range(len(c[k]))
- ]
- else:
- c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
- else:
- c_in = torch.cat([unconditional_conditioning, c])
- e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
- e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
- if score_corrector is not None:
- assert self.model.parameterization == "eps"
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
- # select parameters corresponding to the currently considered timestep
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
- # current prediction for x_0
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
- if quantize_denoised:
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
- # direction pointing to x_t
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
- return x_prev, pred_x0
-
-
-# =================================================================================================
-# Monkey patch PLMSSampler methods.
-# This one was not actually patched correctly in the RunwayML repo, but we can replicate the changes.
-# Adapted from:
-# https://github.com/CompVis/stable-diffusion/blob/main/ldm/models/diffusion/plms.py
-# =================================================================================================
-@torch.no_grad()
-def sample_plms(self,
- S,
- batch_size,
- shape,
- conditioning=None,
- callback=None,
- normals_sequence=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None,
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- **kwargs
- ):
- if conditioning is not None:
- if isinstance(conditioning, dict):
- ctmp = conditioning[list(conditioning.keys())[0]]
- while isinstance(ctmp, list):
- ctmp = ctmp[0]
- cbs = ctmp.shape[0]
- if cbs != batch_size:
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
- else:
- if conditioning.shape[0] != batch_size:
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
- # sampling
- C, H, W = shape
- size = (batch_size, C, H, W)
- print(f'Data shape for PLMS sampling is {size}')
-
- samples, intermediates = self.plms_sampling(conditioning, size,
- callback=callback,
- img_callback=img_callback,
- quantize_denoised=quantize_x0,
- mask=mask, x0=x0,
- ddim_use_original_steps=False,
- noise_dropout=noise_dropout,
- temperature=temperature,
- score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- x_T=x_T,
- log_every_t=log_every_t,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- )
- return samples, intermediates
-
@torch.no_grad()
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
@@ -280,61 +95,8 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F
return x_prev, pred_x0, e_t
-# =================================================================================================
-# Monkey patch LatentInpaintDiffusion to load the checkpoint with a proper config.
-# Adapted from:
-# https://github.com/runwayml/stable-diffusion/blob/main/ldm/models/diffusion/ddpm.py
-# =================================================================================================
-
-@torch.no_grad()
-def get_unconditional_conditioning(self, batch_size, null_label=None):
- if null_label is not None:
- xc = null_label
- if isinstance(xc, ListConfig):
- xc = list(xc)
- if isinstance(xc, dict) or isinstance(xc, list):
- c = self.get_learned_conditioning(xc)
- else:
- if hasattr(xc, "to"):
- xc = xc.to(self.device)
- c = self.get_learned_conditioning(xc)
- else:
- # todo: get null label from cond_stage_model
- raise NotImplementedError()
- c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device)
- return c
-
-
-class LatentInpaintDiffusion(LatentDiffusion):
- def __init__(
- self,
- concat_keys=("mask", "masked_image"),
- masked_image_key="masked_image",
- *args,
- **kwargs,
- ):
- super().__init__(*args, **kwargs)
- self.masked_image_key = masked_image_key
- assert self.masked_image_key in concat_keys
- self.concat_keys = concat_keys
-
-
-def should_hijack_inpainting(checkpoint_info):
- ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
- cfg_basename = os.path.basename(checkpoint_info.config).lower()
- return "inpainting" in ckpt_basename and not "inpainting" in cfg_basename
-
def do_inpainting_hijack():
- # most of this stuff seems to no longer be needed because it is already included into SD2.0
# p_sample_plms is needed because PLMS can't work with dicts as conditionings
- # this file should be cleaned up later if everything turns out to work fine
-
- # ldm.models.diffusion.ddpm.get_unconditional_conditioning = get_unconditional_conditioning
- # ldm.models.diffusion.ddpm.LatentInpaintDiffusion = LatentInpaintDiffusion
-
- # ldm.models.diffusion.ddim.DDIMSampler.p_sample_ddim = p_sample_ddim
- # ldm.models.diffusion.ddim.DDIMSampler.sample = sample_ddim
ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms
- # ldm.models.diffusion.plms.PLMSSampler.sample = sample_plms
diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py
new file mode 100644
index 00000000..3c727d3b
--- /dev/null
+++ b/modules/sd_hijack_ip2p.py
@@ -0,0 +1,13 @@
+import collections
+import os.path
+import sys
+import gc
+import time
+
+def should_hijack_ip2p(checkpoint_info):
+ from modules import sd_models_config
+
+ ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
+ cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower()
+
+ return "pix2pix" in ckpt_basename and not "pix2pix" in cfg_basename
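The hijack decision is a plain filename heuristic. A rough illustration with hypothetical paths rather than the real checkpoint_info / sd_models_config objects:

```python
import os.path

def looks_like_ip2p(ckpt_path, cfg_path):
    """Hypothetical helper: the same substring test as should_hijack_ip2p, on plain paths."""
    ckpt = os.path.basename(ckpt_path).lower()
    cfg = os.path.basename(cfg_path).lower()
    return "pix2pix" in ckpt and "pix2pix" not in cfg

print(looks_like_ip2p("instruct-pix2pix-00-22000.ckpt", "v1-inference.yaml"))        # True
print(looks_like_ip2p("instruct-pix2pix-00-22000.ckpt", "instruct-pix2pix.yaml"))    # False
```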
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 02c87f40..c02d954c 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -1,7 +1,7 @@
import math
import sys
import traceback
-import importlib
+import psutil
import torch
from torch import einsum
@@ -9,9 +9,11 @@ from torch import einsum
from ldm.util import default
from einops import rearrange
-from modules import shared
+from modules import shared, errors, devices
from modules.hypernetworks import hypernetwork
+from .sub_quadratic_attention import efficient_dot_product_attention
+
if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
try:
@@ -22,6 +24,19 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
print(traceback.format_exc(), file=sys.stderr)
+def get_available_vram():
+ if shared.device.type == 'cuda':
+ stats = torch.cuda.memory_stats(shared.device)
+ mem_active = stats['active_bytes.all.current']
+ mem_reserved = stats['reserved_bytes.all.current']
+ mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
+ mem_free_torch = mem_reserved - mem_active
+ mem_free_total = mem_free_cuda + mem_free_torch
+ return mem_free_total
+ else:
+ return psutil.virtual_memory().available
+
+
# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
def split_cross_attention_forward_v1(self, x, context=None, mask=None):
h = self.heads
@@ -29,7 +44,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
q_in = self.to_q(x)
context = default(context, x)
- context_k, context_v = hypernetwork.apply_hypernetwork(shared.loaded_hypernetwork, context)
+ context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context)
k_in = self.to_k(context_k)
v_in = self.to_v(context_v)
del context, context_k, context_v, x
@@ -37,18 +52,25 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
del q_in, k_in, v_in
- r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)
- for i in range(0, q.shape[0], 2):
- end = i + 2
- s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end])
- s1 *= self.scale
+ dtype = q.dtype
+ if shared.opts.upcast_attn:
+ q, k, v = q.float(), k.float(), v.float()
- s2 = s1.softmax(dim=-1)
- del s1
+ with devices.without_autocast(disable=not shared.opts.upcast_attn):
+ r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
+ for i in range(0, q.shape[0], 2):
+ end = i + 2
+ s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end])
+ s1 *= self.scale
+
+ s2 = s1.softmax(dim=-1)
+ del s1
+
+ r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end])
+ del s2
+ del q, k, v
- r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end])
- del s2
- del q, k, v
+ r1 = r1.to(dtype)
r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
del r1
@@ -63,54 +85,56 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
q_in = self.to_q(x)
context = default(context, x)
- context_k, context_v = hypernetwork.apply_hypernetwork(shared.loaded_hypernetwork, context)
+ context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context)
k_in = self.to_k(context_k)
v_in = self.to_v(context_v)
- k_in *= self.scale
-
- del context, x
-
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
- del q_in, k_in, v_in
-
- r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
-
- stats = torch.cuda.memory_stats(q.device)
- mem_active = stats['active_bytes.all.current']
- mem_reserved = stats['reserved_bytes.all.current']
- mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
- mem_free_torch = mem_reserved - mem_active
- mem_free_total = mem_free_cuda + mem_free_torch
-
- gb = 1024 ** 3
- tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
- modifier = 3 if q.element_size() == 2 else 2.5
- mem_required = tensor_size * modifier
- steps = 1
-
- if mem_required > mem_free_total:
- steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
- # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
- # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
+ dtype = q_in.dtype
+ if shared.opts.upcast_attn:
+ q_in, k_in, v_in = q_in.float(), k_in.float(), v_in if v_in.device.type == 'mps' else v_in.float()
- if steps > 64:
- max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
- raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
- f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
-
- slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
- for i in range(0, q.shape[1], slice_size):
- end = i + slice_size
- s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k)
-
- s2 = s1.softmax(dim=-1, dtype=q.dtype)
- del s1
-
- r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
- del s2
+ with devices.without_autocast(disable=not shared.opts.upcast_attn):
+ k_in = k_in * self.scale
+
+ del context, x
+
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+ del q_in, k_in, v_in
+
+ r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
+
+ mem_free_total = get_available_vram()
+
+ gb = 1024 ** 3
+ tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
+ modifier = 3 if q.element_size() == 2 else 2.5
+ mem_required = tensor_size * modifier
+ steps = 1
+
+ if mem_required > mem_free_total:
+ steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
+ # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
+ # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
+
+ if steps > 64:
+ max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
+ raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
+ f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
+
+ slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
+ for i in range(0, q.shape[1], slice_size):
+ end = i + slice_size
+ s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k)
+
+ s2 = s1.softmax(dim=-1, dtype=q.dtype)
+ del s1
+
+ r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
+ del s2
+
+ del q, k, v
- del q, k, v
+ r1 = r1.to(dtype)
r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
del r1
@@ -118,19 +142,8 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
return self.to_out(r2)
-def check_for_psutil():
- try:
- spec = importlib.util.find_spec('psutil')
- return spec is not None
- except ModuleNotFoundError:
- return False
-
-invokeAI_mps_available = check_for_psutil()
-
# -- Taken from https://github.com/invoke-ai/InvokeAI and modified --
-if invokeAI_mps_available:
- import psutil
- mem_total_gb = psutil.virtual_memory().total // (1 << 30)
+mem_total_gb = psutil.virtual_memory().total // (1 << 30)
def einsum_op_compvis(q, k, v):
s = einsum('b i d, b j d -> b i j', q, k)
@@ -204,29 +217,131 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None):
q = self.to_q(x)
context = default(context, x)
- context_k, context_v = hypernetwork.apply_hypernetwork(shared.loaded_hypernetwork, context)
- k = self.to_k(context_k) * self.scale
+ context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context)
+ k = self.to_k(context_k)
v = self.to_v(context_v)
del context, context_k, context_v, x
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
- r = einsum_op(q, k, v)
+ dtype = q.dtype
+ if shared.opts.upcast_attn:
+ q, k, v = q.float(), k.float(), v if v.device.type == 'mps' else v.float()
+
+ with devices.without_autocast(disable=not shared.opts.upcast_attn):
+ k = k * self.scale
+
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+ r = einsum_op(q, k, v)
+ r = r.to(dtype)
return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h))
# -- End of code from https://github.com/invoke-ai/InvokeAI --
+
+# Based on Birch-san's modified implementation of sub-quadratic attention from https://github.com/Birch-san/diffusers/pull/1
+# The sub_quad_attention_forward function is under the MIT License listed under Memory Efficient Attention in the Licenses section of the web UI interface
+def sub_quad_attention_forward(self, x, context=None, mask=None):
+ assert mask is None, "attention-mask not currently implemented for SubQuadraticCrossAttnProcessor."
+
+ h = self.heads
+
+ q = self.to_q(x)
+ context = default(context, x)
+
+ context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context)
+ k = self.to_k(context_k)
+ v = self.to_v(context_v)
+ del context, context_k, context_v, x
+
+ q = q.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
+ k = k.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
+ v = v.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
+
+ dtype = q.dtype
+ if shared.opts.upcast_attn:
+ q, k = q.float(), k.float()
+
+ x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
+
+ x = x.to(dtype)
+
+ x = x.unflatten(0, (-1, h)).transpose(1,2).flatten(start_dim=2)
+
+ out_proj, dropout = self.to_out
+ x = out_proj(x)
+ x = dropout(x)
+
+ return x
+
+def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_size_min=None, chunk_threshold=None, use_checkpoint=True):
+ bytes_per_token = torch.finfo(q.dtype).bits//8
+ batch_x_heads, q_tokens, _ = q.shape
+ _, k_tokens, _ = k.shape
+ qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
+
+ if chunk_threshold is None:
+ chunk_threshold_bytes = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7)
+ elif chunk_threshold == 0:
+ chunk_threshold_bytes = None
+ else:
+ chunk_threshold_bytes = int(0.01 * chunk_threshold * get_available_vram())
+
+ if kv_chunk_size_min is None and chunk_threshold_bytes is not None:
+ kv_chunk_size_min = chunk_threshold_bytes // (batch_x_heads * bytes_per_token * (k.shape[2] + v.shape[2]))
+ elif kv_chunk_size_min == 0:
+ kv_chunk_size_min = None
+
+ if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes:
+ # the big matmul fits into our memory limit; do everything in 1 chunk,
+ # i.e. send it down the unchunked fast-path
+ query_chunk_size = q_tokens
+ kv_chunk_size = k_tokens
+
+ with devices.without_autocast(disable=q.dtype == v.dtype):
+ return efficient_dot_product_attention(
+ q,
+ k,
+ v,
+ query_chunk_size=q_chunk_size,
+ kv_chunk_size=kv_chunk_size,
+ kv_chunk_size_min = kv_chunk_size_min,
+ use_checkpoint=use_checkpoint,
+ )
+
+
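The chunk_threshold argument above is translated into a byte budget in three ways. A standalone sketch with invented numbers (the real code reads free memory via get_available_vram()):

```python
def chunk_threshold_bytes(chunk_threshold, free_bytes, is_mps=False):
    """None -> fixed fraction of free memory, 0 -> no byte budget, otherwise a percentage."""
    if chunk_threshold is None:
        return int(free_bytes * (0.9 if is_mps else 0.7))
    if chunk_threshold == 0:
        return None                                   # no byte budget is applied
    return int(0.01 * chunk_threshold * free_bytes)   # threshold given as a percentage

print(chunk_threshold_bytes(None, 8 * 1024 ** 3))     # 70% of 8 GiB
print(chunk_threshold_bytes(50, 8 * 1024 ** 3))       # 50% of 8 GiB
```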
+def get_xformers_flash_attention_op(q, k, v):
+ if not shared.cmd_opts.xformers_flash_attention:
+ return None
+
+ try:
+ flash_attention_op = xformers.ops.MemoryEfficientAttentionFlashAttentionOp
+ fw, bw = flash_attention_op
+ if fw.supports(xformers.ops.fmha.Inputs(query=q, key=k, value=v, attn_bias=None)):
+ return flash_attention_op
+ except Exception as e:
+ errors.display_once(e, "enabling flash attention")
+
+ return None
+
+
def xformers_attention_forward(self, x, context=None, mask=None):
h = self.heads
q_in = self.to_q(x)
context = default(context, x)
- context_k, context_v = hypernetwork.apply_hypernetwork(shared.loaded_hypernetwork, context)
+ context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context)
k_in = self.to_k(context_k)
v_in = self.to_v(context_v)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
del q_in, k_in, v_in
- out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
+
+ dtype = q.dtype
+ if shared.opts.upcast_attn:
+ q, k = q.float(), k.float()
+
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v))
+
+ out = out.to(dtype)
out = rearrange(out, 'b n h d -> b n (h d)', h=h)
return self.to_out(out)
@@ -252,12 +367,7 @@ def cross_attention_attnblock_forward(self, x):
h_ = torch.zeros_like(k, device=q.device)
- stats = torch.cuda.memory_stats(q.device)
- mem_active = stats['active_bytes.all.current']
- mem_reserved = stats['reserved_bytes.all.current']
- mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
- mem_free_torch = mem_reserved - mem_active
- mem_free_total = mem_free_cuda + mem_free_torch
+ mem_free_total = get_available_vram()
tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size()
mem_required = tensor_size * 2.5
@@ -303,12 +413,32 @@ def xformers_attnblock_forward(self, x):
v = self.v(h_)
b, c, h, w = q.shape
q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+ dtype = q.dtype
+ if shared.opts.upcast_attn:
+ q, k = q.float(), k.float()
q = q.contiguous()
k = k.contiguous()
v = v.contiguous()
- out = xformers.ops.memory_efficient_attention(q, k, v)
+ out = xformers.ops.memory_efficient_attention(q, k, v, op=get_xformers_flash_attention_op(q, k, v))
+ out = out.to(dtype)
out = rearrange(out, 'b (h w) c -> b c h w', h=h)
out = self.proj_out(out)
return x + out
except NotImplementedError:
return cross_attention_attnblock_forward(self, x)
+
+def sub_quad_attnblock_forward(self, x):
+ h_ = x
+ h_ = self.norm(h_)
+ q = self.q(h_)
+ k = self.k(h_)
+ v = self.v(h_)
+ b, c, h, w = q.shape
+ q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+ q = q.contiguous()
+ k = k.contiguous()
+ v = v.contiguous()
+ out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
+ out = rearrange(out, 'b (h w) c -> b c h w', h=h)
+ out = self.proj_out(out)
+ return x + out
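The memory-driven slicing in split_cross_attention_forward comes down to choosing a power-of-two number of steps for the q @ k^T product. A standalone sketch with made-up sizes:

```python
import math

def attention_steps(batch_x_heads, q_tokens, k_tokens, element_size, mem_free_total):
    """Estimate how many slices the attention matrix must be split into to fit free memory."""
    tensor_size = batch_x_heads * q_tokens * k_tokens * element_size
    modifier = 3 if element_size == 2 else 2.5        # extra headroom for float16
    mem_required = tensor_size * modifier
    steps = 1
    if mem_required > mem_free_total:
        steps = 2 ** math.ceil(math.log(mem_required / mem_free_total, 2))
    return steps

# 16 heads, 4096 query/key tokens, float16 elements, ~1 GiB free:
print(attention_steps(16, 4096, 4096, 2, 1024 ** 3))  # 2
```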
diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py
index 18daf8c1..a6ee577c 100644
--- a/modules/sd_hijack_unet.py
+++ b/modules/sd_hijack_unet.py
@@ -1,4 +1,8 @@
import torch
+from packaging import version
+
+from modules import devices
+from modules.sd_hijack_utils import CondFunc
class TorchHijackForUnet:
@@ -28,3 +32,31 @@ class TorchHijackForUnet:
th = TorchHijackForUnet()
+
+
+# Below are monkey patches to enable upcasting a float16 UNet for float32 sampling
+def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
+
+ if isinstance(cond, dict):
+ for y in cond.keys():
+ cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+
+ with devices.autocast():
+ return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
+
+class GELUHijack(torch.nn.GELU, torch.nn.Module):
+ def __init__(self, *args, **kwargs):
+ torch.nn.GELU.__init__(self, *args, **kwargs)
+ def forward(self, x):
+ if devices.unet_needs_upcast:
+ return torch.nn.GELU.forward(self.float(), x.float()).to(devices.dtype_unet)
+ else:
+ return torch.nn.GELU.forward(self, x)
+
+unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast
+CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
+CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).to(devices.dtype_unet), unet_needs_upcast)
+if version.parse(torch.__version__) <= version.parse("1.13.1"):
+ CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast)
+ CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast)
+ CondFunc('open_clip.transformer.ResidualAttentionBlock.__init__', lambda orig_func, *args, **kwargs: kwargs.update({'act_layer': GELUHijack}) and False or orig_func(*args, **kwargs), lambda _, *args, **kwargs: kwargs.get('act_layer') is None or kwargs['act_layer'] == torch.nn.GELU)
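A standalone sketch of the upcast pattern GELUHijack applies; the real class additionally checks devices.unet_needs_upcast and casts back to devices.dtype_unet:

```python
import torch

class UpcastGELU(torch.nn.GELU):
    def forward(self, x):
        # compute the activation in float32, then cast back to the input dtype
        return super().forward(x.float()).to(x.dtype)

x = torch.randn(4, dtype=torch.float16)
print(UpcastGELU()(x).dtype)   # torch.float16, computed internally in float32
```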
diff --git a/modules/sd_hijack_utils.py b/modules/sd_hijack_utils.py
new file mode 100644
index 00000000..f8684475
--- /dev/null
+++ b/modules/sd_hijack_utils.py
@@ -0,0 +1,28 @@
+import importlib
+
+class CondFunc:
+ def __new__(cls, orig_func, sub_func, cond_func):
+ self = super(CondFunc, cls).__new__(cls)
+ if isinstance(orig_func, str):
+ func_path = orig_func.split('.')
+ for i in range(len(func_path)-1, -1, -1):
+ try:
+ resolved_obj = importlib.import_module('.'.join(func_path[:i]))
+ break
+ except ImportError:
+ pass
+ for attr_name in func_path[i:-1]:
+ resolved_obj = getattr(resolved_obj, attr_name)
+ orig_func = getattr(resolved_obj, func_path[-1])
+ setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
+ self.__init__(orig_func, sub_func, cond_func)
+ return lambda *args, **kwargs: self(*args, **kwargs)
+ def __init__(self, orig_func, sub_func, cond_func):
+ self.__orig_func = orig_func
+ self.__sub_func = sub_func
+ self.__cond_func = cond_func
+ def __call__(self, *args, **kwargs):
+ if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs):
+ return self.__sub_func(self.__orig_func, *args, **kwargs)
+ else:
+ return self.__orig_func(*args, **kwargs)
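A hypothetical usage sketch of CondFunc, assuming the webui modules are importable: the dotted path is resolved, the target is patched in place, and the substitute only runs when the condition returns True:

```python
import math
from modules.sd_hijack_utils import CondFunc

# Replace math.sqrt only for negative inputs; other calls fall through to the original.
CondFunc('math.sqrt',
         lambda orig, x: orig(abs(x)),   # substitute behaviour
         lambda orig, x: x < 0)          # condition deciding when the substitute runs

print(math.sqrt(4), math.sqrt(-4))       # 2.0 2.0
```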
diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py
new file mode 100644
index 00000000..4ac51c38
--- /dev/null
+++ b/modules/sd_hijack_xlmr.py
@@ -0,0 +1,34 @@
+import open_clip.tokenizer
+import torch
+
+from modules import sd_hijack_clip, devices
+from modules.shared import opts
+
+
+class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords):
+ def __init__(self, wrapped, hijack):
+ super().__init__(wrapped, hijack)
+
+ self.id_start = wrapped.config.bos_token_id
+ self.id_end = wrapped.config.eos_token_id
+ self.id_pad = wrapped.config.pad_token_id
+
+ self.comma_token = self.tokenizer.get_vocab().get(',', None) # alt diffusion doesn't have </w> bits for comma
+
+ def encode_with_transformers(self, tokens):
+ # there's no CLIP Skip here because all hidden layers have size of 1024 and the last one uses a
+ # trained layer to transform those 1024 into 768 for unet; so you can't choose which transformer
+ # layer to work with - you have to use the last
+
+ attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64)
+ features = self.wrapped(input_ids=tokens, attention_mask=attention_mask)
+ z = features['projection_state']
+
+ return z
+
+ def encode_embedding_init_text(self, init_text, nvpt):
+ embedding_layer = self.wrapped.roberta.embeddings
+ ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
+ embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
+
+ return embedded
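The attention mask built in encode_with_transformers simply marks non-padding positions. A standalone sketch with an assumed pad token id:

```python
import torch

id_pad = 1                                        # assumed pad token id
tokens = torch.tensor([[0, 52, 713, 2, 1, 1]])    # one tokenized text with two padding positions
attention_mask = (tokens != id_pad).to(dtype=torch.int64)
print(attention_mask)                             # tensor([[1, 1, 1, 1, 0, 0]])
```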
diff --git a/modules/sd_models.py b/modules/sd_models.py
index ecdd91c5..b2d48a51 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -2,7 +2,6 @@ import collections
import os.path
import sys
import gc
-from collections import namedtuple
import torch
import re
import safetensors.torch
@@ -13,17 +12,63 @@ import ldm.modules.midas as midas
from ldm.util import instantiate_from_config
-from modules import shared, modelloader, devices, script_callbacks, sd_vae
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config
from modules.paths import models_path
-from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
+from modules.sd_hijack_inpainting import do_inpainting_hijack
+from modules.timer import Timer
model_dir = "Stable-diffusion"
-model_path = os.path.abspath(os.path.join(models_path, model_dir))
+model_path = os.path.abspath(os.path.join(paths.models_path, model_dir))
-CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
+checkpoint_alisases = {}
checkpoints_loaded = collections.OrderedDict()
+
+class CheckpointInfo:
+ def __init__(self, filename):
+ self.filename = filename
+ abspath = os.path.abspath(filename)
+
+ if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
+ name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
+ elif abspath.startswith(model_path):
+ name = abspath.replace(model_path, '')
+ else:
+ name = os.path.basename(filename)
+
+ if name.startswith("\\") or name.startswith("/"):
+ name = name[1:]
+
+ self.name = name
+ self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
+ self.hash = model_hash(filename)
+
+ self.sha256 = hashes.sha256_from_cache(self.filename, "checkpoint/" + name)
+ self.shorthash = self.sha256[0:10] if self.sha256 else None
+
+ self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'
+
+ self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else [])
+
+ def register(self):
+ checkpoints_list[self.title] = self
+ for id in self.ids:
+ checkpoint_alisases[id] = self
+
+ def calculate_shorthash(self):
+ self.sha256 = hashes.sha256(self.filename, "checkpoint/" + self.name)
+ self.shorthash = self.sha256[0:10]
+
+ if self.shorthash not in self.ids:
+ self.ids += [self.shorthash, self.sha256]
+ self.register()
+
+ self.title = f'{self.name} [{self.shorthash}]'
+
+ return self.shorthash
+
+
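A rough sketch, with hypothetical values, of what register() achieves: the same checkpoint object becomes reachable under every entry in its ids list, which is what the checkpoint_alisases lookups below rely on:

```python
checkpoint_registry = {}

class FakeCheckpointInfo:
    """Hypothetical stand-in: only the alias bookkeeping, no hashing or file access."""
    def __init__(self, name, old_hash, shorthash):
        self.title = f"{name} [{shorthash}]"
        self.ids = [old_hash, shorthash, name, self.title]

    def register(self, registry):
        for alias in self.ids:
            registry[alias] = self

info = FakeCheckpointInfo("v1-5-pruned-emaonly.safetensors", "81761151", "6ce0161689")
info.register(checkpoint_registry)

print(checkpoint_registry["81761151"] is checkpoint_registry["6ce0161689"])   # True
```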
try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
@@ -42,61 +87,50 @@ def setup_model():
enable_midas_autodownload()
-def checkpoint_tiles():
- convert = lambda name: int(name) if name.isdigit() else name.lower()
- alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
- return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
-
-
-def list_models():
- checkpoints_list.clear()
- model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"])
-
- def modeltitle(path, shorthash):
- abspath = os.path.abspath(path)
+def checkpoint_tiles():
+ def convert(name):
+ return int(name) if name.isdigit() else name.lower()
- if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
- name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
- elif abspath.startswith(model_path):
- name = abspath.replace(model_path, '')
- else:
- name = os.path.basename(path)
+ def alphanumeric_key(key):
+ return [convert(c) for c in re.split('([0-9]+)', key)]
- if name.startswith("\\") or name.startswith("/"):
- name = name[1:]
+ return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key)
- shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
- return f'{name} [{shorthash}]', shortname
+def list_models():
+ checkpoints_list.clear()
+ checkpoint_alisases.clear()
+ model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], ext_blacklist=[".vae.safetensors"])
cmd_ckpt = shared.cmd_opts.ckpt
if os.path.exists(cmd_ckpt):
- h = model_hash(cmd_ckpt)
- title, short_model_name = modeltitle(cmd_ckpt, h)
- checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
- shared.opts.data['sd_model_checkpoint'] = title
+ checkpoint_info = CheckpointInfo(cmd_ckpt)
+ checkpoint_info.register()
+
+ shared.opts.data['sd_model_checkpoint'] = checkpoint_info.title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
+
for filename in model_list:
- h = model_hash(filename)
- title, short_model_name = modeltitle(filename, h)
+ checkpoint_info = CheckpointInfo(filename)
+ checkpoint_info.register()
- basename, _ = os.path.splitext(filename)
- config = basename + ".yaml"
- if not os.path.exists(config):
- config = shared.cmd_opts.config
- checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
+def get_closet_checkpoint_match(search_string):
+ checkpoint_info = checkpoint_alisases.get(search_string, None)
+ if checkpoint_info is not None:
+ return checkpoint_info
+ found = sorted([info for info in checkpoints_list.values() if search_string in info.title], key=lambda x: len(x.title))
+ if found:
+ return found[0]
-def get_closet_checkpoint_match(searchString):
- applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
- if len(applicable) > 0:
- return applicable[0]
return None
def model_hash(filename):
+ """old hash that only looks at a small part of the file and is prone to collisions"""
+
try:
with open(filename, "rb") as file:
import hashlib
@@ -112,7 +146,7 @@ def model_hash(filename):
def select_checkpoint():
model_checkpoint = shared.opts.sd_model_checkpoint
- checkpoint_info = checkpoints_list.get(model_checkpoint, None)
+ checkpoint_info = checkpoint_alisases.get(model_checkpoint, None)
if checkpoint_info is not None:
return checkpoint_info
@@ -168,7 +202,8 @@ def get_state_dict_from_checkpoint(pl_sd):
def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
_, extension = os.path.splitext(checkpoint_file)
if extension.lower() == ".safetensors":
- pl_sd = safetensors.torch.load_file(checkpoint_file, device=map_location or shared.weight_load_location)
+ device = map_location or shared.weight_load_location or devices.get_optimal_device_name()
+ pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
else:
pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)
@@ -179,59 +214,87 @@ def read_state_dict(checkpoint_file, print_global_state=False, map_location=None
return sd
-def load_model_weights(model, checkpoint_info, vae_file="auto"):
- checkpoint_file = checkpoint_info.filename
- sd_model_hash = checkpoint_info.hash
-
- cache_enabled = shared.opts.sd_checkpoint_cache > 0
+def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
+ sd_model_hash = checkpoint_info.calculate_shorthash()
+ timer.record("calculate hash")
- if cache_enabled and checkpoint_info in checkpoints_loaded:
+ if checkpoint_info in checkpoints_loaded:
# use checkpoint cache
print(f"Loading weights [{sd_model_hash}] from cache")
- model.load_state_dict(checkpoints_loaded[checkpoint_info])
- else:
- # load from file
- print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
+ return checkpoints_loaded[checkpoint_info]
- sd = read_state_dict(checkpoint_file)
- model.load_state_dict(sd, strict=False)
- del sd
-
- if cache_enabled:
- # cache newly loaded model
- checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
+ print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
+ res = read_state_dict(checkpoint_info.filename)
+ timer.record("load weights from disk")
+
+ return res
+
+
+def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
+ title = checkpoint_info.title
+ sd_model_hash = checkpoint_info.calculate_shorthash()
+ timer.record("calculate hash")
+
+ if checkpoint_info.title != title:
+ shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title
+
+ if state_dict is None:
+ state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
+
+ model.load_state_dict(state_dict, strict=False)
+ del state_dict
+ timer.record("apply weights to model")
+
+ if shared.opts.sd_checkpoint_cache > 0:
+ # cache newly loaded model
+ checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
- if shared.cmd_opts.opt_channelslast:
- model.to(memory_format=torch.channels_last)
+ if shared.cmd_opts.opt_channelslast:
+ model.to(memory_format=torch.channels_last)
+ timer.record("apply channels_last")
- if not shared.cmd_opts.no_half:
- vae = model.first_stage_model
+ if not shared.cmd_opts.no_half:
+ vae = model.first_stage_model
+ depth_model = getattr(model, 'depth_model', None)
- # with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
- if shared.cmd_opts.no_half_vae:
- model.first_stage_model = None
+ # with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
+ if shared.cmd_opts.no_half_vae:
+ model.first_stage_model = None
+ # with --upcast-sampling, don't convert the depth model weights to float16
+ if shared.cmd_opts.upcast_sampling and depth_model:
+ model.depth_model = None
- model.half()
- model.first_stage_model = vae
+ model.half()
+ model.first_stage_model = vae
+ if depth_model:
+ model.depth_model = depth_model
- devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
- devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
+ timer.record("apply half()")
- model.first_stage_model.to(devices.dtype_vae)
+ devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
+ devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
+ devices.dtype_unet = model.model.diffusion_model.dtype
+ devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
+
+ model.first_stage_model.to(devices.dtype_vae)
+ timer.record("apply dtype to VAE")
# clean up cache if limit is reached
- if cache_enabled:
- while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
- checkpoints_loaded.popitem(last=False) # LRU
+ while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+ checkpoints_loaded.popitem(last=False)
model.sd_model_hash = sd_model_hash
- model.sd_model_checkpoint = checkpoint_file
+ model.sd_model_checkpoint = checkpoint_info.filename
model.sd_checkpoint_info = checkpoint_info
+ shared.opts.data["sd_checkpoint_hash"] = checkpoint_info.sha256
+
+ model.logvar = model.logvar.to(devices.device) # fix for training
sd_vae.delete_base_vae()
sd_vae.clear_loaded_vae()
- vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
- sd_vae.load_vae(model, vae_file)
+ vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename)
+ sd_vae.load_vae(model, vae_file, vae_source)
+ timer.record("load VAE")
def enable_midas_autodownload():
@@ -244,7 +307,7 @@ def enable_midas_autodownload():
location automatically.
"""
- midas_path = os.path.join(models_path, 'midas')
+ midas_path = os.path.join(paths.models_path, 'midas')
# stable-diffusion-stability-ai hard-codes the midas model path to
# a location that differs from where other scripts using this model look.
@@ -276,88 +339,144 @@ def enable_midas_autodownload():
midas.api.load_model = load_model_wrapper
-def load_model(checkpoint_info=None):
+
+def repair_config(sd_config):
+
+ if not hasattr(sd_config.model.params, "use_ema"):
+ sd_config.model.params.use_ema = False
+
+ if shared.cmd_opts.no_half:
+ sd_config.model.params.unet_config.params.use_fp16 = False
+ elif shared.cmd_opts.upcast_sampling:
+ sd_config.model.params.unet_config.params.use_fp16 = True
+
+
+def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_to_load_state_dict=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
- if checkpoint_info.config != shared.cmd_opts.config:
- print(f"Loading config from: {checkpoint_info.config}")
-
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
- sd_config = OmegaConf.load(checkpoint_info.config)
-
- if should_hijack_inpainting(checkpoint_info):
- # Hardcoded config for now...
- sd_config.model.target = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
- sd_config.model.params.conditioning_key = "hybrid"
- sd_config.model.params.unet_config.params.in_channels = 9
- sd_config.model.params.finetune_keys = None
+ do_inpainting_hijack()
- # Create a "fake" config with a different name so that we know to unload it when switching models.
- checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
+ timer = Timer()
- if not hasattr(sd_config.model.params, "use_ema"):
- sd_config.model.params.use_ema = False
+ if already_loaded_state_dict is not None:
+ state_dict = already_loaded_state_dict
+ else:
+ state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
- do_inpainting_hijack()
+ checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
- if shared.cmd_opts.no_half:
- sd_config.model.params.unet_config.params.use_fp16 = False
+ timer.record("find config")
+
+ sd_config = OmegaConf.load(checkpoint_config)
+ repair_config(sd_config)
+
+ timer.record("load config")
+
+ print(f"Creating model from config: {checkpoint_config}")
+
+ sd_model = None
+ try:
+ with sd_disable_initialization.DisableInitialization():
+ sd_model = instantiate_from_config(sd_config.model)
+ except Exception as e:
+ pass
- sd_model = instantiate_from_config(sd_config.model)
- load_model_weights(sd_model, checkpoint_info)
+ if sd_model is None:
+ print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
+ sd_model = instantiate_from_config(sd_config.model)
+
+ sd_model.used_config = checkpoint_config
+
+ timer.record("create model")
+
+ load_model_weights(sd_model, checkpoint_info, state_dict, timer)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
+ timer.record("move model to device")
+
sd_hijack.model_hijack.hijack(sd_model)
+ timer.record("hijack")
+
sd_model.eval()
shared.sd_model = sd_model
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model
+
+ timer.record("load textual inversion embeddings")
+
script_callbacks.model_loaded_callback(sd_model)
- print("Model loaded.")
+ timer.record("scripts callbacks")
+
+ print(f"Model loaded in {timer.summary()}.")
+
return sd_model
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
-
+
if not sd_model:
sd_model = shared.sd_model
- if sd_model.sd_model_checkpoint == checkpoint_info.filename:
- return
+ if sd_model is None: # previous model load failed
+ current_checkpoint_info = None
+ else:
+ current_checkpoint_info = sd_model.sd_checkpoint_info
+ if sd_model.sd_model_checkpoint == checkpoint_info.filename:
+ return
+
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.send_everything_to_cpu()
+ else:
+ sd_model.to(devices.cpu)
+
+ sd_hijack.model_hijack.undo_hijack(sd_model)
+
+ timer = Timer()
+
+ state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
+
+ checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
+
+ timer.record("find config")
- if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
+ if sd_model is None or checkpoint_config != sd_model.used_config:
del sd_model
checkpoints_loaded.clear()
- load_model(checkpoint_info)
+ load_model(checkpoint_info, already_loaded_state_dict=state_dict, time_taken_to_load_state_dict=timer.records["load weights from disk"])
return shared.sd_model
- if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
- lowvram.send_everything_to_cpu()
- else:
- sd_model.to(devices.cpu)
-
- sd_hijack.model_hijack.undo_hijack(sd_model)
+ try:
+ load_model_weights(sd_model, checkpoint_info, state_dict, timer)
+ except Exception as e:
+ print("Failed to load checkpoint, restoring previous")
+ load_model_weights(sd_model, current_checkpoint_info, None, timer)
+ raise
+ finally:
+ sd_hijack.model_hijack.hijack(sd_model)
+ timer.record("hijack")
- load_model_weights(sd_model, checkpoint_info)
+ script_callbacks.model_loaded_callback(sd_model)
+ timer.record("script callbacks")
- sd_hijack.model_hijack.hijack(sd_model)
- script_callbacks.model_loaded_callback(sd_model)
+ if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
+ sd_model.to(devices.device)
+ timer.record("move model to device")
- if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
- sd_model.to(devices.device)
+ print(f"Weights loaded in {timer.summary()}.")
- print("Weights loaded.")
return sd_model
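
load_model() and reload_model_weights() above thread a Timer through every stage ("load weights from disk", "create model", "hijack", ...) and print its summary once loading finishes. The Timer class itself is not part of this hunk; a minimal sketch of the interface those calls rely on (record(), summary() and a records dict), assuming plain wall-clock timing and possibly differing from the project's real implementation:

    import time

    class Timer:
        def __init__(self):
            self.start = time.time()
            self.records = {}

        def record(self, category):
            # store the time elapsed since the previous checkpoint under a label
            now = time.time()
            self.records[category] = self.records.get(category, 0) + (now - self.start)
            self.start = now

        def summary(self):
            # e.g. "2.7s (load weights from disk: 1.9s, create model: 0.5s, hijack: 0.3s)"
            total = sum(self.records.values())
            detail = ", ".join(f"{name}: {secs:.1f}s" for name, secs in self.records.items())
            return f"{total:.1f}s ({detail})"
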
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
new file mode 100644
index 00000000..00217990
--- /dev/null
+++ b/modules/sd_models_config.py
@@ -0,0 +1,71 @@
+import re
+import os
+
+from modules import shared, paths
+
+sd_configs_path = shared.sd_configs_path
+sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
+
+
+config_default = shared.sd_default_config
+config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
+config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
+config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
+config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
+config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
+config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
+config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
+
+re_parametrization_v = re.compile(r'-v\b')
+
+
+def guess_model_config_from_state_dict(sd, filename):
+ fn = os.path.basename(filename)
+
+ sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
+ diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
+
+ if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
+ return config_depth_model
+
+ if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
+ if diffusion_model_input.shape[1] == 9:
+ return config_sd2_inpainting
+ elif re.search(re_parametrization_v, fn):
+ return config_sd2v
+ else:
+ return config_sd2
+
+ if diffusion_model_input is not None:
+ if diffusion_model_input.shape[1] == 9:
+ return config_inpainting
+ if diffusion_model_input.shape[1] == 8:
+ return config_instruct_pix2pix
+
+ if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
+ return config_alt_diffusion
+
+ return config_default
+
+
+def find_checkpoint_config(state_dict, info):
+ if info is None:
+ return guess_model_config_from_state_dict(state_dict, "")
+
+ config = find_checkpoint_config_near_filename(info)
+ if config is not None:
+ return config
+
+ return guess_model_config_from_state_dict(state_dict, info.filename)
+
+
+def find_checkpoint_config_near_filename(info):
+ if info is None:
+ return None
+
+ config = os.path.splitext(info.filename)[0] + ".yaml"
+ if os.path.exists(config):
+ return config
+
+ return None
+
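
find_checkpoint_config() above prefers a .yaml placed next to the checkpoint and only then falls back to guess_model_config_from_state_dict(), which keys off tensor shapes. A worked example of that fallback, using a fabricated state dict (the shapes are illustrative, not read from a real checkpoint):

    import torch

    # SD2.x exposes a 1024-wide cond_stage projection (in_proj_weight is 3*1024 x 1024);
    # an inpainting UNet takes 9 input channels (4 latent + 4 masked-image latent + 1 mask)
    fake_sd2_inpaint = {
        'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight': torch.zeros(3072, 1024),
        'model.diffusion_model.input_blocks.0.0.weight': torch.zeros(320, 9, 3, 3),
    }

    guess_model_config_from_state_dict(fake_sd2_inpaint, "some-model.ckpt")
    # -> config_sd2_inpainting, i.e. v2-inpainting-inference.yaml from the Stable Diffusion repo configs
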
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 177b5338..a7910b56 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -97,8 +97,9 @@ sampler_extra_params = {
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
- steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
- t_enc = p.steps - 1
+ requested_steps = (steps or p.steps)
+ steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
+ t_enc = requested_steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
@@ -137,9 +138,9 @@ def samples_to_image_grid(samples, approximation=None):
def store_latent(decoded):
state.current_latent = decoded
- if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
+ if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
if not shared.parallel_processing_allowed:
- shared.state.current_image = sample_to_image(decoded)
+ shared.state.assign_current_image(sample_to_image(decoded))
class InterruptedException(BaseException):
@@ -242,7 +243,7 @@ class VanillaStableDiffusionSampler:
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def adjust_steps_if_invalid(self, p, num_steps):
- if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
+ if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
valid_step = 999 / (1000 // num_steps)
if valid_step == floor(valid_step):
return int(valid_step) + 1
@@ -265,8 +266,7 @@ class VanillaStableDiffusionSampler:
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
-
-
+
samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
return samples
@@ -351,6 +351,13 @@ class CFGDenoiser(torch.nn.Module):
x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
+ devices.test_for_nans(x_out, "unet")
+
+ if opts.live_preview_content == "Prompt":
+ store_latent(x_out[0:uncond.shape[0]])
+ elif opts.live_preview_content == "Negative prompt":
+ store_latent(x_out[-uncond.shape[0]:])
+
denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
if self.mask is not None:
@@ -422,7 +429,8 @@ class KDiffusionSampler:
def callback_state(self, d):
step = d['i']
latent = d["denoised"]
- store_latent(latent)
+ if opts.live_preview_content == "Combined":
+ store_latent(latent)
self.last_latent = latent
if self.stop_at is not None and step > self.stop_at:
@@ -446,7 +454,7 @@ class KDiffusionSampler:
def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
- self.model_wrap.step = 0
+ self.model_wrap_cfg.step = 0
self.eta = p.eta or opts.eta_ancestral
k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
@@ -462,14 +470,23 @@ class KDiffusionSampler:
return extra_params_kwargs
def get_sigmas(self, p, steps):
+ discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
+ if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
+ discard_next_to_last_sigma = True
+ p.extra_generation_params["Discard penultimate sigma"] = True
+
+ steps += 1 if discard_next_to_last_sigma else 0
+
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
- sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
+ sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
+
+ sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
- if self.config is not None and self.config.options.get('discard_next_to_last_sigma', False):
+ if discard_next_to_last_sigma:
sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
return sigmas
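
The "Discard penultimate sigma" path above asks the scheduler for one extra step and then drops the next-to-last sigma, so the sampler still runs the requested number of steps but skips the near-zero penultimate noise level. A sketch with made-up sigma values:

    import torch

    sigmas = torch.tensor([14.6, 7.9, 3.5, 1.1, 0.3, 0.03, 0.0])   # steps + 1 values, illustrative only
    sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
    # -> [14.6, 7.9, 3.5, 1.1, 0.3, 0.0]: the 0.03 entry is discarded, the trailing 0.0 is kept
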
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 3856418e..9b00f76e 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -1,35 +1,23 @@
import torch
+import safetensors.torch
import os
+import collections
from collections import namedtuple
-from modules import shared, devices, script_callbacks
-from modules.paths import models_path
+from modules import paths, shared, devices, script_callbacks, sd_models
import glob
from copy import deepcopy
-model_dir = "Stable-diffusion"
-model_path = os.path.abspath(os.path.join(models_path, model_dir))
-vae_dir = "VAE"
-vae_path = os.path.abspath(os.path.join(models_path, vae_dir))
-
-
+vae_path = os.path.abspath(os.path.join(paths.models_path, "VAE"))
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
-
-
-default_vae_dict = {"auto": "auto", "None": None, None: None}
-default_vae_list = ["auto", "None"]
-
-
-default_vae_values = [default_vae_dict[x] for x in default_vae_list]
-vae_dict = dict(default_vae_dict)
-vae_list = list(default_vae_list)
-first_load = True
+vae_dict = {}
base_vae = None
loaded_vae_file = None
checkpoint_info = None
+checkpoints_loaded = collections.OrderedDict()
def get_base_vae(model):
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model:
@@ -61,126 +49,138 @@ def restore_base_vae(model):
def get_filename(filepath):
- return os.path.splitext(os.path.basename(filepath))[0]
+ return os.path.basename(filepath)
-def refresh_vae_list(vae_path=vae_path, model_path=model_path):
- global vae_dict, vae_list
- res = {}
- candidates = [
- *glob.iglob(os.path.join(model_path, '**/*.vae.ckpt'), recursive=True),
- *glob.iglob(os.path.join(model_path, '**/*.vae.pt'), recursive=True),
- *glob.iglob(os.path.join(vae_path, '**/*.ckpt'), recursive=True),
- *glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True)
+def refresh_vae_list():
+ vae_dict.clear()
+
+ paths = [
+ os.path.join(sd_models.model_path, '**/*.vae.ckpt'),
+ os.path.join(sd_models.model_path, '**/*.vae.pt'),
+ os.path.join(sd_models.model_path, '**/*.vae.safetensors'),
+ os.path.join(vae_path, '**/*.ckpt'),
+ os.path.join(vae_path, '**/*.pt'),
+ os.path.join(vae_path, '**/*.safetensors'),
]
- if shared.cmd_opts.vae_path is not None and os.path.isfile(shared.cmd_opts.vae_path):
- candidates.append(shared.cmd_opts.vae_path)
+
+ if shared.cmd_opts.ckpt_dir is not None and os.path.isdir(shared.cmd_opts.ckpt_dir):
+ paths += [
+ os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.ckpt'),
+ os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.pt'),
+ os.path.join(shared.cmd_opts.ckpt_dir, '**/*.vae.safetensors'),
+ ]
+
+ if shared.cmd_opts.vae_dir is not None and os.path.isdir(shared.cmd_opts.vae_dir):
+ paths += [
+ os.path.join(shared.cmd_opts.vae_dir, '**/*.ckpt'),
+ os.path.join(shared.cmd_opts.vae_dir, '**/*.pt'),
+ os.path.join(shared.cmd_opts.vae_dir, '**/*.safetensors'),
+ ]
+
+ candidates = []
+ for path in paths:
+ candidates += glob.iglob(path, recursive=True)
+
for filepath in candidates:
name = get_filename(filepath)
- res[name] = filepath
- vae_list.clear()
- vae_list.extend(default_vae_list)
- vae_list.extend(list(res.keys()))
- vae_dict.clear()
- vae_dict.update(res)
- vae_dict.update(default_vae_dict)
- return vae_list
-
-
-def get_vae_from_settings(vae_file="auto"):
- # else, we load from settings, if not set to be default
- if vae_file == "auto" and shared.opts.sd_vae is not None:
- # if saved VAE settings isn't recognized, fallback to auto
- vae_file = vae_dict.get(shared.opts.sd_vae, "auto")
- # if VAE selected but not found, fallback to auto
- if vae_file not in default_vae_values and not os.path.isfile(vae_file):
- vae_file = "auto"
- print(f"Selected VAE doesn't exist: {vae_file}")
- return vae_file
-
-
-def resolve_vae(checkpoint_file=None, vae_file="auto"):
- global first_load, vae_dict, vae_list
-
- # if vae_file argument is provided, it takes priority, but not saved
- if vae_file and vae_file not in default_vae_list:
- if not os.path.isfile(vae_file):
- print(f"VAE provided as function argument doesn't exist: {vae_file}")
- vae_file = "auto"
- # for the first load, if vae-path is provided, it takes priority, saved, and failure is reported
- if first_load and shared.cmd_opts.vae_path is not None:
- if os.path.isfile(shared.cmd_opts.vae_path):
- vae_file = shared.cmd_opts.vae_path
- shared.opts.data['sd_vae'] = get_filename(vae_file)
- else:
- print(f"VAE provided as command line argument doesn't exist: {vae_file}")
- # fallback to selector in settings, if vae selector not set to act as default fallback
- if not shared.opts.sd_vae_as_default:
- vae_file = get_vae_from_settings(vae_file)
- # vae-path cmd arg takes priority for auto
- if vae_file == "auto" and shared.cmd_opts.vae_path is not None:
- if os.path.isfile(shared.cmd_opts.vae_path):
- vae_file = shared.cmd_opts.vae_path
- print(f"Using VAE provided as command line argument: {vae_file}")
- # if still not found, try look for ".vae.pt" beside model
- model_path = os.path.splitext(checkpoint_file)[0]
- if vae_file == "auto":
- vae_file_try = model_path + ".vae.pt"
- if os.path.isfile(vae_file_try):
- vae_file = vae_file_try
- print(f"Using VAE found similar to selected model: {vae_file}")
- # if still not found, try look for ".vae.ckpt" beside model
- if vae_file == "auto":
- vae_file_try = model_path + ".vae.ckpt"
- if os.path.isfile(vae_file_try):
- vae_file = vae_file_try
- print(f"Using VAE found similar to selected model: {vae_file}")
- # No more fallbacks for auto
- if vae_file == "auto":
- vae_file = None
- # Last check, just because
- if vae_file and not os.path.exists(vae_file):
- vae_file = None
-
- return vae_file
-
-
-def load_vae(model, vae_file=None):
- global first_load, vae_dict, vae_list, loaded_vae_file
+ vae_dict[name] = filepath
+
+
+def find_vae_near_checkpoint(checkpoint_file):
+ checkpoint_path = os.path.splitext(checkpoint_file)[0]
+ for vae_location in [checkpoint_path + ".vae.pt", checkpoint_path + ".vae.ckpt", checkpoint_path + ".vae.safetensors"]:
+ if os.path.isfile(vae_location):
+ return vae_location
+
+ return None
+
+
+def resolve_vae(checkpoint_file):
+ if shared.cmd_opts.vae_path is not None:
+ return shared.cmd_opts.vae_path, 'from commandline argument'
+
+ is_automatic = shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config
+
+ vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file)
+ if vae_near_checkpoint is not None and (shared.opts.sd_vae_as_default or is_automatic):
+ return vae_near_checkpoint, 'found near the checkpoint'
+
+ if shared.opts.sd_vae == "None":
+ return None, None
+
+ vae_from_options = vae_dict.get(shared.opts.sd_vae, None)
+ if vae_from_options is not None:
+ return vae_from_options, 'specified in settings'
+
+ if not is_automatic:
+ print(f"Couldn't find VAE named {shared.opts.sd_vae}; using None instead")
+
+ return None, None
+
+
+def load_vae_dict(filename, map_location):
+ vae_ckpt = sd_models.read_state_dict(filename, map_location=map_location)
+ vae_dict_1 = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss" and k not in vae_ignore_keys}
+ return vae_dict_1
+
+
+def load_vae(model, vae_file=None, vae_source="from unknown source"):
+ global vae_dict, loaded_vae_file
# save_settings = False
+ cache_enabled = shared.opts.sd_vae_checkpoint_cache > 0
+
if vae_file:
- assert os.path.isfile(vae_file), f"VAE file doesn't exist: {vae_file}"
- print(f"Loading VAE weights from: {vae_file}")
- store_base_vae(model)
- vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
- vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys}
- _load_vae_dict(model, vae_dict_1)
+ if cache_enabled and vae_file in checkpoints_loaded:
+ # use vae checkpoint cache
+ print(f"Loading VAE weights {vae_source}: cached {get_filename(vae_file)}")
+ store_base_vae(model)
+ _load_vae_dict(model, checkpoints_loaded[vae_file])
+ else:
+ assert os.path.isfile(vae_file), f"VAE {vae_source} doesn't exist: {vae_file}"
+ print(f"Loading VAE weights {vae_source}: {vae_file}")
+ store_base_vae(model)
+
+ vae_dict_1 = load_vae_dict(vae_file, map_location=shared.weight_load_location)
+ _load_vae_dict(model, vae_dict_1)
+
+ if cache_enabled:
+ # cache newly loaded vae
+ checkpoints_loaded[vae_file] = vae_dict_1.copy()
+
+ # clean up cache if limit is reached
+ if cache_enabled:
+ while len(checkpoints_loaded) > shared.opts.sd_vae_checkpoint_cache + 1: # we need to count the current model
+ checkpoints_loaded.popitem(last=False) # LRU
# If vae used is not in dict, update it
# It will be removed on refresh though
vae_opt = get_filename(vae_file)
if vae_opt not in vae_dict:
vae_dict[vae_opt] = vae_file
- vae_list.append(vae_opt)
+
elif loaded_vae_file:
restore_base_vae(model)
loaded_vae_file = vae_file
- first_load = False
-
# don't call this from outside
def _load_vae_dict(model, vae_dict_1):
model.first_stage_model.load_state_dict(vae_dict_1)
model.first_stage_model.to(devices.dtype_vae)
+
def clear_loaded_vae():
global loaded_vae_file
loaded_vae_file = None
-def reload_vae_weights(sd_model=None, vae_file="auto"):
+
+unspecified = object()
+
+
+def reload_vae_weights(sd_model=None, vae_file=unspecified):
from modules import lowvram, devices, sd_hijack
if not sd_model:
@@ -188,7 +188,11 @@ def reload_vae_weights(sd_model=None, vae_file="auto"):
checkpoint_info = sd_model.sd_checkpoint_info
checkpoint_file = checkpoint_info.filename
- vae_file = resolve_vae(checkpoint_file, vae_file=vae_file)
+
+ if vae_file == unspecified:
+ vae_file, vae_source = resolve_vae(checkpoint_file)
+ else:
+ vae_source = "from function argument"
if loaded_vae_file == vae_file:
return
@@ -200,7 +204,7 @@ def reload_vae_weights(sd_model=None, vae_file="auto"):
sd_hijack.model_hijack.undo_hijack(sd_model)
- load_vae(sd_model, vae_file)
+ load_vae(sd_model, vae_file, vae_source)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
@@ -208,5 +212,5 @@ def reload_vae_weights(sd_model=None, vae_file="auto"):
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
- print("VAE Weights loaded.")
+ print("VAE weights loaded.")
return sd_model
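
resolve_vae() now also reports where the chosen VAE came from, and the loader includes that source in its log line. The precedence implemented above is: --vae-path always wins; then a .vae.pt/.vae.ckpt/.vae.safetensors lying next to the checkpoint (when the setting is "Automatic" or sd_vae_as_default is enabled); then the VAE named in settings; otherwise None. A usage sketch with a hypothetical checkpoint path:

    vae_file, vae_source = resolve_vae("/models/Stable-diffusion/example.safetensors")  # hypothetical path
    if vae_file is not None:
        print(f"would load VAE weights {vae_source}: {vae_file}")
    else:
        print("no external VAE will be loaded")
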
diff --git a/modules/sd_vae_approx.py b/modules/sd_vae_approx.py
index 0a58542d..0027343a 100644
--- a/modules/sd_vae_approx.py
+++ b/modules/sd_vae_approx.py
@@ -36,7 +36,7 @@ def model():
if sd_vae_approx_model is None:
sd_vae_approx_model = VAEApprox()
- sd_vae_approx_model.load_state_dict(torch.load(os.path.join(paths.models_path, "VAE-approx", "model.pt")))
+ sd_vae_approx_model.load_state_dict(torch.load(os.path.join(paths.models_path, "VAE-approx", "model.pt"), map_location='cpu' if devices.device.type != 'cuda' else None))
sd_vae_approx_model.eval()
sd_vae_approx_model.to(devices.device, devices.dtype)
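
The map_location change guards machines without CUDA: torch.load() restores tensors to the device they were saved from, so a checkpoint written out from CUDA tensors would otherwise fail to load on a CPU- or MPS-only machine. A minimal illustration (the filename is hypothetical):

    import torch

    state = torch.load("VAE-approx/model.pt", map_location="cpu")   # hypothetical path; safe on any device
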
diff --git a/modules/shared.py b/modules/shared.py
index d4ddeea0..474fcc42 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -9,30 +9,35 @@ from PIL import Image
import gradio as gr
import tqdm
-import modules.artists
import modules.interrogate
import modules.memmon
import modules.styles
import modules.devices as devices
-from modules import localization, sd_vae, extensions, script_loading
-from modules.paths import models_path, script_path, sd_path
+from modules import localization, extensions, script_loading, errors, ui_components, shared_items
+from modules.paths import models_path, script_path, data_path
demo = None
+sd_configs_path = os.path.join(script_path, "configs")
+sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml")
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
+
parser = argparse.ArgumentParser()
-parser.add_argument("--config", type=str, default=os.path.join(script_path, "v1-inference.yaml"), help="path to config which constructs model",)
+parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored",)
+parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
+parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
-parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
+parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
+parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
@@ -42,6 +47,7 @@ parser.add_argument("--lowram", action='store_true', help="load stable diffusion
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
+parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
@@ -54,34 +60,41 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
+parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
+parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization")
+parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
+parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
+parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
+parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
-parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
+parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(data_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
-parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
+parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
-parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="editor")
-parser.add_argument("--gradio-inpaint-tool", type=str, choices=["sketch", "color-sketch"], default="sketch", help="gradio inpainting editor: can be either sketch to only blur/noise the input, or color-sketch to paint over it")
+parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
+parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
-parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
+parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
-parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None)
+parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)")
parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
+parser.add_argument("--api-log", action='store_true', help="use api-log=True to enable logging of all API requests")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the API instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
@@ -91,6 +104,8 @@ parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS o
parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
+parser.add_argument("--gradio-queue", action='store_true', help="Uses gradio queue; experimental option; breaks restart UI button")
+
script_loading.preload_extensions(extensions.extensions_dir, parser)
script_loading.preload_extensions(extensions.extensions_builtin_dir, parser)
@@ -109,6 +124,18 @@ restricted_opts = {
"outdir_save",
}
+ui_reorder_categories = [
+ "inpaint",
+ "sampler",
+ "dimensions",
+ "cfg",
+ "seed",
+ "checkboxes",
+ "hires_fix",
+ "batch",
+ "scripts",
+]
+
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
@@ -124,7 +151,7 @@ config_filename = cmd_opts.ui_settings_file
os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
hypernetworks = {}
-loaded_hypernetwork = None
+loaded_hypernetworks = []
def reload_hypernetworks():
@@ -132,7 +159,6 @@ def reload_hypernetworks():
global hypernetworks
hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
- hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
class State:
@@ -141,15 +167,18 @@ class State:
job = ""
job_no = 0
job_count = 0
+ processing_has_refined_job_count = False
job_timestamp = '0'
sampling_step = 0
sampling_steps = 0
current_latent = None
current_image = None
current_image_sampling_step = 0
+ id_live_preview = 0
textinfo = None
time_start = None
need_restart = False
+ server_start = None
def skip(self):
self.skipped = True
@@ -158,7 +187,7 @@ class State:
self.interrupted = True
def nextjob(self):
- if opts.show_progress_every_n_steps == -1:
+ if opts.live_previews_enable and opts.show_progress_every_n_steps == -1:
self.do_set_current_image()
self.job_no += 1
@@ -168,9 +197,10 @@ class State:
def dict(self):
obj = {
"skipped": self.skipped,
- "interrupted": self.skipped,
+ "interrupted": self.interrupted,
"job": self.job,
"job_count": self.job_count,
+ "job_timestamp": self.job_timestamp,
"job_no": self.job_no,
"sampling_step": self.sampling_step,
"sampling_steps": self.sampling_steps,
@@ -181,11 +211,13 @@ class State:
def begin(self):
self.sampling_step = 0
self.job_count = -1
+ self.processing_has_refined_job_count = False
self.job_no = 0
self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
self.current_latent = None
self.current_image = None
self.current_image_sampling_step = 0
+ self.id_live_preview = 0
self.skipped = False
self.interrupted = False
self.textinfo = None
@@ -199,28 +231,33 @@ class State:
devices.torch_gc()
- """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
def set_current_image(self):
- if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.show_progress_every_n_steps > 0:
+ """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
+ if not parallel_processing_allowed:
+ return
+
+ if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1:
self.do_set_current_image()
def do_set_current_image(self):
- if not parallel_processing_allowed:
- return
if self.current_latent is None:
return
import modules.sd_samplers
if opts.show_progress_grid:
- self.current_image = modules.sd_samplers.samples_to_image_grid(self.current_latent)
+ self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
else:
- self.current_image = modules.sd_samplers.sample_to_image(self.current_latent)
+ self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
self.current_image_sampling_step = self.sampling_step
-state = State()
+ def assign_current_image(self, image):
+ self.current_image = image
+ self.id_live_preview += 1
-artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
+
+state = State()
+state.server_start = time.time()
styles_filename = cmd_opts.styles_file
prompt_styles = modules.styles.StyleDatabase(styles_filename)
@@ -229,12 +266,6 @@ interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
-
-def realesrgan_models_names():
- import modules.realesrgan_model
- return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
-
-
class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None):
self.default = default
@@ -325,9 +356,8 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo
options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
- "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
+ "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
- "use_scale_latent_for_hires_fix": OptionInfo(False, "Upscale latent space image when doing hires. fix"),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
@@ -337,51 +367,62 @@ options_templates.update(options_section(('face-restoration', "Face restoration"
}))
options_templates.update(options_section(('system', "System"), {
+ "show_warnings": OptionInfo(False, "Show warnings in console."),
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
"multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
+ "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
}))
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
- "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training can be resumed with HN itself and matching optim file."),
+ "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
+ "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
+ "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
+ "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
+ "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
- "sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": sd_vae.vae_list}, refresh=sd_vae.refresh_vae_list),
- "sd_vae_as_default": OptionInfo(False, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
- "sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
- "sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
+ "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+ "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list),
+ "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01 }),
+ "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
- "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", gr.ColorPicker, {}),
+ "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", ui_components.FormColorPicker, {}),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
- "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
- 'CLIP_stop_at_last_layers': OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
- "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
+ "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
+ "extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
+}))
+
+options_templates.update(options_section(('compatibility', "Compatibility"), {
+ "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
+ "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
+ "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
- "interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_return_ranks": OptionInfo(False, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file (0 = No limit)"),
+ "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types),
"interrogate_deepbooru_score_threshold": OptionInfo(0.5, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"deepbooru_sort_alpha": OptionInfo(True, "Interrogate: deepbooru sort alphabetically"),
"deepbooru_use_spaces": OptionInfo(False, "use spaces for tags in deepbooru"),
@@ -389,15 +430,15 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
"deepbooru_filter_tags": OptionInfo("", "filter out those tags from deepbooru output (separated by comma)"),
}))
+options_templates.update(options_section(('extra_networks', "Extra Networks"), {
+ "extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, { "choices": ["cards", "thumbs"] }),
+}))
+
options_templates.update(options_section(('ui', "User interface"), {
- "show_progressbar": OptionInfo(True, "Show progressbar"),
- "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set to 0 to disable. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
- "show_progress_type": OptionInfo("Full", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}),
- "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
- "add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
+ "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
"disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
"send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
@@ -405,8 +446,24 @@ options_templates.update(options_section(('ui', "User interface"), {
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
- 'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"),
- 'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
+ "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"),
+ "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"),
+ "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"),
+ "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
+ "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"),
+ "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
+}))
+
+options_templates.update(options_section(('ui', "Live previews"), {
+ "show_progressbar": OptionInfo(True, "Show progressbar"),
+ "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
+ "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
+ "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
+ "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}),
+ "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
+ "live_preview_refresh_period": OptionInfo(1000, "Progressbar/preview update period, in milliseconds")
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
@@ -418,10 +475,18 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
+ 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma"),
+}))
+
+options_templates.update(options_section(('postprocessing', "Postprocessing"), {
+ 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
+ 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
+ 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
}))
options_templates.update(options_section((None, "Hidden options"), {
"disabled_extensions": OptionInfo([], "Disable those extensions"),
+ "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
}))
options_templates.update()
@@ -476,7 +541,12 @@ class Options:
return False
if self.data_labels[key].onchange is not None:
- self.data_labels[key].onchange()
+ try:
+ self.data_labels[key].onchange()
+ except Exception as e:
+ errors.display(e, f"changing setting {key} to {value}")
+ setattr(self, key, oldval)
+ return False
return True
@@ -539,6 +609,16 @@ opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
+latent_upscale_default_mode = "Latent"
+latent_upscale_modes = {
+ "Latent": {"mode": "bilinear", "antialias": False},
+ "Latent (antialiased)": {"mode": "bilinear", "antialias": True},
+ "Latent (bicubic)": {"mode": "bicubic", "antialias": False},
+ "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True},
+ "Latent (nearest)": {"mode": "nearest", "antialias": False},
+ "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False},
+}
+
sd_upscalers = []
sd_model = None
@@ -572,7 +652,7 @@ class TotalTQDM:
return
if self._tqdm is None:
self.reset()
- self._tqdm.total=new_total
+ self._tqdm.total = new_total
def clear(self):
if self._tqdm is not None:
@@ -589,3 +669,17 @@ mem_mon.start()
def listfiles(dirname):
filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname)) if not x.startswith(".")]
return [file for file in filenames if os.path.isfile(file)]
+
+
+def html_path(filename):
+ return os.path.join(script_path, "html", filename)
+
+
+def html(filename):
+ path = html_path(filename)
+
+ if os.path.exists(path):
+ with open(path, encoding="utf8") as file:
+ return file.read()
+
+ return ""
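
The new latent_upscale_modes table pairs each UI choice with the interpolation mode and antialias flag that the hires-fix code is expected to forward to torch.nn.functional.interpolate. A sketch of how one entry would be consumed (tensor sizes are illustrative):

    import torch
    import torch.nn.functional as F

    opt = latent_upscale_modes["Latent (bicubic antialiased)"]
    latent = torch.randn(1, 4, 64, 64)          # latent for a 512x512 image
    upscaled = F.interpolate(latent, scale_factor=2, mode=opt["mode"], antialias=opt["antialias"])
    # upscaled.shape -> torch.Size([1, 4, 128, 128])
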
diff --git a/modules/shared_items.py b/modules/shared_items.py
new file mode 100644
index 00000000..8b5ec96d
--- /dev/null
+++ b/modules/shared_items.py
@@ -0,0 +1,23 @@
+
+
+def realesrgan_models_names():
+ import modules.realesrgan_model
+ return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
+
+
+def postprocessing_scripts():
+ import modules.scripts
+
+ return modules.scripts.scripts_postproc.scripts
+
+
+def sd_vae_items():
+ import modules.sd_vae
+
+ return ["Automatic", "None"] + list(modules.sd_vae.vae_dict)
+
+
+def refresh_vae_list():
+ import modules.sd_vae
+
+ return modules.sd_vae.refresh_vae_list
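
The helpers in shared_items.py import their targets inside the function body on purpose: shared.py calls them from OptionInfo lambdas, and importing sd_vae, scripts or realesrgan_model at module level would create an import cycle, since those modules themselves import shared. A usage sketch:

    from modules import shared_items

    choices = shared_items.sd_vae_items()   # ["Automatic", "None", ...names collected by sd_vae.refresh_vae_list()]
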
diff --git a/modules/styles.py b/modules/styles.py
index ce6e71ca..990d5623 100644
--- a/modules/styles.py
+++ b/modules/styles.py
@@ -40,12 +40,18 @@ def apply_styles_to_prompt(prompt, styles):
class StyleDatabase:
def __init__(self, path: str):
self.no_style = PromptStyle("None", "", "")
- self.styles = {"None": self.no_style}
+ self.styles = {}
+ self.path = path
- if not os.path.exists(path):
+ self.reload()
+
+ def reload(self):
+ self.styles.clear()
+
+ if not os.path.exists(self.path):
return
- with open(path, "r", encoding="utf-8-sig", newline='') as file:
+ with open(self.path, "r", encoding="utf-8-sig", newline='') as file:
reader = csv.DictReader(file)
for row in reader:
# Support loading old CSV format with "name, text"-columns
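
StyleDatabase now remembers its path and exposes reload(), so the styles list can be re-read from disk without restarting the UI. A sketch (the CSV path is whatever --styles-file points at; the name here is hypothetical):

    from modules.styles import StyleDatabase

    db = StyleDatabase("styles.csv")   # hypothetical path
    db.reload()                        # re-reads the CSV, picking up newly added styles
    print(list(db.styles))
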
diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py
new file mode 100644
index 00000000..05595323
--- /dev/null
+++ b/modules/sub_quadratic_attention.py
@@ -0,0 +1,214 @@
+# original source:
+# https://github.com/AminRezaei0x443/memory-efficient-attention/blob/1bc0d9e6ac5f82ea43a375135c4e1d3896ee1694/memory_efficient_attention/attention_torch.py
+# license:
+# MIT License (see Memory Efficient Attention under the Licenses section in the web UI interface for the full license)
+# credit:
+# Amin Rezaei (original author)
+# Alex Birch (optimized algorithm for 3D tensors, at the expense of removing bias, masking and callbacks)
+# brkirch (modified to use torch.narrow instead of dynamic_slice implementation)
+# implementation of:
+# "Self-attention Does Not Need O(n²) Memory":
+# https://arxiv.org/abs/2112.05682v2
+
+from functools import partial
+import torch
+from torch import Tensor
+from torch.utils.checkpoint import checkpoint
+import math
+from typing import Optional, NamedTuple, List
+
+
+def narrow_trunc(
+ input: Tensor,
+ dim: int,
+ start: int,
+ length: int
+) -> Tensor:
+ return torch.narrow(input, dim, start, length if input.shape[dim] >= start + length else input.shape[dim] - start)
+
+
+class AttnChunk(NamedTuple):
+ exp_values: Tensor
+ exp_weights_sum: Tensor
+ max_score: Tensor
+
+
+class SummarizeChunk:
+ @staticmethod
+ def __call__(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ ) -> AttnChunk: ...
+
+
+class ComputeQueryChunkAttn:
+ @staticmethod
+ def __call__(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ ) -> Tensor: ...
+
+
+def _summarize_chunk(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ scale: float,
+) -> AttnChunk:
+ attn_weights = torch.baddbmm(
+ torch.empty(1, 1, 1, device=query.device, dtype=query.dtype),
+ query,
+ key.transpose(1,2),
+ alpha=scale,
+ beta=0,
+ )
+ max_score, _ = torch.max(attn_weights, -1, keepdim=True)
+ max_score = max_score.detach()
+ exp_weights = torch.exp(attn_weights - max_score)
+ exp_values = torch.bmm(exp_weights, value) if query.device.type == 'mps' else torch.bmm(exp_weights, value.to(exp_weights.dtype)).to(value.dtype)
+ max_score = max_score.squeeze(-1)
+ return AttnChunk(exp_values, exp_weights.sum(dim=-1), max_score)
+
+
+def _query_chunk_attention(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ summarize_chunk: SummarizeChunk,
+ kv_chunk_size: int,
+) -> Tensor:
+ batch_x_heads, k_tokens, k_channels_per_head = key.shape
+ _, _, v_channels_per_head = value.shape
+
+ def chunk_scanner(chunk_idx: int) -> AttnChunk:
+ key_chunk = narrow_trunc(
+ key,
+ 1,
+ chunk_idx,
+ kv_chunk_size
+ )
+ value_chunk = narrow_trunc(
+ value,
+ 1,
+ chunk_idx,
+ kv_chunk_size
+ )
+ return summarize_chunk(query, key_chunk, value_chunk)
+
+ chunks: List[AttnChunk] = [
+ chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size)
+ ]
+ acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks)))
+ chunk_values, chunk_weights, chunk_max = acc_chunk
+
+ global_max, _ = torch.max(chunk_max, 0, keepdim=True)
+ max_diffs = torch.exp(chunk_max - global_max)
+ chunk_values *= torch.unsqueeze(max_diffs, -1)
+ chunk_weights *= max_diffs
+
+ all_values = chunk_values.sum(dim=0)
+ all_weights = torch.unsqueeze(chunk_weights, -1).sum(dim=0)
+ return all_values / all_weights
+
+
+# TODO: refactor CrossAttention#get_attention_scores to share code with this
+def _get_attention_scores_no_kv_chunking(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ scale: float,
+) -> Tensor:
+ attn_scores = torch.baddbmm(
+ torch.empty(1, 1, 1, device=query.device, dtype=query.dtype),
+ query,
+ key.transpose(1,2),
+ alpha=scale,
+ beta=0,
+ )
+ attn_probs = attn_scores.softmax(dim=-1)
+ del attn_scores
+ hidden_states_slice = torch.bmm(attn_probs, value) if query.device.type == 'mps' else torch.bmm(attn_probs, value.to(attn_probs.dtype)).to(value.dtype)
+ return hidden_states_slice
+
+
+class ScannedChunk(NamedTuple):
+ chunk_idx: int
+ attn_chunk: AttnChunk
+
+
+def efficient_dot_product_attention(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ query_chunk_size=1024,
+ kv_chunk_size: Optional[int] = None,
+ kv_chunk_size_min: Optional[int] = None,
+ use_checkpoint=True,
+):
+ """Computes efficient dot-product attention given query, key, and value.
+ This is an efficient version of the attention presented in
+ https://arxiv.org/abs/2112.05682v2 which comes with O(sqrt(n)) memory requirements.
+ Args:
+ query: queries for calculating attention with shape of
+ `[batch * num_heads, tokens, channels_per_head]`.
+ key: keys for calculating attention with shape of
+ `[batch * num_heads, tokens, channels_per_head]`.
+ value: values to be used in attention with shape of
+ `[batch * num_heads, tokens, channels_per_head]`.
+ query_chunk_size: int: query chunk size
+ kv_chunk_size: Optional[int]: key/value chunks size. if None: defaults to sqrt(key_tokens)
+ kv_chunk_size_min: Optional[int]: key/value minimum chunk size. only considered when kv_chunk_size is None. changes `sqrt(key_tokens)` into `max(sqrt(key_tokens), kv_chunk_size_min)`, to ensure our chunk sizes don't get too small (smaller chunks = more chunks = less concurrent work done).
+ use_checkpoint: bool: whether to use checkpointing (recommended True for training, False for inference)
+ Returns:
+ Output of shape `[batch * num_heads, query_tokens, channels_per_head]`.
+ """
+ batch_x_heads, q_tokens, q_channels_per_head = query.shape
+ _, k_tokens, _ = key.shape
+ scale = q_channels_per_head ** -0.5
+
+ kv_chunk_size = min(kv_chunk_size or int(math.sqrt(k_tokens)), k_tokens)
+ if kv_chunk_size_min is not None:
+ kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min)
+
+ def get_query_chunk(chunk_idx: int) -> Tensor:
+ return narrow_trunc(
+ query,
+ 1,
+ chunk_idx,
+ min(query_chunk_size, q_tokens)
+ )
+
+ summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale)
+ summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk
+ compute_query_chunk_attn: ComputeQueryChunkAttn = partial(
+ _get_attention_scores_no_kv_chunking,
+ scale=scale
+ ) if k_tokens <= kv_chunk_size else (
+ # fast-path for when there's just 1 key-value chunk per query chunk (this is just sliced attention btw)
+ partial(
+ _query_chunk_attention,
+ kv_chunk_size=kv_chunk_size,
+ summarize_chunk=summarize_chunk,
+ )
+ )
+
+ if q_tokens <= query_chunk_size:
+ # fast-path for when there's just 1 query chunk
+ return compute_query_chunk_attn(
+ query=query,
+ key=key,
+ value=value,
+ )
+
+ # TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance,
+ # and pass slices to be mutated, instead of torch.cat()ing the returned slices
+ res = torch.cat([
+ compute_query_chunk_attn(
+ query=get_query_chunk(i * query_chunk_size),
+ key=key,
+ value=value,
+ ) for i in range(math.ceil(q_tokens / query_chunk_size))
+ ], dim=1)
+ return res
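
Editorial note: the new module above implements the chunked attention from arXiv:2112.05682, merging per-chunk softmax statistics with the usual max-shifted rescaling so only one query/key chunk pair is materialized at a time. A minimal usage sketch, assuming the webui's modules package is importable; shapes and chunk sizes are illustrative:

    import torch
    from modules.sub_quadratic_attention import efficient_dot_product_attention

    # q, k, v are flattened to [batch * heads, tokens, channels_per_head] by the caller
    q = torch.randn(2, 1024, 40)
    k = torch.randn(2, 1024, 40)
    v = torch.randn(2, 1024, 40)

    # kv_chunk_size defaults to sqrt(tokens); checkpointing only helps during training
    out = efficient_dot_product_attention(q, k, v, query_chunk_size=512, use_checkpoint=False)
    assert out.shape == (2, 1024, 40)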
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
index 88d68c76..d31963d4 100644
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -3,8 +3,10 @@ import numpy as np
import PIL
import torch
from PIL import Image
-from torch.utils.data import Dataset, DataLoader
+from torch.utils.data import Dataset, DataLoader, Sampler
from torchvision import transforms
+from collections import defaultdict
+from random import shuffle, choices
import random
import tqdm
@@ -28,13 +30,11 @@ class DatasetEntry:
class PersonalizedBase(Dataset):
- def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once'):
+ def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False):
re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None
self.placeholder_token = placeholder_token
- self.width = width
- self.height = height
self.flip = transforms.RandomHorizontalFlip(p=flip_p)
self.dataset = []
@@ -50,16 +50,18 @@ class PersonalizedBase(Dataset):
self.image_paths = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)]
-
self.shuffle_tags = shuffle_tags
self.tag_drop_out = tag_drop_out
+ groups = defaultdict(list)
print("Preparing dataset...")
for path in tqdm.tqdm(self.image_paths):
if shared.state.interrupted:
raise Exception("interrupted")
try:
- image = Image.open(path).convert('RGB').resize((self.width, self.height), PIL.Image.BICUBIC)
+ image = Image.open(path).convert('RGB')
+ if not varsize:
+ image = image.resize((width, height), PIL.Image.BICUBIC)
except Exception:
continue
@@ -103,18 +105,25 @@ class PersonalizedBase(Dataset):
if include_cond and not (self.tag_drop_out != 0 or self.shuffle_tags):
with devices.autocast():
entry.cond = cond_model([entry.cond_text]).to(devices.cpu).squeeze(0)
-
+ groups[image.size].append(len(self.dataset))
self.dataset.append(entry)
del torchdata
del latent_dist
del latent_sample
self.length = len(self.dataset)
+ self.groups = list(groups.values())
assert self.length > 0, "No images have been found in the dataset."
self.batch_size = min(batch_size, self.length)
self.gradient_step = min(gradient_step, self.length // self.batch_size)
self.latent_sampling_method = latent_sampling_method
+ if len(groups) > 1:
+ print("Buckets:")
+ for (w, h), ids in sorted(groups.items(), key=lambda x: x[0]):
+ print(f" {w}x{h}: {len(ids)}")
+ print()
+
def create_text(self, filename_text):
text = random.choice(self.lines)
tags = filename_text.split(',')
@@ -137,9 +146,44 @@ class PersonalizedBase(Dataset):
entry.latent_sample = shared.sd_model.get_first_stage_encoding(entry.latent_dist).to(devices.cpu)
return entry
+
+class GroupedBatchSampler(Sampler):
+ def __init__(self, data_source: PersonalizedBase, batch_size: int):
+ super().__init__(data_source)
+
+ n = len(data_source)
+ self.groups = data_source.groups
+ self.len = n_batch = n // batch_size
+ expected = [len(g) / n * n_batch * batch_size for g in data_source.groups]
+ self.base = [int(e) // batch_size for e in expected]
+ self.n_rand_batches = nrb = n_batch - sum(self.base)
+ self.probs = [e%batch_size/nrb/batch_size if nrb>0 else 0 for e in expected]
+ self.batch_size = batch_size
+
+ def __len__(self):
+ return self.len
+
+ def __iter__(self):
+ b = self.batch_size
+
+ for g in self.groups:
+ shuffle(g)
+
+ batches = []
+ for g in self.groups:
+ batches.extend(g[i*b:(i+1)*b] for i in range(len(g) // b))
+ for _ in range(self.n_rand_batches):
+ rand_group = choices(self.groups, self.probs)[0]
+ batches.append(choices(rand_group, k=b))
+
+ shuffle(batches)
+
+ yield from batches
+
+
class PersonalizedDataLoader(DataLoader):
def __init__(self, dataset, latent_sampling_method="once", batch_size=1, pin_memory=False):
- super(PersonalizedDataLoader, self).__init__(dataset, shuffle=True, drop_last=True, batch_size=batch_size, pin_memory=pin_memory)
+ super(PersonalizedDataLoader, self).__init__(dataset, batch_sampler=GroupedBatchSampler(dataset, batch_size), pin_memory=pin_memory)
if latent_sampling_method == "random":
self.collate_fn = collate_wrapper_random
else:
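
Editorial note: with the new varsize flag, images keep their own resolution and are grouped into buckets keyed by (width, height); GroupedBatchSampler then draws every batch from a single bucket, so a batch never mixes sizes. Its batch-allocation arithmetic, reduced to a standalone sketch on assumed toy bucket sizes (indices only, no images):

    # Each bucket holds dataset indices of images that share one resolution.
    groups = [list(range(0, 10)), list(range(10, 17)), list(range(17, 20))]  # 10 + 7 + 3 images
    batch_size = 4
    n = sum(len(g) for g in groups)           # 20 images
    n_batch = n // batch_size                 # 5 batches per epoch

    expected = [len(g) / n * n_batch * batch_size for g in groups]   # ideal image count per bucket
    base = [int(e) // batch_size for e in expected]                  # guaranteed whole batches
    n_rand = n_batch - sum(base)                                     # remainder, filled randomly
    probs = [e % batch_size / n_rand / batch_size if n_rand > 0 else 0 for e in expected]

    print(base, n_rand, [round(p, 3) for p in probs])
    # [2, 1, 0] 2 [0.25, 0.375, 0.375]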
diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py
index ea653806..5593f88c 100644
--- a/modules/textual_inversion/image_embedding.py
+++ b/modules/textual_inversion/image_embedding.py
@@ -76,10 +76,10 @@ def insert_image_data_embed(image, data):
next_size = data_np_low.shape[0] + (h-(data_np_low.shape[0] % h))
next_size = next_size + ((h*d)-(next_size % (h*d)))
- data_np_low.resize(next_size)
+ data_np_low = np.resize(data_np_low, next_size)
data_np_low = data_np_low.reshape((h, -1, d))
- data_np_high.resize(next_size)
+ data_np_high = np.resize(data_np_high, next_size)
data_np_high = data_np_high.reshape((h, -1, d))
edge_style = list(data['string_to_param'].values())[0].cpu().detach().numpy().tolist()[0][:1024]
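
Editorial note: the change above swaps the in-place ndarray.resize for np.resize, presumably because the in-place form refuses to run once the buffer is referenced by another array; note that np.resize also fills the added space by repeating data instead of zero-padding. A small demonstration of the difference:

    import numpy as np

    a = np.arange(4, dtype=np.uint8)
    view = a[:2]                      # any live reference...
    try:
        a.resize(6)                   # ...makes the in-place resize raise
    except ValueError as e:
        print("ndarray.resize failed:", e)

    b = np.resize(a, 6)               # returns a new array; data is repeated to fill the new size
    print(b)                          # [0 1 2 3 0 1]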
diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py
index dd0c0ad1..f63fc72f 100644
--- a/modules/textual_inversion/learn_schedule.py
+++ b/modules/textual_inversion/learn_schedule.py
@@ -58,14 +58,19 @@ class LearnRateScheduler:
self.finished = False
- def apply(self, optimizer, step_number):
+ def step(self, step_number):
if step_number < self.end_step:
- return
+ return False
try:
(self.learn_rate, self.end_step) = next(self.schedules)
- except Exception:
+ except StopIteration:
self.finished = True
+ return False
+ return True
+
+ def apply(self, optimizer, step_number):
+ if not self.step(step_number):
return
if self.verbose:
diff --git a/modules/textual_inversion/logging.py b/modules/textual_inversion/logging.py
new file mode 100644
index 00000000..734a4b6f
--- /dev/null
+++ b/modules/textual_inversion/logging.py
@@ -0,0 +1,24 @@
+import datetime
+import json
+import os
+
+saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "clip_grad_mode", "clip_grad_value", "gradient_step", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file", "gradient_step", "latent_sampling_method"}
+saved_params_ti = {"embedding_name", "num_vectors_per_token", "save_embedding_every", "save_image_with_stored_embedding"}
+saved_params_hypernet = {"hypernetwork_name", "layer_structure", "activation_func", "weight_init", "add_layer_norm", "use_dropout", "save_hypernetwork_every"}
+saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet
+saved_params_previews = {"preview_prompt", "preview_negative_prompt", "preview_steps", "preview_sampler_index", "preview_cfg_scale", "preview_seed", "preview_width", "preview_height"}
+
+
+def save_settings_to_file(log_directory, all_params):
+ now = datetime.datetime.now()
+ params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")}
+
+ keys = saved_params_all
+ if all_params.get('preview_from_txt2img'):
+ keys = keys | saved_params_previews
+
+ params.update({k: v for k, v in all_params.items() if k in keys})
+
+ filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json'
+ with open(os.path.join(log_directory, filename), "w") as file:
+ json.dump(params, file, indent=4)
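
Editorial note: save_settings_to_file whitelists the hyperparameters listed in the saved_params_* sets (plus the preview_* keys when preview_from_txt2img is set) and dumps them as a timestamped JSON next to the training logs. A usage sketch with illustrative values and paths:

    import os
    from modules.textual_inversion.logging import save_settings_to_file

    log_dir = "textual_inversion/2023-01-21/my-embedding"   # illustrative path
    os.makedirs(log_dir, exist_ok=True)

    save_settings_to_file(log_dir, {
        "embedding_name": "my-embedding",
        "learn_rate": "0.005",
        "batch_size": 1,
        "steps": 3000,
        "preview_from_txt2img": False,   # previews off, so preview_* keys are not expected
        "pin_memory": True,              # not in any saved_params_* set, silently dropped
    })
    # writes settings-<timestamp>.json containing only the whitelisted keys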
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index 56b9b2eb..2239cb84 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -6,13 +6,12 @@ import sys
import tqdm
import time
-from modules import shared, images, deepbooru
-from modules.paths import models_path
+from modules import paths, shared, images, deepbooru
from modules.shared import opts, cmd_opts
from modules.textual_inversion import autocrop
-def preprocess(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
+def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
try:
if process_caption:
shared.interrogator.load()
@@ -20,7 +19,7 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce
if process_caption_deepbooru:
deepbooru.model.start()
- preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug)
+ preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
finally:
@@ -109,8 +108,30 @@ def split_pic(image, inverse_xy, width, height, overlap_ratio):
splitted = image.crop((0, y, to_w, y + to_h))
yield splitted
-
-def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
+# not using torchvision.transforms.CenterCrop because it doesn't allow float regions
+def center_crop(image: Image, w: int, h: int):
+ iw, ih = image.size
+ if ih / h < iw / w:
+ sw = w * ih / h
+ box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
+ else:
+ sh = h * iw / w
+ box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
+ return image.resize((w, h), Image.Resampling.LANCZOS, box)
+
+
+def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
+ iw, ih = image.size
+ err = lambda w, h: 1-(lambda x: x if x < 1 else 1/x)(iw/ih/(w/h))
+ wh = max(((w, h) for w in range(mindim, maxdim+1, 64) for h in range(mindim, maxdim+1, 64)
+ if minarea <= w * h <= maxarea and err(w, h) <= threshold),
+ key= lambda wh: (wh[0]*wh[1], -err(*wh))[::1 if objective=='Maximize area' else -1],
+ default=None
+ )
+ return wh and center_crop(image, *wh)
+
+
+def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
width = process_width
height = process_height
src = os.path.abspath(process_src)
@@ -124,6 +145,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
files = listfiles(src)
+ shared.state.job = "preprocess"
shared.state.textinfo = "Preprocessing..."
shared.state.job_count = len(files)
@@ -134,7 +156,8 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
params.process_caption_deepbooru = process_caption_deepbooru
params.preprocess_txt_action = preprocess_txt_action
- for index, imagefile in enumerate(tqdm.tqdm(files)):
+ pbar = tqdm.tqdm(files)
+ for index, imagefile in enumerate(pbar):
params.subindex = 0
filename = os.path.join(src, imagefile)
try:
@@ -142,6 +165,10 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
except Exception:
continue
+ description = f"Preprocessing [Image {index}/{len(files)}]"
+ pbar.set_description(description)
+ shared.state.textinfo = description
+
params.src = filename
existing_caption = None
@@ -171,7 +198,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
dnn_model_path = None
try:
- dnn_model_path = autocrop.download_and_cache_models(os.path.join(models_path, "opencv"))
+ dnn_model_path = autocrop.download_and_cache_models(os.path.join(paths.models_path, "opencv"))
except Exception as e:
print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)
@@ -188,6 +215,14 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
save_pic(focal, index, params, existing_caption=existing_caption)
process_default_resize = False
+ if process_multicrop:
+ cropped = multicrop_pic(img, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
+ if cropped is not None:
+ save_pic(cropped, index, params, existing_caption=existing_caption)
+ else:
+ print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)")
+ process_default_resize = False
+
if process_default_resize:
img = images.resize_image(1, img, width, height)
save_pic(img, index, params, existing_caption=existing_caption)
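
Editorial note: multicrop_pic scans every 64-aligned (w, h) pair inside the dimension and area bounds, discards pairs whose aspect-ratio error against the source exceeds the threshold, and picks either the largest area or the smallest error; center_crop then cuts the largest region of that aspect ratio using float box coordinates (hence not torchvision's CenterCrop). The selection rule reduced to a standalone sketch, with an assumed 1920x1080 source and illustrative bounds:

    iw, ih = 1920, 1080
    mindim, maxdim = 384, 768
    minarea, maxarea = 64 * 64, 640 * 640
    threshold = 0.2

    err = lambda w, h: 1 - (lambda x: x if x < 1 else 1 / x)(iw / ih / (w / h))

    candidates = [(w, h) for w in range(mindim, maxdim + 1, 64)
                         for h in range(mindim, maxdim + 1, 64)
                         if minarea <= w * h <= maxarea and err(w, h) <= threshold]

    maximize_area = max(candidates, key=lambda wh: (wh[0] * wh[1], -err(*wh)))
    minimize_error = max(candidates, key=lambda wh: (-err(*wh), wh[0] * wh[1]))
    print(maximize_area, minimize_error)   # (768, 512) (704, 384)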
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index f6112578..6cf00e65 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -1,31 +1,56 @@
import os
import sys
import traceback
+import inspect
+from collections import namedtuple
import torch
import tqdm
import html
import datetime
import csv
+import safetensors.torch
+import numpy as np
from PIL import Image, PngImagePlugin
+from torch.utils.tensorboard import SummaryWriter
-from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers
+from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint
import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler
-from modules.textual_inversion.image_embedding import (embedding_to_b64, embedding_from_b64,
- insert_image_data_embed, extract_image_data_embed,
- caption_image_overlay)
+from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay
+from modules.textual_inversion.logging import save_settings_to_file
+
+
+TextualInversionTemplate = namedtuple("TextualInversionTemplate", ["name", "path"])
+textual_inversion_templates = {}
+
+
+def list_textual_inversion_templates():
+ textual_inversion_templates.clear()
+
+ for root, dirs, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
+ for fn in fns:
+ path = os.path.join(root, fn)
+
+ textual_inversion_templates[fn] = TextualInversionTemplate(fn, path)
+
+ return textual_inversion_templates
+
class Embedding:
def __init__(self, vec, name, step=None):
self.vec = vec
self.name = name
self.step = step
+ self.shape = None
+ self.vectors = 0
self.cached_checksum = None
self.sd_checkpoint = None
self.sd_checkpoint_name = None
+ self.optimizer_state_dict = None
+ self.filename = None
def save(self, filename):
embedding_data = {
@@ -39,6 +64,13 @@ class Embedding:
torch.save(embedding_data, filename)
+ if shared.opts.save_optimizer_state and self.optimizer_state_dict is not None:
+ optimizer_saved_dict = {
+ 'hash': self.checksum(),
+ 'optimizer_state_dict': self.optimizer_state_dict,
+ }
+ torch.save(optimizer_saved_dict, filename + '.optim')
+
def checksum(self):
if self.cached_checksum is not None:
return self.cached_checksum
@@ -53,18 +85,43 @@ class Embedding:
return self.cached_checksum
+class DirWithTextualInversionEmbeddings:
+ def __init__(self, path):
+ self.path = path
+ self.mtime = None
+
+ def has_changed(self):
+ if not os.path.isdir(self.path):
+ return False
+
+ mt = os.path.getmtime(self.path)
+ if self.mtime is None or mt > self.mtime:
+ return True
+
+ def update(self):
+ if not os.path.isdir(self.path):
+ return
+
+ self.mtime = os.path.getmtime(self.path)
+
+
class EmbeddingDatabase:
- def __init__(self, embeddings_dir):
+ def __init__(self):
self.ids_lookup = {}
self.word_embeddings = {}
- self.dir_mtime = None
- self.embeddings_dir = embeddings_dir
+ self.skipped_embeddings = {}
+ self.expected_shape = -1
+ self.embedding_dirs = {}
- def register_embedding(self, embedding, model):
+ def add_embedding_dir(self, path):
+ self.embedding_dirs[path] = DirWithTextualInversionEmbeddings(path)
+
+ def clear_embedding_dirs(self):
+ self.embedding_dirs.clear()
+ def register_embedding(self, embedding, model):
self.word_embeddings[embedding.name] = embedding
- # TODO changing between clip and open clip changes tokenization, which will cause embeddings to stop working
ids = model.cond_stage_model.tokenize([embedding.name])[0]
first_id = ids[0]
@@ -75,70 +132,105 @@ class EmbeddingDatabase:
return embedding
- def load_textual_inversion_embeddings(self):
- mt = os.path.getmtime(self.embeddings_dir)
- if self.dir_mtime is not None and mt <= self.dir_mtime:
- return
-
- self.dir_mtime = mt
- self.ids_lookup.clear()
- self.word_embeddings.clear()
+ def get_expected_shape(self):
+ vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
+ return vec.shape[1]
- def process_file(path, filename):
- name = os.path.splitext(filename)[0]
+ def load_from_file(self, path, filename):
+ name, ext = os.path.splitext(filename)
+ ext = ext.upper()
- data = []
+ if ext in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
+ _, second_ext = os.path.splitext(name)
+ if second_ext.upper() == '.PREVIEW':
+ return
- if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
- embed_image = Image.open(path)
- if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
- data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
- name = data.get('name', name)
- else:
- data = extract_image_data_embed(embed_image)
- name = data.get('name', name)
- else:
- data = torch.load(path, map_location="cpu")
-
- # textual inversion embeddings
- if 'string_to_param' in data:
- param_dict = data['string_to_param']
- if hasattr(param_dict, '_parameters'):
- param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
- assert len(param_dict) == 1, 'embedding file has multiple terms in it'
- emb = next(iter(param_dict.items()))[1]
- # diffuser concepts
- elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
- assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
-
- emb = next(iter(data.values()))
- if len(emb.shape) == 1:
- emb = emb.unsqueeze(0)
+ embed_image = Image.open(path)
+ if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
+ data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
+ name = data.get('name', name)
else:
- raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
+ data = extract_image_data_embed(embed_image)
+ name = data.get('name', name)
+ elif ext in ['.BIN', '.PT']:
+ data = torch.load(path, map_location="cpu")
+ elif ext in ['.SAFETENSORS']:
+ data = safetensors.torch.load_file(path, device="cpu")
+ else:
+ return
- vec = emb.detach().to(devices.device, dtype=torch.float32)
- embedding = Embedding(vec, name)
- embedding.step = data.get('step', None)
- embedding.sd_checkpoint = data.get('sd_checkpoint', None)
- embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
+ # textual inversion embeddings
+ if 'string_to_param' in data:
+ param_dict = data['string_to_param']
+ if hasattr(param_dict, '_parameters'):
+ param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
+ assert len(param_dict) == 1, 'embedding file has multiple terms in it'
+ emb = next(iter(param_dict.items()))[1]
+ # diffuser concepts
+ elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
+ assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
+
+ emb = next(iter(data.values()))
+ if len(emb.shape) == 1:
+ emb = emb.unsqueeze(0)
+ else:
+ raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
+
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
+ embedding = Embedding(vec, name)
+ embedding.step = data.get('step', None)
+ embedding.sd_checkpoint = data.get('sd_checkpoint', None)
+ embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
+ embedding.vectors = vec.shape[0]
+ embedding.shape = vec.shape[-1]
+ embedding.filename = path
+
+ if self.expected_shape == -1 or self.expected_shape == embedding.shape:
self.register_embedding(embedding, shared.sd_model)
+ else:
+ self.skipped_embeddings[name] = embedding
- for fn in os.listdir(self.embeddings_dir):
- try:
- fullfn = os.path.join(self.embeddings_dir, fn)
+ def load_from_dir(self, embdir):
+ if not os.path.isdir(embdir.path):
+ return
+
+ for root, dirs, fns in os.walk(embdir.path, followlinks=True):
+ for fn in fns:
+ try:
+ fullfn = os.path.join(root, fn)
+
+ if os.stat(fullfn).st_size == 0:
+ continue
- if os.stat(fullfn).st_size == 0:
+ self.load_from_file(fullfn, fn)
+ except Exception:
+ print(f"Error loading embedding {fn}:", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
continue
- process_file(fullfn, fn)
- except Exception:
- print(f"Error loading embedding {fn}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
- continue
+ def load_textual_inversion_embeddings(self, force_reload=False):
+ if not force_reload:
+ need_reload = False
+ for path, embdir in self.embedding_dirs.items():
+ if embdir.has_changed():
+ need_reload = True
+ break
+
+ if not need_reload:
+ return
+
+ self.ids_lookup.clear()
+ self.word_embeddings.clear()
+ self.skipped_embeddings.clear()
+ self.expected_shape = self.get_expected_shape()
+
+ for path, embdir in self.embedding_dirs.items():
+ self.load_from_dir(embdir)
+ embdir.update()
- print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.")
- print("Embeddings:", ', '.join(self.word_embeddings.keys()))
+ print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
+ if len(self.skipped_embeddings) > 0:
+ print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}")
def find_embedding_at_position(self, tokens, offset):
token = tokens[offset]
@@ -160,11 +252,14 @@ def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
with devices.autocast():
cond_model([""]) # will send cond model to GPU if lowvram/medvram is active
- embedded = cond_model.encode_embedding_init_text(init_text, num_vectors_per_token)
+ #cond_model expects at least some text, so we provide '*' as backup.
+ embedded = cond_model.encode_embedding_init_text(init_text or '*', num_vectors_per_token)
vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
- for i in range(num_vectors_per_token):
- vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
+ #Only copy if we provided an init_text, otherwise keep vectors as zeros
+ if init_text:
+ for i in range(num_vectors_per_token):
+ vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
# Remove illegal characters from name.
name = "".join( x for x in name if (x.isalnum() or x in "._- "))
@@ -203,7 +298,32 @@ def write_loss(log_directory, filename, step, epoch_len, values):
**values,
})
-def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"):
+def tensorboard_setup(log_directory):
+ os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True)
+ return SummaryWriter(
+ log_dir=os.path.join(log_directory, "tensorboard"),
+ flush_secs=shared.opts.training_tensorboard_flush_every)
+
+def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_rate, epoch_num):
+ tensorboard_add_scaler(tensorboard_writer, "Loss/train", loss, global_step)
+ tensorboard_add_scaler(tensorboard_writer, f"Loss/train/epoch-{epoch_num}", loss, step)
+ tensorboard_add_scaler(tensorboard_writer, "Learn rate/train", learn_rate, global_step)
+ tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", learn_rate, step)
+
+def tensorboard_add_scaler(tensorboard_writer, tag, value, step):
+ tensorboard_writer.add_scalar(tag=tag,
+ scalar_value=value, global_step=step)
+
+def tensorboard_add_image(tensorboard_writer, tag, pil_image, step):
+ # Convert a pil image to a torch tensor
+ img_tensor = torch.as_tensor(np.array(pil_image, copy=True))
+ img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0],
+ len(pil_image.getbands()))
+ img_tensor = img_tensor.permute((2, 0, 1))
+
+ tensorboard_writer.add_image(tag, img_tensor, global_step=step)
+
+def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_model_every, create_image_every, log_directory, name="embedding"):
assert model_name, f"{name} not selected"
assert learn_rate, "Learning rate is empty or 0"
assert isinstance(batch_size, int), "Batch size must be integer"
@@ -213,23 +333,28 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
assert data_root, "Dataset directory is empty"
assert os.path.isdir(data_root), "Dataset directory doesn't exist"
assert os.listdir(data_root), "Dataset directory is empty"
- assert template_file, "Prompt template file is empty"
- assert os.path.isfile(template_file), "Prompt template file doesn't exist"
+ assert template_filename, "Prompt template file not selected"
+ assert template_file, f"Prompt template file {template_filename} not found"
+ assert os.path.isfile(template_file.path), f"Prompt template file {template_filename} doesn't exist"
assert steps, "Max steps is empty or 0"
assert isinstance(steps, int), "Max steps must be integer"
- assert steps > 0 , "Max steps must be positive"
+ assert steps > 0, "Max steps must be positive"
assert isinstance(save_model_every, int), "Save {name} must be integer"
- assert save_model_every >= 0 , "Save {name} must be positive or 0"
+ assert save_model_every >= 0, "Save {name} must be positive or 0"
assert isinstance(create_image_every, int), "Create image must be integer"
- assert create_image_every >= 0 , "Create image must be positive or 0"
+ assert create_image_every >= 0, "Create image must be positive or 0"
if save_model_every or create_image_every:
assert log_directory, "Log directory is empty"
-def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+
+def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
save_embedding_every = save_embedding_every or 0
create_image_every = create_image_every or 0
- validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
+ template_file = textual_inversion_templates.get(template_filename, None)
+ validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
+ template_file = template_file.path
+ shared.state.job = "train-embedding"
shared.state.textinfo = "Initializing textual inversion training..."
shared.state.job_count = steps
@@ -265,15 +390,26 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
if initial_step >= steps:
shared.state.textinfo = "Model has already been trained beyond specified max steps"
return embedding, filename
+
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
-
- # dataset loading may take a while, so input validations and early returns should be done before this
+ clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
+ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
+ None
+ if clip_grad:
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
+ # dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
old_parallel_processing_allowed = shared.parallel_processing_allowed
+
+ if shared.opts.training_enable_tensorboard:
+ tensorboard_writer = tensorboard_setup(log_directory)
pin_memory = shared.opts.pin_memory
- ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)
+
+ if shared.opts.save_training_settings_to_txt:
+ save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})
latent_sampling_method = ds.latent_sampling_method
@@ -285,6 +421,19 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
embedding.vec.requires_grad = True
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
+ if shared.opts.save_optimizer_state:
+ optimizer_state_dict = None
+ if os.path.exists(filename + '.optim'):
+ optimizer_saved_dict = torch.load(filename + '.optim', map_location='cpu')
+ if embedding.checksum() == optimizer_saved_dict.get('hash', None):
+ optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+
+ if optimizer_state_dict is not None:
+ optimizer.load_state_dict(optimizer_state_dict)
+ print("Loaded existing optimizer from checkpoint")
+ else:
+ print("No saved optimizer exists in checkpoint")
+
scaler = torch.cuda.amp.GradScaler()
batch_size = ds.batch_size
@@ -295,14 +444,18 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
loss_step = 0
_loss_step = 0 #internal
-
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
embedding_yet_to_be_embedded = False
+ is_training_inpainting_model = shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}
+ img_c = None
+
pbar = tqdm.tqdm(total=steps - initial_step)
try:
+ sd_hijack_checkpoint.add()
+
for i in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
@@ -318,14 +471,22 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
if shared.state.interrupted:
break
+ if clip_grad:
+ clip_grad_sched.step(embedding.step)
+
with devices.autocast():
- # c = stack_conds(batch.cond).to(devices.device)
- # mask = torch.tensor(batch.emb_index).to(devices.device, non_blocking=pin_memory)
- # print(mask)
- # c[:, 1:1+embedding.vec.shape[0]] = embedding.vec.to(devices.device, non_blocking=pin_memory)
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
c = shared.sd_model.cond_stage_model(batch.cond_text)
- loss = shared.sd_model(x, c)[0] / gradient_step
+
+ if is_training_inpainting_model:
+ if img_c is None:
+ img_c = processing.txt2img_image_conditioning(shared.sd_model, c, training_width, training_height)
+
+ cond = {"c_concat": [img_c], "c_crossattn": [c]}
+ else:
+ cond = c
+
+ loss = shared.sd_model(x, cond)[0] / gradient_step
del x
_loss_step += loss.item()
@@ -334,6 +495,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
# go back until we reach gradient accumulation steps
if (j + 1) % gradient_step != 0:
continue
+
+ if clip_grad:
+ clip_grad(embedding.vec, clip_grad_sched.learn_rate)
+
scaler.step(optimizer)
scaler.update()
embedding.step += 1
@@ -347,14 +512,13 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
epoch_num = embedding.step // steps_per_epoch
epoch_step = embedding.step % steps_per_epoch
- pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}")
+ description = f"Training textual inversion [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}] loss: {loss_step:.7f}"
+ pbar.set_description(description)
if embedding_dir is not None and steps_done % save_embedding_every == 0:
# Before saving, change name to match current checkpoint.
embedding_name_every = f'{embedding_name}-{steps_done}'
last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt')
- #if shared.opts.save_optimizer_state:
- #embedding.optimizer_state_dict = optimizer.state_dict()
- save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
+ save_embedding(embedding, optimizer, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
embedding_yet_to_be_embedded = True
write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, steps_per_epoch, {
@@ -399,10 +563,14 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
shared.sd_model.first_stage_model.to(devices.cpu)
if image is not None:
- shared.state.current_image = image
+ shared.state.assign_current_image(image)
+
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}"
+ if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
+ tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, embedding.step)
+
if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png')
@@ -420,7 +588,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
checkpoint = sd_models.select_checkpoint()
footer_left = checkpoint.model_name
- footer_mid = '[{}]'.format(checkpoint.hash)
+ footer_mid = '[{}]'.format(checkpoint.shorthash)
footer_right = '{}v {}s'.format(vectorSize, steps_done)
captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
@@ -444,7 +612,7 @@ Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
- save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True)
+ save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
except Exception:
print(traceback.format_exc(), file=sys.stderr)
pass
@@ -453,20 +621,23 @@ Last saved image: {html.escape(last_saved_image)}<br/>
pbar.close()
shared.sd_model.first_stage_model.to(devices.device)
shared.parallel_processing_allowed = old_parallel_processing_allowed
+ sd_hijack_checkpoint.remove()
return embedding, filename
-def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True):
+
+def save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True):
old_embedding_name = embedding.name
old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None
old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None
old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None
try:
- embedding.sd_checkpoint = checkpoint.hash
+ embedding.sd_checkpoint = checkpoint.shorthash
embedding.sd_checkpoint_name = checkpoint.model_name
if remove_cached_checksum:
embedding.cached_checksum = None
embedding.name = embedding_name
+ embedding.optimizer_state_dict = optimizer.state_dict()
embedding.save(filename)
except:
embedding.sd_checkpoint = old_sd_checkpoint
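
Editorial note: the training loop above now persists the AdamW optimizer state next to each saved embedding as <name>.pt.optim, keyed by the embedding checksum, so interrupted runs resume with momentum intact; the state is only restored when the stored hash matches. A minimal sketch of that companion-file round trip; the path, hash, and toy parameter are illustrative assumptions:

    import torch

    filename = "example-embedding.pt"
    vec = torch.zeros((2, 768), requires_grad=True)
    optimizer = torch.optim.AdamW([vec], lr=5e-3)

    # ... training steps would populate optimizer.state ...

    saved = {"hash": "deadbeef", "optimizer_state_dict": optimizer.state_dict()}
    torch.save(saved, filename + ".optim")

    loaded = torch.load(filename + ".optim", map_location="cpu")
    if loaded.get("hash") == "deadbeef":                    # embedding.checksum() in the real code
        optimizer.load_state_dict(loaded["optimizer_state_dict"])
        print("Loaded existing optimizer from checkpoint")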
diff --git a/modules/timer.py b/modules/timer.py
new file mode 100644
index 00000000..57a4f17a
--- /dev/null
+++ b/modules/timer.py
@@ -0,0 +1,35 @@
+import time
+
+
+class Timer:
+ def __init__(self):
+ self.start = time.time()
+ self.records = {}
+ self.total = 0
+
+ def elapsed(self):
+ end = time.time()
+ res = end - self.start
+ self.start = end
+ return res
+
+ def record(self, category, extra_time=0):
+ e = self.elapsed()
+ if category not in self.records:
+ self.records[category] = 0
+
+ self.records[category] += e + extra_time
+ self.total += e + extra_time
+
+ def summary(self):
+ res = f"{self.total:.1f}s"
+
+ additions = [x for x in self.records.items() if x[1] >= 0.1]
+ if not additions:
+ return res
+
+ res += " ("
+ res += ", ".join([f"{category}: {time_taken:.1f}s" for category, time_taken in additions])
+ res += ")"
+
+ return res
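
Editorial note: Timer accumulates wall-clock time per category between record() calls, and summary() reports the total plus every category that took at least 0.1 s. A short usage sketch; the category names and sleeps are illustrative:

    import time
    from modules.timer import Timer

    timer = Timer()

    time.sleep(0.12)
    timer.record("load weights")

    time.sleep(0.05)
    timer.record("apply hijack")        # under 0.1 s, so omitted from the breakdown

    print(timer.summary())              # e.g. "0.2s (load weights: 0.1s)"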
diff --git a/modules/txt2img.py b/modules/txt2img.py
index c8f81176..e945fd69 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -8,13 +8,13 @@ import modules.processing as processing
from modules.ui import plaintext_to_html
-def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, *args):
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, *args):
p = StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids,
prompt=prompt,
- styles=[prompt_style, prompt_style2],
+ styles=prompt_styles,
negative_prompt=negative_prompt,
seed=seed,
subseed=subseed,
@@ -33,8 +33,11 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
tiling=tiling,
enable_hr=enable_hr,
denoising_strength=denoising_strength if enable_hr else None,
- firstphase_width=firstphase_width if enable_hr else None,
- firstphase_height=firstphase_height if enable_hr else None,
+ hr_scale=hr_scale,
+ hr_upscaler=hr_upscaler,
+ hr_second_pass_steps=hr_second_pass_steps,
+ hr_resize_x=hr_resize_x,
+ hr_resize_y=hr_resize_y,
)
p.scripts = modules.scripts.scripts_txt2img
@@ -59,4 +62,4 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
if opts.do_not_show_images:
processed.images = []
- return processed.images, generation_info_js, plaintext_to_html(processed.info)
+ return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)
diff --git a/modules/ui.py b/modules/ui.py
index 57ee0465..9f4cfda1 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -5,12 +5,12 @@ import mimetypes
import os
import platform
import random
-import subprocess as sp
import sys
import tempfile
import time
import traceback
from functools import partial, reduce
+import warnings
import gradio as gr
import gradio.routes
@@ -19,8 +19,9 @@ import numpy as np
from PIL import Image, PngImagePlugin
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru
-from modules.paths import script_path
+from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing
+from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
+from modules.paths import script_path, data_path
from modules.shared import opts, cmd_opts, restricted_opts
@@ -36,9 +37,12 @@ from modules import prompt_parser
from modules.images import save_image
from modules.sd_hijack import model_hijack
from modules.sd_samplers import samplers, samplers_for_img2img
-import modules.textual_inversion.ui
+from modules.textual_inversion import textual_inversion
import modules.hypernetworks.ui
from modules.generation_parameters_copypaste import image_from_url_text
+import modules.extras
+
+warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning)
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
@@ -71,6 +75,7 @@ css_hide_progressbar = """
.wrap .m-12::before { content:"Loading..." }
.wrap .z-20 svg { display:none!important; }
.wrap .z-20::before { content:"Loading..." }
+.wrap.cover-bg .z-20::before { content:"" }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
.meta-text-center { display:none!important; }
@@ -80,167 +85,24 @@ css_hide_progressbar = """
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
-art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
-folder_symbol = '\U0001f4c2' # 📂
refresh_symbol = '\U0001f504' # 🔄
save_style_symbol = '\U0001f4be' # 💾
apply_style_symbol = '\U0001f4cb' # 📋
clear_prompt_symbol = '\U0001F5D1' # 🗑️
+extra_networks_symbol = '\U0001F3B4' # 🎴
+switch_values_symbol = '\U000021C5' # ⇅
def plaintext_to_html(text):
- text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
- return text
+ return ui_common.plaintext_to_html(text)
+
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
-def save_files(js_data, images, do_make_zip, index):
- import csv
- filenames = []
- fullfns = []
-
- #quick dictionary to class object conversion. Its necessary due apply_filename_pattern requiring it
- class MyObject:
- def __init__(self, d=None):
- if d is not None:
- for key, value in d.items():
- setattr(self, key, value)
-
- data = json.loads(js_data)
-
- p = MyObject(data)
- path = opts.outdir_save
- save_to_dirs = opts.use_save_to_dirs_for_ui
- extension: str = opts.samples_format
- start_index = 0
-
- if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
-
- images = [images[index]]
- start_index = index
-
- os.makedirs(opts.outdir_save, exist_ok=True)
-
- with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
- at_start = file.tell() == 0
- writer = csv.writer(file)
- if at_start:
- writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
-
- for image_index, filedata in enumerate(images, start_index):
- image = image_from_url_text(filedata)
-
- is_grid = image_index < p.index_of_first_image
- i = 0 if is_grid else (image_index - p.index_of_first_image)
-
- fullfn, txt_fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
-
- filename = os.path.relpath(fullfn, path)
- filenames.append(filename)
- fullfns.append(fullfn)
- if txt_fullfn:
- filenames.append(os.path.basename(txt_fullfn))
- fullfns.append(txt_fullfn)
-
- writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler_name"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
-
- # Make Zip
- if do_make_zip:
- zip_filepath = os.path.join(path, "images.zip")
-
- from zipfile import ZipFile
- with ZipFile(zip_filepath, "w") as zip_file:
- for i in range(len(fullfns)):
- with open(fullfns[i], mode="rb") as f:
- zip_file.writestr(filenames[i], f.read())
- fullfns.insert(0, zip_filepath)
-
- return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}")
-
-
-
-
-def calc_time_left(progress, threshold, label, force_display):
- if progress == 0:
- return ""
- else:
- time_since_start = time.time() - shared.state.time_start
- eta = (time_since_start/progress)
- eta_relative = eta-time_since_start
- if (eta_relative > threshold and progress > 0.02) or force_display:
- if eta_relative > 3600:
- return label + time.strftime('%H:%M:%S', time.gmtime(eta_relative))
- elif eta_relative > 60:
- return label + time.strftime('%M:%S', time.gmtime(eta_relative))
- else:
- return label + time.strftime('%Ss', time.gmtime(eta_relative))
- else:
- return ""
-
-
-def check_progress_call(id_part):
- if shared.state.job_count == 0:
- return "", gr_show(False), gr_show(False), gr_show(False)
-
- progress = 0
-
- if shared.state.job_count > 0:
- progress += shared.state.job_no / shared.state.job_count
- if shared.state.sampling_steps > 0:
- progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
-
- time_left = calc_time_left( progress, 1, " ETA: ", shared.state.time_left_force_display )
- if time_left != "":
- shared.state.time_left_force_display = True
-
- progress = min(progress, 1)
-
- progressbar = ""
- if opts.show_progressbar:
- progressbar = f"""<div class='progressDiv'><div class='progress' style="overflow:visible;width:{progress * 100}%;white-space:nowrap;">{"&nbsp;" * 2 + str(int(progress*100))+"%" + time_left if progress > 0.01 else ""}</div></div>"""
-
- image = gr_show(False)
- preview_visibility = gr_show(False)
-
- if opts.show_progress_every_n_steps != 0:
- shared.state.set_current_image()
- image = shared.state.current_image
-
- if image is None:
- image = gr.update(value=None)
- else:
- preview_visibility = gr_show(True)
-
- if shared.state.textinfo is not None:
- textinfo_result = gr.HTML.update(value=shared.state.textinfo, visible=True)
- else:
- textinfo_result = gr_show(False)
-
- return f"<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>", preview_visibility, image, textinfo_result
-
-
-def check_progress_call_initial(id_part):
- shared.state.job_count = -1
- shared.state.current_latent = None
- shared.state.current_image = None
- shared.state.textinfo = None
- shared.state.time_start = time.time()
- shared.state.time_left_force_display = False
-
- return check_progress_call(id_part)
-
-
-def roll_artist(prompt):
- allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
- artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
-
- return prompt + ", " + artist.name if prompt != '' else artist.name
-
-
def visit(x, func, path=""):
if hasattr(x, 'children'):
for c in x.children:
@@ -259,56 +121,88 @@ def add_style(name: str, prompt: str, negative_prompt: str):
# reserialize all styles every time we save them
shared.prompt_styles.save_styles(shared.styles_filename)
- return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
+ return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)]
-def apply_styles(prompt, prompt_neg, style1_name, style2_name):
- prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
- prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, [style1_name, style2_name])
+def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y):
+ from modules import processing, devices
+
+ if not enable:
+ return ""
- return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value="None"), gr.Dropdown.update(value="None")]
+ p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y)
+
+ with devices.autocast():
+ p.init([""], [0], [0])
+
+ return f"resize: from <span class='resolution'>{p.width}x{p.height}</span> to <span class='resolution'>{p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}</span>"
+
+
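The new calc_resolution_hires helper constructs a throwaway StableDiffusionProcessingTxt2Img only to learn what resolution the hires-fix pass would produce. A minimal standalone sketch of the arithmetic this preview relies on (illustrative only, assuming the usual semantics: a pure scale factor when both resize fields are 0, otherwise explicit targets with a missing side derived from the aspect ratio):

    # Illustrative sketch; the real calculation lives in
    # StableDiffusionProcessingTxt2Img.init() and may round differently.
    def hires_target(width, height, hr_scale, hr_resize_x, hr_resize_y):
        if hr_resize_x == 0 and hr_resize_y == 0:
            return int(width * hr_scale), int(height * hr_scale)
        if hr_resize_y == 0:
            return hr_resize_x, int(hr_resize_x * height / width)
        if hr_resize_x == 0:
            return int(hr_resize_y * width / height), hr_resize_y
        return hr_resize_x, hr_resize_y

    print(hires_target(512, 512, 2.0, 0, 0))     # (1024, 1024)
    print(hires_target(512, 768, 2.0, 1024, 0))  # (1024, 1536)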
+def apply_styles(prompt, prompt_neg, styles):
+ prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
+ prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles)
+
+ return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])]
+
+
+def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles):
+ if mode in {0, 1, 3, 4}:
+ return [interrogation_function(ii_singles[mode]), None]
+ elif mode == 2:
+ return [interrogation_function(ii_singles[mode]["image"]), None]
+ elif mode == 5:
+ assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
+ images = shared.listfiles(ii_input_dir)
+ print(f"Will process {len(images)} images.")
+ if ii_output_dir != "":
+ os.makedirs(ii_output_dir, exist_ok=True)
+ else:
+ ii_output_dir = ii_input_dir
+
+ for image in images:
+ img = Image.open(image)
+ filename = os.path.basename(image)
+ left, _ = os.path.splitext(filename)
+ print(interrogation_function(img), file=open(os.path.join(ii_output_dir, left + ".txt"), 'a'))
+
+ return [gr.update(), None]
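In batch mode (mode 5) the interrogator writes one .txt caption per input image, falling back to the input directory when no output directory is given. A self-contained sketch of that caption loop, with file handling wrapped in context managers instead of the inline open() used above; caption_directory and caption_fn are illustrative names, not part of the patch:

    import os
    from PIL import Image

    def caption_directory(input_dir, output_dir, caption_fn):
        # create the output directory if needed, then caption every image in input_dir
        os.makedirs(output_dir, exist_ok=True)
        for name in os.listdir(input_dir):
            stem, _ = os.path.splitext(name)
            with Image.open(os.path.join(input_dir, name)) as img, \
                 open(os.path.join(output_dir, stem + ".txt"), "a") as out:
                print(caption_fn(img), file=out)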
def interrogate(image):
prompt = shared.interrogator.interrogate(image.convert("RGB"))
-
- return gr_show(True) if prompt is None else prompt
+ return gr.update() if prompt is None else prompt
def interrogate_deepbooru(image):
prompt = deepbooru.model.tag(image)
- return gr_show(True) if prompt is None else prompt
+ return gr.update() if prompt is None else prompt
-def create_seed_inputs():
- with gr.Row():
- with gr.Box():
- with gr.Row(elem_id='seed_row'):
- seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
- seed.style(container=False)
- random_seed = gr.Button(random_symbol, elem_id='random_seed')
- reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
+def create_seed_inputs(target_interface):
+ with FormRow(elem_id=target_interface + '_seed_row'):
+ seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed')
+ seed.style(container=False)
+ random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed')
+ reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed')
- with gr.Box(elem_id='subseed_show_box'):
- seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
+ with gr.Group(elem_id=target_interface + '_subseed_show_box'):
+ seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
- with gr.Row(visible=False) as seed_extra_row_1:
+ with FormRow(visible=False, elem_id=target_interface + '_subseed_row') as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
- with gr.Box():
- with gr.Row(elem_id='subseed_row'):
- subseed = gr.Number(label='Variation seed', value=-1)
- subseed.style(container=False)
- random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
- reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
- subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
-
- with gr.Row(visible=False) as seed_extra_row_2:
+ subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed')
+ subseed.style(container=False)
+ random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed')
+ reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed')
+ subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength')
+
+ with FormRow(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
- seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0)
- seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0)
+ seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w')
+ seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h')
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
@@ -368,6 +262,8 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
def update_token_counter(text, steps):
try:
+ text, _ = extra_networks.parse_prompt(text)
+
_, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
@@ -378,45 +274,24 @@ def update_token_counter(text, steps):
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step, prompt_text in flat_prompts]
- tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
- style_class = ' class="red"' if (token_count > max_length) else ""
- return f"<span {style_class}>{token_count}/{max_length}</span>"
+ token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0])
+ return f"<span class='gr-box gr-text-input'>{token_count}/{max_length}</span>"
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
- with gr.Row(elem_id="toprow"):
- with gr.Column(scale=6):
+ with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
+ with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6):
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
- prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=2,
- placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)"
- )
+ prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)")
with gr.Row():
with gr.Column(scale=80):
with gr.Row():
- negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=2,
- placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)"
- )
-
- with gr.Column(scale=1, elem_id="roll_col"):
- roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
- paste = gr.Button(value=paste_symbol, elem_id="paste")
- save_style = gr.Button(value=save_style_symbol, elem_id="style_create")
- prompt_style_apply = gr.Button(value=apply_style_symbol, elem_id="style_apply")
- clear_prompt_button = gr.Button(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt")
- token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
- token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
-
- clear_prompt_button.click(
- fn=lambda *x: x,
- _js="confirm_clear_prompt",
- inputs=[prompt, negative_prompt],
- outputs=[prompt, negative_prompt],
- )
+ negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=2, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)")
button_interrogate = None
button_deepbooru = None
@@ -425,10 +300,10 @@ def create_toprow(is_img2img):
button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
- with gr.Column(scale=1):
- with gr.Row():
- skip = gr.Button('Skip', elem_id=f"{id_part}_skip")
+ with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"):
+ with gr.Row(elem_id=f"{id_part}_generate_box"):
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
+ skip = gr.Button('Skip', elem_id=f"{id_part}_skip")
submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
skip.click(
@@ -443,37 +318,34 @@ def create_toprow(is_img2img):
outputs=[],
)
- with gr.Row():
- with gr.Column(scale=1, elem_id="style_pos_col"):
- prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())))
- prompt_style.save_to_config = True
+ with gr.Row(elem_id=f"{id_part}_tools"):
+ paste = ToolButton(value=paste_symbol, elem_id="paste")
+ clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt")
+ extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks")
+ prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply")
+ save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create")
+
+ token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
+ token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
+ negative_token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_negative_token_counter")
+ negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button")
+
+ clear_prompt_button.click(
+ fn=lambda *x: x,
+ _js="confirm_clear_prompt",
+ inputs=[prompt, negative_prompt],
+ outputs=[prompt, negative_prompt],
+ )
- with gr.Column(scale=1, elem_id="style_neg_col"):
- prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())))
- prompt_style2.save_to_config = True
+ with gr.Row(elem_id=f"{id_part}_styles_row"):
+ prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True)
+ create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles")
- return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button
+ return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button
-def setup_progressbar(progressbar, preview, id_part, textinfo=None):
- if textinfo is None:
- textinfo = gr.HTML(visible=False)
-
- check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
- check_progress.click(
- fn=lambda: check_progress_call(id_part),
- show_progress=False,
- inputs=[],
- outputs=[progressbar, preview, preview, textinfo],
- )
-
- check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
- check_progress_initial.click(
- fn=lambda: check_progress_call_initial(id_part),
- show_progress=False,
- inputs=[],
- outputs=[progressbar, preview, preview, textinfo],
- )
+def setup_progressbar(*args, **kwargs):
+ pass
def apply_setting(key, value):
@@ -500,27 +372,13 @@ def apply_setting(key, value):
return
valtype = type(opts.data_labels[key].default)
- oldval = opts.data[key]
+ oldval = opts.data.get(key, None)
opts.data[key] = valtype(value) if valtype != type(None) else value
if oldval != value and opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
opts.save(shared.config_filename)
- return value
-
-
-def update_generation_info(args):
- generation_info, html_info, img_index = args
- try:
- generation_info = json.loads(generation_info)
- if img_index < 0 or img_index >= len(generation_info["infotexts"]):
- return html_info
- return plaintext_to_html(generation_info["infotexts"][img_index])
- except Exception:
- pass
- # if the json parse or anything else fails, just return the old html_info
- return html_info
-
+ return getattr(opts, key)
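Two small behavioural changes in apply_setting: the previous value is read with dict.get so a setting that has never been stored does not raise, and the function returns the value as the options object reports it rather than the raw input. A tiny sketch of the first point, using a plain dict in place of opts.data (key names are just examples):

    data = {"sd_model_checkpoint": "model.ckpt"}

    # data["CLIP_stop_at_last_layers"] would raise KeyError for an unset option;
    # .get() yields None instead, which is all the oldval comparison needs.
    oldval = data.get("CLIP_stop_at_last_layers", None)
    print(oldval)  # None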
def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
def refresh():
@@ -532,7 +390,7 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele
return gr.update(**(args or {}))
- refresh_button = gr.Button(value=refresh_symbol, elem_id=elem_id)
+ refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id)
refresh_button.click(
fn=refresh,
inputs=[],
@@ -542,89 +400,37 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele
def create_output_panel(tabname, outdir):
- def open_folder(f):
- if not os.path.exists(f):
- print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.')
- return
- elif not os.path.isdir(f):
- print(f"""
-WARNING
-An open_folder request was made with an argument that is not a folder.
-This could be an error or a malicious attempt to run code on your computer.
-Requested path was: {f}
-""", file=sys.stderr)
- return
-
- if not shared.cmd_opts.hide_ui_dir_config:
- path = os.path.normpath(f)
- if platform.system() == "Windows":
- os.startfile(path)
- elif platform.system() == "Darwin":
- sp.Popen(["open", path])
- else:
- sp.Popen(["xdg-open", path])
+ return ui_common.create_output_panel(tabname, outdir)
- with gr.Column(variant='panel'):
- with gr.Group():
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(grid=4)
- generation_info = None
- with gr.Column():
- with gr.Row():
- if tabname != "extras":
- save = gr.Button('Save', elem_id=f'save_{tabname}')
+def create_sampler_and_steps_selection(choices, tabname):
+ if opts.samplers_in_dropdown:
+ with FormRow(elem_id=f"sampler_selection_{tabname}"):
+ sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
+ steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
+ else:
+ with FormGroup(elem_id=f"sampler_selection_{tabname}"):
+ steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
+ sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
- buttons = parameters_copypaste.create_buttons(["img2img", "inpaint", "extras"])
- button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
- open_folder_button = gr.Button(folder_symbol, elem_id=button_id)
+ return steps, sampler_index
- open_folder_button.click(
- fn=lambda: open_folder(opts.outdir_samples or outdir),
- inputs=[],
- outputs=[],
- )
- if tabname != "extras":
- with gr.Row():
- do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
+def ordered_ui_categories():
+ user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder.split(","))}
- with gr.Row():
- download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
-
- with gr.Group():
- html_info = gr.HTML()
- generation_info = gr.Textbox(visible=False)
- if tabname == 'txt2img' or tabname == 'img2img':
- generation_info_button = gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button")
- generation_info_button.click(
- fn=update_generation_info,
- _js="(x, y) => [x, y, selected_gallery_index()]",
- inputs=[generation_info, html_info],
- outputs=[html_info],
- preprocess=False
- )
-
- save.click(
- fn=wrap_gradio_call(save_files),
- _js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
- inputs=[
- generation_info,
- result_gallery,
- do_make_zip,
- html_info,
- ],
- outputs=[
- download_files,
- html_info,
- html_info,
- html_info,
- ]
- )
- else:
- html_info_x = gr.HTML()
- html_info = gr.HTML()
- parameters_copypaste.bind_buttons(buttons, result_gallery, "txt2img" if tabname == "txt2img" else None)
- return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info
+ for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
+ yield category
+
+
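ordered_ui_categories interleaves the user's ui_reorder list with the default order: a category listed at position i gets sort key 2*i + 1, while everything else keeps 2 * (its default index), so user-listed entries slot in between the default positions. A small self-contained sketch of that key function (category names are examples):

    defaults = ["sampler", "dimensions", "cfg", "seed", "checkboxes", "hires_fix", "batch", "scripts"]
    ui_reorder = "seed, cfg"  # example user setting

    user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(ui_reorder.split(","))}
    ordered = [c for _, c in sorted(enumerate(defaults), key=lambda x: user_order.get(x[1], x[0] * 2))]
    print(ordered)
    # ['sampler', 'seed', 'dimensions', 'cfg', 'checkboxes', 'hires_fix', 'batch', 'scripts']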
+def get_value_for_setting(key):
+ value = getattr(opts, key)
+
+ info = opts.data_labels[key]
+ args = info.component_args() if callable(info.component_args) else info.component_args or {}
+ args = {k: v for k, v in args.items() if k not in {'precision'}}
+
+ return gr.update(value=value, **args)
def create_ui():
@@ -639,67 +445,98 @@ def create_ui():
modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False)
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
- txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _,txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False)
+ txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
- txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="bytes", visible=False)
-
-
-
+ txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False)
- with gr.Row(elem_id='txt2img_progress_row'):
- with gr.Column(scale=1):
- pass
-
- with gr.Column(scale=1):
- progressbar = gr.HTML(elem_id="txt2img_progressbar")
- txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
- setup_progressbar(progressbar, txt2img_preview, 'txt2img')
+ with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks:
+ from modules import ui_extra_networks
+ extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img')
with gr.Row().style(equal_height=False):
- with gr.Column(variant='panel', elem_id="txt2img_settings"):
- steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
- sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index")
-
- with gr.Group():
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512)
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512)
-
- with gr.Row():
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
- tiling = gr.Checkbox(label='Tiling', value=False)
- enable_hr = gr.Checkbox(label='Highres. fix', value=False)
-
- with gr.Row(visible=False) as hr_options:
- firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0)
- firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0)
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
-
- with gr.Row(equal_height=True):
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
-
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
-
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
-
- with gr.Group():
- custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
+ with gr.Column(variant='compact', elem_id="txt2img_settings"):
+ for category in ordered_ui_categories():
+ if category == "sampler":
+ steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")
+
+ elif category == "dimensions":
+ with FormRow():
+ with gr.Column(elem_id="txt2img_column_size", scale=4):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
+
+ if opts.dimensions_and_batch_together:
+ res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn")
+ with gr.Column(elem_id="txt2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
+
+ elif category == "cfg":
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")
+
+ elif category == "seed":
+ seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img')
+
+ elif category == "checkboxes":
+ with FormRow(elem_id="txt2img_checkboxes", variant="compact"):
+ restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
+ tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
+ enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
+ hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
+
+ elif category == "hires_fix":
+ with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options:
+ with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"):
+ hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+ hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
+
+ with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"):
+ hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
+ hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
+ hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
+
+ elif category == "batch":
+ if not opts.dimensions_and_batch_together:
+ with FormRow(elem_id="txt2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
+
+ elif category == "scripts":
+ with FormGroup(elem_id="txt2img_script_container"):
+ custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
+
+ hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y]
+ for input in hr_resolution_preview_inputs:
+ input.change(
+ fn=calc_resolution_hires,
+ inputs=hr_resolution_preview_inputs,
+ outputs=[hr_final_resolution],
+ show_progress=False,
+ )
+ input.change(
+ None,
+ _js="onCalcResolutionHires",
+ inputs=hr_resolution_preview_inputs,
+ outputs=[],
+ show_progress=False,
+ )
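Every component that can affect the hires-fix output feeds the same pair of change handlers: one recomputes the resolution preview in Python, the other runs a JavaScript hook. A minimal, self-contained gradio sketch of that fan-in pattern (component names and the preview function are illustrative, not the webui's):

    import gradio as gr

    def preview(width, height, scale):
        return f"resize to {int(width * scale)}x{int(height * scale)}"

    with gr.Blocks() as demo:
        width = gr.Slider(64, 2048, value=512, step=8, label="Width")
        height = gr.Slider(64, 2048, value=512, step=8, label="Height")
        scale = gr.Slider(1.0, 4.0, value=2.0, step=0.05, label="Upscale by")
        out = gr.HTML()

        # every input change re-runs the same preview function
        for component in (width, height, scale):
            component.change(preview, inputs=[width, height, scale], outputs=[out], show_progress=False)

    # demo.launch()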
- txt2img_gallery, generation_info, html_info = create_output_panel("txt2img", opts.outdir_txt2img_samples)
+ txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
- fn=wrap_gradio_gpu_call(modules.txt2img.txt2img),
+ fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']),
_js="submit",
inputs=[
+ dummy_component,
txt2img_prompt,
txt2img_negative_prompt,
- txt2img_prompt_style,
- txt2img_prompt_style2,
+ txt2img_prompt_styles,
steps,
sampler_index,
restore_faces,
@@ -713,14 +550,18 @@ def create_ui():
width,
enable_hr,
denoising_strength,
- firstphase_width,
- firstphase_height,
+ hr_scale,
+ hr_upscaler,
+ hr_second_pass_steps,
+ hr_resize_x,
+ hr_resize_y,
] + custom_inputs,
outputs=[
txt2img_gallery,
generation_info,
- html_info
+ html_info,
+ html_log,
],
show_progress=False,
)
@@ -728,6 +569,8 @@ def create_ui():
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
+ res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height])
+
txt_prompt_img.change(
fn=modules.images.image_data,
inputs=[
@@ -743,17 +586,7 @@ def create_ui():
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
- )
-
- roll.click(
- fn=roll_artist,
- _js="update_txt2img_tokens",
- inputs=[
- txt2img_prompt,
- ],
- outputs=[
- txt2img_prompt,
- ]
+ show_progress=False,
)
txt2img_paste_fields = [
@@ -774,8 +607,11 @@ def create_ui():
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
- (firstphase_width, "First pass size-1"),
- (firstphase_height, "First pass size-2"),
+ (hr_scale, "Hires upscale"),
+ (hr_upscaler, "Hires upscaler"),
+ (hr_second_pass_steps, "Hires steps"),
+ (hr_resize_x, "Hires resize-1"),
+ (hr_resize_y, "Hires resize-2"),
*modules.scripts.scripts_txt2img.infotext_fields
]
parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields)
@@ -792,98 +628,174 @@ def create_ui():
]
token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter])
+ negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter])
+
+ ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery)
modules.scripts.scripts_current = modules.scripts.scripts_img2img
modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True)
with gr.Blocks(analytics_enabled=False) as img2img_interface:
- img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste,token_counter, token_button = create_toprow(is_img2img=True)
+ img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True)
+ img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False)
- with gr.Row(elem_id='img2img_progress_row'):
- img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="bytes", visible=False)
+ with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks:
+ from modules import ui_extra_networks
+ extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img')
- with gr.Column(scale=1):
- pass
-
- with gr.Column(scale=1):
- progressbar = gr.HTML(elem_id="img2img_progressbar")
- img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
- setup_progressbar(progressbar, img2img_preview, 'img2img')
+ with FormRow().style(equal_height=False):
+ with gr.Column(variant='compact', elem_id="img2img_settings"):
+ copy_image_buttons = []
+ copy_image_destinations = {}
- with gr.Row().style(equal_height=False):
- with gr.Column(variant='panel', elem_id="img2img_settings"):
+ def add_copy_image_controls(tab_name, elem):
+ with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"):
+ gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}")
- with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
- with gr.TabItem('img2img', id='img2img'):
- init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool, image_mode="RGBA").style(height=480)
+ for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']):
+ if name == tab_name:
+ gr.Button(title, interactive=False)
+ copy_image_destinations[name] = elem
+ continue
- with gr.TabItem('Inpaint', id='inpaint'):
- init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_inpaint_tool, image_mode="RGBA").style(height=480)
- init_img_with_mask_orig = gr.State(None)
+ button = gr.Button(title)
+ copy_image_buttons.append((button, name, elem))
- use_color_sketch = cmd_opts.gradio_inpaint_tool == "color-sketch"
- if use_color_sketch:
- def update_orig(image, state):
- if image is not None:
- same_size = state is not None and state.size == image.size
- has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
- edited = same_size and has_exact_match
- return image if not edited or state is None else state
+ with gr.Tabs(elem_id="mode_img2img"):
+ with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
+ init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=480)
+ add_copy_image_controls('img2img', init_img)
- init_img_with_mask.change(update_orig, [init_img_with_mask, init_img_with_mask_orig], init_img_with_mask_orig)
+ with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
+ sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=480)
+ add_copy_image_controls('sketch', sketch)
- init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
- init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
+ with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
+ init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=480)
+ add_copy_image_controls('inpaint', init_img_with_mask)
- with gr.Row():
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
- mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch)
+ with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
+ inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=480)
+ inpaint_color_sketch_orig = gr.State(None)
+ add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
- with gr.Row():
- mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
- inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
+ def update_orig(image, state):
+ if image is not None:
+ same_size = state is not None and state.size == image.size
+ has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
+ edited = same_size and has_exact_match
+ return image if not edited or state is None else state
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index")
+ inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig)
- with gr.Row():
- inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
- inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
+ with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
+ init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
+ init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask")
- with gr.TabItem('Batch img2img', id='batch'):
+ with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
- gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>")
- img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
- img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
-
- with gr.Row():
- resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
-
- steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
- sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
-
- with gr.Group():
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
-
- with gr.Row():
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
- tiling = gr.Checkbox(label='Tiling', value=False)
-
- with gr.Row():
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
+ gr.HTML(
+ f"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
+ f"<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+ f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
+ f"{hidden}</p>"
+ )
+ img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
+ img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
+ img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
- with gr.Group():
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
+ def copy_image(img):
+ if isinstance(img, dict) and 'image' in img:
+ return img['image']
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
+ return img
- with gr.Group():
- custom_inputs = modules.scripts.scripts_img2img.setup_ui()
+ for button, name, elem in copy_image_buttons:
+ button.click(
+ fn=copy_image,
+ inputs=[elem],
+ outputs=[copy_image_destinations[name]],
+ )
+ button.click(
+ fn=lambda: None,
+ _js="switch_to_"+name.replace(" ", "_"),
+ inputs=[],
+ outputs=[],
+ )
- img2img_gallery, generation_info, html_info = create_output_panel("img2img", opts.outdir_img2img_samples)
+ with FormRow():
+ resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
+
+ for category in ordered_ui_categories():
+ if category == "sampler":
+ steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img")
+
+ elif category == "dimensions":
+ with FormRow():
+ with gr.Column(elem_id="img2img_column_size", scale=4):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
+
+ if opts.dimensions_and_batch_together:
+ res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
+ with gr.Column(elem_id="img2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
+
+ elif category == "cfg":
+ with FormGroup():
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
+
+ elif category == "seed":
+ seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
+
+ elif category == "checkboxes":
+ with FormRow(elem_id="img2img_checkboxes", variant="compact"):
+ restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
+ tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
+
+ elif category == "batch":
+ if not opts.dimensions_and_batch_together:
+ with FormRow(elem_id="img2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
+
+ elif category == "scripts":
+ with FormGroup(elem_id="img2img_script_container"):
+ custom_inputs = modules.scripts.scripts_img2img.setup_ui()
+
+ elif category == "inpaint":
+ with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls:
+ with FormRow():
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur")
+ mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha")
+
+ with FormRow():
+ inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode")
+
+ with FormRow():
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill")
+
+ with FormRow():
+ with gr.Column():
+ inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res")
+
+ with gr.Column(scale=4):
+ inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding")
+
+ def select_img2img_tab(tab):
+ return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3),
+
+ for i, elem in enumerate([tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]):
+ elem.select(
+ fn=lambda tab=i: select_img2img_tab(tab),
+ inputs=[],
+ outputs=[inpaint_controls, mask_alpha],
+ )
+
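The tab indices follow creation order (0 img2img, 1 sketch, 2 inpaint, 3 inpaint sketch, 4 inpaint upload, 5 batch), so the inpaint controls appear for the three inpaint-style tabs and the mask-transparency slider only for the colour-sketch variant. The same mapping as plain booleans, for illustration:

    def tab_visibility(tab):
        show_inpaint_controls = tab in (2, 3, 4)  # inpaint, inpaint sketch, inpaint upload
        show_mask_alpha = tab == 3                # inpaint sketch only
        return show_inpaint_controls, show_mask_alpha

    assert tab_visibility(3) == (True, True)
    assert tab_visibility(5) == (False, False)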
+ img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
@@ -900,35 +812,22 @@ def create_ui():
]
)
- mask_mode.change(
- lambda mode, img: {
- init_img_with_mask: gr_show(mode == 0),
- init_img_inpaint: gr_show(mode == 1),
- init_mask_inpaint: gr_show(mode == 1),
- },
- inputs=[mask_mode, init_img_with_mask],
- outputs=[
- init_img_with_mask,
- init_img_inpaint,
- init_mask_inpaint,
- ],
- )
-
img2img_args = dict(
- fn=wrap_gradio_gpu_call(modules.img2img.img2img),
+ fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']),
_js="submit_img2img",
inputs=[
dummy_component,
+ dummy_component,
img2img_prompt,
img2img_negative_prompt,
- img2img_prompt_style,
- img2img_prompt_style2,
+ img2img_prompt_styles,
init_img,
+ sketch,
init_img_with_mask,
- init_img_with_mask_orig,
+ inpaint_color_sketch,
+ inpaint_color_sketch_orig,
init_img_inpaint,
init_mask_inpaint,
- mask_mode,
steps,
sampler_index,
mask_blur,
@@ -950,44 +849,48 @@ def create_ui():
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
+ img2img_batch_inpaint_mask_dir
] + custom_inputs,
outputs=[
img2img_gallery,
generation_info,
- html_info
+ html_info,
+ html_log,
],
show_progress=False,
)
+ interrogate_args = dict(
+ _js="get_img2img_tab_index",
+ inputs=[
+ dummy_component,
+ img2img_batch_input_dir,
+ img2img_batch_output_dir,
+ init_img,
+ sketch,
+ init_img_with_mask,
+ inpaint_color_sketch,
+ init_img_inpaint,
+ ],
+ outputs=[img2img_prompt, dummy_component],
+ )
+
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)
+ res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height])
img2img_interrogate.click(
- fn=interrogate,
- inputs=[init_img],
- outputs=[img2img_prompt],
+ fn=lambda *args: process_interrogate(interrogate, *args),
+ **interrogate_args,
)
img2img_deepbooru.click(
- fn=interrogate_deepbooru,
- inputs=[init_img],
- outputs=[img2img_prompt],
- )
-
-
- roll.click(
- fn=roll_artist,
- _js="update_img2img_tokens",
- inputs=[
- img2img_prompt,
- ],
- outputs=[
- img2img_prompt,
- ]
+ fn=lambda *args: process_interrogate(interrogate_deepbooru, *args),
+ **interrogate_args,
)
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
- style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
+ style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles]
style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
@@ -997,18 +900,21 @@ def create_ui():
# Have to pass empty dummy component here, because the JavaScript and Python function have to accept
# the same number of parameters, but we only know the style-name after the JavaScript prompt
inputs=[dummy_component, prompt, negative_prompt],
- outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
+ outputs=[txt2img_prompt_styles, img2img_prompt_styles],
)
- for button, (prompt, negative_prompt), (style1, style2), js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
+ for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
button.click(
fn=apply_styles,
_js=js_func,
- inputs=[prompt, negative_prompt, style1, style2],
- outputs=[prompt, negative_prompt, style1, style2],
+ inputs=[prompt, negative_prompt, styles],
+ outputs=[prompt, negative_prompt, styles],
)
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
+ negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter])
+
+ ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
@@ -1035,86 +941,7 @@ def create_ui():
modules.scripts.scripts_current = None
with gr.Blocks(analytics_enabled=False) as extras_interface:
- with gr.Row().style(equal_height=False):
- with gr.Column(variant='panel'):
- with gr.Tabs(elem_id="mode_extras"):
- with gr.TabItem('Single Image'):
- extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
-
- with gr.TabItem('Batch Process'):
- image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
-
- with gr.TabItem('Batch from Directory'):
- extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.")
- extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.")
- show_extras_results = gr.Checkbox(label='Show result images', value=True)
-
- submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
-
- with gr.Tabs(elem_id="extras_resize_mode"):
- with gr.TabItem('Scale by'):
- upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4)
- with gr.TabItem('Scale to'):
- with gr.Group():
- with gr.Row():
- upscaling_resize_w = gr.Number(label="Width", value=512, precision=0)
- upscaling_resize_h = gr.Number(label="Height", value=512, precision=0)
- upscaling_crop = gr.Checkbox(label='Crop to fit', value=True)
-
- with gr.Group():
- extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
-
- with gr.Group():
- extras_upscaler_2 = gr.Radio(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
- extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
-
- with gr.Group():
- gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
-
- with gr.Group():
- codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
- codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
-
- with gr.Group():
- upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False)
-
- result_images, html_info_x, html_info = create_output_panel("extras", opts.outdir_extras_samples)
-
- submit.click(
- fn=wrap_gradio_gpu_call(modules.extras.run_extras),
- _js="get_extras_tab_index",
- inputs=[
- dummy_component,
- dummy_component,
- extras_image,
- image_batch,
- extras_batch_input_dir,
- extras_batch_output_dir,
- show_extras_results,
- gfpgan_visibility,
- codeformer_visibility,
- codeformer_weight,
- upscaling_resize,
- upscaling_resize_w,
- upscaling_resize_h,
- upscaling_crop,
- extras_upscaler_1,
- extras_upscaler_2,
- extras_upscaler_2_visibility,
- upscale_before_face_fix,
- ],
- outputs=[
- result_images,
- html_info_x,
- html_info,
- ]
- )
- parameters_copypaste.add_paste_fields("extras", extras_image, None)
-
- extras_image.change(
- fn=modules.extras.clear_cache,
- inputs=[], outputs=[]
- )
+ ui_postprocessing.create_ui()
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
with gr.Row().style(equal_height=False):
@@ -1123,7 +950,7 @@ def create_ui():
with gr.Column(variant='panel'):
html = gr.HTML()
- generation_info = gr.Textbox(visible=False)
+ generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info")
html2 = gr.HTML()
with gr.Row():
buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
@@ -1135,99 +962,141 @@ def create_ui():
outputs=[html, generation_info, html2],
)
+ def update_interp_description(value):
+ interp_description_css = "<p style='margin-bottom: 2.5em'>{}</p>"
+ interp_descriptions = {
+ "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."),
+ "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"),
+ "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M")
+ }
+ return interp_descriptions[value]
+
with gr.Blocks(analytics_enabled=False) as modelmerger_interface:
with gr.Row().style(equal_height=False):
- with gr.Column(variant='panel'):
- gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
+ with gr.Column(variant='compact'):
+ interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description")
- with gr.Row():
+ with FormRow(elem_id="modelmerger_models"):
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)")
+ create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A")
+
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)")
+ create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B")
+
tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)")
- custom_name = gr.Textbox(label="Custom Name (Optional)")
- interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3)
- interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method")
+ create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C")
- with gr.Row():
- checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format")
- save_as_half = gr.Checkbox(value=False, label="Save as float16")
+ custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name")
+ interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount")
+ interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method")
+ interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description])
- modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
+ with FormRow():
+ checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format", elem_id="modelmerger_checkpoint_format")
+ save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half")
- with gr.Column(variant='panel'):
- submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
+ with FormRow():
+ with gr.Column():
+ config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method")
+
+ with gr.Column():
+ with FormRow():
+ bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae")
+ create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae")
+
+ with FormRow():
+ discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights")
+
+ with gr.Row():
+ modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary')
- sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
+ with gr.Column(variant='compact', elem_id="modelmerger_results_container"):
+ with gr.Group(elem_id="modelmerger_results_panel"):
+ modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False)
with gr.Blocks(analytics_enabled=False) as train_interface:
with gr.Row().style(equal_height=False):
gr.HTML(value="<p style='margin-bottom: 0.7em'>See <b><a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\">wiki</a></b> for detailed explanation.</p>")
- with gr.Row().style(equal_height=False):
+ with gr.Row(variant="compact").style(equal_height=False):
with gr.Tabs(elem_id="train_tabs"):
with gr.Tab(label="Create embedding"):
- new_embedding_name = gr.Textbox(label="Name")
- initialization_text = gr.Textbox(label="Initialization text", value="*")
- nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
- overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding")
+ new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name")
+ initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text")
+ nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt")
+ overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding")
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
- create_embedding = gr.Button(value="Create embedding", variant='primary')
+ create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding")
with gr.Tab(label="Create hypernetwork"):
- new_hypernetwork_name = gr.Textbox(label="Name")
- new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"])
- new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'")
- new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys)
- new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])
- new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization")
- new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout")
- overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork")
+ new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name")
+ new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes")
+ new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure")
+ new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func")
+ new_hypernetwork_initialization_option = gr.Dropdown(value="Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option")
+ new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm")
+ new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout")
+ new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. ex:'0, 0.01, 0'")
+ overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork")
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
- create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary')
+ create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
with gr.Tab(label="Preprocess images"):
- process_src = gr.Textbox(label='Source directory')
- process_dst = gr.Textbox(label='Destination directory')
- process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512)
- process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512)
- preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"])
+ process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
+ process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
+ process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
+ process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
+ preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")
with gr.Row():
- process_flip = gr.Checkbox(label='Create flipped copies')
- process_split = gr.Checkbox(label='Split oversized images')
- process_focal_crop = gr.Checkbox(label='Auto focal point crop')
- process_caption = gr.Checkbox(label='Use BLIP for caption')
- process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True)
+ process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
+ process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
+ process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
+ process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
+ process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
+ process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
with gr.Row(visible=False) as process_split_extra_row:
- process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
- process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05)
+ process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
+ process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")
with gr.Row(visible=False) as process_focal_crop_row:
- process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05)
- process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05)
- process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
- process_focal_crop_debug = gr.Checkbox(label='Create debug image')
-
+ process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
+ process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
+ process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
+ process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
+
+ with gr.Column(visible=False) as process_multicrop_col:
+ gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
+ with gr.Row():
+ process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
+ process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
+ with gr.Row():
+ process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
+ process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
+ with gr.Row():
+ process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
+ process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
+
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
with gr.Row():
- interrupt_preprocessing = gr.Button("Interrupt")
- run_preprocess = gr.Button(value="Preprocess", variant='primary')
+ interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
+ run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
process_split.change(
fn=lambda show: gr_show(show),
@@ -1241,54 +1110,74 @@ def create_ui():
outputs=[process_focal_crop_row],
)
+ process_multicrop.change(
+ fn=lambda show: gr_show(show),
+ inputs=[process_multicrop],
+ outputs=[process_multicrop_col],
+ )
+
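+ # Helper for the "Prompt template" dropdown below: returns the known template names, sorted.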
+ def get_textual_inversion_template_names():
+ return sorted([x for x in textual_inversion.textual_inversion_templates])
+
with gr.Tab(label="Train"):
gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")
- with gr.Row():
+ with FormRow():
train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
- with gr.Row():
+
train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()])
create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name")
- with gr.Row():
- embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005")
- hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001")
-
- batch_size = gr.Number(label='Batch size', value=1, precision=0)
- gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0)
- dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
- log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
- template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
- training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512)
- training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512)
- steps = gr.Number(label='Max steps', value=100000, precision=0)
- create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
- save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
- save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True)
- preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False)
- with gr.Row():
- shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False)
- tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0)
- with gr.Row():
- latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'])
+
+ with FormRow():
+ embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
+ hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")
+
+ with FormRow():
+ clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
+ clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)
+
+ with FormRow():
+ batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
+ gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")
+
+ dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
+ log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")
+
+ with FormRow():
+ template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names())
+ create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refresh_train_template_file")
+
+ training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
+ training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
+ varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize")
+ steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")
+
+ with FormRow():
+ create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
+ save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
+
+ save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
+ preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
+
+ shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
+ tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")
+
+ latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method")
with gr.Row():
- interrupt_training = gr.Button(value="Interrupt")
- train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary')
- train_embedding = gr.Button(value="Train Embedding", variant='primary')
+ train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding")
+ interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training")
+ train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork")
params = script_callbacks.UiTrainTabParams(txt2img_preview_params)
script_callbacks.ui_train_tabs_callback(params)
- with gr.Column():
- progressbar = gr.HTML(elem_id="ti_progressbar")
+ with gr.Column(elem_id='ti_gallery_container'):
ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
-
ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
- ti_preview = gr.Image(elem_id='ti_preview', visible=False)
ti_progress = gr.HTML(elem_id="ti_progress", value="")
ti_outcome = gr.HTML(elem_id="ti_error", value="")
- setup_progressbar(progressbar, ti_preview, 'ti', textinfo=ti_progress)
create_embedding.click(
fn=modules.textual_inversion.ui.create_embedding,
@@ -1315,7 +1204,8 @@ def create_ui():
new_hypernetwork_activation_func,
new_hypernetwork_initialization_option,
new_hypernetwork_add_layer_norm,
- new_hypernetwork_use_dropout
+ new_hypernetwork_use_dropout,
+ new_hypernetwork_dropout_structure
],
outputs=[
train_hypernetwork_name,
@@ -1328,6 +1218,7 @@ def create_ui():
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
+ dummy_component,
process_src,
process_dst,
process_width,
@@ -1344,6 +1235,13 @@ def create_ui():
process_focal_crop_entropy_weight,
process_focal_crop_edges_weight,
process_focal_crop_debug,
+ process_multicrop,
+ process_multicrop_mindim,
+ process_multicrop_maxdim,
+ process_multicrop_minarea,
+ process_multicrop_maxarea,
+ process_multicrop_objective,
+ process_multicrop_threshold,
],
outputs=[
ti_output,
@@ -1355,6 +1253,7 @@ def create_ui():
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
+ dummy_component,
train_embedding_name,
embedding_learn_rate,
batch_size,
@@ -1363,7 +1262,10 @@ def create_ui():
log_directory,
training_width,
training_height,
+ varsize,
steps,
+ clip_grad_mode,
+ clip_grad_value,
shuffle_tags,
tag_drop_out,
latent_sampling_method,
@@ -1384,6 +1286,7 @@ def create_ui():
fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
inputs=[
+ dummy_component,
train_hypernetwork_name,
hypernetwork_learn_rate,
batch_size,
@@ -1392,7 +1295,10 @@ def create_ui():
log_directory,
training_width,
training_height,
+ varsize,
steps,
+ clip_grad_mode,
+ clip_grad_value,
shuffle_tags,
tag_drop_out,
latent_sampling_method,
@@ -1447,7 +1353,7 @@ def create_ui():
res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
else:
- with gr.Row(variant="compact"):
+ with FormRow():
res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
else:
@@ -1489,44 +1395,44 @@ def create_ui():
opts.save(shared.config_filename)
- return gr.update(value=value), opts.dumpjson()
+ return get_value_for_setting(key), opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
- settings_submit = gr.Button(value="Apply settings", variant='primary')
- result = gr.HTML()
+ with gr.Row():
+ with gr.Column(scale=6):
+ settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
+ with gr.Column():
+ restart_gradio = gr.Button(value='Reload UI', variant='primary', elem_id="settings_restart_gradio")
- settings_cols = 3
- items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
+ result = gr.HTML(elem_id="settings_result")
quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")]
- quicksettings_names = set(x for x in quicksettings_names if x != 'quicksettings')
+ quicksettings_names = {x: i for i, x in enumerate(quicksettings_names) if x != 'quicksettings'}
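+ # maps each quicksetting name to its position in the user's quicksettings option, used later to order the quicksettings row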
quicksettings_list = []
- cols_displayed = 0
- items_displayed = 0
previous_section = None
- column = None
- with gr.Row(elem_id="settings").style(equal_height=False):
+ current_tab = None
+ current_row = None
+ with gr.Tabs(elem_id="settings"):
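+ # one tab per settings section; the tab/column context managers are entered and exited by hand because components are created in this flat loop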
for i, (k, item) in enumerate(opts.data_labels.items()):
section_must_be_skipped = item.section[0] is None
if previous_section != item.section and not section_must_be_skipped:
- if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
- if column is not None:
- column.__exit__()
+ elem_id, text = item.section
- column = gr.Column(variant='panel')
- column.__enter__()
+ if current_tab is not None:
+ current_row.__exit__()
+ current_tab.__exit__()
- items_displayed = 0
- cols_displayed += 1
+ gr.Group()
+ current_tab = gr.TabItem(elem_id="settings_{}".format(elem_id), label=text)
+ current_tab.__enter__()
+ current_row = gr.Column(variant='compact')
+ current_row.__enter__()
previous_section = item.section
- elem_id, text = item.section
- gr.HTML(elem_id="settings_header_text_{}".format(elem_id), value='<h1 class="gr-button-lg">{}</h1>'.format(text))
-
if k in quicksettings_names and not shared.cmd_opts.freeze_settings:
quicksettings_list.append((i, k, item))
components.append(dummy_component)
@@ -1536,15 +1442,20 @@ def create_ui():
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
- items_displayed += 1
- with gr.Row():
- request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
- download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
+ if current_tab is not None:
+ current_row.__exit__()
+ current_tab.__exit__()
- with gr.Row():
- reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
- restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
+ with gr.TabItem("Actions"):
+ request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
+ download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
+ reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
+
+ with gr.TabItem("Licenses"):
+ gr.HTML(shared.html("licenses.html"), elem_id="licenses")
+
+ gr.Button(value="Show all pages", elem_id="settings_show_all_pages")
request_notifications.click(
fn=lambda: None,
@@ -1581,9 +1492,6 @@ def create_ui():
outputs=[],
)
- if column is not None:
- column.__exit__()
-
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
@@ -1602,8 +1510,8 @@ def create_ui():
with open(cssfile, "r", encoding="utf8") as file:
css += file.read() + "\n"
- if os.path.exists(os.path.join(script_path, "user.css")):
- with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
+ if os.path.exists(os.path.join(data_path, "user.css")):
+ with open(os.path.join(data_path, "user.css"), "r", encoding="utf8") as file:
css += file.read() + "\n"
if not cmd_opts.no_progressbar_hiding:
@@ -1616,8 +1524,8 @@ def create_ui():
interfaces += [(extensions_interface, "Extensions", "extensions")]
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
- with gr.Row(elem_id="quicksettings"):
- for i, k, item in quicksettings_list:
+ with gr.Row(elem_id="quicksettings", variant="compact"):
+ for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
component = create_setting_component(k, is_quicksettings=True)
component_dict[k] = component
@@ -1632,6 +1540,10 @@ def create_ui():
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
+ footer = shared.html("footer.html")
+ footer = footer.format(versions=versions_html())
+ gr.HTML(footer, elem_id="footer")
+
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=wrap_gradio_call(run_settings, extra_outputs=[gr.update()]),
@@ -1651,7 +1563,7 @@ def create_ui():
component_keys = [k for k in opts.data_labels.keys() if k in component_dict]
def get_settings_values():
- return [getattr(opts, key) for key in component_keys]
+ return [get_value_for_setting(key) for key in component_keys]
demo.load(
fn=get_settings_values,
@@ -1666,12 +1578,15 @@ def create_ui():
print("Error loading/saving model file:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
modules.sd_models.list_models() # to remove the potentially missing models from the list
- return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)]
+ return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
return results
+ modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result])
modelmerger_merge.click(
- fn=modelmerger,
+ fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]),
+ _js='modelmerger',
inputs=[
+ dummy_component,
primary_model_name,
secondary_model_name,
tertiary_model_name,
@@ -1680,13 +1595,16 @@ def create_ui():
save_as_half,
custom_name,
checkpoint_format,
+ config_source,
+ bake_in_vae,
+ discard_weights,
],
outputs=[
- submit_result,
primary_model_name,
secondary_model_name,
tertiary_model_name,
component_dict['sd_model_checkpoint'],
+ modelmerger_result,
]
)
@@ -1718,13 +1636,16 @@ def create_ui():
if saved_value is None:
ui_settings[key] = getattr(obj, field)
elif condition and not condition(saved_value):
- print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.')
+ pass
+
+ # this warning is generally not useful;
+ # print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.')
else:
setattr(obj, field, saved_value)
if init_field is not None:
init_field(saved_value)
- if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
+ if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
@@ -1745,43 +1666,46 @@ def create_ui():
if type(x) == gr.Number:
apply_field(x, 'value')
- # Since there are many dropdowns that shouldn't be saved,
- # we only mark dropdowns that should be saved.
- if type(x) == gr.Dropdown and getattr(x, 'save_to_config', False):
- apply_field(x, 'value', lambda val: val in x.choices, getattr(x, 'init_field', None))
- apply_field(x, 'visible')
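+ # dropdown values are now saved/restored like other components; multiselect dropdowns require every selected value to be a valid choice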
+ if type(x) == gr.Dropdown:
+ def check_dropdown(val):
+ if getattr(x, 'multiselect', False):
+ return all([value in x.choices for value in val])
+ else:
+ return val in x.choices
+
+ apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None))
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
visit(modelmerger_interface, loadsave, "modelmerger")
+ visit(train_interface, loadsave, "train")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
json.dump(ui_settings, file, indent=4)
+ # Required as a workaround for change() event not triggering when loading values from ui-config.json
+ interp_description.value = update_interp_description(interp_method.value)
+
return demo
def reload_javascript():
- with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile:
- javascript = f'<script>{jsfile.read()}</script>'
-
- scripts_list = modules.scripts.list_scripts("javascript", ".js")
-
- for basedir, filename, path in scripts_list:
- with open(path, "r", encoding="utf8") as jsfile:
- javascript += f"\n<!-- {filename} --><script>{jsfile.read()}</script>"
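+ # scripts are now referenced via file= URLs instead of being inlined; only the localization/theme snippet is embedded directly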
+ head = f'<script type="text/javascript" src="file={os.path.abspath("script.js")}"></script>\n'
+ inline = f"{localization.localization_js(shared.opts.localization)};"
if cmd_opts.theme is not None:
- javascript += f"\n<script>set_theme('{cmd_opts.theme}');</script>\n"
+ inline += f"set_theme('{cmd_opts.theme}');"
+
+ for script in modules.scripts.list_scripts("javascript", ".js"):
+ head += f'<script type="text/javascript" src="file={script.path}"></script>\n'
- javascript += f"\n<script>{localization.localization_js(shared.opts.localization)}</script>"
+ head += f'<script type="text/javascript">{inline}</script>\n'
def template_response(*args, **kwargs):
res = shared.GradioTemplateResponseOriginal(*args, **kwargs)
- res.body = res.body.replace(
- b'</head>', f'{javascript}</head>'.encode("utf8"))
+ res.body = res.body.replace(b'</head>', f'{head}</head>'.encode("utf8"))
res.init_headers()
return res
@@ -1790,3 +1714,32 @@ def reload_javascript():
if not hasattr(shared, 'GradioTemplateResponseOriginal'):
shared.GradioTemplateResponseOriginal = gradio.routes.templates.TemplateResponse
+
+
+def versions_html():
+ import torch
+ import launch
+
+ python_version = ".".join([str(x) for x in sys.version_info[0:3]])
+ commit = launch.commit_hash()
+ short_commit = commit[0:8]
+
+ if shared.xformers_available:
+ import xformers
+ xformers_version = xformers.__version__
+ else:
+ xformers_version = "N/A"
+
+ return f"""
+python: <span title="{sys.version}">{python_version}</span>
+ • 
+torch: {torch.__version__}
+ • 
+xformers: {xformers_version}
+ • 
+gradio: {gr.__version__}
+ • 
+commit: <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/{commit}">{short_commit}</a>
+ • 
+checkpoint: <a id="sd_checkpoint_hash">N/A</a>
+"""
diff --git a/modules/ui_common.py b/modules/ui_common.py
new file mode 100644
index 00000000..9405ac1f
--- /dev/null
+++ b/modules/ui_common.py
@@ -0,0 +1,202 @@
+import json
+import html
+import os
+import platform
+import sys
+
+import gradio as gr
+import subprocess as sp
+
+from modules import call_queue, shared
+from modules.generation_parameters_copypaste import image_from_url_text
+import modules.images
+
+folder_symbol = '\U0001f4c2' # 📂
+
+
+def update_generation_info(generation_info, html_info, img_index):
+ try:
+ generation_info = json.loads(generation_info)
+ if img_index < 0 or img_index >= len(generation_info["infotexts"]):
+ return html_info, gr.update()
+ return plaintext_to_html(generation_info["infotexts"][img_index]), gr.update()
+ except Exception:
+ pass
+ # if the json parse or anything else fails, just return the old html_info
+ return html_info, gr.update()
+
+
+def plaintext_to_html(text):
+ text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
+ return text
+
+
+def save_files(js_data, images, do_make_zip, index):
+ import csv
+ filenames = []
+ fullfns = []
+
+ # quick dictionary-to-class-object conversion; it's necessary because apply_filename_pattern requires an object
+ class MyObject:
+ def __init__(self, d=None):
+ if d is not None:
+ for key, value in d.items():
+ setattr(self, key, value)
+
+ data = json.loads(js_data)
+
+ p = MyObject(data)
+ path = shared.opts.outdir_save
+ save_to_dirs = shared.opts.use_save_to_dirs_for_ui
+ extension: str = shared.opts.samples_format
+ start_index = 0
+
+ if index > -1 and shared.opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
+
+ images = [images[index]]
+ start_index = index
+
+ os.makedirs(shared.opts.outdir_save, exist_ok=True)
+
+ with open(os.path.join(shared.opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
+ at_start = file.tell() == 0
+ writer = csv.writer(file)
+ if at_start:
+ writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
+
+ for image_index, filedata in enumerate(images, start_index):
+ image = image_from_url_text(filedata)
+
+ is_grid = image_index < p.index_of_first_image
+ i = 0 if is_grid else (image_index - p.index_of_first_image)
+
+ fullfn, txt_fullfn = modules.images.save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
+
+ filename = os.path.relpath(fullfn, path)
+ filenames.append(filename)
+ fullfns.append(fullfn)
+ if txt_fullfn:
+ filenames.append(os.path.basename(txt_fullfn))
+ fullfns.append(txt_fullfn)
+
+ writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler_name"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
+
+ # Make Zip
+ if do_make_zip:
+ zip_filepath = os.path.join(path, "images.zip")
+
+ from zipfile import ZipFile
+ with ZipFile(zip_filepath, "w") as zip_file:
+ for i in range(len(fullfns)):
+ with open(fullfns[i], mode="rb") as f:
+ zip_file.writestr(filenames[i], f.read())
+ fullfns.insert(0, zip_filepath)
+
+ return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}")
+
+
+def create_output_panel(tabname, outdir):
+ from modules import shared
+ import modules.generation_parameters_copypaste as parameters_copypaste
+
+ def open_folder(f):
+ if not os.path.exists(f):
+ print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.')
+ return
+ elif not os.path.isdir(f):
+ print(f"""
+WARNING
+An open_folder request was made with an argument that is not a folder.
+This could be an error or a malicious attempt to run code on your computer.
+Requested path was: {f}
+""", file=sys.stderr)
+ return
+
+ if not shared.cmd_opts.hide_ui_dir_config:
+ path = os.path.normpath(f)
+ if platform.system() == "Windows":
+ os.startfile(path)
+ elif platform.system() == "Darwin":
+ sp.Popen(["open", path])
+ elif "microsoft-standard-WSL2" in platform.uname().release:
+ sp.Popen(["wsl-open", path])
+ else:
+ sp.Popen(["xdg-open", path])
+
+ with gr.Column(variant='panel', elem_id=f"{tabname}_results"):
+ with gr.Group(elem_id=f"{tabname}_gallery_container"):
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(grid=4)
+
+ generation_info = None
+ with gr.Column():
+ with gr.Row(elem_id=f"image_buttons_{tabname}"):
+ open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else f'open_folder_{tabname}')
+
+ if tabname != "extras":
+ save = gr.Button('Save', elem_id=f'save_{tabname}')
+ save_zip = gr.Button('Zip', elem_id=f'save_zip_{tabname}')
+
+ buttons = parameters_copypaste.create_buttons(["img2img", "inpaint", "extras"])
+
+ open_folder_button.click(
+ fn=lambda: open_folder(shared.opts.outdir_samples or outdir),
+ inputs=[],
+ outputs=[],
+ )
+
+ if tabname != "extras":
+ with gr.Row():
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False, elem_id=f'download_files_{tabname}')
+
+ with gr.Group():
+ html_info = gr.HTML(elem_id=f'html_info_{tabname}')
+ html_log = gr.HTML(elem_id=f'html_log_{tabname}')
+
+ generation_info = gr.Textbox(visible=False, elem_id=f'generation_info_{tabname}')
+ if tabname == 'txt2img' or tabname == 'img2img':
+ generation_info_button = gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button")
+ generation_info_button.click(
+ fn=update_generation_info,
+ _js="function(x, y, z){ return [x, y, selected_gallery_index()] }",
+ inputs=[generation_info, html_info, html_info],
+ outputs=[html_info, html_info],
+ )
+
+ save.click(
+ fn=call_queue.wrap_gradio_call(save_files),
+ _js="(x, y, z, w) => [x, y, false, selected_gallery_index()]",
+ inputs=[
+ generation_info,
+ result_gallery,
+ html_info,
+ html_info,
+ ],
+ outputs=[
+ download_files,
+ html_log,
+ ],
+ show_progress=False,
+ )
+
+ save_zip.click(
+ fn=call_queue.wrap_gradio_call(save_files),
+ _js="(x, y, z, w) => [x, y, true, selected_gallery_index()]",
+ inputs=[
+ generation_info,
+ result_gallery,
+ html_info,
+ html_info,
+ ],
+ outputs=[
+ download_files,
+ html_log,
+ ]
+ )
+
+ else:
+ html_info_x = gr.HTML(elem_id=f'html_info_x_{tabname}')
+ html_info = gr.HTML(elem_id=f'html_info_{tabname}')
+ html_log = gr.HTML(elem_id=f'html_log_{tabname}')
+
+ parameters_copypaste.bind_buttons(buttons, result_gallery, "txt2img" if tabname == "txt2img" else None)
+ return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log
diff --git a/modules/ui_components.py b/modules/ui_components.py
new file mode 100644
index 00000000..284ca0cf
--- /dev/null
+++ b/modules/ui_components.py
@@ -0,0 +1,58 @@
+import gradio as gr
+
+
+class ToolButton(gr.Button, gr.components.FormComponent):
+ """Small button with single emoji as text, fits inside gradio forms"""
+
+ def __init__(self, **kwargs):
+ super().__init__(variant="tool", **kwargs)
+
+ def get_block_name(self):
+ return "button"
+
+
+class ToolButtonTop(gr.Button, gr.components.FormComponent):
+ """Small button with single emoji as text, with extra margin at top, fits inside gradio forms"""
+
+ def __init__(self, **kwargs):
+ super().__init__(variant="tool-top", **kwargs)
+
+ def get_block_name(self):
+ return "button"
+
+
+class FormRow(gr.Row, gr.components.FormComponent):
+ """Same as gr.Row but fits inside gradio forms"""
+
+ def get_block_name(self):
+ return "row"
+
+
+class FormGroup(gr.Group, gr.components.FormComponent):
+ """Same as gr.Row but fits inside gradio forms"""
+
+ def get_block_name(self):
+ return "group"
+
+
+class FormHTML(gr.HTML, gr.components.FormComponent):
+ """Same as gr.HTML but fits inside gradio forms"""
+
+ def get_block_name(self):
+ return "html"
+
+
+class FormColorPicker(gr.ColorPicker, gr.components.FormComponent):
+ """Same as gr.ColorPicker but fits inside gradio forms"""
+
+ def get_block_name(self):
+ return "colorpicker"
+
+
+class DropdownMulti(gr.Dropdown):
+ """Same as gr.Dropdown but always multiselect"""
+ def __init__(self, **kwargs):
+ super().__init__(multiselect=True, **kwargs)
+
+ def get_block_name(self):
+ return "dropdown"
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index eec9586f..66a41865 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -132,7 +132,7 @@ def install_extension_from_url(dirname, url):
normalized_url = normalize_git_url(url)
assert len([x for x in extensions.extensions if normalize_git_url(x.remote) == normalized_url]) == 0, 'Extension with this URL is already installed'
- tmpdir = os.path.join(paths.script_path, "tmp", dirname)
+ tmpdir = os.path.join(paths.data_path, "tmp", dirname)
try:
shutil.rmtree(tmpdir, True)
@@ -162,15 +162,15 @@ def install_extension_from_url(dirname, url):
shutil.rmtree(tmpdir, True)
-def install_extension_from_index(url, hide_tags):
+def install_extension_from_index(url, hide_tags, sort_column):
ext_table, message = install_extension_from_url(None, url)
- code, _ = refresh_available_extensions_from_data(hide_tags)
+ code, _ = refresh_available_extensions_from_data(hide_tags, sort_column)
return code, ext_table, message
-def refresh_available_extensions(url, hide_tags):
+def refresh_available_extensions(url, hide_tags, sort_column):
global available_extensions
import urllib.request
@@ -179,18 +179,28 @@ def refresh_available_extensions(url, hide_tags):
available_extensions = json.loads(text)
- code, tags = refresh_available_extensions_from_data(hide_tags)
+ code, tags = refresh_available_extensions_from_data(hide_tags, sort_column)
return url, code, gr.CheckboxGroup.update(choices=tags), ''
-def refresh_available_extensions_for_tags(hide_tags):
- code, _ = refresh_available_extensions_from_data(hide_tags)
+def refresh_available_extensions_for_tags(hide_tags, sort_column):
+ code, _ = refresh_available_extensions_from_data(hide_tags, sort_column)
return code, ''
-def refresh_available_extensions_from_data(hide_tags):
+sort_ordering = [
+ # (reverse, order_by_function)
+ (True, lambda x: x.get('added', 'z')),
+ (False, lambda x: x.get('added', 'z')),
+ (False, lambda x: x.get('name', 'z')),
+ (True, lambda x: x.get('name', 'z')),
+ (False, lambda x: 'z'),
+]
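+# index into sort_ordering matches the "Order" radio below ("newest first", "oldest first", "a-z", "z-a", "internal order")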
+
+
+def refresh_available_extensions_from_data(hide_tags, sort_column):
extlist = available_extensions["extensions"]
installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions}
@@ -210,8 +220,11 @@ def refresh_available_extensions_from_data(hide_tags):
<tbody>
"""
- for ext in extlist:
+ sort_reverse, sort_function = sort_ordering[sort_column if 0 <= sort_column < len(sort_ordering) else 0]
+
+ for ext in sorted(extlist, key=sort_function, reverse=sort_reverse):
name = ext.get("name", "noname")
+ added = ext.get('added', 'unknown')
url = ext.get("url", None)
description = ext.get("description", "")
extension_tags = ext.get("tags", [])
@@ -233,7 +246,7 @@ def refresh_available_extensions_from_data(hide_tags):
code += f"""
<tr>
<td><a href="{html.escape(url)}" target="_blank">{html.escape(name)}</a><br />{tags_text}</td>
- <td>{html.escape(description)}</td>
+ <td>{html.escape(description)}<p class="info"><span class="date_added">Added: {html.escape(added)}</span></p></td>
<td>{install_code}</td>
</tr>
@@ -291,25 +304,32 @@ def create_ui():
with gr.Row():
hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"])
+ sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order", ], type="index")
install_result = gr.HTML()
available_extensions_table = gr.HTML()
refresh_available_extensions_button.click(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update(), gr.update()]),
- inputs=[available_extensions_index, hide_tags],
+ inputs=[available_extensions_index, hide_tags, sort_column],
outputs=[available_extensions_index, available_extensions_table, hide_tags, install_result],
)
install_extension_button.click(
fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]),
- inputs=[extension_to_install, hide_tags],
+ inputs=[extension_to_install, hide_tags, sort_column],
outputs=[available_extensions_table, extensions_table, install_result],
)
hide_tags.change(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]),
- inputs=[hide_tags],
+ inputs=[hide_tags, sort_column],
+ outputs=[available_extensions_table, install_result]
+ )
+
+ sort_column.change(
+ fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]),
+ inputs=[hide_tags, sort_column],
outputs=[available_extensions_table, install_result]
)
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
new file mode 100644
index 00000000..c6ff889a
--- /dev/null
+++ b/modules/ui_extra_networks.py
@@ -0,0 +1,179 @@
+import os.path
+
+from modules import shared
+import gradio as gr
+import json
+import html
+
+from modules.generation_parameters_copypaste import image_from_url_text
+
+extra_pages = []
+
+
+def register_page(page):
+ """registers extra networks page for the UI; recommend doing it in on_before_ui() callback for extensions"""
+
+ extra_pages.append(page)
+
+
+class ExtraNetworksPage:
+ def __init__(self, title):
+ self.title = title
+ self.name = title.lower()
+ self.card_page = shared.html("extra-networks-card.html")
+ self.allow_negative_prompt = False
+
+ def refresh(self):
+ pass
+
+ def create_html(self, tabname):
+ view = shared.opts.extra_networks_default_view
+ items_html = ''
+
+ for item in self.list_items():
+ items_html += self.create_html_for_item(item, tabname)
+
+ if items_html == '':
+ dirs = "".join([f"<li>{x}</li>" for x in self.allowed_directories_for_previews()])
+ items_html = shared.html("extra-networks-no-cards.html").format(dirs=dirs)
+
+ res = f"""
+<div id='{tabname}_{self.name}_cards' class='extra-network-{view}'>
+{items_html}
+</div>
+"""
+
+ return res
+
+ def list_items(self):
+ raise NotImplementedError()
+
+ def allowed_directories_for_previews(self):
+ return []
+
+ def create_html_for_item(self, item, tabname):
+ preview = item.get("preview", None)
+
+ args = {
+ "preview_html": "style='background-image: url(\"" + html.escape(preview) + "\")'" if preview else '',
+ "prompt": item["prompt"],
+ "tabname": json.dumps(tabname),
+ "local_preview": json.dumps(item["local_preview"]),
+ "name": item["name"],
+ "card_clicked": '"' + html.escape(f"""return cardClicked({json.dumps(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"',
+ "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {json.dumps(tabname)}, {json.dumps(item["local_preview"])})""") + '"',
+ }
+
+ return self.card_page.format(**args)
+
+
+def intialize():
+ extra_pages.clear()
+
+
+class ExtraNetworksUi:
+ def __init__(self):
+ self.pages = None
+ self.stored_extra_pages = None
+
+ self.button_save_preview = None
+ self.preview_target_filename = None
+
+ self.tabname = None
+
+
+def pages_in_preferred_order(pages):
+ tab_order = [x.lower().strip() for x in shared.opts.ui_extra_networks_tab_reorder.split(",")]
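+ # pages whose name matches an entry in the user's tab-reorder option come first, in that order; all other pages keep their original position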
+
+ def tab_name_score(name):
+ name = name.lower()
+ for i, possible_match in enumerate(tab_order):
+ if possible_match in name:
+ return i
+
+ return len(pages)
+
+ tab_scores = {page.name: (tab_name_score(page.name), original_index) for original_index, page in enumerate(pages)}
+
+ return sorted(pages, key=lambda x: tab_scores[x.name])
+
+
+def create_ui(container, button, tabname):
+ ui = ExtraNetworksUi()
+ ui.pages = []
+ ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy())
+ ui.tabname = tabname
+
+ with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs:
+ for page in ui.stored_extra_pages:
+ with gr.Tab(page.title):
+ page_elem = gr.HTML(page.create_html(ui.tabname))
+ ui.pages.append(page_elem)
+
+ filter = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False)
+ button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh")
+ button_close = gr.Button('Close', elem_id=tabname+"_extra_close")
+
+ ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False)
+ ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False)
+
+ def toggle_visibility(is_visible):
+ is_visible = not is_visible
+ return is_visible, gr.update(visible=is_visible)
+
+ state_visible = gr.State(value=False)
+ button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container])
+ button_close.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container])
+
+ def refresh():
+ res = []
+
+ for pg in ui.stored_extra_pages:
+ pg.refresh()
+ res.append(pg.create_html(ui.tabname))
+
+ return res
+
+ button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages)
+
+ return ui
+
+
+def path_is_parent(parent_path, child_path):
+ parent_path = os.path.abspath(parent_path)
+ child_path = os.path.abspath(child_path)
+
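+ # commonpath collapses to parent_path only when child_path lies inside it (or equals it)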
+ return os.path.commonpath([parent_path]) == os.path.commonpath([parent_path, child_path])
+
+
+def setup_ui(ui, gallery):
+ def save_preview(index, images, filename):
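+ # saves the selected gallery image as the card's preview file, then rebuilds every page's HTML so the new preview is shown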
+ if len(images) == 0:
+ print("There is no image in gallery to save as a preview.")
+ return [page.create_html(ui.tabname) for page in ui.stored_extra_pages]
+
+ index = int(index)
+ index = 0 if index < 0 else index
+ index = len(images) - 1 if index >= len(images) else index
+
+ img_info = images[index if index >= 0 else 0]
+ image = image_from_url_text(img_info)
+
+ is_allowed = False
+ for extra_page in ui.stored_extra_pages:
+ if any([path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()]):
+ is_allowed = True
+ break
+
+ assert is_allowed, f'writing to {filename} is not allowed'
+
+ image.save(filename)
+
+ return [page.create_html(ui.tabname) for page in ui.stored_extra_pages]
+
+ ui.button_save_preview.click(
+ fn=save_preview,
+ _js="function(x, y, z){console.log(x, y, z); return [selected_gallery_index(), y, z]}",
+ inputs=[ui.preview_target_filename, gallery, ui.preview_target_filename],
+ outputs=[*ui.pages]
+ )
diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py
new file mode 100644
index 00000000..65d000cf
--- /dev/null
+++ b/modules/ui_extra_networks_hypernets.py
@@ -0,0 +1,35 @@
+import json
+import os
+
+from modules import shared, ui_extra_networks
+
+
+class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
+ def __init__(self):
+ super().__init__('Hypernetworks')
+
+ def refresh(self):
+ shared.reload_hypernetworks()
+
+ def list_items(self):
+ for name, path in shared.hypernetworks.items():
+ path, ext = os.path.splitext(path)
+ previews = [path + ".png", path + ".preview.png"]
+
+ preview = None
+ for file in previews:
+ if os.path.isfile(file):
+ preview = "./file=" + file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(file))
+ break
+
+ yield {
+ "name": name,
+ "filename": path,
+ "preview": preview,
+ "prompt": json.dumps(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
+ "local_preview": path + ".png",
+ }
+
+ def allowed_directories_for_previews(self):
+ return [shared.cmd_opts.hypernetwork_dir]
+
diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py
new file mode 100644
index 00000000..dbd23d2d
--- /dev/null
+++ b/modules/ui_extra_networks_textual_inversion.py
@@ -0,0 +1,33 @@
+import json
+import os
+
+from modules import ui_extra_networks, sd_hijack
+
+
+class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
+ def __init__(self):
+ super().__init__('Textual Inversion')
+ self.allow_negative_prompt = True
+
+ def refresh(self):
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)
+
+ def list_items(self):
+ for embedding in sd_hijack.model_hijack.embedding_db.word_embeddings.values():
+ path, ext = os.path.splitext(embedding.filename)
+ preview_file = path + ".preview.png"
+
+ preview = None
+ if os.path.isfile(preview_file):
+ preview = "./file=" + preview_file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(preview_file))
+
+ yield {
+ "name": embedding.name,
+ "filename": embedding.filename,
+ "preview": preview,
+ "prompt": json.dumps(embedding.name),
+ "local_preview": path + ".preview.png",
+ }
+
+ def allowed_directories_for_previews(self):
+ return list(sd_hijack.model_hijack.embedding_db.embedding_dirs)
diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py
new file mode 100644
index 00000000..b418d955
--- /dev/null
+++ b/modules/ui_postprocessing.py
@@ -0,0 +1,57 @@
+import gradio as gr
+from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue
+import modules.generation_parameters_copypaste as parameters_copypaste
+
+
+def create_ui():
+ tab_index = gr.State(value=0)
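+ # 0 = single image, 1 = batch of uploaded files, 2 = batch from directory; updated by the .select() handlers on the tabs below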
+
+ with gr.Row().style(equal_height=False, variant='compact'):
+ with gr.Column(variant='compact'):
+ with gr.Tabs(elem_id="mode_extras"):
+ with gr.TabItem('Single Image', elem_id="extras_single_tab") as tab_single:
+ extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image")
+
+ with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab") as tab_batch:
+ image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch")
+
+ with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab") as tab_batch_dir:
+ extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir")
+ extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir")
+ show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results")
+
+ submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
+
+ script_inputs = scripts.scripts_postproc.setup_ui()
+
+ with gr.Column():
+ result_images, html_info_x, html_info, html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples)
+
+ tab_single.select(fn=lambda: 0, inputs=[], outputs=[tab_index])
+ tab_batch.select(fn=lambda: 1, inputs=[], outputs=[tab_index])
+ tab_batch_dir.select(fn=lambda: 2, inputs=[], outputs=[tab_index])
+
+ submit.click(
+ fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing, extra_outputs=[None, '']),
+ inputs=[
+ tab_index,
+ extras_image,
+ image_batch,
+ extras_batch_input_dir,
+ extras_batch_output_dir,
+ show_extras_results,
+ *script_inputs
+ ],
+ outputs=[
+ result_images,
+ html_info_x,
+ html_info,
+ ]
+ )
+
+ parameters_copypaste.add_paste_fields("extras", extras_image, None)
+
+ extras_image.change(
+ fn=scripts.scripts_postproc.image_changed,
+ inputs=[], outputs=[]
+ )
diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py
index 07210d14..21945235 100644
--- a/modules/ui_tempdir.py
+++ b/modules/ui_tempdir.py
@@ -1,6 +1,7 @@
import os
import tempfile
from collections import namedtuple
+from pathlib import Path
import gradio as gr
@@ -12,10 +13,29 @@ from modules import shared
Savedfile = namedtuple("Savedfile", ["name"])
+def register_tmp_file(gradio, filename):
+ if hasattr(gradio, 'temp_file_sets'): # gradio 3.15
+ gradio.temp_file_sets[0] = gradio.temp_file_sets[0] | {os.path.abspath(filename)}
+
+ if hasattr(gradio, 'temp_dirs'): # gradio 3.9
+ gradio.temp_dirs = gradio.temp_dirs | {os.path.abspath(os.path.dirname(filename))}
+
+
+def check_tmp_file(gradio, filename):
+ if hasattr(gradio, 'temp_file_sets'):
+ return any([filename in fileset for fileset in gradio.temp_file_sets])
+
+ if hasattr(gradio, 'temp_dirs'):
+ return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs)
+
+ return False
+
+
def save_pil_to_file(pil_image, dir=None):
already_saved_as = getattr(pil_image, 'already_saved_as', None)
if already_saved_as and os.path.isfile(already_saved_as):
- shared.demo.temp_dirs = shared.demo.temp_dirs | {os.path.abspath(os.path.dirname(already_saved_as))}
+ register_tmp_file(shared.demo, already_saved_as)
+
file_obj = Savedfile(already_saved_as)
return file_obj
@@ -44,7 +64,7 @@ def on_tmpdir_changed():
os.makedirs(shared.opts.temp_dir, exist_ok=True)
- shared.demo.temp_dirs = shared.demo.temp_dirs | {os.path.abspath(shared.opts.temp_dir)}
+ register_tmp_file(shared.demo, os.path.join(shared.opts.temp_dir, "x"))
def cleanup_tmpdr():
diff --git a/modules/upscaler.py b/modules/upscaler.py
index c4e6e6bd..e2eaa730 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -11,7 +11,6 @@ from modules import modelloader, shared
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
NEAREST = (Image.Resampling.NEAREST if hasattr(Image, 'Resampling') else Image.NEAREST)
-from modules.paths import models_path
class Upscaler:
@@ -39,7 +38,7 @@ class Upscaler:
self.mod_scale = None
if self.model_path is None and self.name:
- self.model_path = os.path.join(models_path, self.name)
+ self.model_path = os.path.join(shared.models_path, self.name)
if self.model_path and create_dirs:
os.makedirs(self.model_path, exist_ok=True)
@@ -53,10 +52,10 @@ class Upscaler:
def do_upscale(self, img: PIL.Image, selected_model: str):
return img
- def upscale(self, img: PIL.Image, scale: int, selected_model: str = None):
+ def upscale(self, img: PIL.Image, scale, selected_model: str = None):
self.scale = scale
- dest_w = img.width * scale
- dest_h = img.height * scale
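+ # scale may now be fractional, so truncate the destination size to whole pixels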
+ dest_w = int(img.width * scale)
+ dest_h = int(img.height * scale)
for i in range(3):
shape = (img.width, img.height)
@@ -95,6 +94,7 @@ class UpscalerData:
def __init__(self, name: str, path: str, upscaler: Upscaler = None, scale: int = 4, model=None):
self.name = name
self.data_path = path
+ self.local_data_path = path
self.scaler = upscaler
self.scale = scale
self.model = model
@@ -142,4 +142,4 @@ class UpscalerNearest(Upscaler):
def __init__(self, dirname=None):
super().__init__(False)
self.name = "Nearest"
- self.scalers = [UpscalerData("Nearest", None, self)]
\ No newline at end of file
+ self.scalers = [UpscalerData("Nearest", None, self)]
diff --git a/modules/xlmr.py b/modules/xlmr.py
new file mode 100644
index 00000000..beab3fdf
--- /dev/null
+++ b/modules/xlmr.py
@@ -0,0 +1,137 @@
+from transformers import BertPreTrainedModel, BertModel, BertConfig
+import torch.nn as nn
+import torch
+from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig
+from transformers import XLMRobertaModel,XLMRobertaTokenizer
+from typing import Optional
+
+class BertSeriesConfig(BertConfig):
+ def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None,project_dim=512, pooler_fn="average",learn_encoder=False,model_type='bert',**kwargs):
+
+ super().__init__(vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, type_vocab_size, initializer_range, layer_norm_eps, pad_token_id, position_embedding_type, use_cache, classifier_dropout, **kwargs)
+ self.project_dim = project_dim
+ self.pooler_fn = pooler_fn
+ self.learn_encoder = learn_encoder
+
+class RobertaSeriesConfig(XLMRobertaConfig):
+ def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2,project_dim=512,pooler_fn='cls',learn_encoder=False, **kwargs):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+ self.project_dim = project_dim
+ self.pooler_fn = pooler_fn
+ self.learn_encoder = learn_encoder
+
+
+class BertSeriesModelWithTransformation(BertPreTrainedModel):
+
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
+ config_class = BertSeriesConfig
+
+ def __init__(self, config=None, **kargs):
+ # modify initialization for autoloading
+ if config is None:
+ config = XLMRobertaConfig()
+ config.attention_probs_dropout_prob= 0.1
+ config.bos_token_id=0
+ config.eos_token_id=2
+ config.hidden_act='gelu'
+ config.hidden_dropout_prob=0.1
+ config.hidden_size=1024
+ config.initializer_range=0.02
+ config.intermediate_size=4096
+ config.layer_norm_eps=1e-05
+ config.max_position_embeddings=514
+
+ config.num_attention_heads=16
+ config.num_hidden_layers=24
+ config.output_past=True
+ config.pad_token_id=1
+ config.position_embedding_type= "absolute"
+
+ config.type_vocab_size= 1
+ config.use_cache=True
+ config.vocab_size= 250002
+ config.project_dim = 768
+ config.learn_encoder = False
+ super().__init__(config)
+ self.roberta = XLMRobertaModel(config)
+ self.transformation = nn.Linear(config.hidden_size,config.project_dim)
+ self.pre_LN=nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large')
+ self.pooler = lambda x: x[:,0]
+ self.post_init()
+
+ def encode(self,c):
+ device = next(self.parameters()).device
+ text = self.tokenizer(c,
+ truncation=True,
+ max_length=77,
+ return_length=False,
+ return_overflowing_tokens=False,
+ padding="max_length",
+ return_tensors="pt")
+ text["input_ids"] = torch.tensor(text["input_ids"]).to(device)
+ text["attention_mask"] = torch.tensor(
+ text['attention_mask']).to(device)
+ features = self(**text)
+ return features['projection_state']
+
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ ) :
+ r"""
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+
+ outputs = self.roberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=True,
+ return_dict=return_dict,
+ )
+
+ # last module outputs
+ sequence_output = outputs[0]
+
+
+ # project every module
+ sequence_output_ln = self.pre_LN(sequence_output)
+
+ # pooler
+ pooler_output = self.pooler(sequence_output_ln)
+ pooler_output = self.transformation(pooler_output)
+ projection_state = self.transformation(outputs.last_hidden_state)
+
+ return {
+ 'pooler_output':pooler_output,
+ 'last_hidden_state':outputs.last_hidden_state,
+ 'hidden_states':outputs.hidden_states,
+ 'attentions':outputs.attentions,
+ 'projection_state':projection_state,
+ 'sequence_out': sequence_output
+ }
+
+
+class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation):
+ base_model_prefix = 'roberta'
+ config_class= RobertaSeriesConfig \ No newline at end of file
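forward() above returns both a pooled, layer-normed CLS projection and a per-token projection_state; the latter is what encode() hands back as conditioning. A self-contained sketch of just that tensor arithmetic, using a random stand-in for the XLM-Roberta output instead of loading the real model (shapes follow the config values set in __init__):

    import torch
    import torch.nn as nn

    hidden_size, project_dim, seq_len, batch = 1024, 768, 77, 2

    pre_ln = nn.LayerNorm(hidden_size, eps=1e-05)
    transformation = nn.Linear(hidden_size, project_dim)

    last_hidden_state = torch.randn(batch, seq_len, hidden_size)  # stand-in for roberta(...) output

    pooler_output = transformation(pre_ln(last_hidden_state)[:, 0])  # CLS token, normed then projected
    projection_state = transformation(last_hidden_state)             # every token, projected as-is

    print(pooler_output.shape)     # torch.Size([2, 768])
    print(projection_state.shape)  # torch.Size([2, 77, 768])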
diff --git a/requirements.txt b/requirements.txt
index 5bed694e..6d53f089 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,15 +1,14 @@
blendmodes
accelerate
basicsr
-fairscale==0.4.4
fonts
font-roboto
gfpgan
-gradio==3.9
+gradio==3.16.2
invisible-watermark
numpy
omegaconf
-opencv-python
+opencv-contrib-python
requests
piexif
Pillow
@@ -17,7 +16,7 @@ pytorch_lightning==1.7.7
realesrgan
scikit-image>=0.19
timm==0.4.12
-transformers==4.19.2
+transformers==4.25.1
torch
einops
jsonmerge
@@ -30,4 +29,4 @@ inflection
GitPython
torchsde
safetensors
-psutil; sys_platform == 'darwin'
+psutil
diff --git a/requirements_versions.txt b/requirements_versions.txt
index c126c8c4..eaa08806 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -1,11 +1,11 @@
blendmodes==2022
-transformers==4.19.2
+transformers==4.25.1
accelerate==0.12.0
basicsr==1.4.2
gfpgan==1.3.8
-gradio==3.9
+gradio==3.16.2
numpy==1.23.3
-Pillow==9.2.0
+Pillow==9.4.0
realesrgan==0.3.0
torch
omegaconf==2.2.3
@@ -14,7 +14,6 @@ scikit-image==0.19.2
fonts
font-roboto
timm==0.6.7
-fairscale==0.4.9
piexif==1.1.3
einops==0.4.1
jsonmerge==1.8.0
@@ -26,5 +25,5 @@ lark==1.1.2
inflection==0.5.1
GitPython==3.1.27
torchsde==0.2.5
-safetensors==0.2.5
+safetensors==0.2.7
httpcore<=0.15
diff --git a/screenshot.png b/screenshot.png
index 86c3209f..47a1be4e 100644
--- a/screenshot.png
+++ b/screenshot.png
Binary files differ
diff --git a/script.js b/script.js
index 9748ec90..97e0bfcf 100644
--- a/script.js
+++ b/script.js
@@ -1,10 +1,11 @@
function gradioApp() {
- const gradioShadowRoot = document.getElementsByTagName('gradio-app')[0].shadowRoot
+ const elems = document.getElementsByTagName('gradio-app')
+ const gradioShadowRoot = elems.length == 0 ? null : elems[0].shadowRoot
return !!gradioShadowRoot ? gradioShadowRoot : document;
}
function get_uiCurrentTab() {
- return gradioApp().querySelector('.tabs button:not(.border-transparent)')
+ return gradioApp().querySelector('#tabs button:not(.border-transparent)')
}
function get_uiCurrentTabContent() {
@@ -12,15 +13,23 @@ function get_uiCurrentTabContent() {
}
uiUpdateCallbacks = []
+uiLoadedCallbacks = []
uiTabChangeCallbacks = []
+optionsChangedCallbacks = []
let uiCurrentTab = null
function onUiUpdate(callback){
uiUpdateCallbacks.push(callback)
}
+function onUiLoaded(callback){
+ uiLoadedCallbacks.push(callback)
+}
function onUiTabChange(callback){
uiTabChangeCallbacks.push(callback)
}
+function onOptionsChanged(callback){
+ optionsChangedCallbacks.push(callback)
+}
function runCallback(x, m){
try {
@@ -33,8 +42,15 @@ function executeCallbacks(queue, m) {
queue.forEach(function(x){runCallback(x, m)})
}
+var executedOnLoaded = false;
+
document.addEventListener("DOMContentLoaded", function() {
var mutationObserver = new MutationObserver(function(m){
+ if(!executedOnLoaded && gradioApp().querySelector('#txt2img_prompt')){
+ executedOnLoaded = true;
+ executeCallbacks(uiLoadedCallbacks);
+ }
+
executeCallbacks(uiUpdateCallbacks, m);
const newTab = get_uiCurrentTab();
if ( newTab && ( newTab !== uiCurrentTab ) ) {
@@ -48,7 +64,7 @@ document.addEventListener("DOMContentLoaded", function() {
/**
* Add a ctrl+enter as a shortcut to start a generation
*/
- document.addEventListener('keydown', function(e) {
+document.addEventListener('keydown', function(e) {
var handled = false;
if (e.key !== undefined) {
if((e.key == "Enter" && (e.metaKey || e.ctrlKey || e.altKey))) handled = true;
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index 22e7b77a..d29113e6 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -9,12 +9,11 @@ class Script(scripts.Script):
def title(self):
return "Custom code"
-
def show(self, is_img2img):
return cmd_opts.allow_code
def ui(self, is_img2img):
- code = gr.Textbox(label="Python code", lines=1)
+ code = gr.Textbox(label="Python code", lines=1, elem_id=self.elem_id("code"))
return [code]
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index 1229f61b..cbdfc6b3 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -125,25 +125,25 @@ class Script(scripts.Script):
def show(self, is_img2img):
return is_img2img
- def ui(self, is_img2img):
+ def ui(self, is_img2img):
info = gr.Markdown('''
* `CFG Scale` should be 2 or lower.
''')
- override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True)
+ override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=self.elem_id("override_sampler"))
- override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True)
- original_prompt = gr.Textbox(label="Original prompt", lines=1)
- original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1)
+ override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=self.elem_id("override_prompt"))
+ original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=self.elem_id("original_prompt"))
+ original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=self.elem_id("original_negative_prompt"))
- override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True)
- st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50)
+ override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=self.elem_id("override_steps"))
+ st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=self.elem_id("st"))
- override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True)
+ override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=self.elem_id("override_strength"))
- cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
- randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
- sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False)
+ cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=self.elem_id("cfg"))
+ randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=self.elem_id("randomness"))
+ sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=self.elem_id("sigma_adjustment"))
return [
info,
diff --git a/scripts/loopback.py b/scripts/loopback.py
index d8c68af8..1dab9476 100644
--- a/scripts/loopback.py
+++ b/scripts/loopback.py
@@ -9,6 +9,7 @@ from modules.processing import Processed
from modules.sd_samplers import samplers
from modules.shared import opts, cmd_opts, state
+
class Script(scripts.Script):
def title(self):
return "Loopback"
@@ -16,9 +17,9 @@ class Script(scripts.Script):
def show(self, is_img2img):
return is_img2img
- def ui(self, is_img2img):
- loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4)
- denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1)
+ def ui(self, is_img2img):
+ loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops"))
+ denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=self.elem_id("denoising_strength_change_factor"))
return [loops, denoising_strength_change_factor]
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index cf71cb92..0906da6a 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -131,11 +131,11 @@ class Script(scripts.Script):
info = gr.HTML("<p style=\"margin-bottom:0.75em\">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>")
- pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8)
- direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
- noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0)
- color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05)
+ pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=self.elem_id("mask_blur"))
+ direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction"))
+ noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=self.elem_id("noise_q"))
+ color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=self.elem_id("color_variation"))
return [info, pixels, mask_blur, direction, noise_q, color_variation]
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index ea45beb0..d8feda00 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -9,7 +9,6 @@ from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
-
class Script(scripts.Script):
def title(self):
return "Poor man's outpainting"
@@ -20,11 +19,11 @@ class Script(scripts.Script):
def ui(self, is_img2img):
if not is_img2img:
return None
-
- pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
- direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
+
+ pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=self.elem_id("mask_blur"))
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=self.elem_id("inpainting_fill"))
+ direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction"))
return [pixels, mask_blur, inpainting_fill, direction]
diff --git a/scripts/postprocessing_codeformer.py b/scripts/postprocessing_codeformer.py
new file mode 100644
index 00000000..a7d80d40
--- /dev/null
+++ b/scripts/postprocessing_codeformer.py
@@ -0,0 +1,36 @@
+from PIL import Image
+import numpy as np
+
+from modules import scripts_postprocessing, codeformer_model
+import gradio as gr
+
+from modules.ui_components import FormRow
+
+
+class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing):
+ name = "CodeFormer"
+ order = 3000
+
+ def ui(self):
+ with FormRow():
+ codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, elem_id="extras_codeformer_visibility")
+ codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight")
+
+ return {
+ "codeformer_visibility": codeformer_visibility,
+ "codeformer_weight": codeformer_weight,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, codeformer_visibility, codeformer_weight):
+ if codeformer_visibility == 0:
+ return
+
+ restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight)
+ res = Image.fromarray(restored_img)
+
+ if codeformer_visibility < 1.0:
+ res = Image.blend(pp.image, res, codeformer_visibility)
+
+ pp.image = res
+ pp.info["CodeFormer visibility"] = round(codeformer_visibility, 3)
+ pp.info["CodeFormer weight"] = round(codeformer_weight, 3)
diff --git a/scripts/postprocessing_gfpgan.py b/scripts/postprocessing_gfpgan.py
new file mode 100644
index 00000000..d854f3f7
--- /dev/null
+++ b/scripts/postprocessing_gfpgan.py
@@ -0,0 +1,33 @@
+from PIL import Image
+import numpy as np
+
+from modules import scripts_postprocessing, gfpgan_model
+import gradio as gr
+
+from modules.ui_components import FormRow
+
+
+class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
+ name = "GFPGAN"
+ order = 2000
+
+ def ui(self):
+ with FormRow():
+ gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, elem_id="extras_gfpgan_visibility")
+
+ return {
+ "gfpgan_visibility": gfpgan_visibility,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility):
+ if gfpgan_visibility == 0:
+ return
+
+ restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8))
+ res = Image.fromarray(restored_img)
+
+ if gfpgan_visibility < 1.0:
+ res = Image.blend(pp.image, res, gfpgan_visibility)
+
+ pp.image = res
+ pp.info["GFPGAN visibility"] = round(gfpgan_visibility, 3)
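Both face-restoration scripts above treat the visibility slider as a plain alpha blend between the original image and the restored one. A tiny sketch of that pattern with placeholder images (the colors and the 0.35 value are arbitrary):

    from PIL import Image

    original = Image.new("RGB", (64, 64), "gray")   # stands in for pp.image
    restored = Image.new("RGB", (64, 64), "white")  # stands in for the restorer's output

    visibility = 0.35  # 0 leaves the image untouched, 1 replaces it entirely
    result = restored if visibility >= 1.0 else Image.blend(original, restored, visibility)
    print(result.getpixel((0, 0)))  # somewhere between gray and white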
diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py
new file mode 100644
index 00000000..8842bd91
--- /dev/null
+++ b/scripts/postprocessing_upscale.py
@@ -0,0 +1,131 @@
+from PIL import Image
+import numpy as np
+
+from modules import scripts_postprocessing, shared
+import gradio as gr
+
+from modules.ui_components import FormRow
+
+
+upscale_cache = {}
+
+
+class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
+ name = "Upscale"
+ order = 1000
+
+ def ui(self):
+ selected_tab = gr.State(value=0)
+
+ with gr.Tabs(elem_id="extras_resize_mode"):
+ with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by:
+ upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize")
+
+ with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to:
+ with FormRow():
+ upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w")
+ upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h")
+ upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
+
+ with FormRow():
+ extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
+
+ with FormRow():
+ extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
+ extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility")
+
+ tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab])
+ tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab])
+
+ return {
+ "upscale_mode": selected_tab,
+ "upscale_by": upscaling_resize,
+ "upscale_to_width": upscaling_resize_w,
+ "upscale_to_height": upscaling_resize_h,
+ "upscale_crop": upscaling_crop,
+ "upscaler_1_name": extras_upscaler_1,
+ "upscaler_2_name": extras_upscaler_2,
+ "upscaler_2_visibility": extras_upscaler_2_visibility,
+ }
+
+ def upscale(self, image, info, upscaler, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop):
+ if upscale_mode == 1:
+ upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height)
+ info["Postprocess upscale to"] = f"{upscale_to_width}x{upscale_to_height}"
+ else:
+ info["Postprocess upscale by"] = upscale_by
+
+ cache_key = (hash(np.array(image.getdata()).tobytes()), upscaler.name, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
+ cached_image = upscale_cache.pop(cache_key, None)
+
+ if cached_image is not None:
+ image = cached_image
+ else:
+ image = upscaler.scaler.upscale(image, upscale_by, upscaler.data_path)
+
+ upscale_cache[cache_key] = image
+ if len(upscale_cache) > shared.opts.upscaling_max_images_in_cache:
+ upscale_cache.pop(next(iter(upscale_cache), None), None)
+
+ if upscale_mode == 1 and upscale_crop:
+ cropped = Image.new("RGB", (upscale_to_width, upscale_to_height))
+ cropped.paste(image, box=(upscale_to_width // 2 - image.width // 2, upscale_to_height // 2 - image.height // 2))
+ image = cropped
+ info["Postprocess crop to"] = f"{image.width}x{image.height}"
+
+ return image
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0):
+ if upscaler_1_name == "None":
+ upscaler_1_name = None
+
+ upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_1_name]), None)
+ assert upscaler1 or (upscaler_1_name is None), f'could not find upscaler named {upscaler_1_name}'
+
+ if not upscaler1:
+ return
+
+ if upscaler_2_name == "None":
+ upscaler_2_name = None
+
+ upscaler2 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_2_name and x.name != "None"]), None)
+ assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}'
+
+ upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
+ pp.info[f"Postprocess upscaler"] = upscaler1.name
+
+ if upscaler2 and upscaler_2_visibility > 0:
+ second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
+ upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility)
+
+ pp.info[f"Postprocess upscaler 2"] = upscaler2.name
+
+ pp.image = upscaled_image
+
+ def image_changed(self):
+ upscale_cache.clear()
+
+
+class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
+ name = "Simple Upscale"
+ order = 900
+
+ def ui(self):
+ with FormRow():
+ upscaler_name = gr.Dropdown(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
+ upscale_by = gr.Slider(minimum=0.05, maximum=8.0, step=0.05, label="Upscale by", value=2)
+
+ return {
+ "upscale_by": upscale_by,
+ "upscaler_name": upscaler_name,
+ }
+
+ def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
+ if upscaler_name is None or upscaler_name == "None":
+ return
+
+ upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None)
+ assert upscaler1, f'could not find upscaler named {upscaler_name}'
+
+ pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False)
+ pp.info[f"Postprocess upscaler"] = upscaler1.name
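The upscale_cache above keys results on the image bytes plus every parameter that affects the output, and evicts the oldest entry once the cache grows past the UI limit. A reduced sketch of the same idea, with a hypothetical fixed limit in place of shared.opts.upscaling_max_images_in_cache and a trivial resize standing in for the real upscaler:

    import numpy as np
    from PIL import Image

    upscale_cache = {}
    MAX_CACHED = 5  # hypothetical stand-in for the cache-size option

    def cached_upscale(image, upscaler_name, scale, do_upscale):
        # Key on pixel data plus everything that changes the result.
        key = (hash(np.array(image.getdata()).tobytes()), upscaler_name, scale)
        result = upscale_cache.pop(key, None)  # pop so a hit becomes most-recent on re-insert
        if result is None:
            result = do_upscale(image, scale)
        upscale_cache[key] = result
        if len(upscale_cache) > MAX_CACHED:
            upscale_cache.pop(next(iter(upscale_cache)), None)  # drop the oldest entry
        return result

    img = Image.new("RGB", (8, 8))
    out = cached_upscale(img, "Nearest", 2, lambda im, s: im.resize((im.width * s, im.height * s)))
    print(out.size)  # (16, 16)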
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index 4c79eaef..dd95e588 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -44,9 +44,9 @@ class Script(scripts.Script):
def title(self):
return "Prompt matrix"
- def ui(self, is_img2img):
- put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False)
- different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False)
+ def ui(self, is_img2img):
+ put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start"))
+ different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
return [put_at_start, different_seeds]
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index e8386ed2..76dc5778 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -111,12 +111,12 @@ class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
- def ui(self, is_img2img):
- checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False)
- checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False)
+ def ui(self, is_img2img):
+ checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate"))
+ checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch"))
- prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1)
- file = gr.File(label="Upload prompt inputs", type='bytes')
+ prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=self.elem_id("prompt_txt"))
+ file = gr.File(label="Upload prompt inputs", type='binary', elem_id=self.elem_id("file"))
file.change(fn=load_prompt_file, inputs=[file], outputs=[file, prompt_txt, prompt_txt])
@@ -146,11 +146,7 @@ class Script(scripts.Script):
else:
args = {"prompt": line}
- n_iter = args.get("n_iter", 1)
- if n_iter != 1:
- job_count += n_iter
- else:
- job_count += 1
+ job_count += args.get("n_iter", p.n_iter)
jobs.append(args)
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index e8c80a6c..332d76d9 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -16,15 +16,17 @@ class Script(scripts.Script):
def show(self, is_img2img):
return is_img2img
- def ui(self, is_img2img):
+ def ui(self, is_img2img):
info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image by the selected scale factor; use width and height sliders to set tile size</p>")
- overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
- scale_factor = gr.Slider(minimum=1, maximum=4, step=1, label='Scale Factor', value=2)
- upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
+ overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=self.elem_id("overlap"))
+ scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=self.elem_id("scale_factor"))
+ upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=self.elem_id("upscaler_index"))
return [info, overlap, upscaler_index, scale_factor]
def run(self, p, _, overlap, upscaler_index, scale_factor):
+ if isinstance(upscaler_index, str):
+ upscaler_index = [x.name.lower() for x in shared.sd_upscalers].index(upscaler_index.lower())
processing.fix_seed(p)
upscaler = shared.sd_upscalers[upscaler_index]
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
deleted file mode 100644
index 3e0b2805..00000000
--- a/scripts/xy_grid.py
+++ /dev/null
@@ -1,430 +0,0 @@
-from collections import namedtuple
-from copy import copy
-from itertools import permutations, chain
-import random
-import csv
-from io import StringIO
-from PIL import Image
-import numpy as np
-
-import modules.scripts as scripts
-import gradio as gr
-
-from modules import images, paths, sd_samplers
-from modules.hypernetworks import hypernetwork
-from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
-from modules.shared import opts, cmd_opts, state
-import modules.shared as shared
-import modules.sd_samplers
-import modules.sd_models
-import modules.sd_vae
-import glob
-import os
-import re
-
-
-def apply_field(field):
- def fun(p, x, xs):
- setattr(p, field, x)
-
- return fun
-
-
-def apply_prompt(p, x, xs):
- if xs[0] not in p.prompt and xs[0] not in p.negative_prompt:
- raise RuntimeError(f"Prompt S/R did not find {xs[0]} in prompt or negative prompt.")
-
- p.prompt = p.prompt.replace(xs[0], x)
- p.negative_prompt = p.negative_prompt.replace(xs[0], x)
-
-
-def apply_order(p, x, xs):
- token_order = []
-
- # Initally grab the tokens from the prompt, so they can be replaced in order of earliest seen
- for token in x:
- token_order.append((p.prompt.find(token), token))
-
- token_order.sort(key=lambda t: t[0])
-
- prompt_parts = []
-
- # Split the prompt up, taking out the tokens
- for _, token in token_order:
- n = p.prompt.find(token)
- prompt_parts.append(p.prompt[0:n])
- p.prompt = p.prompt[n + len(token):]
-
- # Rebuild the prompt with the tokens in the order we want
- prompt_tmp = ""
- for idx, part in enumerate(prompt_parts):
- prompt_tmp += part
- prompt_tmp += x[idx]
- p.prompt = prompt_tmp + p.prompt
-
-
-def apply_sampler(p, x, xs):
- sampler_name = sd_samplers.samplers_map.get(x.lower(), None)
- if sampler_name is None:
- raise RuntimeError(f"Unknown sampler: {x}")
-
- p.sampler_name = sampler_name
-
-
-def confirm_samplers(p, xs):
- for x in xs:
- if x.lower() not in sd_samplers.samplers_map:
- raise RuntimeError(f"Unknown sampler: {x}")
-
-
-def apply_checkpoint(p, x, xs):
- info = modules.sd_models.get_closet_checkpoint_match(x)
- if info is None:
- raise RuntimeError(f"Unknown checkpoint: {x}")
- modules.sd_models.reload_model_weights(shared.sd_model, info)
- p.sd_model = shared.sd_model
-
-
-def confirm_checkpoints(p, xs):
- for x in xs:
- if modules.sd_models.get_closet_checkpoint_match(x) is None:
- raise RuntimeError(f"Unknown checkpoint: {x}")
-
-
-def apply_hypernetwork(p, x, xs):
- if x.lower() in ["", "none"]:
- name = None
- else:
- name = hypernetwork.find_closest_hypernetwork_name(x)
- if not name:
- raise RuntimeError(f"Unknown hypernetwork: {x}")
- hypernetwork.load_hypernetwork(name)
-
-
-def apply_hypernetwork_strength(p, x, xs):
- hypernetwork.apply_strength(x)
-
-
-def confirm_hypernetworks(p, xs):
- for x in xs:
- if x.lower() in ["", "none"]:
- continue
- if not hypernetwork.find_closest_hypernetwork_name(x):
- raise RuntimeError(f"Unknown hypernetwork: {x}")
-
-
-def apply_clip_skip(p, x, xs):
- opts.data["CLIP_stop_at_last_layers"] = x
-
-
-def apply_upscale_latent_space(p, x, xs):
- if x.lower().strip() != '0':
- opts.data["use_scale_latent_for_hires_fix"] = True
- else:
- opts.data["use_scale_latent_for_hires_fix"] = False
-
-
-def find_vae(name: str):
- if name.lower() in ['auto', 'none']:
- return name
- else:
- vae_path = os.path.abspath(os.path.join(paths.models_path, 'VAE'))
- found = glob.glob(os.path.join(vae_path, f'**/{name}.*pt'), recursive=True)
- if found:
- return found[0]
- else:
- return 'auto'
-
-
-def apply_vae(p, x, xs):
- if x.lower().strip() == 'none':
- modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file='None')
- else:
- found = find_vae(x)
- if found:
- v = modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=found)
-
-
-def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _):
- p.styles = x.split(',')
-
-
-def format_value_add_label(p, opt, x):
- if type(x) == float:
- x = round(x, 8)
-
- return f"{opt.label}: {x}"
-
-
-def format_value(p, opt, x):
- if type(x) == float:
- x = round(x, 8)
- return x
-
-
-def format_value_join_list(p, opt, x):
- return ", ".join(x)
-
-
-def do_nothing(p, x, xs):
- pass
-
-
-def format_nothing(p, opt, x):
- return ""
-
-
-def str_permutations(x):
- """dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
- return x
-
-AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value", "confirm"])
-AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value", "confirm"])
-
-
-axis_options = [
- AxisOption("Nothing", str, do_nothing, format_nothing, None),
- AxisOption("Seed", int, apply_field("seed"), format_value_add_label, None),
- AxisOption("Var. seed", int, apply_field("subseed"), format_value_add_label, None),
- AxisOption("Var. strength", float, apply_field("subseed_strength"), format_value_add_label, None),
- AxisOption("Steps", int, apply_field("steps"), format_value_add_label, None),
- AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label, None),
- AxisOption("Prompt S/R", str, apply_prompt, format_value, None),
- AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list, None),
- AxisOption("Sampler", str, apply_sampler, format_value, confirm_samplers),
- AxisOption("Checkpoint name", str, apply_checkpoint, format_value, confirm_checkpoints),
- AxisOption("Hypernetwork", str, apply_hypernetwork, format_value, confirm_hypernetworks),
- AxisOption("Hypernet str.", float, apply_hypernetwork_strength, format_value_add_label, None),
- AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label, None),
- AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label, None),
- AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label, None),
- AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label, None),
- AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None),
- AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None),
- AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None),
- AxisOption("Upscale latent space for hires.", str, apply_upscale_latent_space, format_value_add_label, None),
- AxisOption("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight"), format_value_add_label, None),
- AxisOption("VAE", str, apply_vae, format_value_add_label, None),
- AxisOption("Styles", str, apply_styles, format_value_add_label, None),
-]
-
-
-def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend, include_lone_images):
- ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
- hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
-
- # Temporary list of all the images that are generated to be populated into the grid.
- # Will be filled with empty images for any individual step that fails to process properly
- image_cache = []
-
- processed_result = None
- cell_mode = "P"
- cell_size = (1,1)
-
- state.job_count = len(xs) * len(ys) * p.n_iter
-
- for iy, y in enumerate(ys):
- for ix, x in enumerate(xs):
- state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
-
- processed:Processed = cell(x, y)
- try:
- # this dereference will throw an exception if the image was not processed
- # (this happens in cases such as if the user stops the process from the UI)
- processed_image = processed.images[0]
-
- if processed_result is None:
- # Use our first valid processed result as a template container to hold our full results
- processed_result = copy(processed)
- cell_mode = processed_image.mode
- cell_size = processed_image.size
- processed_result.images = [Image.new(cell_mode, cell_size)]
-
- image_cache.append(processed_image)
- if include_lone_images:
- processed_result.images.append(processed_image)
- processed_result.all_prompts.append(processed.prompt)
- processed_result.all_seeds.append(processed.seed)
- processed_result.infotexts.append(processed.infotexts[0])
- except:
- image_cache.append(Image.new(cell_mode, cell_size))
-
- if not processed_result:
- print("Unexpected error: draw_xy_grid failed to return even a single processed image")
- return Processed()
-
- grid = images.image_grid(image_cache, rows=len(ys))
- if draw_legend:
- grid = images.draw_grid_annotations(grid, cell_size[0], cell_size[1], hor_texts, ver_texts)
-
- processed_result.images[0] = grid
-
- return processed_result
-
-
-class SharedSettingsStackHelper(object):
- def __enter__(self):
- self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
- self.hypernetwork = opts.sd_hypernetwork
- self.model = shared.sd_model
- self.use_scale_latent_for_hires_fix = opts.use_scale_latent_for_hires_fix
- self.vae = opts.sd_vae
-
- def __exit__(self, exc_type, exc_value, tb):
- modules.sd_models.reload_model_weights(self.model)
- modules.sd_vae.reload_vae_weights(self.model, vae_file=find_vae(self.vae))
-
- hypernetwork.load_hypernetwork(self.hypernetwork)
- hypernetwork.apply_strength()
-
- opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers
- opts.data["use_scale_latent_for_hires_fix"] = self.use_scale_latent_for_hires_fix
-
-
-re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
-re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
-
-re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
-re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
-
-class Script(scripts.Script):
- def title(self):
- return "X/Y plot"
-
- def ui(self, is_img2img):
- current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
-
- with gr.Row():
- x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type")
- x_values = gr.Textbox(label="X values", lines=1)
-
- with gr.Row():
- y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type")
- y_values = gr.Textbox(label="Y values", lines=1)
-
- draw_legend = gr.Checkbox(label='Draw legend', value=True)
- include_lone_images = gr.Checkbox(label='Include Separate Images', value=False)
- no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
-
- return [x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds]
-
- def run(self, p, x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds):
- if not no_fixed_seeds:
- modules.processing.fix_seed(p)
-
- if not opts.return_grid:
- p.batch_size = 1
-
- def process_axis(opt, vals):
- if opt.label == 'Nothing':
- return [0]
-
- valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
-
- if opt.type == int:
- valslist_ext = []
-
- for val in valslist:
- m = re_range.fullmatch(val)
- mc = re_range_count.fullmatch(val)
- if m is not None:
- start = int(m.group(1))
- end = int(m.group(2))+1
- step = int(m.group(3)) if m.group(3) is not None else 1
-
- valslist_ext += list(range(start, end, step))
- elif mc is not None:
- start = int(mc.group(1))
- end = int(mc.group(2))
- num = int(mc.group(3)) if mc.group(3) is not None else 1
-
- valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
- else:
- valslist_ext.append(val)
-
- valslist = valslist_ext
- elif opt.type == float:
- valslist_ext = []
-
- for val in valslist:
- m = re_range_float.fullmatch(val)
- mc = re_range_count_float.fullmatch(val)
- if m is not None:
- start = float(m.group(1))
- end = float(m.group(2))
- step = float(m.group(3)) if m.group(3) is not None else 1
-
- valslist_ext += np.arange(start, end + step, step).tolist()
- elif mc is not None:
- start = float(mc.group(1))
- end = float(mc.group(2))
- num = int(mc.group(3)) if mc.group(3) is not None else 1
-
- valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
- else:
- valslist_ext.append(val)
-
- valslist = valslist_ext
- elif opt.type == str_permutations:
- valslist = list(permutations(valslist))
-
- valslist = [opt.type(x) for x in valslist]
-
- # Confirm options are valid before starting
- if opt.confirm:
- opt.confirm(p, valslist)
-
- return valslist
-
- x_opt = axis_options[x_type]
- xs = process_axis(x_opt, x_values)
-
- y_opt = axis_options[y_type]
- ys = process_axis(y_opt, y_values)
-
- def fix_axis_seeds(axis_opt, axis_list):
- if axis_opt.label in ['Seed','Var. seed']:
- return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
- else:
- return axis_list
-
- if not no_fixed_seeds:
- xs = fix_axis_seeds(x_opt, xs)
- ys = fix_axis_seeds(y_opt, ys)
-
- if x_opt.label == 'Steps':
- total_steps = sum(xs) * len(ys)
- elif y_opt.label == 'Steps':
- total_steps = sum(ys) * len(xs)
- else:
- total_steps = p.steps * len(xs) * len(ys)
-
- if isinstance(p, StableDiffusionProcessingTxt2Img) and p.enable_hr:
- total_steps *= 2
-
- print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})")
- shared.total_tqdm.updateTotal(total_steps * p.n_iter)
-
- def cell(x, y):
- pc = copy(p)
- x_opt.apply(pc, x, xs)
- y_opt.apply(pc, y, ys)
-
- return process_images(pc)
-
- with SharedSettingsStackHelper():
- processed = draw_xy_grid(
- p,
- xs=xs,
- ys=ys,
- x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
- y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
- cell=cell,
- draw_legend=draw_legend,
- include_lone_images=include_lone_images
- )
-
- if opts.grid_save:
- images.save_image(processed.images[0], p.outpath_grids, "xy_grid", extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p)
-
- return processed
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
new file mode 100644
index 00000000..f0116055
--- /dev/null
+++ b/scripts/xyz_grid.py
@@ -0,0 +1,588 @@
+from collections import namedtuple
+from copy import copy
+from itertools import permutations, chain
+import random
+import csv
+from io import StringIO
+from PIL import Image
+import numpy as np
+
+import modules.scripts as scripts
+import gradio as gr
+
+from modules import images, paths, sd_samplers, processing, sd_models, sd_vae
+from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
+from modules.shared import opts, cmd_opts, state
+import modules.shared as shared
+import modules.sd_samplers
+import modules.sd_models
+import modules.sd_vae
+import glob
+import os
+import re
+
+from modules.ui_components import ToolButton
+
+fill_values_symbol = "\U0001f4d2" # 📒
+
+
+def apply_field(field):
+ def fun(p, x, xs):
+ setattr(p, field, x)
+
+ return fun
+
+
+def apply_prompt(p, x, xs):
+ if xs[0] not in p.prompt and xs[0] not in p.negative_prompt:
+ raise RuntimeError(f"Prompt S/R did not find {xs[0]} in prompt or negative prompt.")
+
+ p.prompt = p.prompt.replace(xs[0], x)
+ p.negative_prompt = p.negative_prompt.replace(xs[0], x)
+
+
+def apply_order(p, x, xs):
+ token_order = []
+
+ # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen
+ for token in x:
+ token_order.append((p.prompt.find(token), token))
+
+ token_order.sort(key=lambda t: t[0])
+
+ prompt_parts = []
+
+ # Split the prompt up, taking out the tokens
+ for _, token in token_order:
+ n = p.prompt.find(token)
+ prompt_parts.append(p.prompt[0:n])
+ p.prompt = p.prompt[n + len(token):]
+
+ # Rebuild the prompt with the tokens in the order we want
+ prompt_tmp = ""
+ for idx, part in enumerate(prompt_parts):
+ prompt_tmp += part
+ prompt_tmp += x[idx]
+ p.prompt = prompt_tmp + p.prompt
+
+
+def apply_sampler(p, x, xs):
+ sampler_name = sd_samplers.samplers_map.get(x.lower(), None)
+ if sampler_name is None:
+ raise RuntimeError(f"Unknown sampler: {x}")
+
+ p.sampler_name = sampler_name
+
+
+def confirm_samplers(p, xs):
+ for x in xs:
+ if x.lower() not in sd_samplers.samplers_map:
+ raise RuntimeError(f"Unknown sampler: {x}")
+
+
+def apply_checkpoint(p, x, xs):
+ info = modules.sd_models.get_closet_checkpoint_match(x)
+ if info is None:
+ raise RuntimeError(f"Unknown checkpoint: {x}")
+ modules.sd_models.reload_model_weights(shared.sd_model, info)
+
+
+def confirm_checkpoints(p, xs):
+ for x in xs:
+ if modules.sd_models.get_closet_checkpoint_match(x) is None:
+ raise RuntimeError(f"Unknown checkpoint: {x}")
+
+
+def apply_clip_skip(p, x, xs):
+ opts.data["CLIP_stop_at_last_layers"] = x
+
+
+def apply_upscale_latent_space(p, x, xs):
+ if x.lower().strip() != '0':
+ opts.data["use_scale_latent_for_hires_fix"] = True
+ else:
+ opts.data["use_scale_latent_for_hires_fix"] = False
+
+
+def find_vae(name: str):
+ if name.lower() in ['auto', 'automatic']:
+ return modules.sd_vae.unspecified
+ if name.lower() == 'none':
+ return None
+ else:
+ choices = [x for x in sorted(modules.sd_vae.vae_dict, key=lambda x: len(x)) if name.lower().strip() in x.lower()]
+ if len(choices) == 0:
+ print(f"No VAE found for {name}; using automatic")
+ return modules.sd_vae.unspecified
+ else:
+ return modules.sd_vae.vae_dict[choices[0]]
+
+
+def apply_vae(p, x, xs):
+ modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=find_vae(x))
+
+
+def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _):
+ p.styles.extend(x.split(','))
+
+
+def format_value_add_label(p, opt, x):
+ if type(x) == float:
+ x = round(x, 8)
+
+ return f"{opt.label}: {x}"
+
+
+def format_value(p, opt, x):
+ if type(x) == float:
+ x = round(x, 8)
+ return x
+
+
+def format_value_join_list(p, opt, x):
+ return ", ".join(x)
+
+
+def do_nothing(p, x, xs):
+ pass
+
+
+def format_nothing(p, opt, x):
+ return ""
+
+
+def str_permutations(x):
+ """dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
+ return x
+
+
+class AxisOption:
+ def __init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None):
+ self.label = label
+ self.type = type
+ self.apply = apply
+ self.format_value = format_value
+ self.confirm = confirm
+ self.cost = cost
+ self.choices = choices
+
+
+class AxisOptionImg2Img(AxisOption):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.is_img2img = True
+
+class AxisOptionTxt2Img(AxisOption):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.is_img2img = False
+
+
+axis_options = [
+ AxisOption("Nothing", str, do_nothing, format_value=format_nothing),
+ AxisOption("Seed", int, apply_field("seed")),
+ AxisOption("Var. seed", int, apply_field("subseed")),
+ AxisOption("Var. strength", float, apply_field("subseed_strength")),
+ AxisOption("Steps", int, apply_field("steps")),
+ AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")),
+ AxisOption("CFG Scale", float, apply_field("cfg_scale")),
+ AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value),
+ AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list),
+ AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]),
+ AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]),
+ AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: list(sd_models.checkpoints_list)),
+ AxisOption("Sigma Churn", float, apply_field("s_churn")),
+ AxisOption("Sigma min", float, apply_field("s_tmin")),
+ AxisOption("Sigma max", float, apply_field("s_tmax")),
+ AxisOption("Sigma noise", float, apply_field("s_noise")),
+ AxisOption("Eta", float, apply_field("eta")),
+ AxisOption("Clip skip", int, apply_clip_skip),
+ AxisOption("Denoising", float, apply_field("denoising_strength")),
+ AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
+ AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
+ AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)),
+ AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),
+]
+
+
+def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend, include_lone_images, include_sub_grids, first_axes_processed, second_axes_processed):
+ hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
+ ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
+ title_texts = [[images.GridAnnotation(z)] for z in z_labels]
+
+ # Temporary list of all the images that are generated to be populated into the grid.
+ # Will be filled with empty images for any individual step that fails to process properly
+ image_cache = [None] * (len(xs) * len(ys) * len(zs))
+
+ processed_result = None
+ cell_mode = "P"
+ cell_size = (1, 1)
+
+ state.job_count = len(xs) * len(ys) * len(zs) * p.n_iter
+
+ def process_cell(x, y, z, ix, iy, iz):
+ nonlocal image_cache, processed_result, cell_mode, cell_size
+
+ def index(ix, iy, iz):
+ return ix + iy * len(xs) + iz * len(xs) * len(ys)
+
+ state.job = f"{index(ix, iy, iz) + 1} out of {len(xs) * len(ys) * len(zs)}"
+
+ processed: Processed = cell(x, y, z)
+
+ try:
+ # this dereference will throw an exception if the image was not processed
+ # (this happens in cases such as if the user stops the process from the UI)
+ processed_image = processed.images[0]
+
+ if processed_result is None:
+ # Use our first valid processed result as a template container to hold our full results
+ processed_result = copy(processed)
+ cell_mode = processed_image.mode
+ cell_size = processed_image.size
+ processed_result.images = [Image.new(cell_mode, cell_size)]
+
+ image_cache[index(ix, iy, iz)] = processed_image
+ if include_lone_images:
+ processed_result.images.append(processed_image)
+ processed_result.all_prompts.append(processed.prompt)
+ processed_result.all_seeds.append(processed.seed)
+ processed_result.infotexts.append(processed.infotexts[0])
+ except:
+ image_cache[index(ix, iy, iz)] = Image.new(cell_mode, cell_size)
+
+ if first_axes_processed == 'x':
+ for ix, x in enumerate(xs):
+ if second_axes_processed == 'y':
+ for iy, y in enumerate(ys):
+ for iz, z in enumerate(zs):
+ process_cell(x, y, z, ix, iy, iz)
+ else:
+ for iz, z in enumerate(zs):
+ for iy, y in enumerate(ys):
+ process_cell(x, y, z, ix, iy, iz)
+ elif first_axes_processed == 'y':
+ for iy, y in enumerate(ys):
+ if second_axes_processed == 'x':
+ for ix, x in enumerate(xs):
+ for iz, z in enumerate(zs):
+ process_cell(x, y, z, ix, iy, iz)
+ else:
+ for iz, z in enumerate(zs):
+ for ix, x in enumerate(xs):
+ process_cell(x, y, z, ix, iy, iz)
+ elif first_axes_processed == 'z':
+ for iz, z in enumerate(zs):
+ if second_axes_processed == 'x':
+ for ix, x in enumerate(xs):
+ for iy, y in enumerate(ys):
+ process_cell(x, y, z, ix, iy, iz)
+ else:
+ for iy, y in enumerate(ys):
+ for ix, x in enumerate(xs):
+ process_cell(x, y, z, ix, iy, iz)
+
+ if not processed_result:
+ print("Unexpected error: draw_xyz_grid failed to return even a single processed image")
+ return Processed(p, [])
+
+ grids = [None] * len(zs)
+ for i in range(len(zs)):
+ start_index = i * len(xs) * len(ys)
+ end_index = start_index + len(xs) * len(ys)
+ grid = images.image_grid(image_cache[start_index:end_index], rows=len(ys))
+ if draw_legend:
+ grid = images.draw_grid_annotations(grid, cell_size[0], cell_size[1], hor_texts, ver_texts)
+
+ grids[i] = grid
+ if include_sub_grids and len(zs) > 1:
+ processed_result.images.insert(i+1, grid)
+
+ original_grid_size = grids[0].size
+ grids = images.image_grid(grids, rows=1)
+ processed_result.images[0] = images.draw_grid_annotations(grids, original_grid_size[0], original_grid_size[1], title_texts, [[images.GridAnnotation()]])
+
+ return processed_result
+
+
+class SharedSettingsStackHelper(object):
+ def __enter__(self):
+ self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
+ self.vae = opts.sd_vae
+
+ def __exit__(self, exc_type, exc_value, tb):
+ opts.data["sd_vae"] = self.vae
+ modules.sd_models.reload_model_weights()
+ modules.sd_vae.reload_vae_weights()
+
+ opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers
+
+
+re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
+re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:\.\d*)?)\s*-\s*([+-]?\s*\d+(?:\.\d*)?)(?:\s*\(([+-]\d+(?:\.\d*)?)\s*\))?\s*")
+
+re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
+re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:\.\d*)?)\s*-\s*([+-]?\s*\d+(?:\.\d*)?)(?:\s*\[(\d+(?:\.\d*)?)\s*\])?\s*")
+
+
+class Script(scripts.Script):
+ def title(self):
+ return "X/Y/Z plot"
+
+ def ui(self, is_img2img):
+ self.current_axis_options = [x for x in axis_options if type(x) == AxisOption or x.is_img2img == is_img2img]
+
+ with gr.Row():
+ with gr.Column(scale=19):
+ with gr.Row():
+ x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type"))
+ x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values"))
+ fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False)
+
+ with gr.Row():
+ y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type"))
+ y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values"))
+ fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False)
+
+ with gr.Row():
+ z_type = gr.Dropdown(label="Z type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type"))
+ z_values = gr.Textbox(label="Z values", lines=1, elem_id=self.elem_id("z_values"))
+ fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False)
+
+ with gr.Row(variant="compact", elem_id="axis_options"):
+ draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend"))
+ include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images"))
+ include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids"))
+ no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=self.elem_id("no_fixed_seeds"))
+ swap_xy_axes_button = gr.Button(value="Swap X/Y axes", elem_id="xy_grid_swap_axes_button")
+ swap_yz_axes_button = gr.Button(value="Swap Y/Z axes", elem_id="yz_grid_swap_axes_button")
+ swap_xz_axes_button = gr.Button(value="Swap X/Z axes", elem_id="xz_grid_swap_axes_button")
+
+ def swap_axes(axis1_type, axis1_values, axis2_type, axis2_values):
+ return self.current_axis_options[axis2_type].label, axis2_values, self.current_axis_options[axis1_type].label, axis1_values
+
+ xy_swap_args = [x_type, x_values, y_type, y_values]
+ swap_xy_axes_button.click(swap_axes, inputs=xy_swap_args, outputs=xy_swap_args)
+ yz_swap_args = [y_type, y_values, z_type, z_values]
+ swap_yz_axes_button.click(swap_axes, inputs=yz_swap_args, outputs=yz_swap_args)
+ xz_swap_args = [x_type, x_values, z_type, z_values]
+ swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args)
+
+ def fill(x_type):
+ axis = self.current_axis_options[x_type]
+ return ", ".join(axis.choices()) if axis.choices else gr.update()
+
+ fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values])
+ fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values])
+ fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values])
+
+ def select_axis(x_type):
+ return gr.Button.update(visible=self.current_axis_options[x_type].choices is not None)
+
+ x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button])
+ y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button])
+ z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button])
+
+ return [x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds]
+
+ def run(self, p, x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds):
+ if not no_fixed_seeds:
+ modules.processing.fix_seed(p)
+
+ if not opts.return_grid:
+ p.batch_size = 1
+
+ def process_axis(opt, vals):
+ if opt.label == 'Nothing':
+ return [0]
+
+ valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
+
+ if opt.type == int:
+ valslist_ext = []
+
+ for val in valslist:
+ m = re_range.fullmatch(val)
+ mc = re_range_count.fullmatch(val)
+ if m is not None:
+ start = int(m.group(1))
+ end = int(m.group(2))+1
+ step = int(m.group(3)) if m.group(3) is not None else 1
+
+ valslist_ext += list(range(start, end, step))
+ elif mc is not None:
+ start = int(mc.group(1))
+ end = int(mc.group(2))
+ num = int(mc.group(3)) if mc.group(3) is not None else 1
+
+ valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
+ else:
+ valslist_ext.append(val)
+
+ valslist = valslist_ext
+ elif opt.type == float:
+ valslist_ext = []
+
+ for val in valslist:
+ m = re_range_float.fullmatch(val)
+ mc = re_range_count_float.fullmatch(val)
+ if m is not None:
+ start = float(m.group(1))
+ end = float(m.group(2))
+ step = float(m.group(3)) if m.group(3) is not None else 1
+
+ valslist_ext += np.arange(start, end + step, step).tolist()
+ elif mc is not None:
+ start = float(mc.group(1))
+ end = float(mc.group(2))
+ num = int(mc.group(3)) if mc.group(3) is not None else 1
+
+ valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
+ else:
+ valslist_ext.append(val)
+
+ valslist = valslist_ext
+ elif opt.type == str_permutations:
+ valslist = list(permutations(valslist))
+
+ valslist = [opt.type(x) for x in valslist]
+
+ # Confirm options are valid before starting
+ if opt.confirm:
+ opt.confirm(p, valslist)
+
+ return valslist
+
+ x_opt = self.current_axis_options[x_type]
+ xs = process_axis(x_opt, x_values)
+
+ y_opt = self.current_axis_options[y_type]
+ ys = process_axis(y_opt, y_values)
+
+ z_opt = self.current_axis_options[z_type]
+ zs = process_axis(z_opt, z_values)
+
+ def fix_axis_seeds(axis_opt, axis_list):
+ if axis_opt.label in ['Seed', 'Var. seed']:
+ return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
+ else:
+ return axis_list
+
+ if not no_fixed_seeds:
+ xs = fix_axis_seeds(x_opt, xs)
+ ys = fix_axis_seeds(y_opt, ys)
+ zs = fix_axis_seeds(z_opt, zs)
+
+ if x_opt.label == 'Steps':
+ total_steps = sum(xs) * len(ys) * len(zs)
+ elif y_opt.label == 'Steps':
+ total_steps = sum(ys) * len(xs) * len(zs)
+ elif z_opt.label == 'Steps':
+ total_steps = sum(zs) * len(xs) * len(ys)
+ else:
+ total_steps = p.steps * len(xs) * len(ys) * len(zs)
+
+ if isinstance(p, StableDiffusionProcessingTxt2Img) and p.enable_hr:
+ if x_opt.label == "Hires steps":
+ total_steps += sum(xs) * len(ys) * len(zs)
+ elif y_opt.label == "Hires steps":
+ total_steps += sum(ys) * len(xs) * len(zs)
+ elif z_opt.label == "Hires steps":
+ total_steps += sum(zs) * len(xs) * len(ys)
+ elif p.hr_second_pass_steps:
+ total_steps += p.hr_second_pass_steps * len(xs) * len(ys) * len(zs)
+ else:
+ total_steps *= 2
+
+ total_steps *= p.n_iter
+
+ image_cell_count = p.n_iter * p.batch_size
+ cell_console_text = f"; {image_cell_count} images per cell" if image_cell_count > 1 else ""
+ plural_s = 's' if len(zs) > 1 else ''
+ print(f"X/Y/Z plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on {len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})")
+ shared.total_tqdm.updateTotal(total_steps)
+
+ grid_infotext = [None]
+
+ # If one of the axes is very slow to change between (like SD model
+ # checkpoint), then make sure it is in the outer iteration of the nested
+ # `for` loop.
+ first_axes_processed = 'x'
+ second_axes_processed = 'y'
+ if x_opt.cost > y_opt.cost and x_opt.cost > z_opt.cost:
+ first_axes_processed = 'x'
+ if y_opt.cost > z_opt.cost:
+ second_axes_processed = 'y'
+ else:
+ second_axes_processed = 'z'
+ elif y_opt.cost > x_opt.cost and y_opt.cost > z_opt.cost:
+ first_axes_processed = 'y'
+ if x_opt.cost > z_opt.cost:
+ second_axes_processed = 'x'
+ else:
+ second_axes_processed = 'z'
+ elif z_opt.cost > x_opt.cost and z_opt.cost > y_opt.cost:
+ first_axes_processed = 'z'
+ if x_opt.cost > y_opt.cost:
+ second_axes_processed = 'x'
+ else:
+ second_axes_processed = 'y'
+
+ def cell(x, y, z):
+ if shared.state.interrupted:
+ return Processed(p, [], p.seed, "")
+
+ pc = copy(p)
+ pc.styles = pc.styles[:]
+ x_opt.apply(pc, x, xs)
+ y_opt.apply(pc, y, ys)
+ z_opt.apply(pc, z, zs)
+
+ res = process_images(pc)
+
+ if grid_infotext[0] is None:
+ pc.extra_generation_params = copy(pc.extra_generation_params)
+
+ if x_opt.label != 'Nothing':
+ pc.extra_generation_params["X Type"] = x_opt.label
+ pc.extra_generation_params["X Values"] = x_values
+ if x_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
+ pc.extra_generation_params["Fixed X Values"] = ", ".join([str(x) for x in xs])
+
+ if y_opt.label != 'Nothing':
+ pc.extra_generation_params["Y Type"] = y_opt.label
+ pc.extra_generation_params["Y Values"] = y_values
+ if y_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
+ pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys])
+
+ if z_opt.label != 'Nothing':
+ pc.extra_generation_params["Z Type"] = z_opt.label
+ pc.extra_generation_params["Z Values"] = z_values
+ if z_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
+ pc.extra_generation_params["Fixed Z Values"] = ", ".join([str(z) for z in zs])
+
+ grid_infotext[0] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds)
+
+ return res
+
+ with SharedSettingsStackHelper():
+ processed = draw_xyz_grid(
+ p,
+ xs=xs,
+ ys=ys,
+ zs=zs,
+ x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
+ y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
+ z_labels=[z_opt.format_value(p, z_opt, z) for z in zs],
+ cell=cell,
+ draw_legend=draw_legend,
+ include_lone_images=include_lone_images,
+ include_sub_grids=include_sub_grids,
+ first_axes_processed=first_axes_processed,
+ second_axes_processed=second_axes_processed
+ )
+
+ if opts.grid_save:
+ images.save_image(processed.images[0], p.outpath_grids, "xyz_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p)
+
+ return processed
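
The process_axis helper above accepts three value syntaxes for numeric axes: plain values, "start-end (+step)" ranges handled by re_range/re_range_float, and "start-end [count]" ranges handled by re_range_count/re_range_count_float. The regex definitions sit earlier in the patch and are not visible in this hunk, so the sketch below uses stand-in patterns that merely behave the same way for integers; it illustrates the expansion logic, not the script's exact code.

    import re
    import numpy as np

    # Stand-in patterns (assumed, for illustration only); the script defines its
    # own re_range and re_range_count earlier in this patch.
    re_range = re.compile(r"\s*([+-]?\d+)\s*-\s*([+-]?\d+)(?:\s*\(([+-]?\d+)\s*\))?\s*")
    re_range_count = re.compile(r"\s*([+-]?\d+)\s*-\s*([+-]?\d+)(?:\s*\[(\d+)\s*\])?\s*")

    def expand_int_token(val):
        # Mirrors the int branch of process_axis: "a-b (+s)" expands with a step,
        # "a-b [n]" expands to n evenly spaced values, anything else passes through.
        m = re_range.fullmatch(val)
        mc = re_range_count.fullmatch(val)
        if m is not None:
            start, end = int(m.group(1)), int(m.group(2)) + 1
            step = int(m.group(3)) if m.group(3) is not None else 1
            return list(range(start, end, step))
        if mc is not None:
            start, end = int(mc.group(1)), int(mc.group(2))
            num = int(mc.group(3)) if mc.group(3) is not None else 1
            return [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
        return [val]

    print(expand_int_token("10-20 (+2)"))  # [10, 12, 14, 16, 18, 20]
    print(expand_int_token("10-20 [3]"))   # [10, 15, 20]
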
diff --git a/style.css b/style.css
index 3ad78006..dd914104 100644
--- a/style.css
+++ b/style.css
@@ -2,12 +2,26 @@
max-width: 100%;
}
-#txt2img_token_counter {
- height: 0px;
+.token-counter{
+ position: absolute;
+ display: inline-block;
+ right: 2em;
+ min-width: 0 !important;
+ width: auto;
+ z-index: 100;
}
-#img2img_token_counter {
- height: 0px;
+.token-counter.error span{
+ box-shadow: 0 0 0.0 0.3em rgba(255,0,0,0.15), inset 0 0 0.6em rgba(255,0,0,0.075);
+ border: 2px solid rgba(255,0,0,0.4) !important;
+}
+
+.token-counter div{
+ display: inline;
+}
+
+.token-counter span{
+ padding: 0.1em 0.75em;
}
#sh{
@@ -20,7 +34,7 @@
padding-right: 0.25em;
margin: 0.1em 0;
opacity: 0%;
- cursor: default;
+ cursor: default;
}
.output-html p {margin: 0 0.5em;}
@@ -73,8 +87,9 @@
margin-right: auto;
}
-#random_seed, #random_subseed, #reuse_seed, #reuse_subseed, #open_folder{
- min-width: auto;
+[id$=_random_seed], [id$=_random_subseed], [id$=_reuse_seed], [id$=_reuse_subseed], #open_folder{
+ min-width: 2.3em;
+ height: 2.5em;
flex-grow: 0;
padding-left: 0.25em;
padding-right: 0.25em;
@@ -84,53 +99,47 @@
display: none;
}
-#seed_row, #subseed_row{
+[id$=_seed_row], [id$=_subseed_row]{
gap: 0.5rem;
+ padding: 0.6em;
}
-#subseed_show_box{
+[id$=_subseed_show_box]{
min-width: auto;
flex-grow: 0;
}
-#subseed_show_box > div{
+[id$=_subseed_show_box] > div{
border: 0;
height: 100%;
}
-#subseed_show{
+[id$=_subseed_show]{
min-width: auto;
flex-grow: 0;
padding: 0;
}
-#subseed_show label{
+[id$=_subseed_show] label{
height: 100%;
}
-#roll_col{
- min-width: unset !important;
- flex-grow: 0 !important;
- padding: 0.4em 0;
+#txt2img_actions_column, #img2img_actions_column{
+ gap: 0;
}
-#roll_col > button {
- min-width: 2em;
- min-height: 2em;
- max-width: 2em;
- max-height: 2em;
- flex-grow: 0;
- padding-left: 0.25em;
- padding-right: 0.25em;
- margin: 0.1em 0;
+#txt2img_tools, #img2img_tools{
+ gap: 0.4em;
}
#interrogate_col{
min-width: 0 !important;
max-width: 8em !important;
+ margin-right: 1em;
+ gap: 0;
}
#interrogate, #deepbooru{
- margin: 0em 0.25em 0.9em 0.25em;
+ margin: 0em 0.25em 0.5em 0.25em;
min-width: 8em;
max-width: 8em;
}
@@ -139,8 +148,25 @@
min-width: 8em !important;
}
-#txt2img_style_index, #txt2img_style2_index, #img2img_style_index, #img2img_style2_index{
- margin-top: 1em;
+#txt2img_styles_row, #img2img_styles_row{
+ gap: 0.25em;
+}
+
+#txt2img_styles_row > button, #img2img_styles_row > button{
+ margin: 0;
+}
+
+#txt2img_styles, #img2img_styles{
+ padding: 0;
+}
+
+#txt2img_styles > label > div, #img2img_styles > label > div{
+ min-height: 3.2em;
+}
+
+ul.list-none{
+ max-height: 35em;
+ z-index: 2000;
}
.gr-form{
@@ -152,12 +178,6 @@
margin-bottom: 0;
}
-#toprow div{
- border: none;
- gap: 0;
- background: transparent;
-}
-
#resize_mode{
flex: 1.5;
}
@@ -206,24 +226,27 @@ button{
fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{
position: absolute;
- top: -0.6em;
+ top: -0.7em;
line-height: 1.2em;
- padding: 0 0.5em;
- margin: 0;
+ padding: 0;
+ margin: 0 0.5em;
background-color: white;
- border-top: 1px solid #eee;
- border-left: 1px solid #eee;
- border-right: 1px solid #eee;
+ box-shadow: 6px 0 6px 0px white, -6px 0 6px 0px white;
z-index: 300;
}
.dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{
background-color: rgb(31, 41, 55);
- border-top: 1px solid rgb(55 65 81);
- border-left: 1px solid rgb(55 65 81);
- border-right: 1px solid rgb(55 65 81);
+ box-shadow: none;
+ border: 1px solid rgba(128, 128, 128, 0.1);
+ border-radius: 6px;
+ padding: 0.1em 0.5em;
+}
+
+#txt2img_column_batch, #img2img_column_batch{
+ min-width: min(13.5em, 100%) !important;
}
#settings fieldset span.text-gray-500, #settings .gr-block.gr-box span.text-gray-500, #settings label.block span{
@@ -232,22 +255,40 @@ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block s
margin-right: 8em;
}
-.gr-panel div.flex-col div.justify-between label span{
- margin: 0;
-}
-
#settings .gr-panel div.flex-col div.justify-between div{
position: relative;
z-index: 200;
}
-input[type="range"]{
- margin: 0.5em 0 -0.3em 0;
+#settings{
+ display: block;
}
-#txt2img_sampling label{
- padding-left: 0.6em;
- padding-right: 0.6em;
+#settings > div{
+ border: none;
+ margin-left: 10em;
+}
+
+#settings > div.flex-wrap{
+ float: left;
+ display: block;
+ margin-left: 0;
+ width: 10em;
+}
+
+#settings > div.flex-wrap button{
+ display: block;
+ border: none;
+ text-align: left;
+}
+
+#settings_result{
+ height: 1.4em;
+ margin: 0 1.2em;
+}
+
+input[type="range"]{
+ margin: 0.5em 0 -0.3em 0;
}
#mask_bug_info {
@@ -266,45 +307,52 @@ input[type="range"]{
}
/* more gradio's garbage cleanup */
-.min-h-\[4rem\] {
- min-height: unset !important;
+.min-h-\[4rem\] { min-height: unset !important; }
+.min-h-\[6rem\] { min-height: unset !important; }
+
+.progressDiv{
+ position: absolute;
+ height: 20px;
+ top: -20px;
+ background: #b4c0cc;
+ border-radius: 3px !important;
}
-#txt2img_progressbar, #img2img_progressbar, #ti_progressbar{
- position: absolute;
- z-index: 1000;
- right: 0;
- padding-left: 5px;
- padding-right: 5px;
- display: block;
+.dark .progressDiv{
+ background: #424c5b;
}
-#txt2img_progress_row, #img2img_progress_row{
- margin-bottom: 10px;
- margin-top: -18px;
+.progressDiv .progress{
+ width: 0%;
+ height: 20px;
+ background: #0060df;
+ color: white;
+ font-weight: bold;
+ line-height: 20px;
+ padding: 0 8px 0 0;
+ text-align: right;
+ border-radius: 3px;
+ overflow: visible;
+ white-space: nowrap;
+ padding: 0 0.5em;
}
-.progressDiv{
- width: 100%;
- height: 20px;
- background: #b4c0cc;
- border-radius: 8px;
+.livePreview{
+ position: absolute;
+ z-index: 300;
+ background-color: white;
+ margin: -4px;
}
-.dark .progressDiv{
- background: #424c5b;
+.dark .livePreview{
+ background-color: rgb(17 24 39 / var(--tw-bg-opacity));
}
-.progressDiv .progress{
- width: 0%;
- height: 20px;
- background: #0060df;
- color: white;
- font-weight: bold;
- line-height: 20px;
- padding: 0 8px 0 0;
- text-align: right;
- border-radius: 8px;
+.livePreview img{
+ position: absolute;
+ object-fit: contain;
+ width: 100%;
+ height: 100%;
}
#lightboxModal{
@@ -351,7 +399,7 @@ input[type="range"]{
grid-area: tile;
}
-.modalClose,
+.modalClose,
.modalZoom,
.modalTileImage {
color: white;
@@ -430,23 +478,25 @@ input[type="range"]{
display:none
}
-#txt2img_interrupt, #img2img_interrupt{
- position: absolute;
- width: 50%;
- height: 72px;
- background: #b4c0cc;
- border-radius: 0px;
- display: none;
+#txt2img_generate_box, #img2img_generate_box{
+ position: relative;
}
+#txt2img_interrupt, #img2img_interrupt, #txt2img_skip, #img2img_skip{
+ position: absolute;
+ width: 50%;
+ height: 100%;
+ background: #b4c0cc;
+ display: none;
+}
+
+#txt2img_interrupt, #img2img_interrupt{
+ left: 0;
+ border-radius: 0.5rem 0 0 0.5rem;
+}
#txt2img_skip, #img2img_skip{
- position: absolute;
- width: 50%;
- right: 0px;
- height: 72px;
- background: #b4c0cc;
- border-radius: 0px;
- display: none;
+ right: 0;
+ border-radius: 0 0.5rem 0.5rem 0;
}
.red {
@@ -488,26 +538,21 @@ input[type="range"]{
gap: 0.4em;
}
-#quicksettings > div{
- border: none;
- background: none;
- flex: unset;
- gap: 0.5em;
-}
-
-#quicksettings > div > div{
- max-width: 32em;
+#quicksettings > div, #quicksettings > fieldset{
+ max-width: 24em;
min-width: 24em;
padding: 0;
+ border: none;
+ box-shadow: none;
+ background: none;
}
-#refresh_sd_model_checkpoint, #refresh_sd_vae, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization{
- max-width: 2.5em;
- min-width: 2.5em;
- height: 2.4em;
+#quicksettings > div > div > div > label > span {
+ position: relative;
+ margin-right: 9em;
+ margin-bottom: -1em;
}
-
canvas[key="mask"] {
z-index: 12 !important;
filter: invert();
@@ -521,7 +566,7 @@ canvas[key="mask"] {
position: absolute;
right: 0.5em;
top: -0.6em;
- z-index: 200;
+ z-index: 400;
width: 8em;
}
#quicksettings .gr-box > div > div > input.gr-text-input {
@@ -533,7 +578,9 @@ canvas[key="mask"] {
}
#img2img_image, #img2img_image > .h-60, #img2img_image > .h-60 > div, #img2img_image > .h-60 > div > img,
-img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h-60 > div > img
+#img2img_sketch, #img2img_sketch > .h-60, #img2img_sketch > .h-60 > div, #img2img_sketch > .h-60 > div > img,
+#img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h-60 > div > img,
+#inpaint_sketch, #inpaint_sketch > .h-60, #inpaint_sketch > .h-60 > div, #inpaint_sketch > .h-60 > div > img
{
height: 480px !important;
max-height: 480px !important;
@@ -568,81 +615,313 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
font-size: 95%;
}
-/* The following handles localization for right-to-left (RTL) languages like Arabic.
-The rtl media type will only be activated by the logic in javascript/localization.js.
-If you change anything above, you need to make sure it is RTL compliant by just running
-your changes through converters like https://cssjanus.github.io/ or https://rtlcss.com/.
-Then, you will need to add the RTL counterpart only if needed in the rtl section below.*/
-@media rtl {
- /* this part was added manually */
- :host {
- direction: rtl;
- }
- select, .file-preview, .gr-text-input, .output-html:has(.performance), #ti_progress {
- direction: ltr;
- }
- #script_list > label > select,
- #x_type > label > select,
- #y_type > label > select {
- direction: rtl;
- }
- .gr-radio, .gr-checkbox{
- margin-left: 0.25em;
- }
+#available_extensions .info{
+ margin: 0;
+}
+
+#available_extensions .date_added{
+ opacity: 0.85;
+ font-size: 90%;
+}
+
+#image_buttons_txt2img button, #image_buttons_img2img button, #image_buttons_extras button{
+ min-width: auto;
+ padding-left: 0.5em;
+ padding-right: 0.5em;
+}
+
+.gr-form{
+ background-color: white;
+}
+
+.dark .gr-form{
+ background-color: rgb(31 41 55 / var(--tw-bg-opacity));
+}
+
+.gr-button-tool, .gr-button-tool-top{
+ max-width: 2.5em;
+ min-width: 2.5em !important;
+ height: 2.4em;
+}
+
+.gr-button-tool{
+ margin: 0.6em 0em 0.55em 0;
+}
+
+.gr-button-tool-top, #settings .gr-button-tool{
+ margin: 1.6em 0.7em 0.55em 0;
+}
+
+
+#modelmerger_results_container{
+ margin-top: 1em;
+ overflow: visible;
+}
+
+#modelmerger_models{
+ gap: 0;
+}
+
+
+#quicksettings .gr-button-tool{
+ margin: 0;
+}
+
+
+#img2img_settings > div.gr-form, #txt2img_settings > div.gr-form {
+ padding-top: 0.9em;
+}
+
+#img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form, #train_tabs div.gr-form .gr-form{
+ border: none;
+ padding-bottom: 0.5em;
+}
+
+footer {
+ display: none !important;
+}
+
+#footer{
+ text-align: center;
+}
+
+#footer div{
+ display: inline-block;
+}
+
+#footer .versions{
+ font-size: 85%;
+ opacity: 0.85;
+}
+
+#txtimg_hr_finalres{
+ min-height: 0 !important;
+ padding: .625rem .75rem;
+ margin-left: -0.75em
+
+}
+
+#txtimg_hr_finalres .resolution{
+ font-weight: bold;
+}
+
+#txt2img_checkboxes, #img2img_checkboxes{
+ margin-bottom: 0.5em;
+ margin-left: 0em;
+}
+#txt2img_checkboxes > div, #img2img_checkboxes > div{
+ flex: 0;
+ white-space: nowrap;
+ min-width: auto;
+}
+
+#img2img_copy_to_img2img, #img2img_copy_to_sketch, #img2img_copy_to_inpaint, #img2img_copy_to_inpaint_sketch{
+ margin-left: 0em;
+}
+
+#axis_options {
+ margin-left: 0em;
+}
+
+.inactive{
+ opacity: 0.5;
+}
+
+[id*='_prompt_container']{
+ gap: 0;
+}
+
+[id*='_prompt_container'] > div{
+ margin: -0.4em 0 0 0;
+}
+
+.gr-compact {
+ border: none;
+}
+
+.dark .gr-compact{
+ background-color: rgb(31 41 55 / var(--tw-bg-opacity));
+}
+
+.gr-compact{
+ overflow: visible;
+}
+
+.gr-compact > *{
+}
+
+.gr-compact .gr-block, .gr-compact .gr-form{
+ border: none;
+ box-shadow: none;
+}
+
+.gr-compact .gr-box{
+ border-radius: .5rem !important;
+ border-width: 1px !important;
+}
+
+#mode_img2img > div > div{
+ gap: 0 !important;
+}
+
+[id*='img2img_copy_to_'] {
+ border: none;
+}
+
+[id*='img2img_copy_to_'] > button {
+}
+
+[id*='img2img_label_copy_to_'] {
+ font-size: 1.0em;
+ font-weight: bold;
+ text-align: center;
+ line-height: 2.4em;
+}
+
+.extra-networks > div > [id *= '_extra_']{
+ margin: 0.3em;
+}
+
+
+
+#txt2img_extra_networks .search, #img2img_extra_networks .search{
+ display: inline-block;
+ max-width: 16em;
+ margin: 0.3em;
+ align-self: center;
+}
+
+#txt2img_extra_view, #img2img_extra_view {
+ width: auto;
+}
+
+.extra-network-cards .nocards, .extra-network-thumbs .nocards{
+ margin: 1.25em 0.5em 0.5em 0.5em;
+}
+
+.extra-network-cards .nocards h1, .extra-network-thumbs .nocards h1{
+ font-size: 1.5em;
+ margin-bottom: 1em;
+}
+
+.extra-network-cards .nocards li, .extra-network-thumbs .nocards li{
+ margin-left: 0.5em;
+}
+
+.extra-network-thumbs {
+ display: flex;
+ flex-flow: row wrap;
+ gap: 10px;
+}
+
+.extra-network-thumbs .card {
+ height: 6em;
+ width: 6em;
+ cursor: pointer;
+ background-image: url('./file=html/card-no-preview.png');
+ background-size: cover;
+ background-position: center center;
+ position: relative;
+}
+
+.extra-network-thumbs .card:hover .additional a {
+ display: block;
+}
+
+.extra-network-thumbs .actions .additional a {
+ background-image: url('./file=html/image-update.svg');
+ background-repeat: no-repeat;
+ background-size: cover;
+ background-position: center center;
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 24px;
+ height: 24px;
+ display: none;
+ font-size: 0;
+ text-align: -9999;
+}
+
+.extra-network-thumbs .actions .name {
+ position: absolute;
+ bottom: 0;
+ font-size: 10px;
+ padding: 3px;
+ width: 100%;
+ overflow: hidden;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+ background: rgba(0,0,0,.5);
+ color: white;
+}
+
+.extra-network-thumbs .card:hover .actions .name {
+ white-space: normal;
+ word-break: break-all;
+}
+
+.extra-network-cards .card{
+ display: inline-block;
+ margin: 0.5em;
+ width: 16em;
+ height: 24em;
+ box-shadow: 0 0 5px rgba(128, 128, 128, 0.5);
+ border-radius: 0.2em;
+ position: relative;
+
+ background-size: auto 100%;
+ background-position: center;
+ overflow: hidden;
+ cursor: pointer;
+
+ background-image: url('./file=html/card-no-preview.png')
+}
+
+.extra-network-cards .card:hover{
+ box-shadow: 0 0 2px 0.3em rgba(0, 128, 255, 0.35);
+}
+
+.extra-network-cards .card .actions .additional{
+ display: none;
+}
+
+.extra-network-cards .card .actions{
+ position: absolute;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ padding: 0.5em;
+ color: white;
+ background: rgba(0,0,0,0.5);
+ box-shadow: 0 0 0.25em 0.25em rgba(0,0,0,0.5);
+ text-shadow: 0 0 0.2em black;
+}
+
+.extra-network-cards .card .actions:hover{
+ box-shadow: 0 0 0.75em 0.75em rgba(0,0,0,0.5) !important;
+}
+
+.extra-network-cards .card .actions .name{
+ font-size: 1.7em;
+ font-weight: bold;
+ line-break: anywhere;
+}
+
+.extra-network-cards .card .actions:hover .additional{
+ display: block;
+}
+
+.extra-network-cards .card ul{
+ margin: 0.25em 0 0.75em 0.25em;
+ cursor: unset;
+}
+
+.extra-network-cards .card ul a{
+ cursor: pointer;
+}
+
+.extra-network-cards .card ul a:hover{
+ color: red;
+}
- /* automatically generated with few manual modifications */
- .performance .time {
- margin-right: unset;
- margin-left: 0;
- }
- .justify-center.overflow-x-scroll {
- justify-content: right;
- }
- .justify-center.overflow-x-scroll button:first-of-type {
- margin-left: unset;
- margin-right: auto;
- }
- .justify-center.overflow-x-scroll button:last-of-type {
- margin-right: unset;
- margin-left: auto;
- }
- #settings fieldset span.text-gray-500, #settings .gr-block.gr-box span.text-gray-500, #settings label.block span{
- margin-right: unset;
- margin-left: 8em;
- }
- #txt2img_progressbar, #img2img_progressbar, #ti_progressbar{
- right: unset;
- left: 0;
- }
- .progressDiv .progress{
- padding: 0 0 0 8px;
- text-align: left;
- }
- #lightboxModal{
- left: unset;
- right: 0;
- }
- .modalPrev, .modalNext{
- border-radius: 3px 0 0 3px;
- }
- .modalNext {
- right: unset;
- left: 0;
- border-radius: 0 3px 3px 0;
- }
- #imageARPreview{
- left:unset;
- right:0px;
- }
- #txt2img_skip, #img2img_skip{
- right: unset;
- left: 0px;
- }
- #context-menu{
- box-shadow:-1px 1px 2px #CE6400;
- }
- .gr-box > div > div > input.gr-text-input{
- right: unset;
- left: 0.5em;
- }
-} \ No newline at end of file
diff --git a/test/advanced_features/__init__.py b/test/advanced_features/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/test/advanced_features/__init__.py
+++ /dev/null
diff --git a/test/advanced_features/extras_test.py b/test/advanced_features/extras_test.py
deleted file mode 100644
index 8763f8ed..00000000
--- a/test/advanced_features/extras_test.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import unittest
-
-
-class TestExtrasWorking(unittest.TestCase):
- def setUp(self):
- self.url_img2img = "http://localhost:7860/sdapi/v1/extra-single-image"
- self.simple_extras = {
- "resize_mode": 0,
- "show_extras_results": True,
- "gfpgan_visibility": 0,
- "codeformer_visibility": 0,
- "codeformer_weight": 0,
- "upscaling_resize": 2,
- "upscaling_resize_w": 128,
- "upscaling_resize_h": 128,
- "upscaling_crop": True,
- "upscaler_1": "None",
- "upscaler_2": "None",
- "extras_upscaler_2_visibility": 0,
- "image": ""
- }
-
-
-class TestExtrasCorrectness(unittest.TestCase):
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/test/advanced_features/txt2img_test.py b/test/advanced_features/txt2img_test.py
deleted file mode 100644
index 36ed7b9a..00000000
--- a/test/advanced_features/txt2img_test.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import unittest
-import requests
-
-
-class TestTxt2ImgWorking(unittest.TestCase):
- def setUp(self):
- self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img"
- self.simple_txt2img = {
- "enable_hr": False,
- "denoising_strength": 0,
- "firstphase_width": 0,
- "firstphase_height": 0,
- "prompt": "example prompt",
- "styles": [],
- "seed": -1,
- "subseed": -1,
- "subseed_strength": 0,
- "seed_resize_from_h": -1,
- "seed_resize_from_w": -1,
- "batch_size": 1,
- "n_iter": 1,
- "steps": 3,
- "cfg_scale": 7,
- "width": 64,
- "height": 64,
- "restore_faces": False,
- "tiling": False,
- "negative_prompt": "",
- "eta": 0,
- "s_churn": 0,
- "s_tmax": 0,
- "s_tmin": 0,
- "s_noise": 1,
- "sampler_index": "Euler a"
- }
-
- def test_txt2img_with_restore_faces_performed(self):
- self.simple_txt2img["restore_faces"] = True
- self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
-
-
-class TestTxt2ImgCorrectness(unittest.TestCase):
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/test/basic_features/extras_test.py b/test/basic_features/extras_test.py
new file mode 100644
index 00000000..0170c511
--- /dev/null
+++ b/test/basic_features/extras_test.py
@@ -0,0 +1,54 @@
+import unittest
+import requests
+from gradio.processing_utils import encode_pil_to_base64
+from PIL import Image
+
+class TestExtrasWorking(unittest.TestCase):
+ def setUp(self):
+ self.url_extras_single = "http://localhost:7860/sdapi/v1/extra-single-image"
+ self.extras_single = {
+ "resize_mode": 0,
+ "show_extras_results": True,
+ "gfpgan_visibility": 0,
+ "codeformer_visibility": 0,
+ "codeformer_weight": 0,
+ "upscaling_resize": 2,
+ "upscaling_resize_w": 128,
+ "upscaling_resize_h": 128,
+ "upscaling_crop": True,
+ "upscaler_1": "None",
+ "upscaler_2": "None",
+ "extras_upscaler_2_visibility": 0,
+ "image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))
+ }
+
+ def test_simple_upscaling_performed(self):
+ self.extras_single["upscaler_1"] = "Lanczos"
+ self.assertEqual(requests.post(self.url_extras_single, json=self.extras_single).status_code, 200)
+
+
+class TestPngInfoWorking(unittest.TestCase):
+ def setUp(self):
+ self.url_png_info = "http://localhost:7860/sdapi/v1/png-info"
+ self.png_info = {
+ "image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))
+ }
+
+ def test_png_info_performed(self):
+ self.assertEqual(requests.post(self.url_png_info, json=self.png_info).status_code, 200)
+
+
+class TestInterrogateWorking(unittest.TestCase):
+ def setUp(self):
+ self.url_interrogate = "http://localhost:7860/sdapi/v1/interrogate"
+ self.interrogate = {
+ "image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")),
+ "model": "clip"
+ }
+
+ def test_interrogate_performed(self):
+ self.assertEqual(requests.post(self.url_interrogate, json=self.interrogate).status_code, 200)
+
+
+if __name__ == "__main__":
+ unittest.main()
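
The new extras test drives the single-image endpoint through gradio's encode_pil_to_base64 helper; the same request can be made without gradio by base64-encoding the file by hand. A minimal sketch, assuming a server listening on the default port and that fields omitted from the payload fall back to the API's defaults:

    import base64
    import requests

    # Encode the test image by hand instead of using gradio's helper.
    with open("test/test_files/img2img_basic.png", "rb") as f:
        image_b64 = base64.b64encode(f.read()).decode("utf-8")

    payload = {
        "upscaling_resize": 2,
        "upscaler_1": "Lanczos",
        "image": image_b64,
    }
    r = requests.post("http://localhost:7860/sdapi/v1/extra-single-image", json=payload)
    print(r.status_code)
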
diff --git a/test/basic_features/img2img_test.py b/test/basic_features/img2img_test.py
index 0a9c1e8a..08c5c903 100644
--- a/test/basic_features/img2img_test.py
+++ b/test/basic_features/img2img_test.py
@@ -16,7 +16,7 @@ class TestImg2ImgWorking(unittest.TestCase):
"inpainting_fill": 0,
"inpaint_full_res": False,
"inpaint_full_res_padding": 0,
- "inpainting_mask_invert": 0,
+ "inpainting_mask_invert": False,
"prompt": "example prompt",
"styles": [],
"seed": -1,
@@ -50,6 +50,17 @@ class TestImg2ImgWorking(unittest.TestCase):
self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png"))
self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
+ def test_inpainting_with_inverted_masked_performed(self):
+ self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png"))
+ self.simple_img2img["inpainting_mask_invert"] = True
+ self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
+
+ def test_img2img_sd_upscale_performed(self):
+ self.simple_img2img["script_name"] = "sd upscale"
+ self.simple_img2img["script_args"] = ["", 8, "Lanczos", 2.0]
+
+ self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
+
if __name__ == "__main__":
unittest.main()
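
The new SD-upscale test above passes the script's arguments as a bare positional list. Below is a hedged sketch of the same call outside the test harness; the slot meanings (info placeholder, tile overlap, upscaler name, scale factor) are an assumption about the order of the "SD upscale" script's UI controls, since that order is defined by the script itself.

    import requests

    payload = {
        # ...the same base img2img payload the test builds in setUp()...
        "init_images": ["<base64-encoded image>"],
        "prompt": "example prompt",
        "steps": 3,
        "script_name": "sd upscale",
        # Positional script arguments: info placeholder, tile overlap,
        # upscaler name, scale factor (assumed ordering).
        "script_args": ["", 8, "Lanczos", 2.0],
    }
    requests.post("http://localhost:7860/sdapi/v1/img2img", json=payload)
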
diff --git a/test/basic_features/txt2img_test.py b/test/basic_features/txt2img_test.py
index 1c2674b2..5aa43a44 100644
--- a/test/basic_features/txt2img_test.py
+++ b/test/basic_features/txt2img_test.py
@@ -41,6 +41,10 @@ class TestTxt2ImgWorking(unittest.TestCase):
self.simple_txt2img["negative_prompt"] = "example negative prompt"
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
+ def test_txt2img_with_complex_prompt_performed(self):
+ self.simple_txt2img["prompt"] = "((emphasis)), (emphasis1:1.1), [to:1], [from::2], [from:to:0.3], [alt|alt1]"
+ self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
+
def test_txt2img_not_square_image_performed(self):
self.simple_txt2img["height"] = 128
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
@@ -53,6 +57,10 @@ class TestTxt2ImgWorking(unittest.TestCase):
self.simple_txt2img["tiling"] = True
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
+ def test_txt2img_with_restore_faces_performed(self):
+ self.simple_txt2img["restore_faces"] = True
+ self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
+
def test_txt2img_with_vanilla_sampler_performed(self):
self.simple_txt2img["sampler_index"] = "PLMS"
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
@@ -63,6 +71,10 @@ class TestTxt2ImgWorking(unittest.TestCase):
self.simple_txt2img["n_iter"] = 2
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
+ def test_txt2img_batch_performed(self):
+ self.simple_txt2img["batch_size"] = 2
+ self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/basic_features/utils_test.py b/test/basic_features/utils_test.py
index 765470c9..0bfc28a0 100644
--- a/test/basic_features/utils_test.py
+++ b/test/basic_features/utils_test.py
@@ -12,12 +12,25 @@ class UtilsTests(unittest.TestCase):
self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers"
self.url_realesrgan_models = "http://localhost:7860/sdapi/v1/realesrgan-models"
self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles"
- self.url_artist_categories = "http://localhost:7860/sdapi/v1/artist-categories"
- self.url_artists = "http://localhost:7860/sdapi/v1/artists"
+ self.url_embeddings = "http://localhost:7860/sdapi/v1/embeddings"
def test_options_get(self):
self.assertEqual(requests.get(self.url_options).status_code, 200)
+ def test_options_write(self):
+ response = requests.get(self.url_options)
+ self.assertEqual(response.status_code, 200)
+
+ pre_value = response.json()["send_seed"]
+
+ self.assertEqual(requests.post(self.url_options, json={"send_seed":not pre_value}).status_code, 200)
+
+ response = requests.get(self.url_options)
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(response.json()["send_seed"], not pre_value)
+
+ requests.post(self.url_options, json={"send_seed": pre_value})
+
def test_cmd_flags(self):
self.assertEqual(requests.get(self.url_cmd_flags).status_code, 200)
@@ -41,13 +54,9 @@ class UtilsTests(unittest.TestCase):
def test_prompt_styles(self):
self.assertEqual(requests.get(self.url_prompt_styles).status_code, 200)
-
- def test_artist_categories(self):
- self.assertEqual(requests.get(self.url_artist_categories).status_code, 200)
-
- def test_artists(self):
- self.assertEqual(requests.get(self.url_artists).status_code, 200)
+ def test_embeddings(self):
+ self.assertEqual(requests.get(self.url_embeddings).status_code, 200)
if __name__ == "__main__":
unittest.main()
diff --git a/test/server_poll.py b/test/server_poll.py
index d4df697b..42d56a4c 100644
--- a/test/server_poll.py
+++ b/test/server_poll.py
@@ -15,7 +15,7 @@ def run_tests(proc, test_dir):
break
if proc.poll() is None:
if test_dir is None:
- test_dir = ""
+ test_dir = "test"
suite = unittest.TestLoader().discover(test_dir, pattern="*_test.py", top_level_dir="test")
result = unittest.TextTestRunner(verbosity=2).run(suite)
return len(result.failures) + len(result.errors)
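
With the fallback directory now set to "test", the poller discovers every *_test.py under the test tree. The equivalent stand-alone invocation is sketched below (run from the repository root with the server already listening):

    import unittest

    # Same discovery call the poller makes, runnable by hand.
    suite = unittest.TestLoader().discover("test", pattern="*_test.py", top_level_dir="test")
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    raise SystemExit(len(result.failures) + len(result.errors))
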
diff --git a/txt2img_Screenshot.png b/txt2img_Screenshot.png
deleted file mode 100644
index 6e2759a4..00000000
--- a/txt2img_Screenshot.png
+++ /dev/null
Binary files differ
diff --git a/webui-macos-env.sh b/webui-macos-env.sh
index 95ca9c55..fa187dd1 100644
--- a/webui-macos-env.sh
+++ b/webui-macos-env.sh
@@ -10,7 +10,7 @@ then
fi
export install_dir="$HOME"
-export COMMANDLINE_ARGS="--skip-torch-cuda-test --no-half --use-cpu interrogate"
+export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --use-cpu interrogate"
export TORCH_COMMAND="pip install torch==1.12.1 torchvision==0.13.1"
export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git"
export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71"
diff --git a/webui.bat b/webui.bat
index d4d626e2..209d972b 100644
--- a/webui.bat
+++ b/webui.bat
@@ -1,41 +1,52 @@
@echo off
if not defined PYTHON (set PYTHON=python)
-if not defined VENV_DIR (set VENV_DIR=venv)
+if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv")
+
set ERROR_REPORTING=FALSE
mkdir tmp 2>NUL
%PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt
-if %ERRORLEVEL% == 0 goto :start_venv
+if %ERRORLEVEL% == 0 goto :check_pip
echo Couldn't launch python
goto :show_stdout_stderr
+:check_pip
+%PYTHON% -mpip --help >tmp/stdout.txt 2>tmp/stderr.txt
+if %ERRORLEVEL% == 0 goto :start_venv
+if "%PIP_INSTALLER_LOCATION%" == "" goto :show_stdout_stderr
+%PYTHON% "%PIP_INSTALLER_LOCATION%" >tmp/stdout.txt 2>tmp/stderr.txt
+if %ERRORLEVEL% == 0 goto :start_venv
+echo Couldn't install pip
+goto :show_stdout_stderr
+
:start_venv
-if [%VENV_DIR%] == [-] goto :skip_venv
+if ["%VENV_DIR%"] == ["-"] goto :skip_venv
+if ["%SKIP_VENV%"] == ["1"] goto :skip_venv
-dir %VENV_DIR%\Scripts\Python.exe >tmp/stdout.txt 2>tmp/stderr.txt
+dir "%VENV_DIR%\Scripts\Python.exe" >tmp/stdout.txt 2>tmp/stderr.txt
if %ERRORLEVEL% == 0 goto :activate_venv
for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i"
echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME%
-%PYTHON_FULLNAME% -m venv %VENV_DIR% >tmp/stdout.txt 2>tmp/stderr.txt
+%PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt
if %ERRORLEVEL% == 0 goto :activate_venv
-echo Unable to create venv in directory %VENV_DIR%
+echo Unable to create venv in directory "%VENV_DIR%"
goto :show_stdout_stderr
:activate_venv
-set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe"
+set PYTHON="%VENV_DIR%\Scripts\Python.exe"
echo venv %PYTHON%
-if [%ACCELERATE%] == ["True"] goto :accelerate
-goto :launch
:skip_venv
+if [%ACCELERATE%] == ["True"] goto :accelerate
+goto :launch
:accelerate
-echo "Checking for accelerate"
-set ACCELERATE="%~dp0%VENV_DIR%\Scripts\accelerate.exe"
+echo Checking for accelerate
+set ACCELERATE="%VENV_DIR%\Scripts\accelerate.exe"
if EXIST %ACCELERATE% goto :accelerate_launch
:launch
@@ -44,7 +55,7 @@ pause
exit /b
:accelerate_launch
-echo "Accelerating"
+echo Accelerating
%ACCELERATE% launch --num_cpu_threads_per_process=6 launch.py
pause
exit /b
diff --git a/webui.py b/webui.py
index 3aee8792..41f32f5c 100644
--- a/webui.py
+++ b/webui.py
@@ -1,20 +1,29 @@
import os
-import threading
+import sys
import time
import importlib
import signal
-import threading
+import re
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
+from packaging import version
-from modules import import_hook
+import logging
+logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
+
+from modules import import_hook, errors, extra_networks
+from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion
from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call
-from modules.paths import script_path
-from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir
+import torch
+
+# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
+if ".dev" in torch.__version__ or "+git" in torch.__version__:
+ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
+
+from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks
import modules.codeformer_model as codeformer
-import modules.extras
import modules.face_restoration
import modules.gfpgan_model as gfpgan
import modules.img2img
@@ -27,6 +36,8 @@ import modules.sd_models
import modules.sd_vae
import modules.txt2img
import modules.script_callbacks
+import modules.textual_inversion.textual_inversion
+import modules.progress
import modules.ui
from modules import modelloader
@@ -40,7 +51,32 @@ else:
server_name = "0.0.0.0" if cmd_opts.listen else None
+def check_versions():
+ expected_torch_version = "1.13.1"
+
+ if version.parse(torch.__version__) < version.parse(expected_torch_version):
+ errors.print_error_explanation(f"""
+You are running torch {torch.__version__}.
+The program is tested to work with torch {expected_torch_version}.
+To reinstall the desired version, run with commandline flag --reinstall-torch.
+Beware that this will cause a lot of large files to be downloaded.
+ """.strip())
+
+ expected_xformers_version = "0.0.16rc425"
+ if shared.xformers_available:
+ import xformers
+
+ if version.parse(xformers.__version__) < version.parse(expected_xformers_version):
+ errors.print_error_explanation(f"""
+You are running xformers {xformers.__version__}.
+The program is tested to work with xformers {expected_xformers_version}.
+To reinstall the desired version, run with commandline flag --reinstall-xformers.
+ """.strip())
+
+
def initialize():
+ check_versions()
+
extensions.list_extensions()
localization.list_localizations(cmd_opts.localizations_dir)
@@ -55,19 +91,38 @@ def initialize():
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
+ modelloader.list_builtin_upscalers()
modules.scripts.load_scripts()
-
modelloader.load_upscalers()
modules.sd_vae.refresh_vae_list()
- modules.sd_models.load_model()
+
+ modules.textual_inversion.textual_inversion.list_textual_inversion_templates()
+
+ try:
+ modules.sd_models.load_model()
+ except Exception as e:
+ errors.display(e, "loading stable diffusion model")
+ print("", file=sys.stderr)
+ print("Stable diffusion model failed to load, exiting", file=sys.stderr)
+ exit(1)
+
+ shared.opts.data["sd_model_checkpoint"] = shared.sd_model.sd_checkpoint_info.title
+
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
- shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: shared.reload_hypernetworks()))
- shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
+ shared.reload_hypernetworks()
+
+ ui_extra_networks.intialize()
+ ui_extra_networks.register_page(ui_extra_networks_textual_inversion.ExtraNetworksPageTextualInversion())
+ ui_extra_networks.register_page(ui_extra_networks_hypernets.ExtraNetworksPageHypernetworks())
+
+ extra_networks.initialize()
+ extra_networks.register_extra_network(extra_networks_hypernet.ExtraNetworkHypernet())
+
if cmd_opts.tls_keyfile is not None and cmd_opts.tls_keyfile is not None:
try:
@@ -91,11 +146,11 @@ def initialize():
def setup_cors(app):
if cmd_opts.cors_allow_origins and cmd_opts.cors_allow_origins_regex:
- app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'])
+ app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
elif cmd_opts.cors_allow_origins:
- app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*'])
+ app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
elif cmd_opts.cors_allow_origins_regex:
- app.add_middleware(CORSMiddleware, allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'])
+ app.add_middleware(CORSMiddleware, allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
def create_api(app):
@@ -136,9 +191,14 @@ def webui():
if shared.opts.clean_temp_dir_at_start:
ui_tempdir.cleanup_tmpdr()
+ modules.script_callbacks.before_ui_callback()
+
shared.demo = modules.ui.create_ui()
- app, local_url, share_url = shared.demo.queue(default_enabled=False).launch(
+ if cmd_opts.gradio_queue:
+ shared.demo.queue(64)
+
+ app, local_url, share_url = shared.demo.launch(
share=cmd_opts.share,
server_name=server_name,
server_port=cmd_opts.port,
@@ -162,30 +222,41 @@ def webui():
app.add_middleware(GZipMiddleware, minimum_size=1000)
+ modules.progress.setup_progress_api(app)
+
if launch_api:
create_api(app)
modules.script_callbacks.app_started_callback(shared.demo, app)
- modules.script_callbacks.app_started_callback(shared.demo, app)
wait_on_server(shared.demo)
+ print('Restarting UI...')
sd_samplers.set_samplers()
- print('Reloading extensions')
+ modules.script_callbacks.script_unloaded_callback()
extensions.list_extensions()
localization.list_localizations(cmd_opts.localizations_dir)
- print('Reloading custom scripts')
+ modelloader.forbid_loaded_nonbuiltin_upscalers()
modules.scripts.reload_scripts()
+ modules.script_callbacks.model_loaded_callback(shared.sd_model)
modelloader.load_upscalers()
- print('Reloading modules: modules.ui')
- importlib.reload(modules.ui)
- print('Refreshing Model List')
+ for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
+ importlib.reload(module)
+
modules.sd_models.list_models()
- print('Restarting Gradio')
+
+ shared.reload_hypernetworks()
+
+ ui_extra_networks.intialize()
+ ui_extra_networks.register_page(ui_extra_networks_textual_inversion.ExtraNetworksPageTextualInversion())
+ ui_extra_networks.register_page(ui_extra_networks_hypernets.ExtraNetworksPageHypernetworks())
+
+ extra_networks.initialize()
+ extra_networks.register_extra_network(extra_networks_hypernet.ExtraNetworkHypernet())
if __name__ == "__main__":
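
The version handling added to webui.py has two parts: check_versions() warns when torch or xformers are older than the tested releases, and the small regex near the imports truncates nightly/local torch version strings so downstream version parsers do not choke on them. A quick sketch of what that truncation does to a few representative strings (the regex is the one from the patch; the sample version strings are made up):

    import re

    for raw in ["1.14.0.dev20221231+cu117", "2.0.0a0+git1234abc", "1.13.1"]:
        if ".dev" in raw or "+git" in raw:
            raw = re.search(r'[\d.]+[\d]', raw).group(0)
        print(raw)
    # -> 1.14.0, 2.0.0, 1.13.1
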
diff --git a/webui.sh b/webui.sh
index 04ecbf76..8cdad22d 100755
--- a/webui.sh
+++ b/webui.sh
@@ -104,6 +104,23 @@ then
fi
# Check prerequisites
+gpu_info=$(lspci 2>/dev/null | grep VGA)
+case "$gpu_info" in
+ *"Navi 1"*|*"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0
+ ;;
+ *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0
+ printf "\n%s\n" "${delimiter}"
+ printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half"
+ printf "\n%s\n" "${delimiter}"
+ ;;
+ *)
+ ;;
+esac
+if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]]
+then
+ export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2"
+fi
+
for preq in "${GIT}" "${python_cmd}"
do
if ! hash "${preq}" &>/dev/null
@@ -160,10 +177,10 @@ then
printf "\n%s\n" "${delimiter}"
printf "Accelerating launch.py..."
printf "\n%s\n" "${delimiter}"
- accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@"
+ exec accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@"
else
printf "\n%s\n" "${delimiter}"
printf "Launching launch.py..."
- printf "\n%s\n" "${delimiter}"
- "${python_cmd}" "${LAUNCH_SCRIPT}" "$@"
+ printf "\n%s\n" "${delimiter}"
+ exec "${python_cmd}" "${LAUNCH_SCRIPT}" "$@"
fi