-rw-r--r--   README.md                                                               2
-rw-r--r--   javascript/imageviewer.js                                             174
-rw-r--r--   launch.py                                                             141
-rw-r--r--   models/deepbooru/Put your deepbooru release project folder here.txt     0
-rw-r--r--   modules/bsrgan_model.py                                                 2
-rw-r--r--   modules/deepbooru.py                                                   73
-rw-r--r--   modules/esrgan_model.py                                                 4
-rw-r--r--   modules/esrgan_model_arch.py (renamed from modules/esrgam_model_arch.py)  0
-rw-r--r--   modules/extras.py                                                      12
-rw-r--r--   modules/hypernetwork.py                                                24
-rw-r--r--   modules/images.py                                                      39
-rw-r--r--   modules/ldsr_model.py                                                   2
-rw-r--r--   modules/paths.py                                                        1
-rw-r--r--   modules/processing.py                                                  22
-rw-r--r--   modules/realesrgan_model.py                                             2
-rw-r--r--   modules/safe.py                                                        89
-rw-r--r--   modules/scunet_model.py                                                 2
-rw-r--r--   modules/sd_hijack.py                                                   12
-rw-r--r--   modules/sd_hijack_optimizations.py                                      6
-rw-r--r--   modules/sd_models.py                                                   20
-rw-r--r--   modules/shared.py                                                      19
-rw-r--r--   modules/swinir_model.py                                                 2
-rw-r--r--   modules/ui.py                                                         136
-rw-r--r--   modules/upscaler.py                                                     7
-rw-r--r--   scripts/prompts_from_file.py                                            4
-rw-r--r--   scripts/xy_grid.py                                                     59
-rw-r--r--   style.css                                                              17
-rw-r--r--   txt2img_Screenshot.png                                    bin 539132 -> 337094 bytes
-rw-r--r--   webui.py                                                                3
29 files changed, 627 insertions(+), 247 deletions(-)
diff --git a/README.md b/README.md
index 63dd0c18..561eb03d 100644
--- a/README.md
+++ b/README.md
@@ -66,6 +66,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- separate prompts using uppercase `AND`
- also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
+- DeepDanbooru integration, creates Danbooru-style tags for anime prompts (add --deepdanbooru to commandline args)
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
@@ -123,4 +124,5 @@ The documentation was moved from this README over to the project's [wiki](https:
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
+- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
- (You)
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 6a00c0da..65a33dd7 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -1,72 +1,97 @@
// A full size 'lightbox' preview modal shown when left clicking on gallery previews
-
function closeModal() {
- gradioApp().getElementById("lightboxModal").style.display = "none";
+ gradioApp().getElementById("lightboxModal").style.display = "none";
}
function showModal(event) {
- const source = event.target || event.srcElement;
- const modalImage = gradioApp().getElementById("modalImage")
- const lb = gradioApp().getElementById("lightboxModal")
- modalImage.src = source.src
- if (modalImage.style.display === 'none') {
- lb.style.setProperty('background-image', 'url(' + source.src + ')');
- }
- lb.style.display = "block";
- lb.focus()
- event.stopPropagation()
+ const source = event.target || event.srcElement;
+ const modalImage = gradioApp().getElementById("modalImage")
+ const lb = gradioApp().getElementById("lightboxModal")
+ modalImage.src = source.src
+ if (modalImage.style.display === 'none') {
+ lb.style.setProperty('background-image', 'url(' + source.src + ')');
+ }
+ lb.style.display = "block";
+ lb.focus()
+ event.stopPropagation()
}
function negmod(n, m) {
- return ((n % m) + m) % m;
+ return ((n % m) + m) % m;
}
-function modalImageSwitch(offset){
- var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
- var galleryButtons = []
- allgalleryButtons.forEach(function(elem){
- if(elem.parentElement.offsetParent){
- galleryButtons.push(elem);
+function updateOnBackgroundChange() {
+ const modalImage = gradioApp().getElementById("modalImage")
+ if (modalImage && modalImage.offsetParent) {
+ let allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+ let currentButton = null
+ allcurrentButtons.forEach(function(elem) {
+ if (elem.parentElement.offsetParent) {
+ currentButton = elem;
+ }
+ })
+
+ if (modalImage.src != currentButton.children[0].src) {
+ modalImage.src = currentButton.children[0].src;
+        if (modalImage.style.display === 'none') {
+            const modal = gradioApp().getElementById("lightboxModal")
+            modal.style.setProperty('background-image', `url(${modalImage.src})`)
+        }
+ }
}
- })
-
- if(galleryButtons.length>1){
- var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
- var currentButton = null
- allcurrentButtons.forEach(function(elem){
- if(elem.parentElement.offsetParent){
- currentButton = elem;
+}
+
+function modalImageSwitch(offset) {
+ var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
+ var galleryButtons = []
+ allgalleryButtons.forEach(function(elem) {
+ if (elem.parentElement.offsetParent) {
+ galleryButtons.push(elem);
}
- })
-
- var result = -1
- galleryButtons.forEach(function(v, i){ if(v==currentButton) { result = i } })
-
- if(result != -1){
- nextButton = galleryButtons[negmod((result+offset),galleryButtons.length)]
- nextButton.click()
- const modalImage = gradioApp().getElementById("modalImage");
- const modal = gradioApp().getElementById("lightboxModal");
- modalImage.src = nextButton.children[0].src;
- if (modalImage.style.display === 'none') {
- modal.style.setProperty('background-image', `url(${modalImage.src})`)
+ })
+
+ if (galleryButtons.length > 1) {
+ var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+ var currentButton = null
+ allcurrentButtons.forEach(function(elem) {
+ if (elem.parentElement.offsetParent) {
+ currentButton = elem;
+ }
+ })
+
+ var result = -1
+ galleryButtons.forEach(function(v, i) {
+ if (v == currentButton) {
+ result = i
+ }
+ })
+
+ if (result != -1) {
+ nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)]
+ nextButton.click()
+ const modalImage = gradioApp().getElementById("modalImage");
+ const modal = gradioApp().getElementById("lightboxModal");
+ modalImage.src = nextButton.children[0].src;
+ if (modalImage.style.display === 'none') {
+ modal.style.setProperty('background-image', `url(${modalImage.src})`)
+ }
+ setTimeout(function() {
+ modal.focus()
+ }, 10)
}
- setTimeout( function(){modal.focus()},10)
- }
- }
+ }
}
-function modalNextImage(event){
- modalImageSwitch(1)
- event.stopPropagation()
+function modalNextImage(event) {
+ modalImageSwitch(1)
+ event.stopPropagation()
}
-function modalPrevImage(event){
- modalImageSwitch(-1)
- event.stopPropagation()
+function modalPrevImage(event) {
+ modalImageSwitch(-1)
+ event.stopPropagation()
}
-function modalKeyHandler(event){
+function modalKeyHandler(event) {
switch (event.key) {
case "ArrowLeft":
modalPrevImage(event)
@@ -80,24 +105,22 @@ function modalKeyHandler(event){
}
}
-function showGalleryImage(){
+function showGalleryImage() {
setTimeout(function() {
fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain')
-
- if(fullImg_preview != null){
+
+ if (fullImg_preview != null) {
fullImg_preview.forEach(function function_name(e) {
if (e.dataset.modded)
return;
e.dataset.modded = true;
if(e && e.parentElement.tagName == 'DIV'){
-
e.style.cursor='pointer'
-
e.addEventListener('click', function (evt) {
if(!opts.js_modal_lightbox) return;
modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
showModal(evt)
- },true);
+ }, true);
}
});
}
@@ -105,21 +128,21 @@ function showGalleryImage(){
}, 100);
}
-function modalZoomSet(modalImage, enable){
- if( enable ){
+function modalZoomSet(modalImage, enable) {
+ if (enable) {
modalImage.classList.add('modalImageFullscreen');
- } else{
+ } else {
modalImage.classList.remove('modalImageFullscreen');
}
}
-function modalZoomToggle(event){
+function modalZoomToggle(event) {
modalImage = gradioApp().getElementById("modalImage");
modalZoomSet(modalImage, !modalImage.classList.contains('modalImageFullscreen'))
event.stopPropagation()
}
-function modalTileImageToggle(event){
+function modalTileImageToggle(event) {
const modalImage = gradioApp().getElementById("modalImage");
const modal = gradioApp().getElementById("lightboxModal");
const isTiling = modalImage.style.display === 'none';
@@ -134,17 +157,18 @@ function modalTileImageToggle(event){
event.stopPropagation()
}
-function galleryImageHandler(e){
- if(e && e.parentElement.tagName == 'BUTTON'){
+function galleryImageHandler(e) {
+ if (e && e.parentElement.tagName == 'BUTTON') {
e.onclick = showGalleryImage;
}
}
-onUiUpdate(function(){
+onUiUpdate(function() {
fullImg_preview = gradioApp().querySelectorAll('img.w-full')
- if(fullImg_preview != null){
- fullImg_preview.forEach(galleryImageHandler);
+ if (fullImg_preview != null) {
+ fullImg_preview.forEach(galleryImageHandler);
}
+ updateOnBackgroundChange();
})
document.addEventListener("DOMContentLoaded", function() {
@@ -152,13 +176,13 @@ document.addEventListener("DOMContentLoaded", function() {
const modal = document.createElement('div')
modal.onclick = closeModal;
modal.id = "lightboxModal";
- modal.tabIndex=0
+ modal.tabIndex = 0
modal.addEventListener('keydown', modalKeyHandler, true)
const modalControls = document.createElement('div')
modalControls.className = 'modalControls gradio-container';
modal.append(modalControls);
-
+
const modalZoom = document.createElement('span')
modalZoom.className = 'modalZoom cursor';
modalZoom.innerHTML = '⤡'
@@ -183,30 +207,30 @@ document.addEventListener("DOMContentLoaded", function() {
const modalImage = document.createElement('img')
modalImage.id = 'modalImage';
modalImage.onclick = closeModal;
- modalImage.tabIndex=0
+ modalImage.tabIndex = 0
modalImage.addEventListener('keydown', modalKeyHandler, true)
modal.appendChild(modalImage)
const modalPrev = document.createElement('a')
modalPrev.className = 'modalPrev';
modalPrev.innerHTML = '❮'
- modalPrev.tabIndex=0
- modalPrev.addEventListener('click',modalPrevImage,true);
+ modalPrev.tabIndex = 0
+ modalPrev.addEventListener('click', modalPrevImage, true);
modalPrev.addEventListener('keydown', modalKeyHandler, true)
modal.appendChild(modalPrev)
const modalNext = document.createElement('a')
modalNext.className = 'modalNext';
modalNext.innerHTML = '❯'
- modalNext.tabIndex=0
- modalNext.addEventListener('click',modalNextImage,true);
+ modalNext.tabIndex = 0
+ modalNext.addEventListener('click', modalNextImage, true);
modalNext.addEventListener('keydown', modalKeyHandler, true)
modal.appendChild(modalNext)
gradioApp().getRootNode().appendChild(modal)
-
+
document.body.appendChild(modalFragment);
-
+
});
diff --git a/launch.py b/launch.py
index 1d65a779..f42f557d 100644
--- a/launch.py
+++ b/launch.py
@@ -7,38 +7,14 @@ import shlex
import platform
dir_repos = "repositories"
-dir_tmp = "tmp"
-
python = sys.executable
git = os.environ.get('GIT', "git")
-torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
-requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
-commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
-
-gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
-clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
-
-stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
-taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
-k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
-codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
-blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
-
-args = shlex.split(commandline_args)
def extract_arg(args, name):
return [x for x in args if x != name], name in args
-args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
-xformers = '--xformers' in args
-
-
-def repo_dir(name):
- return os.path.join(dir_repos, name)
-
-
def run(command, desc=None, errdesc=None):
if desc is not None:
print(desc)
@@ -58,23 +34,11 @@ stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.st
return result.stdout.decode(encoding="utf8", errors="ignore")
-def run_python(code, desc=None, errdesc=None):
- return run(f'"{python}" -c "{code}"', desc, errdesc)
-
-
-def run_pip(args, desc=None):
- return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
-
-
def check_run(command):
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.returncode == 0
-def check_run_python(code):
- return check_run(f'"{python}" -c "{code}"')
-
-
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
@@ -84,6 +48,22 @@ def is_installed(package):
return spec is not None
+def repo_dir(name):
+ return os.path.join(dir_repos, name)
+
+
+def run_python(code, desc=None, errdesc=None):
+ return run(f'"{python}" -c "{code}"', desc, errdesc)
+
+
+def run_pip(args, desc=None):
+ return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
+
+
+def check_run_python(code):
+ return check_run(f'"{python}" -c "{code}"')
+
+
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
@@ -105,56 +85,81 @@ def git_clone(url, dir, name, commithash=None):
run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
-try:
- commit = run(f"{git} rev-parse HEAD").strip()
-except Exception:
- commit = "<none>"
+def prepare_enviroment():
+ torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
+ requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
+ commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
-print(f"Python {sys.version}")
-print(f"Commit hash: {commit}")
+ gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
+ clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
+ stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
+ taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
+ k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
+ codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
+ blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
-if not is_installed("torch") or not is_installed("torchvision"):
- run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
+ args = shlex.split(commandline_args)
+
+ args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
+ xformers = '--xformers' in args
+ deepdanbooru = '--deepdanbooru' in args
+
+ try:
+ commit = run(f"{git} rev-parse HEAD").strip()
+ except Exception:
+ commit = "<none>"
-if not skip_torch_cuda_test:
- run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
+ print(f"Python {sys.version}")
+ print(f"Commit hash: {commit}")
-if not is_installed("gfpgan"):
- run_pip(f"install {gfpgan_package}", "gfpgan")
+ if not is_installed("torch") or not is_installed("torchvision"):
+ run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
-if not is_installed("clip"):
- run_pip(f"install {clip_package}", "clip")
+ if not skip_torch_cuda_test:
+ run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
-if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
- if platform.system() == "Windows":
- run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
- elif platform.system() == "Linux":
- run_pip("install xformers", "xformers")
+ if not is_installed("gfpgan"):
+ run_pip(f"install {gfpgan_package}", "gfpgan")
-os.makedirs(dir_repos, exist_ok=True)
+ if not is_installed("clip"):
+ run_pip(f"install {clip_package}", "clip")
-git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
-git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
-git_clone("https://github.com/crowsonkb/k-diffusion.git", repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
-git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
-git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
+ if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
+ if platform.system() == "Windows":
+ run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
+ elif platform.system() == "Linux":
+ run_pip("install xformers", "xformers")
-if not is_installed("lpips"):
- run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
+ if not is_installed("deepdanbooru") and deepdanbooru:
+ run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
-run_pip(f"install -r {requirements_file}", "requirements for Web UI")
+ os.makedirs(dir_repos, exist_ok=True)
-sys.argv += args
+ git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
+ git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
+ git_clone("https://github.com/crowsonkb/k-diffusion.git", repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
+ git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
+ git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
+
+ if not is_installed("lpips"):
+ run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
+
+ run_pip(f"install -r {requirements_file}", "requirements for Web UI")
+
+ sys.argv += args
+
+ if "--exit" in args:
+ print("Exiting because of --exit argument")
+ exit(0)
-if "--exit" in args:
- print("Exiting because of --exit argument")
- exit(0)
def start_webui():
print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
import webui
webui.webui()
+
if __name__ == "__main__":
+ prepare_enviroment()
start_webui()
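Note on the refactor above: all install logic moves into `prepare_enviroment()`, while `extract_arg()` stays a small helper that strips a flag from the argument list and reports whether it was present. A self-contained illustration (the helper body is copied verbatim from the diff):

```python
def extract_arg(args, name):
    # returns (args without the flag, whether the flag was present)
    return [x for x in args if x != name], name in args

args, skip_test = extract_arg(['--xformers', '--skip-torch-cuda-test'], '--skip-torch-cuda-test')
assert args == ['--xformers']
assert skip_test is True
```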
diff --git a/models/deepbooru/Put your deepbooru release project folder here.txt b/models/deepbooru/Put your deepbooru release project folder here.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/models/deepbooru/Put your deepbooru release project folder here.txt
diff --git a/modules/bsrgan_model.py b/modules/bsrgan_model.py
index 3bd80791..737e1a76 100644
--- a/modules/bsrgan_model.py
+++ b/modules/bsrgan_model.py
@@ -10,13 +10,11 @@ from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
from modules import devices, modelloader
from modules.bsrgan_model_arch import RRDBNet
-from modules.paths import models_path
class UpscalerBSRGAN(modules.upscaler.Upscaler):
def __init__(self, dirname):
self.name = "BSRGAN"
- self.model_path = os.path.join(models_path, self.name)
self.model_name = "BSRGAN 4x"
self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/BSRGAN.pth"
self.user_path = dirname
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
new file mode 100644
index 00000000..7e3c0618
--- /dev/null
+++ b/modules/deepbooru.py
@@ -0,0 +1,73 @@
+import os.path
+from concurrent.futures import ProcessPoolExecutor
+from multiprocessing import get_context
+
+
+def _load_tf_and_return_tags(pil_image, threshold):
+ import deepdanbooru as dd
+ import tensorflow as tf
+ import numpy as np
+
+ this_folder = os.path.dirname(__file__)
+ model_path = os.path.abspath(os.path.join(this_folder, '..', 'models', 'deepbooru'))
+ if not os.path.exists(os.path.join(model_path, 'project.json')):
+ # there is no point importing these every time
+ import zipfile
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(r"https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip",
+ model_path)
+ with zipfile.ZipFile(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"), "r") as zip_ref:
+ zip_ref.extractall(model_path)
+ os.remove(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"))
+
+ tags = dd.project.load_tags_from_project(model_path)
+ model = dd.project.load_model_from_project(
+ model_path, compile_model=True
+ )
+
+ width = model.input_shape[2]
+ height = model.input_shape[1]
+ image = np.array(pil_image)
+ image = tf.image.resize(
+ image,
+ size=(height, width),
+ method=tf.image.ResizeMethod.AREA,
+ preserve_aspect_ratio=True,
+ )
+ image = image.numpy() # EagerTensor to np.array
+ image = dd.image.transform_and_pad_image(image, width, height)
+ image = image / 255.0
+ image_shape = image.shape
+ image = image.reshape((1, image_shape[0], image_shape[1], image_shape[2]))
+
+ y = model.predict(image)[0]
+
+ result_dict = {}
+
+ for i, tag in enumerate(tags):
+ result_dict[tag] = y[i]
+ result_tags_out = []
+ result_tags_print = []
+ for tag in tags:
+ if result_dict[tag] >= threshold:
+ if tag.startswith("rating:"):
+ continue
+ result_tags_out.append(tag)
+ result_tags_print.append(f'{result_dict[tag]} {tag}')
+
+ print('\n'.join(sorted(result_tags_print, reverse=True)))
+
+ return ', '.join(result_tags_out).replace('_', ' ').replace(':', ' ')
+
+
+def subprocess_init_no_cuda():
+ import os
+ os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+
+def get_deepbooru_tags(pil_image, threshold=0.5):
+ context = get_context('spawn')
+ with ProcessPoolExecutor(initializer=subprocess_init_no_cuda, mp_context=context) as executor:
+ f = executor.submit(_load_tf_and_return_tags, pil_image, threshold, )
+ ret = f.result() # will rethrow any exceptions
+    return ret
\ No newline at end of file
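A hedged usage sketch of the new module, assuming the repository's modules are importable and the DeepDanbooru extras were installed via `--deepdanbooru`; the tagger runs in a spawned CPU-only subprocess so TensorFlow never competes with torch for the GPU:

```python
# Hypothetical usage; "sample.png" is a placeholder input image.
from PIL import Image
from modules.deepbooru import get_deepbooru_tags

tags = get_deepbooru_tags(Image.open("sample.png"), threshold=0.5)
print(tags)  # e.g. "1girl, solo, smile" - underscores and colons are replaced with spaces
```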
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 28548124..46ad0da3 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -5,9 +5,8 @@ import torch
from PIL import Image
from basicsr.utils.download_util import load_file_from_url
-import modules.esrgam_model_arch as arch
+import modules.esrgan_model_arch as arch
from modules import shared, modelloader, images, devices
-from modules.paths import models_path
from modules.upscaler import Upscaler, UpscalerData
from modules.shared import opts
@@ -76,7 +75,6 @@ class UpscalerESRGAN(Upscaler):
self.model_name = "ESRGAN_4x"
self.scalers = []
self.user_path = dirname
- self.model_path = os.path.join(models_path, self.name)
super().__init__()
model_paths = self.find_models(ext_filter=[".pt", ".pth"])
scalers = []
diff --git a/modules/esrgam_model_arch.py b/modules/esrgan_model_arch.py
index e413d36e..e413d36e 100644
--- a/modules/esrgam_model_arch.py
+++ b/modules/esrgan_model_arch.py
diff --git a/modules/extras.py b/modules/extras.py
index 1d9e64e5..41e8612c 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -29,7 +29,7 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v
if extras_mode == 1:
#convert file to pillow image
for img in image_folder:
- image = Image.fromarray(np.array(Image.open(img)))
+ image = Image.open(img)
imageArr.append(image)
imageNameArr.append(os.path.splitext(img.orig_name)[0])
else:
@@ -98,6 +98,10 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v
no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo,
forced_filename=image_name if opts.use_original_name_batch else None)
+ if opts.enable_pnginfo:
+ image.info = existing_pnginfo
+ image.info["extras"] = info
+
outputs.append(image)
devices.torch_gc()
@@ -169,9 +173,9 @@ def run_modelmerger(primary_model_name, secondary_model_name, interp_method, int
print(f"Loading {secondary_model_info.filename}...")
secondary_model = torch.load(secondary_model_info.filename, map_location='cpu')
-
- theta_0 = primary_model['state_dict']
- theta_1 = secondary_model['state_dict']
+
+ theta_0 = sd_models.get_state_dict_from_checkpoint(primary_model)
+ theta_1 = sd_models.get_state_dict_from_checkpoint(secondary_model)
theta_funcs = {
"Weighted Sum": weighted_sum,
diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
index 7f062242..498bc9d8 100644
--- a/modules/hypernetwork.py
+++ b/modules/hypernetwork.py
@@ -40,18 +40,28 @@ class Hypernetwork:
self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1]))
-def load_hypernetworks(path):
+def list_hypernetworks(path):
res = {}
-
for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
+ name = os.path.splitext(os.path.basename(filename))[0]
+ res[name] = filename
+ return res
+
+
+def load_hypernetwork(filename):
+ path = shared.hypernetworks.get(filename, None)
+ if path is not None:
+ print(f"Loading hypernetwork {filename}")
try:
- hn = Hypernetwork(filename)
- res[hn.name] = hn
+ shared.loaded_hypernetwork = Hypernetwork(path)
except Exception:
- print(f"Error loading hypernetwork {filename}", file=sys.stderr)
+ print(f"Error loading hypernetwork {path}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
+ else:
+ if shared.loaded_hypernetwork is not None:
+ print(f"Unloading hypernetwork")
- return res
+ shared.loaded_hypernetwork = None
def attention_CrossAttention_forward(self, x, context=None, mask=None):
@@ -60,7 +70,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None):
q = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
+ hypernetwork = shared.loaded_hypernetwork
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
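The change above replaces eager loading of every `.pt` file with a cheap name-to-path listing plus an explicit `load_hypernetwork()` that sets or clears `shared.loaded_hypernetwork`. A hedged sketch of the resulting flow, assuming the repository's modules are importable:

```python
from modules import shared, hypernetwork

# Listing only globs for .pt files; nothing is deserialized yet.
shared.hypernetworks = hypernetwork.list_hypernetworks("models/hypernetworks")
print(sorted(shared.hypernetworks))      # available hypernetwork names

# Loading happens on demand; "anime" is a hypothetical name.
hypernetwork.load_hypernetwork("anime")  # sets shared.loaded_hypernetwork
hypernetwork.load_hypernetwork("None")   # unknown name: unloads the current one
```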
diff --git a/modules/images.py b/modules/images.py
index 4a4fc977..e62eec8e 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -349,6 +349,38 @@ def get_next_sequence_number(path, basename):
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
+ '''Save an image.
+
+ Args:
+ image (`PIL.Image`):
+ The image to be saved.
+ path (`str`):
+ The directory in which to save the image. Note that the option `save_to_dirs` causes the image to be saved into a subdirectory instead.
+ basename (`str`):
+ The base filename which will be applied to `filename pattern`.
+ seed, prompt, short_filename,
+ extension (`str`):
+ Image file extension, default is `png`.
+ pnginfo_section_name (`str`):
+ Specify the name of the section in which `info` will be saved.
+ info (`str` or `PngImagePlugin.iTXt`):
+ PNG info chunks.
+ existing_info (`dict`):
+ Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
+ no_prompt:
+ TODO I don't know its meaning.
+ p (`StableDiffusionProcessing`)
+ forced_filename (`str`):
+ If specified, `basename` and filename pattern will be ignored.
+ save_to_dirs (bool):
+ If true, the image will be saved into a subdirectory of `path`.
+
+ Returns: (fullfn, txt_fullfn)
+ fullfn (`str`):
+ The full path of the saved image.
+ txt_fullfn (`str` or None):
+ If a text file is saved for this image, this will be its full path. Otherwise None.
+ '''
if short_filename or prompt is None or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
@@ -424,10 +456,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
if opts.save_txt and info is not None:
- with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
+ txt_fullfn = f"{fullfn_without_extension}.txt"
+ with open(txt_fullfn, "w", encoding="utf8") as file:
file.write(info + "\n")
+ else:
+ txt_fullfn = None
- return fullfn
+ return fullfn, txt_fullfn
def addCaptionLines(lines,image,initialx,textfont):
draw = ImageDraw.Draw(image)
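Because `save_image` now also returns the path of the optional `.txt` sidecar, callers must unpack a pair. A hedged sketch, assuming the repository's modules are importable:

```python
from PIL import Image
from modules.images import save_image

img = Image.new("RGB", (64, 64))
fullfn, txt_fullfn = save_image(img, "outputs", "sample", seed=1, prompt="a cat", info="test")
print(fullfn)      # full path of the saved image
print(txt_fullfn)  # full path of the .txt file, or None when opts.save_txt is off
```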
diff --git a/modules/ldsr_model.py b/modules/ldsr_model.py
index 1c1070fc..8c4db44a 100644
--- a/modules/ldsr_model.py
+++ b/modules/ldsr_model.py
@@ -7,13 +7,11 @@ from basicsr.utils.download_util import load_file_from_url
from modules.upscaler import Upscaler, UpscalerData
from modules.ldsr_model_arch import LDSR
from modules import shared
-from modules.paths import models_path
class UpscalerLDSR(Upscaler):
def __init__(self, user_path):
self.name = "LDSR"
- self.model_path = os.path.join(models_path, self.name)
self.user_path = user_path
self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
diff --git a/modules/paths.py b/modules/paths.py
index 0519caa0..1e7a2fbc 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -1,6 +1,7 @@
import argparse
import os
import sys
+import modules.safe
script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
models_path = os.path.join(script_path, "models")
diff --git a/modules/processing.py b/modules/processing.py
index 4fea6d56..94d2dd62 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -46,6 +46,12 @@ def apply_color_correction(correction, image):
return image
+def get_correct_sampler(p):
+ if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img):
+ return sd_samplers.samplers
+ elif isinstance(p, modules.processing.StableDiffusionProcessingImg2Img):
+ return sd_samplers.samplers_for_img2img
+
class StableDiffusionProcessing:
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None, eta=None):
self.sd_model = sd_model
@@ -123,7 +129,7 @@ class Processed:
self.index_of_first_image = index_of_first_image
self.styles = p.styles
self.job_timestamp = state.job_timestamp
- self.clip_skip = opts.CLIP_ignore_last_layers
+ self.clip_skip = opts.CLIP_stop_at_last_layers
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
@@ -268,16 +274,18 @@ def fix_seed(p):
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
- clip_skip = getattr(p, 'clip_skip', opts.CLIP_ignore_last_layers)
+ clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
generation_params = {
"Steps": p.steps,
- "Sampler": sd_samplers.samplers[p.sampler_index].name,
+ "Sampler": get_correct_sampler(p)[p.sampler_index].name,
"CFG scale": p.cfg_scale,
"Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
+ "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
+ "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name.replace(',', '').replace(':', '')),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
@@ -285,7 +293,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
- "Clip skip": None if clip_skip==0 else clip_skip,
+ "Clip skip": None if clip_skip <= 1 else clip_skip,
}
generation_params.update(p.extra_generation_params)
@@ -445,7 +453,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
text = infotext(n, i)
infotexts.append(text)
- image.info["parameters"] = text
+ if opts.enable_pnginfo:
+ image.info["parameters"] = text
output_images.append(image)
del x_samples_ddim
@@ -464,7 +473,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if opts.return_grid:
text = infotext()
infotexts.insert(0, text)
- grid.info["parameters"] = text
+ if opts.enable_pnginfo:
+ grid.info["parameters"] = text
output_images.insert(0, grid)
index_of_first_image = 1
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index dc0123e0..3ac0b97a 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -8,14 +8,12 @@ from basicsr.utils.download_util import load_file_from_url
from realesrgan import RealESRGANer
from modules.upscaler import Upscaler, UpscalerData
-from modules.paths import models_path
from modules.shared import cmd_opts, opts
class UpscalerRealESRGAN(Upscaler):
def __init__(self, path):
self.name = "RealESRGAN"
- self.model_path = os.path.join(models_path, self.name)
self.user_path = path
super().__init__()
try:
diff --git a/modules/safe.py b/modules/safe.py
new file mode 100644
index 00000000..4d06f2a5
--- /dev/null
+++ b/modules/safe.py
@@ -0,0 +1,89 @@
+# this code is adapted from the script contributed by anon from /h/
+
+import io
+import pickle
+import collections
+import sys
+import traceback
+
+import torch
+import numpy
+import _codecs
+import zipfile
+
+
+def encode(*args):
+ out = _codecs.encode(*args)
+ return out
+
+
+class RestrictedUnpickler(pickle.Unpickler):
+ def persistent_load(self, saved_id):
+ assert saved_id[0] == 'storage'
+ return torch.storage._TypedStorage()
+
+ def find_class(self, module, name):
+ if module == 'collections' and name == 'OrderedDict':
+ return getattr(collections, name)
+ if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:
+ return getattr(torch._utils, name)
+ if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage']:
+ return getattr(torch, name)
+ if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
+ return getattr(torch.nn.modules.container, name)
+ if module == 'numpy.core.multiarray' and name == 'scalar':
+ return numpy.core.multiarray.scalar
+ if module == 'numpy' and name == 'dtype':
+ return numpy.dtype
+ if module == '_codecs' and name == 'encode':
+ return encode
+ if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint':
+ import pytorch_lightning.callbacks
+ return pytorch_lightning.callbacks.model_checkpoint
+ if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint':
+ import pytorch_lightning.callbacks.model_checkpoint
+ return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
+ if module == "__builtin__" and name == 'set':
+ return set
+
+ # Forbid everything else.
+ raise pickle.UnpicklingError(f"global '{module}/{name}' is forbidden")
+
+
+def check_pt(filename):
+ try:
+
+ # new pytorch format is a zip file
+ with zipfile.ZipFile(filename) as z:
+ with z.open('archive/data.pkl') as file:
+ unpickler = RestrictedUnpickler(file)
+ unpickler.load()
+
+ except zipfile.BadZipfile:
+
+        # if it's not a zip file, it's the old pytorch format, with five objects written to pickle
+ with open(filename, "rb") as file:
+ unpickler = RestrictedUnpickler(file)
+ for i in range(5):
+ unpickler.load()
+
+
+def load(filename, *args, **kwargs):
+ from modules import shared
+
+ try:
+ if not shared.cmd_opts.disable_safe_unpickle:
+ check_pt(filename)
+
+ except Exception:
+ print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ print(f"\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
+ print(f"You can skip this check with --disable-safe-unpickle commandline argument.", file=sys.stderr)
+ return None
+
+ return unsafe_torch_load(filename, *args, **kwargs)
+
+
+unsafe_torch_load = torch.load
+torch.load = load
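A minimal sketch (not part of the commit) of what the restricted loader protects against: a pickle whose payload references a forbidden global is rejected with `UnpicklingError` instead of being executed. It assumes `modules/safe.py` above is importable; note that importing it also monkey-patches `torch.load` as shown.

```python
import io
import os
import pickle

from modules.safe import RestrictedUnpickler

class Exploit:
    # Pickling this object embeds a call to os.system in the payload.
    def __reduce__(self):
        return (os.system, ("echo pwned",))

payload = pickle.dumps(Exploit())

try:
    RestrictedUnpickler(io.BytesIO(payload)).load()
except pickle.UnpicklingError as e:
    print(e)  # e.g. global 'posix/system' is forbidden
```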
diff --git a/modules/scunet_model.py b/modules/scunet_model.py
index fb64b740..36a996bf 100644
--- a/modules/scunet_model.py
+++ b/modules/scunet_model.py
@@ -9,14 +9,12 @@ from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
from modules import devices, modelloader
-from modules.paths import models_path
from modules.scunet_model_arch import SCUNet as net
class UpscalerScuNET(modules.upscaler.Upscaler):
def __init__(self, dirname):
self.name = "ScuNET"
- self.model_path = os.path.join(models_path, self.name)
self.model_name = "ScuNET GAN"
self.model_name2 = "ScuNET PSNR"
self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth"
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f12a9696..437acce4 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -282,14 +282,12 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
remade_batch_tokens_of_same_length = [x + [self.wrapped.tokenizer.eos_token_id] * (target_token_count - len(x)) for x in remade_batch_tokens]
tokens = torch.asarray(remade_batch_tokens_of_same_length).to(device)
- tmp = -opts.CLIP_ignore_last_layers
- if (opts.CLIP_ignore_last_layers == 0):
- outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids)
- z = outputs.last_hidden_state
- else:
- outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids, output_hidden_states=tmp)
- z = outputs.hidden_states[tmp]
+ outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids, output_hidden_states=-opts.CLIP_stop_at_last_layers)
+ if opts.CLIP_stop_at_last_layers > 1:
+ z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
z = self.wrapped.transformer.text_model.final_layer_norm(z)
+ else:
+ z = outputs.last_hidden_state
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
batch_multipliers_of_same_length = [x + [1.0] * (target_token_count - len(x)) for x in batch_multipliers]
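The rewritten branch implements "clip skip": a setting of 1 keeps the normal final output, while n > 1 takes the n-th hidden state from the end and re-applies the final layer norm. A standalone sketch of the same logic using Hugging Face transformers (an assumption for illustration; the webui wraps its own CLIP instance):

```python
import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

def encode(text, stop_at_last_layers=1):
    tokens = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model.text_model(**tokens, output_hidden_states=True)
    if stop_at_last_layers > 1:
        # take an earlier hidden state and re-apply the final layer norm,
        # mirroring the branch in FrozenCLIPEmbedderWithCustomWords above
        z = outputs.hidden_states[-stop_at_last_layers]
        z = model.text_model.final_layer_norm(z)
    else:
        z = outputs.last_hidden_state
    return z

print(encode("a photo of a cat", stop_at_last_layers=2).shape)
```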
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index c4396bb9..634fb4b2 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -28,7 +28,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
q_in = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
+ hypernetwork = shared.loaded_hypernetwork
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
@@ -68,7 +68,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
q_in = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
+ hypernetwork = shared.loaded_hypernetwork
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
@@ -132,7 +132,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
h = self.heads
q_in = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
+ hypernetwork = shared.loaded_hypernetwork
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
k_in = self.to_k(hypernetwork_layers[0](context))
diff --git a/modules/sd_models.py b/modules/sd_models.py
index cb3982b1..e63d3c29 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -5,7 +5,6 @@ from collections import namedtuple
import torch
from omegaconf import OmegaConf
-
from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices
@@ -122,6 +121,13 @@ def select_checkpoint():
return checkpoint_info
+def get_state_dict_from_checkpoint(pl_sd):
+ if "state_dict" in pl_sd:
+ return pl_sd["state_dict"]
+
+ return pl_sd
+
+
def load_model_weights(model, checkpoint_info):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
@@ -131,11 +137,8 @@ def load_model_weights(model, checkpoint_info):
pl_sd = torch.load(checkpoint_file, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
-
- if "state_dict" in pl_sd:
- sd = pl_sd["state_dict"]
- else:
- sd = pl_sd
+
+ sd = get_state_dict_from_checkpoint(pl_sd)
model.load_state_dict(sd, strict=False)
@@ -165,7 +168,7 @@ def load_model():
checkpoint_info = select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
- print(f"Loading config from: {shared.cmd_opts.config}")
+ print(f"Loading config from: {checkpoint_info.config}")
sd_config = OmegaConf.load(checkpoint_info.config)
sd_model = instantiate_from_config(sd_config.model)
@@ -192,7 +195,8 @@ def reload_model_weights(sd_model, info=None):
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config:
- return load_model()
+ shared.sd_model = load_model()
+ return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
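The new helper centralizes a pattern that was previously duplicated in `load_model_weights` and `run_modelmerger`: some checkpoints nest their weights under a "state_dict" key, others store them flat. A self-contained illustration (the dicts are toy stand-ins for real checkpoints):

```python
def get_state_dict_from_checkpoint(pl_sd):
    # copied from the diff: unwrap nested checkpoints, pass flat ones through
    if "state_dict" in pl_sd:
        return pl_sd["state_dict"]
    return pl_sd

wrapped = {"state_dict": {"model.weight": 0.0}, "global_step": 100}
flat = {"model.weight": 0.0}

assert get_state_dict_from_checkpoint(wrapped) == {"model.weight": 0.0}
assert get_state_dict_from_checkpoint(flat) is flat
```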
diff --git a/modules/shared.py b/modules/shared.py
index 2dc092d6..1995a99a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -45,6 +45,7 @@ parser.add_argument("--swinir-models-path", type=str, help="Path to directory wi
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
+parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
@@ -64,6 +65,7 @@ parser.add_argument("--autolaunch", action='store_true', help="open the webui UR
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
+parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
cmd_opts = parser.parse_args()
@@ -78,11 +80,8 @@ parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
-hypernetworks = hypernetwork.load_hypernetworks(os.path.join(models_path, 'hypernetworks'))
-
-
-def selected_hypernetwork():
- return hypernetworks.get(opts.sd_hypernetwork, None)
+hypernetworks = hypernetwork.list_hypernetworks(os.path.join(models_path, 'hypernetworks'))
+loaded_hypernetwork = None
class State:
@@ -132,13 +131,14 @@ def realesrgan_models_names():
class OptionInfo:
- def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, show_on_main_page=False):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = None
+ self.show_on_main_page = show_on_main_page
def options_section(section_identifier, options_dict):
@@ -215,7 +215,7 @@ options_templates.update(options_section(('system', "System"), {
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
- "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}),
+ "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, show_on_main_page=True),
"sd_hypernetwork": OptionInfo("None", "Stable Diffusion finetune hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
@@ -225,7 +225,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
- 'CLIP_ignore_last_layers': OptionInfo(0, "Ignore last layers of CLIP model", gr.Slider, {"minimum": 0, "maximum": 5, "step": 1}),
+    'CLIP_stop_at_last_layers': OptionInfo(1, "Stop at last layers of CLIP model", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
@@ -240,10 +240,11 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
- "show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
+ "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
+ "add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
diff --git a/modules/swinir_model.py b/modules/swinir_model.py
index 9bd454c6..fbd11f84 100644
--- a/modules/swinir_model.py
+++ b/modules/swinir_model.py
@@ -8,7 +8,6 @@ from basicsr.utils.download_util import load_file_from_url
from tqdm import tqdm
from modules import modelloader
-from modules.paths import models_path
from modules.shared import cmd_opts, opts, device
from modules.swinir_model_arch import SwinIR as net
from modules.upscaler import Upscaler, UpscalerData
@@ -25,7 +24,6 @@ class UpscalerSwinIR(Upscaler):
"/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR" \
"-L_x4_GAN.pth "
self.model_name = "SwinIR 4x"
- self.model_path = os.path.join(models_path, self.name)
self.user_path = dirname
super().__init__()
scalers = []
diff --git a/modules/ui.py b/modules/ui.py
index a5983204..202c4866 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -25,6 +25,8 @@ import gradio.routes
from modules import sd_hijack
from modules.paths import script_path
from modules.shared import opts, cmd_opts
+if cmd_opts.deepdanbooru:
+ from modules.deepbooru import get_deepbooru_tags
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
@@ -98,9 +100,10 @@ def send_gradio_gallery_to_image(x):
return image_from_url_text(x[0])
-def save_files(js_data, images, index):
+def save_files(js_data, images, do_make_zip, index):
import csv
filenames = []
+ fullfns = []
#quick dictionary to class object conversion. Its necessary due apply_filename_pattern requiring it
class MyObject:
@@ -137,14 +140,29 @@ def save_files(js_data, images, index):
is_grid = image_index < p.index_of_first_image
i = 0 if is_grid else (image_index - p.index_of_first_image)
- fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
+ fullfn, txt_fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
filename = os.path.relpath(fullfn, path)
filenames.append(filename)
+ fullfns.append(fullfn)
+ if txt_fullfn:
+ filenames.append(os.path.basename(txt_fullfn))
+ fullfns.append(txt_fullfn)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
- return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
+ # Make Zip
+ if do_make_zip:
+ zip_filepath = os.path.join(path, "images.zip")
+
+ from zipfile import ZipFile
+ with ZipFile(zip_filepath, "w") as zip_file:
+ for i in range(len(fullfns)):
+ with open(fullfns[i], mode="rb") as f:
+ zip_file.writestr(filenames[i], f.read())
+ fullfns.insert(0, zip_filepath)
+
+ return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func, extra_outputs=None):
@@ -292,6 +310,11 @@ def interrogate(image):
return gr_show(True) if prompt is None else prompt
+def interrogate_deepbooru(image):
+ prompt = get_deepbooru_tags(image)
+ return gr_show(True) if prompt is None else prompt
+
+
def create_seed_inputs():
with gr.Row():
with gr.Box():
@@ -428,15 +451,20 @@ def create_toprow(is_img2img):
outputs=[],
)
- with gr.Row():
+ with gr.Row(scale=1):
if is_img2img:
- interrogate = gr.Button('Interrogate', elem_id="interrogate")
+ interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
+ if cmd_opts.deepdanbooru:
+ deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
+ else:
+ deepbooru = None
else:
interrogate = None
+ deepbooru = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
- return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste, token_counter, token_button
+ return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button
def setup_progressbar(progressbar, preview, id_part, textinfo=None):
@@ -465,7 +493,7 @@ def create_ui(wrap_gradio_gpu_call):
import modules.txt2img
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
- txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
+ txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
@@ -521,6 +549,12 @@ def create_ui(wrap_gradio_gpu_call):
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
+ with gr.Row():
+ do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
+
+ with gr.Row():
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
+
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
@@ -570,13 +604,15 @@ def create_ui(wrap_gradio_gpu_call):
save.click(
fn=wrap_gradio_call(save_files),
- _js="(x, y, z) => [x, y, selected_gallery_index()]",
+ _js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
+ do_make_zip,
html_info,
],
outputs=[
+ download_files,
html_info,
html_info,
html_info,
@@ -617,7 +653,7 @@ def create_ui(wrap_gradio_gpu_call):
token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
- img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
+ img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
@@ -701,6 +737,12 @@ def create_ui(wrap_gradio_gpu_call):
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
+ with gr.Row():
+ do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
+
+ with gr.Row():
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
+
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
@@ -774,15 +816,24 @@ def create_ui(wrap_gradio_gpu_call):
outputs=[img2img_prompt],
)
+ if cmd_opts.deepdanbooru:
+ img2img_deepbooru.click(
+ fn=interrogate_deepbooru,
+ inputs=[init_img],
+ outputs=[img2img_prompt],
+ )
+
save.click(
fn=wrap_gradio_call(save_files),
- _js="(x, y, z) => [x, y, selected_gallery_index()]",
+ _js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
- html_info
+ do_make_zip,
+ html_info,
],
outputs=[
+ download_files,
html_info,
html_info,
html_info,
@@ -1104,6 +1155,15 @@ def create_ui(wrap_gradio_gpu_call):
component_dict = {}
def open_folder(f):
+ if not os.path.isdir(f):
+ print(f"""
+WARNING
+An open_folder request was made with an argument that is not a folder.
+This could be an error or a malicious attempt to run code on your computer.
+Requested path was: {f}
+""", file=sys.stderr)
+ return
+
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
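
open_folder now rejects anything that is not an existing directory before handing the path to the OS, since the value originates in the browser. Condensed into a standalone sketch; the Darwin and xdg-open branches mirror the function's existing platform handling:

    import os
    import platform
    import subprocess
    import sys

    def open_folder_sketch(f):
        # Reject anything that is not an existing directory: the argument
        # comes from the browser and must be treated as untrusted input.
        if not os.path.isdir(f):
            print(f"Refusing to open non-folder path: {f}", file=sys.stderr)
            return

        path = os.path.normpath(f)
        if platform.system() == "Windows":
            os.startfile(path)
        elif platform.system() == "Darwin":
            subprocess.Popen(["open", path])
        else:
            subprocess.Popen(["xdg-open", path])
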
@@ -1117,10 +1177,13 @@ def create_ui(wrap_gradio_gpu_call):
changed = 0
for key, value, comp in zip(opts.data_labels.keys(), args, components):
- if not opts.same_type(value, opts.data_labels[key].default):
- return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
+ if comp != dummy_component and not opts.same_type(value, opts.data_labels[key].default):
+ return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}", opts.dumpjson()
for key, value, comp in zip(opts.data_labels.keys(), args, components):
+ if comp == dummy_component:
+ continue
+
comp_args = opts.data_labels[key].component_args
if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
continue
@@ -1138,6 +1201,21 @@ def create_ui(wrap_gradio_gpu_call):
return f'{changed} settings changed.', opts.dumpjson()
+ def run_settings_single(value, key):
+ if not opts.same_type(value, opts.data_labels[key].default):
+ return gr.update(visible=True), opts.dumpjson()
+
+ oldval = opts.data.get(key, None)
+ opts.data[key] = value
+
+ if oldval != value:
+ if opts.data_labels[key].onchange is not None:
+ opts.data_labels[key].onchange()
+
+ opts.save(shared.config_filename)
+
+ return gr.update(value=value), opts.dumpjson()
+
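
run_settings_single is the one-setting counterpart to run_settings: type-check, write, fire the onchange hook only on an actual change, then persist. A toy stand-in showing the same flow outside the UI; ToyOptions is illustrative, and same_type here is a simplified strict check where the project's real Options class is more permissive about numeric types:

    import json

    class ToyOptions:
        def __init__(self):
            self.data = {"sd_hypernetwork": "None"}
            self.defaults = {"sd_hypernetwork": "None"}
            self.onchange_hooks = {}

        def same_type(self, value, default):
            return type(value) == type(default)  # simplified strict check

        def set_single(self, key, value, path="config.json"):
            if not self.same_type(value, self.defaults[key]):
                return False  # reject without writing, as the UI path does

            oldval = self.data.get(key)
            self.data[key] = value
            if oldval != value and key in self.onchange_hooks:
                self.onchange_hooks[key]()  # e.g. reload the hypernetwork

            with open(path, "w") as f:
                json.dump(self.data, f)
            return True
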
with gr.Blocks(analytics_enabled=False) as settings_interface:
settings_submit = gr.Button(value="Apply settings", variant='primary')
result = gr.HTML()
@@ -1145,6 +1223,8 @@ def create_ui(wrap_gradio_gpu_call):
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
+ quicksettings_list = []
+
cols_displayed = 0
items_displayed = 0
previous_section = None
@@ -1167,10 +1247,14 @@ def create_ui(wrap_gradio_gpu_call):
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
- component = create_setting_component(k)
- component_dict[k] = component
- components.append(component)
- items_displayed += 1
+ if item.show_on_main_page:
+ quicksettings_list.append((i, k, item))
+ components.append(dummy_component)
+ else:
+ component = create_setting_component(k)
+ component_dict[k] = component
+ components.append(component)
+ items_displayed += 1
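
Settings flagged show_on_main_page are deferred to the quicksettings row, but a dummy_component still goes into components so the positional zip against opts.data_labels in run_settings stays aligned. A small sketch of why the placeholder matters; names are illustrative:

    labels = ["sd_model_checkpoint", "sd_hypernetwork", "grid_format"]
    show_on_main_page = {"sd_model_checkpoint"}

    components = []
    for key in labels:
        if key in show_on_main_page:
            components.append(None)  # dummy placeholder, skipped on apply
        else:
            components.append(f"<control for {key}>")

    for key, comp in zip(labels, components):
        if comp is None:
            continue  # mirrors the `comp == dummy_component` check above
        print(key, "->", comp)
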
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
request_notifications.click(
@@ -1184,7 +1268,6 @@ def create_ui(wrap_gradio_gpu_call):
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
-
def reload_scripts():
modules.scripts.reload_script_body_only()
@@ -1231,7 +1314,11 @@ def create_ui(wrap_gradio_gpu_call):
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
-
+ with gr.Row(elem_id="quicksettings"):
+ for i, k, item in quicksettings_list:
+ component = create_setting_component(k)
+ component_dict[k] = component
+
settings_interface.gradio_ref = demo
with gr.Tabs() as tabs:
@@ -1248,7 +1335,16 @@ def create_ui(wrap_gradio_gpu_call):
inputs=components,
outputs=[result, text_settings],
)
-
+
+ for i, k, item in quicksettings_list:
+ component = component_dict[k]
+
+ component.change(
+ fn=lambda value, k=k: run_settings_single(value, key=k),
+ inputs=[component],
+ outputs=[component, text_settings],
+ )
+
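
The k=k default argument in each change handler pins the loop variable at definition time; without it, every lambda would resolve k to the last quicksettings key when Gradio eventually invokes it. The idiom in isolation:

    handlers_wrong = [lambda value: (k, value) for k in ("a", "b", "c")]
    handlers_right = [lambda value, k=k: (k, value) for k in ("a", "b", "c")]

    print(handlers_wrong[0](1))  # ('c', 1) -- all closures share the final k
    print(handlers_right[0](1))  # ('a', 1) -- default argument pinned per loop
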
def modelmerger(*args):
try:
results = modules.extras.run_modelmerger(*args)
diff --git a/modules/upscaler.py b/modules/upscaler.py
index d9d7c5e2..6ab2fb40 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -36,10 +36,11 @@ class Upscaler:
self.half = not modules.shared.cmd_opts.no_half
self.pre_pad = 0
self.mod_scale = None
- if self.name is not None and create_dirs:
+
+ if self.model_path is None and self.name:
self.model_path = os.path.join(models_path, self.name)
- if not os.path.exists(self.model_path):
- os.makedirs(self.model_path)
+ if self.model_path and create_dirs:
+ os.makedirs(self.model_path, exist_ok=True)
try:
import cv2
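
The Upscaler constructor now derives model_path only when the subclass left it unset, and os.makedirs(..., exist_ok=True) replaces the exists-check-then-create pair, which was racy when several upscalers initialized at once. The resulting logic, lifted into a standalone sketch; models_path stands in for the module-level constant:

    import os

    models_path = "models"  # stand-in for the module-level constant

    def ensure_model_dir(name, model_path=None, create_dirs=True):
        # Derive a per-upscaler folder only if the subclass left it unset.
        if model_path is None and name:
            model_path = os.path.join(models_path, name)
        # exist_ok=True makes creation idempotent and race-free, replacing
        # the old "if not exists: makedirs" pair.
        if model_path and create_dirs:
            os.makedirs(model_path, exist_ok=True)
        return model_path
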
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 513d9a1c..b24f1a80 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -10,7 +10,6 @@ from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state
-
class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
@@ -29,6 +28,9 @@ class Script(scripts.Script):
checkbox_txt.change(fn=lambda x: [gr.File.update(visible = not x), gr.TextArea.update(visible = x)], inputs=[checkbox_txt], outputs=[file, prompt_txt])
return [checkbox_txt, file, prompt_txt]
+ def on_show(self, checkbox_txt, file, prompt_txt):
+ return [ gr.Checkbox.update(visible = True), gr.File.update(visible = not checkbox_txt), gr.TextArea.update(visible = checkbox_txt) ]
+
def run(self, p, checkbox_txt, data: bytes, prompt_txt: str):
if (checkbox_txt):
lines = [x.strip() for x in prompt_txt.splitlines()]
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index c0c364df..771eb8e4 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -10,8 +10,8 @@ import numpy as np
import modules.scripts as scripts
import gradio as gr
-from modules import images
-from modules.processing import process_images, Processed
+from modules import images, hypernetwork
+from modules.processing import process_images, Processed, get_correct_sampler
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.sd_samplers
@@ -56,15 +56,17 @@ def apply_order(p, x, xs):
p.prompt = prompt_tmp + p.prompt
-samplers_dict = {}
-for i, sampler in enumerate(modules.sd_samplers.samplers):
- samplers_dict[sampler.name.lower()] = i
- for alias in sampler.aliases:
- samplers_dict[alias.lower()] = i
+def build_samplers_dict(p):
+ samplers_dict = {}
+ for i, sampler in enumerate(get_correct_sampler(p)):
+ samplers_dict[sampler.name.lower()] = i
+ for alias in sampler.aliases:
+ samplers_dict[alias.lower()] = i
+ return samplers_dict
def apply_sampler(p, x, xs):
- sampler_index = samplers_dict.get(x.lower(), None)
+ sampler_index = build_samplers_dict(p).get(x.lower(), None)
if sampler_index is None:
raise RuntimeError(f"Unknown sampler: {x}")
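
The sampler index moves from import time into build_samplers_dict because txt2img and img2img expose different sampler lists; get_correct_sampler(p) picks the right one per job. The name-and-alias index on its own, where SamplerInfo stands in for the entries of modules.sd_samplers.samplers:

    from collections import namedtuple

    SamplerInfo = namedtuple("SamplerInfo", ["name", "aliases"])

    samplers = [
        SamplerInfo("Euler a", ["k_euler_a"]),
        SamplerInfo("DDIM", []),
    ]

    def build_samplers_dict_sketch(samplers):
        index = {}
        for i, sampler in enumerate(samplers):
            index[sampler.name.lower()] = i
            for alias in sampler.aliases:
                index[alias.lower()] = i
        return index

    print(build_samplers_dict_sketch(samplers).get("k_euler_a"))  # 0
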
@@ -78,8 +80,11 @@ def apply_checkpoint(p, x, xs):
def apply_hypernetwork(p, x, xs):
- hn = shared.hypernetworks.get(x, None)
- opts.data["sd_hypernetwork"] = hn.name if hn is not None else 'None'
+ hypernetwork.load_hypernetwork(x)
+
+
+def apply_clip_skip(p, x, xs):
+ opts.data["CLIP_stop_at_last_layers"] = x
def format_value_add_label(p, opt, x):
@@ -133,6 +138,7 @@ axis_options = [
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label),
+ AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label),
AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]
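
apply_clip_skip works by mutating the shared options dict per grid cell; run() snapshots CLIP_stop_at_last_layers up front and restores it at the end, the same way the hypernetwork is reloaded from opts afterwards. The mutate-then-restore shape, reduced to a sketch where opts_data stands in for the shared options dict:

    opts_data = {"CLIP_stop_at_last_layers": 1}

    def apply_clip_skip_sketch(x):
        opts_data["CLIP_stop_at_last_layers"] = x

    saved = opts_data["CLIP_stop_at_last_layers"]
    try:
        for x in (1, 2, 4):
            apply_clip_skip_sketch(x)
            # ... render one grid cell at this clip-skip depth ...
    finally:
        opts_data["CLIP_stop_at_last_layers"] = saved
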
@@ -143,7 +149,7 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
- first_pocessed = None
+ first_processed = None
state.job_count = len(xs) * len(ys) * p.n_iter
@@ -152,8 +158,8 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed = cell(x, y)
- if first_pocessed is None:
- first_pocessed = processed
+ if first_processed is None:
+ first_processed = processed
try:
res.append(processed.images[0])
@@ -164,9 +170,9 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
if draw_legend:
grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
- first_pocessed.images = [grid]
+ first_processed.images = [grid]
- return first_pocessed
+ return first_processed
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
@@ -196,10 +202,11 @@ class Script(scripts.Script):
return [x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds]
def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds):
- modules.processing.fix_seed(p)
- p.batch_size = 1
+ if not no_fixed_seeds:
+ modules.processing.fix_seed(p)
- initial_hn = opts.sd_hypernetwork
+ p.batch_size = 1
+ CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
def process_axis(opt, vals):
if opt.label == 'Nothing':
@@ -214,7 +221,6 @@ class Script(scripts.Script):
m = re_range.fullmatch(val)
mc = re_range_count.fullmatch(val)
if m is not None:
-
start = int(m.group(1))
end = int(m.group(2))+1
step = int(m.group(3)) if m.group(3) is not None else 1
@@ -256,6 +262,17 @@ class Script(scripts.Script):
valslist = list(permutations(valslist))
valslist = [opt.type(x) for x in valslist]
+
+ # Confirm options are valid before starting
+ if opt.label == "Sampler":
+ samplers_dict = build_samplers_dict(p)
+ for sampler_val in valslist:
+ if sampler_val.lower() not in samplers_dict.keys():
+ raise RuntimeError(f"Unknown sampler: {sampler_val}")
+ elif opt.label == "Checkpoint name":
+ for ckpt_val in valslist:
+ if modules.sd_models.get_closet_checkpoint_match(ckpt_val) is None:
+ raise RuntimeError(f"Checkpoint for {ckpt_val} not found")
return valslist
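
Validating every sampler and checkpoint value before the first cell renders turns a failure halfway through a long grid into an immediate error. The fail-fast check as a generic helper; a sketch, not the script's exact code:

    def validate_axis(valslist, known, kind):
        # Check the whole axis before any expensive rendering starts.
        for val in valslist:
            if val.lower() not in known:
                raise RuntimeError(f"Unknown {kind}: {val}")

    validate_axis(["ddim", "euler a"], {"ddim": 0, "euler a": 1}, "sampler")
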
@@ -308,6 +325,8 @@ class Script(scripts.Script):
# restore checkpoint in case it was changed by axes
modules.sd_models.reload_model_weights(shared.sd_model)
- opts.data["sd_hypernetwork"] = initial_hn
+ hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
+
+ opts.data["CLIP_stop_at_last_layers"] = CLIP_stop_at_last_layers
return processed
diff --git a/style.css b/style.css
index 6904fc50..c0c3f2bb 100644
--- a/style.css
+++ b/style.css
@@ -103,7 +103,12 @@
#style_apply, #style_create, #interrogate{
margin: 0.75em 0.25em 0.25em 0.25em;
- min-width: 3em;
+ min-width: 5em;
+}
+
+#style_apply, #style_create, #deepbooru{
+ margin: 0.75em 0.25em 0.25em 0.25em;
+ min-width: 5em;
}
#style_pos_col, #style_neg_col{
@@ -448,3 +453,13 @@ input[type="range"]{
.context-menu-items a:hover{
background: #a55000;
}
+
+#quicksettings > div{
+ border: none;
+ background: none;
+}
+
+#quicksettings > div > div{
+ max-width: 32em;
+ padding: 0;
+}
diff --git a/txt2img_Screenshot.png b/txt2img_Screenshot.png
index fedd538e..6e2759a4 100644
--- a/txt2img_Screenshot.png
+++ b/txt2img_Screenshot.png
Binary files differ
diff --git a/webui.py b/webui.py
index 18de8e16..270584f7 100644
--- a/webui.py
+++ b/webui.py
@@ -82,6 +82,9 @@ modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
+loaded_hypernetwork = modules.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)
+shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
+
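
webui.py now loads the configured hypernetwork once at startup and re-registers the load as an onchange callback for sd_hypernetwork, parallel to the existing sd_model_checkpoint hook. The callback registry pattern in miniature; the real wrap_queued_call additionally serializes the work onto the job queue, which this sketch omits:

    callbacks = {}

    def onchange(key, func):
        callbacks[key] = func

    def set_option(data, key, value):
        data[key] = value
        if key in callbacks:
            callbacks[key]()  # fire the hook registered for this setting

    opts_data = {"sd_hypernetwork": "None"}
    onchange("sd_hypernetwork",
             lambda: print("reloading", opts_data["sd_hypernetwork"]))
    set_option(opts_data, "sd_hypernetwork", "anime_3")  # reloading anime_3
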
def webui():
# make the program just exit at ctrl+c without waiting for anything