 README.md                            |  2
 extensions-builtin/Lora/network.py   | 33
 extensions-builtin/Lora/networks.py  | 12
 modules/processing.py                |  6
 modules/txt2img.py                   | 85
 5 files changed, 102 insertions(+), 36 deletions(-)
diff --git a/README.md b/README.md
index 72908fa7..f4cfcf29 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
# Stable Diffusion web UI
-A browser interface based on Gradio library for Stable Diffusion.
+A web interface for Stable Diffusion, implemented using the Gradio library.
![](screenshot.png)
diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py
index a62e5eff..b8fd9194 100644
--- a/extensions-builtin/Lora/network.py
+++ b/extensions-builtin/Lora/network.py
@@ -3,6 +3,9 @@ import os
from collections import namedtuple
import enum
+import torch.nn as nn
+import torch.nn.functional as F
+
from modules import sd_models, cache, errors, hashes, shared
NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])
@@ -115,6 +118,29 @@ class NetworkModule:
if hasattr(self.sd_module, 'weight'):
self.shape = self.sd_module.weight.shape
+ self.ops = None
+ self.extra_kwargs = {}
+ if isinstance(self.sd_module, nn.Conv2d):
+ self.ops = F.conv2d
+ self.extra_kwargs = {
+ 'stride': self.sd_module.stride,
+ 'padding': self.sd_module.padding
+ }
+ elif isinstance(self.sd_module, nn.Linear):
+ self.ops = F.linear
+ elif isinstance(self.sd_module, nn.LayerNorm):
+ self.ops = F.layer_norm
+ self.extra_kwargs = {
+ 'normalized_shape': self.sd_module.normalized_shape,
+ 'eps': self.sd_module.eps
+ }
+ elif isinstance(self.sd_module, nn.GroupNorm):
+ self.ops = F.group_norm
+ self.extra_kwargs = {
+ 'num_groups': self.sd_module.num_groups,
+ 'eps': self.sd_module.eps
+ }
+
self.dim = None
self.bias = weights.w.get("bias")
self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
@@ -155,5 +181,10 @@ class NetworkModule:
raise NotImplementedError()
def forward(self, x, y):
- raise NotImplementedError()
+ """A general forward implementation for all modules"""
+ if self.ops is None:
+ raise NotImplementedError()
+ else:
+ updown, ex_bias = self.calc_updown(self.sd_module.weight)
+ return y + self.ops(x, weight=updown, bias=ex_bias, **self.extra_kwargs)
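The general forward added above routes through a functional op (F.linear, F.conv2d, F.layer_norm or F.group_norm) picked from the wrapped layer's type, then adds the network's contribution on top of the host layer's output y. Below is a minimal sketch of that pattern for an nn.Linear host; ToySketch and its constant calc_updown are hypothetical stand-ins, since the real NetworkModule subclasses derive updown from their own learned weights.

import torch
import torch.nn as nn
import torch.nn.functional as F

class ToySketch:
    """Hypothetical stand-in for a NetworkModule subclass; only the dispatch pattern is real."""
    def __init__(self, sd_module):
        self.sd_module = sd_module
        self.ops = F.linear if isinstance(sd_module, nn.Linear) else None
        self.extra_kwargs = {}

    def calc_updown(self, weight):
        # Hypothetical: a fixed delta with the host weight's shape and no bias delta.
        return torch.ones_like(weight) * 0.01, None

    def forward(self, x, y):
        # Same shape as the general forward above: add the network's contribution to y.
        updown, ex_bias = self.calc_updown(self.sd_module.weight)
        return y + self.ops(x, weight=updown, bias=ex_bias, **self.extra_kwargs)

linear = nn.Linear(4, 4)
net = ToySketch(linear)
x = torch.randn(2, 4)
print(net.forward(x, linear(x)).shape)  # torch.Size([2, 4])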
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 72ebd624..32e10b62 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -458,23 +458,23 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
self.network_current_names = wanted_names
-def network_forward(module, input, original_forward):
+def network_forward(org_module, input, original_forward):
"""
Old way of applying Lora by executing operations during layer's forward.
Stacking many loras this way results in big performance degradation.
"""
if len(loaded_networks) == 0:
- return original_forward(module, input)
+ return original_forward(org_module, input)
input = devices.cond_cast_unet(input)
- network_restore_weights_from_backup(module)
- network_reset_cached_weight(module)
+ network_restore_weights_from_backup(org_module)
+ network_reset_cached_weight(org_module)
- y = original_forward(module, input)
+ y = original_forward(org_module, input)
- network_layer_name = getattr(module, 'network_layer_name', None)
+ network_layer_name = getattr(org_module, 'network_layer_name', None)
for lora in loaded_networks:
module = lora.modules.get(network_layer_name, None)
if module is None:
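The parameter rename above matters because, just past the hunk shown, the loop rebinds the name module to the per-layer network module (module = lora.modules.get(...)), which would otherwise shadow a parameter of the same name. A self-contained toy illustration of that hazard follows; the layer and the networks list are hypothetical stand-ins, not the real networks.py objects.

import torch
import torch.nn as nn

def apply_networks(org_module, x, networks):
    # Before the rename this parameter was also called 'module'; the loop
    # variable below would then have shadowed it after the first iteration,
    # and any later use of the original layer would hit the wrong object.
    y = org_module(x)
    for module in networks:      # rebinds the name 'module'
        y = y + module(x)        # org_module still refers to the original layer
    return y

layer = nn.Linear(3, 3)
extras = [nn.Linear(3, 3)]
print(apply_networks(layer, torch.randn(1, 3), extras).shape)  # torch.Size([1, 3])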
diff --git a/modules/processing.py b/modules/processing.py
index 84e7b1b4..dcc807fe 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -732,7 +732,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
- "Denoising strength": getattr(p, 'denoising_strength', None),
+ "Denoising strength": p.extra_generation_params.get("Denoising strength"),
"Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
"Clip skip": None if clip_skip <= 1 else clip_skip,
"ENSD": opts.eta_noise_seed_delta if uses_ensd else None,
@@ -1198,6 +1198,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
+ self.extra_generation_params["Denoising strength"] = self.denoising_strength
+
if self.hr_checkpoint_name and self.hr_checkpoint_name != 'Use same checkpoint':
self.hr_checkpoint_info = sd_models.get_closet_checkpoint_match(self.hr_checkpoint_name)
@@ -1516,6 +1518,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.mask_blur_y = value
def init(self, all_prompts, all_seeds, all_subseeds):
+ self.extra_generation_params["Denoising strength"] = self.denoising_strength
+
self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
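The infotext change above means "Denoising strength" is reported only when a processing object explicitly records it (txt2img with hires fix enabled, or img2img), rather than whenever the attribute happens to exist. A simplified sketch of that reporting pattern follows; build_infotext is a hypothetical stand-in for the relevant part of create_infotext, which likewise drops None-valued entries.

def build_infotext(extra_generation_params):
    params = {
        "Denoising strength": extra_generation_params.get("Denoising strength"),
    }
    # None-valued entries are skipped, so the key only appears when it was recorded.
    return ", ".join(f"{k}: {v}" for k, v in params.items() if v is not None)

print(repr(build_infotext({})))                           # '' (plain txt2img, no hires fix)
print(repr(build_infotext({"Denoising strength": 0.7})))  # 'Denoising strength: 0.7'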
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 41bb9da3..c4cc12d2 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -7,36 +7,15 @@ from modules.infotext_utils import create_override_settings_dict
from modules.shared import opts
import modules.shared as shared
from modules.ui import plaintext_to_html
+from PIL import Image
import gradio as gr
-def txt2img_upscale(id_task: str, request: gr.Request, gallery, gallery_index, generation_info, *args):
- assert len(gallery) > 0, 'No image to upscale'
- assert 0 <= gallery_index < len(gallery), f'Bad image index: {gallery_index}'
-
- geninfo = json.loads(generation_info)
- all_seeds = geninfo["all_seeds"]
-
- image_info = gallery[gallery_index] if 0 <= gallery_index < len(gallery) else gallery[0]
- image = infotext_utils.image_from_url_text(image_info)
-
- gallery_index_from_end = len(gallery) - gallery_index
- image.seed = all_seeds[-gallery_index_from_end if gallery_index_from_end < len(all_seeds) + 1 else 0]
-
- return txt2img(id_task, request, *args, firstpass_image=image)
-
-
-def txt2img(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args, firstpass_image=None):
+def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args, force_enable_hr=False):
override_settings = create_override_settings_dict(override_settings_texts)
- if firstpass_image is not None:
- seed = getattr(firstpass_image, 'seed', None)
- if seed:
- args = modules.scripts.scripts_txt2img.set_named_arg(args, 'ScriptSeed', 'seed', seed)
-
+ if force_enable_hr:
enable_hr = True
- batch_size = 1
- n_iter = 1
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
@@ -53,7 +32,7 @@ def txt2img(id_task: str, request: gr.Request, prompt: str, negative_prompt: str
width=width,
height=height,
enable_hr=enable_hr,
- denoising_strength=denoising_strength if enable_hr else None,
+ denoising_strength=denoising_strength,
hr_scale=hr_scale,
hr_upscaler=hr_upscaler,
hr_second_pass_steps=hr_second_pass_steps,
@@ -64,7 +43,6 @@ def txt2img(id_task: str, request: gr.Request, prompt: str, negative_prompt: str
hr_prompt=hr_prompt,
hr_negative_prompt=hr_negative_prompt,
override_settings=override_settings,
- firstpass_image=firstpass_image,
)
p.scripts = modules.scripts.scripts_txt2img
@@ -75,8 +53,61 @@ def txt2img(id_task: str, request: gr.Request, prompt: str, negative_prompt: str
if shared.opts.enable_console_prompts:
print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
+ return p
+
+
+def txt2img_upscale(id_task: str, request: gr.Request, gallery, gallery_index, generation_info, *args):
+ assert len(gallery) > 0, 'No image to upscale'
+ assert 0 <= gallery_index < len(gallery), f'Bad image index: {gallery_index}'
+
+ p = txt2img_create_processing(id_task, request, *args)
+ p.enable_hr = True
+ p.batch_size = 1
+ p.n_iter = 1
+
+ geninfo = json.loads(generation_info)
+ all_seeds = geninfo["all_seeds"]
+
+ image_info = gallery[gallery_index] if 0 <= gallery_index < len(gallery) else gallery[0]
+ p.firstpass_image = infotext_utils.image_from_url_text(image_info)
+
+ gallery_index_from_end = len(gallery) - gallery_index
+ seed = all_seeds[-gallery_index_from_end if gallery_index_from_end < len(all_seeds) + 1 else 0]
+ p.script_args = modules.scripts.scripts_txt2img.set_named_arg(p.script_args, 'ScriptSeed', 'seed', seed)
+
+ with closing(p):
+ processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)
+
+ if processed is None:
+ processed = processing.process_images(p)
+
+ shared.total_tqdm.clear()
+
+ new_gallery = []
+ for i, image in enumerate(gallery):
+ fake_image = Image.new(mode="RGB", size=(1, 1))
+
+ if i == gallery_index:
+ already_saved_as = getattr(processed.images[0], 'already_saved_as', None)
+ if already_saved_as is not None:
+ fake_image.already_saved_as = already_saved_as
+ else:
+ fake_image = processed.images[0]
+ else:
+ fake_image.already_saved_as = image["name"]
+
+ new_gallery.append(fake_image)
+
+ geninfo["infotexts"][gallery_index] = processed.info
+
+ return new_gallery, json.dumps(geninfo), plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
+
+
+def txt2img(id_task: str, request: gr.Request, *args):
+ p = txt2img_create_processing(id_task, request, *args)
+
with closing(p):
- processed = modules.scripts.scripts_txt2img.run(p, *args)
+ processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)
if processed is None:
processed = processing.process_images(p)
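Taken together, the txt2img.py refactor has txt2img_create_processing build the StableDiffusionProcessingTxt2Img object once, txt2img simply run it, and txt2img_upscale reuse it with a firstpass_image before rebuilding the gallery so that only the upscaled slot carries fresh image data. A self-contained sketch of that gallery rebuild follows; rebuild_gallery, saved_paths and upscaled are hypothetical stand-ins for the live gallery and the processed result.

from PIL import Image

def rebuild_gallery(saved_paths, upscaled, index):
    # Untouched slots become 1x1 placeholders that only carry a reference to the
    # file already saved on disk, so nothing gets re-encoded for the UI.
    new_gallery = []
    for i, path in enumerate(saved_paths):
        if i == index:
            new_gallery.append(upscaled)          # the freshly processed image
        else:
            fake = Image.new(mode="RGB", size=(1, 1))
            fake.already_saved_as = path
            new_gallery.append(fake)
    return new_gallery

gallery = rebuild_gallery(["a.png", "b.png"], Image.new("RGB", (64, 64)), 1)
print([getattr(im, "already_saved_as", "new image") for im in gallery])
# ['a.png', 'new image']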