From 2f4c91894d4c0a055c1069b2fda0e4da8fcda188 Mon Sep 17 00:00:00 2001 From: guaneec Date: Wed, 26 Oct 2022 12:10:30 +0800 Subject: Remove activation from final layer of HNs --- modules/hypernetworks/hypernetwork.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index d647ea55..54346b64 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -41,8 +41,8 @@ class HypernetworkModule(torch.nn.Module): # Add a fully-connected layer linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) - # Add an activation func - if activation_func == "linear" or activation_func is None: + # Add an activation func except last layer + if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 3: pass elif activation_func in self.activation_dict: linears.append(self.activation_dict[activation_func]()) @@ -53,7 +53,7 @@ class HypernetworkModule(torch.nn.Module): if add_layer_norm: linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) - # Add dropout expect last layer + # Add dropout except last layer if use_dropout and i < len(layer_structure) - 3: linears.append(torch.nn.Dropout(p=0.3)) -- cgit v1.2.1 From c702d4d0df21790199d199818f25c449213ffe0f Mon Sep 17 00:00:00 2001 From: guaneec Date: Wed, 26 Oct 2022 13:43:04 +0800 Subject: Fix off-by-one --- modules/hypernetworks/hypernetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 54346b64..3ce85bb5 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -42,7 +42,7 @@ class HypernetworkModule(torch.nn.Module): linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) # Add an activation func except last layer - if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 3: + if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 2: pass elif activation_func in self.activation_dict: linears.append(self.activation_dict[activation_func]()) @@ -54,7 +54,7 @@ class HypernetworkModule(torch.nn.Module): linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) # Add dropout except last layer - if use_dropout and i < len(layer_structure) - 3: + if use_dropout and i < len(layer_structure) - 2: linears.append(torch.nn.Dropout(p=0.3)) self.linear = torch.nn.Sequential(*linears) -- cgit v1.2.1 From 877d94f97ca5491d8779440769b191e0dcd32c8e Mon Sep 17 00:00:00 2001 From: guaneec Date: Wed, 26 Oct 2022 14:50:58 +0800 Subject: Back compatibility --- modules/hypernetworks/hypernetwork.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 3ce85bb5..dd317085 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -28,7 +28,7 @@ class HypernetworkModule(torch.nn.Module): "swish": torch.nn.Hardswish, } - def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False): + def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False, activate_output=False): super().__init__() assert layer_structure is not None, 
"layer_structure must not be None" @@ -42,7 +42,7 @@ class HypernetworkModule(torch.nn.Module): linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) # Add an activation func except last layer - if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 2: + if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output): pass elif activation_func in self.activation_dict: linears.append(self.activation_dict[activation_func]()) @@ -105,7 +105,7 @@ class Hypernetwork: filename = None name = None - def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False): + def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False, activate_output=False): self.filename = None self.name = name self.layers = {} @@ -116,11 +116,12 @@ class Hypernetwork: self.activation_func = activation_func self.add_layer_norm = add_layer_norm self.use_dropout = use_dropout + self.activate_output = activate_output for size in enable_sizes or []: self.layers[size] = ( - HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), - HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output), + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output), ) def weights(self): @@ -147,6 +148,7 @@ class Hypernetwork: state_dict['use_dropout'] = self.use_dropout state_dict['sd_checkpoint'] = self.sd_checkpoint state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name + state_dict['activate_output'] = self.activate_output torch.save(state_dict, filename) @@ -161,12 +163,13 @@ class Hypernetwork: self.activation_func = state_dict.get('activation_func', None) self.add_layer_norm = state_dict.get('is_layer_norm', False) self.use_dropout = state_dict.get('use_dropout', False) + self.activate_output = state_dict.get('activate_output', True) for size, sd in state_dict.items(): if type(size) == int: self.layers[size] = ( - HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), - HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), + HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output), + HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output), ) self.name = state_dict.get('name', self.name) -- cgit v1.2.1 From 91bb35b1e6842b30ce7553009c8ecea3643de8d2 Mon Sep 17 00:00:00 2001 From: guaneec Date: Wed, 26 Oct 2022 15:00:03 +0800 Subject: Merge fix --- modules/hypernetworks/hypernetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index eab8b32f..bd171793 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -190,7 +190,7 @@ class Hypernetwork: print(f"Weight initialization is {self.weight_init}") self.add_layer_norm = 
state_dict.get('is_layer_norm', False) print(f"Layer norm is set to {self.add_layer_norm}") - self.use_dropout = state_dict.get('use_dropout', False + self.use_dropout = state_dict.get('use_dropout', False) print(f"Dropout usage is set to {self.use_dropout}" ) self.activate_output = state_dict.get('activate_output', True) -- cgit v1.2.1 From b6a8bb123bd519736306417399f6441e504f1e8b Mon Sep 17 00:00:00 2001 From: guaneec Date: Wed, 26 Oct 2022 15:15:19 +0800 Subject: Fix merge --- modules/hypernetworks/hypernetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index bd171793..2997cead 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -60,7 +60,7 @@ class HypernetworkModule(torch.nn.Module): linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) # Add dropout except last layer - if use_dropout and i < len(layer_structure) - 2: + if use_dropout and i < len(layer_structure) - 3: linears.append(torch.nn.Dropout(p=0.3)) self.linear = torch.nn.Sequential(*linears) @@ -126,7 +126,7 @@ class Hypernetwork: filename = None name = None - def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False) + def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False): self.filename = None self.name = name self.layers = {} -- cgit v1.2.1 From 7bd8581e461623932ffbd5762ee931ee51f798db Mon Sep 17 00:00:00 2001 From: Sihan Wang <31711261+shwang95@users.noreply.github.com> Date: Wed, 26 Oct 2022 20:32:55 +0800 Subject: Fix error caused by EXIF transpose when using custom scripts Some custom scripts read the image directly and do not require an image to be selected in the UI; this caused an error. --- modules/img2img.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/img2img.py b/modules/img2img.py index 9c0cf23e..86a19f37 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -80,7 +80,8 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro mask = None # Use the EXIF orientation of photos taken by smartphones. - image = ImageOps.exif_transpose(image) + if image is not None: + image = ImageOps.exif_transpose(image) assert 0.
<= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]' -- cgit v1.2.1 From 0dd8480281ffa3e58439a3ce059c02d9f3baa5c7 Mon Sep 17 00:00:00 2001 From: MMaker Date: Wed, 26 Oct 2022 11:08:44 -0400 Subject: fix: Correct before image saved callback --- modules/script_callbacks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 6ea58d61..cedbe7bd 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -69,7 +69,7 @@ def ui_settings_callback(): def before_image_saved_callback(params: ImageSaveParams): - for c in callbacks_image_saved: + for c in callbacks_before_image_saved: try: c.callback(params) except Exception: -- cgit v1.2.1 From 85fcccc105aa50f1d78de559233eaa9f384608b5 Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Wed, 26 Oct 2022 22:24:33 +0900 Subject: Squashed commit of fixing dropout silently fix dropouts for future hypernetworks add kwargs for Hypernetwork class hypernet UI for gradio input add recommended options remove as options revert adding options in ui --- modules/hypernetworks/hypernetwork.py | 25 +++++++++++++++++-------- modules/ui.py | 4 ++-- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 2997cead..dd921153 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -34,7 +34,8 @@ class HypernetworkModule(torch.nn.Module): } activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'}) - def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False, activate_output=False): + def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', + add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs): super().__init__() assert layer_structure is not None, "layer_structure must not be None" @@ -60,7 +61,7 @@ class HypernetworkModule(torch.nn.Module): linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) # Add dropout except last layer - if use_dropout and i < len(layer_structure) - 3: + if 'last_layer_dropout' in kwargs and kwargs['last_layer_dropout'] and use_dropout and i < len(layer_structure) - 2: linears.append(torch.nn.Dropout(p=0.3)) self.linear = torch.nn.Sequential(*linears) @@ -126,7 +127,7 @@ class Hypernetwork: filename = None name = None - def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False): + def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs): self.filename = None self.name = name self.layers = {} @@ -139,11 +140,14 @@ class Hypernetwork: self.add_layer_norm = add_layer_norm self.use_dropout = use_dropout self.activate_output = activate_output + self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True for size in enable_sizes or []: self.layers[size] = ( - HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, 
self.use_dropout, self.activate_output), - HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output), + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout), + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout), ) def weights(self): @@ -172,7 +176,8 @@ class Hypernetwork: state_dict['sd_checkpoint'] = self.sd_checkpoint state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name state_dict['activate_output'] = self.activate_output - + state_dict['last_layer_dropout'] = self.last_layer_dropout + torch.save(state_dict, filename) def load(self, filename): @@ -193,12 +198,16 @@ class Hypernetwork: self.use_dropout = state_dict.get('use_dropout', False) print(f"Dropout usage is set to {self.use_dropout}" ) self.activate_output = state_dict.get('activate_output', True) + print(f"Activate last layer is set to {self.activate_output}") + self.last_layer_dropout = state_dict.get('last_layer_dropout', False) for size, sd in state_dict.items(): if type(size) == int: self.layers[size] = ( - HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output), - HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output), + HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout), + HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout), ) self.name = state_dict.get('name', self.name) diff --git a/modules/ui.py b/modules/ui.py index 0a63e357..55cbe859 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1238,8 +1238,8 @@ def create_ui(wrap_gradio_gpu_call): new_hypernetwork_name = gr.Textbox(label="Name") new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"]) new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'") - new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=modules.hypernetworks.ui.keys) - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"]) + new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys) + new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. 
Normal is default, for experiments, relu-like - Kaiming, sigmoid-like - Xavier is recommended", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"]) new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization") new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout") overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork") -- cgit v1.2.1 From cc56df996e95c2c82295ab7b9928da2544791220 Mon Sep 17 00:00:00 2001 From: guaneec Date: Wed, 26 Oct 2022 23:51:51 +0800 Subject: Fix dropout logic --- modules/hypernetworks/hypernetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index dd921153..b17598fe 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -35,7 +35,7 @@ class HypernetworkModule(torch.nn.Module): activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'}) def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', - add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs): + add_layer_norm=False, use_dropout=False, activate_output=False, last_layer_dropout=True): super().__init__() assert layer_structure is not None, "layer_structure must not be None" @@ -61,7 +61,7 @@ class HypernetworkModule(torch.nn.Module): linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) # Add dropout except last layer - if 'last_layer_dropout' in kwargs and kwargs['last_layer_dropout'] and use_dropout and i < len(layer_structure) - 2: + if use_dropout and (i < len(layer_structure) - 3 or last_layer_dropout and i < len(layer_structure) - 2): linears.append(torch.nn.Dropout(p=0.3)) self.linear = torch.nn.Sequential(*linears) -- cgit v1.2.1 From 029d7c75436558f1e884bb127caed73caaecb83a Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Thu, 27 Oct 2022 14:44:53 +0900 Subject: Revert unresolved changes in Bias initialization It should be zeros_ or properly parameterized in the future.
--- modules/hypernetworks/hypernetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index b17598fe..25427a37 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -75,7 +75,7 @@ class HypernetworkModule(torch.nn.Module): w, b = layer.weight.data, layer.bias.data if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm: normal_(w, mean=0.0, std=0.01) - normal_(b, mean=0.0, std=0.005) + normal_(b, mean=0.0, std=0) elif weight_init == 'XavierUniform': xavier_uniform_(w) zeros_(b) -- cgit v1.2.1 From 0089fa5cebe43654e6d8a45d9b880e25826c2a74 Mon Sep 17 00:00:00 2001 From: batvbs Date: Sat, 29 Oct 2022 21:09:05 +0800 Subject: =?UTF-8?q?=E6=9B=B4=E6=96=B0=20zh=5FCN.json?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- localizations/zh_CN.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json index e453f5e3..b208ae25 100644 --- a/localizations/zh_CN.json +++ b/localizations/zh_CN.json @@ -482,7 +482,7 @@ "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么", "Enable Autocomplete": "开启Tag补全", "Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别", - "Roll three": "抽三位出來", + "Roll three": "抽三位出来", "Generate forever": "不停地生成", "Cancel generate forever": "取消不停地生成" } -- cgit v1.2.1 From de1dc0d279a877d5d9f512befe30a7d7e5cf3881 Mon Sep 17 00:00:00 2001 From: Martin Cairns <4314538+MartinCairnsSQL@users.noreply.github.com> Date: Sat, 29 Oct 2022 15:23:19 +0100 Subject: Add adjust_steps_if_invalid to find next valid step for ddim uniform sampler --- modules/sd_samplers.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 3670b57d..aca014e8 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,5 +1,6 @@ from collections import namedtuple import numpy as np +from math import floor import torch import tqdm from PIL import Image @@ -205,17 +206,22 @@ class VanillaStableDiffusionSampler: self.mask = p.mask if hasattr(p, 'mask') else None self.nmask = p.nmask if hasattr(p, 'nmask') else None + + def adjust_steps_if_invalid(self, p, num_steps): + if self.config.name == 'DDIM' and p.ddim_discretize == 'uniform': + valid_step = 999 / (1000 // num_steps) + if valid_step == floor(valid_step): + return int(valid_step) + 1 + + return num_steps + + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps, t_enc = setup_img2img_steps(p, steps) - + steps = self.adjust_steps_if_invalid(p, steps) self.initialize(p) - # existing code fails with certain step counts, like 9 - try: - self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False) - except Exception: - self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False) - + self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, 
ddim_discretize=p.ddim_discretize, verbose=False) x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise) self.init_latent = x @@ -239,18 +245,14 @@ class VanillaStableDiffusionSampler: self.last_latent = x self.step = 0 - steps = steps or p.steps + steps = self.adjust_steps_if_invalid(p, steps or p.steps) # Wrap the conditioning models with additional image conditioning for inpainting model if image_conditioning is not None: conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]} unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} - # existing code fails with certain step counts, like 9 - try: - samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) - except Exception: - samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) + samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) return samples_ddim -- cgit v1.2.1 From f512b0828bb3f7d586c2da8caa87506998ed9212 Mon Sep 17 00:00:00 2001 From: dtlnor Date: Sun, 30 Oct 2022 00:45:30 +0900 Subject: Update zh_CN.json update translation content to 35c45df28b303a05d56a13cb56d4046f08cf8c25 --- localizations/zh_CN.json | 53 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json index b208ae25..94907ada 100644 --- a/localizations/zh_CN.json +++ b/localizations/zh_CN.json @@ -91,6 +91,9 @@ "Show Textbox": "显示文本框", "File with inputs": "含输入内容的文件", "Prompts": "提示词", + "Iterate seed every line": "每行输入都换一个种子", + "List of prompt inputs": "提示词输入列表", + "Upload prompt inputs": "上传提示词输入文件", "X type": "X轴类型", "Nothing": "无", "Var. seed": "差异随机种子", @@ -109,6 +112,7 @@ "Eta": "Eta", "Clip skip": "Clip 跳过", "Denoising": "去噪", + "Cond. 
Image Mask Weight": "自适应图像蒙版强度", "X values": "X轴数值", "Y type": "Y轴类型", "Y values": "Y轴数值", @@ -204,6 +208,7 @@ "GFPGAN visibility": "GFPGAN 可见度", "CodeFormer visibility": "CodeFormer 可见度", "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 权重 (0 = 最大效果, 1 = 最小效果)", + "Upscale Before Restoring Faces": "放大后再进行面部修复", "Open output directory": "打开输出目录", "Send to txt2img": ">> 文生图", "A merger of the two checkpoints will be generated in your": "合并后的模型(ckpt)会生成在你的", @@ -237,6 +242,36 @@ "leakyrelu": "leakyrelu", "elu": "elu", "swish": "swish", + "tanh": "tanh", + "sigmoid": "sigmoid", + "celu": "celu", + "gelu": "gelu", + "glu": "glu", + "hardshrink": "hardshrink", + "hardsigmoid": "hardsigmoid", + "hardtanh": "hardtanh", + "logsigmoid": "logsigmoid", + "logsoftmax": "logsoftmax", + "mish": "mish", + "prelu": "prelu", + "rrelu": "rrelu", + "relu6": "relu6", + "selu": "selu", + "silu": "silu", + "softmax": "softmax", + "softmax2d": "softmax2d", + "softmin": "softmin", + "softplus": "softplus", + "softshrink": "softshrink", + "softsign": "softsign", + "tanhshrink": "tanhshrink", + "threshold": "阈值", + "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "选择初始化层权重的方案. 类relu - Kaiming, 类sigmoid - Xavier 都是比较推荐的选项", + "Normal": "正态", + "KaimingUniform": "Kaiming均匀", + "KaimingNormal": "Kaiming正态", + "XavierUniform": "Xavier均匀", + "XavierNormal": "Xavier正态", "Add layer normalization": "添加层标准化", "Use dropout": "采用 dropout 防止过拟合", "Overwrite Old Hypernetwork": "覆写旧的 Hypernetwork", @@ -250,10 +285,15 @@ "Create flipped copies": "生成镜像副本", "Split oversized images into two": "将过大的图像分为两份", "Split oversized images": "分割过大的图像", + "Auto focal point crop": "自动焦点裁切", "Use BLIP for caption": "使用 BLIP 生成说明文字(自然语言描述)", "Use deepbooru for caption": "使用 deepbooru 生成说明文字(tags)", "Split image threshold": "图像分割阈值", "Split image overlap ratio": "分割图像重叠的比率", + "Focal point face weight": "焦点面部权重", + "Focal point entropy weight": "焦点熵权重", + "Focal point edges weight": "焦点线条权重", + "Create debug image": "生成除错图片", "Preprocess": "预处理", "Train an embedding; must specify a directory with a set of 1:1 ratio images": "训练 embedding; 必须指定一组具有 1:1 比例图像的目录", "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "训练 embedding 或者 hypernetwork; 必须指定一组具有 1:1 比例图像的目录", @@ -280,9 +320,11 @@ "Renew Page": "刷新页面", "extras": "后处理", "favorites": "收藏夹", + "others": "其他", "custom fold": "自定义文件夹", "Load": "载入", "Images directory": "图像目录", + "Dropdown": "下拉列表", "Prev batch": "上一批", "Next batch": "下一批", "First Page": "首页", @@ -291,7 +333,12 @@ "Next Page": "下一页", "End Page": "尾页", "number of images to delete consecutively next": "接下来要连续删除的图像数", + "delete next": "删除下一张", "Delete": "删除", + "sort by": "排序方式", + "path name": "路径名", + "date": "日期", + "keyword": "关键词", "Generate Info": "生成信息", "File Name": "文件名", "Collect": "收藏", @@ -299,12 +346,15 @@ "Date to": "日期至", "Number": "数量", "set_index": "设置索引", + "load_switch": "载入开关", + "turn_page_switch": "翻页开关", "Checkbox": "勾选框", "Apply settings": "保存设置", "Saving images/grids": "保存图像/概览图", "Always save all generated images": "始终保存所有生成的图像", "File format for images": "图像的文件格式", "Images filename pattern": "图像文件名格式", + "Add number to filename when saving": "储存的时候在文件名里添加数字", "Always save all generated image grids": "始终保存所有生成的概览图", "File format for grids": "概览图的文件格式", "Add extended info (seed, prompt) to filename when saving grid": "保存概览时将扩展信息(随机种子、提示词)添加到文件名", @@ -359,6 +409,7 @@ "Stable Diffusion": 
"Stable Diffusion", "Checkpoints to cache in RAM": "缓存在内存(RAM)中的模型(ckpt)", "Hypernetwork strength": "Hypernetwork 强度", + "Inpainting conditioning mask strength": "内补绘制的自适应蒙版强度", "Apply color correction to img2img results to match original colors.": "对图生图结果应用颜色校正以匹配原始颜色", "Save a copy of image before applying color correction to img2img results": "在对图生图结果应用颜色校正之前保存图像副本", "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的去噪需要更少的迭代步数)", @@ -390,6 +441,7 @@ "Add model hash to generation information": "将模型的哈希值添加到生成信息", "Add model name to generation information": "将模型名称添加到生成信息", "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "当从文本读取生成参数到 UI(从 PNG 信息或粘贴文本)时,不要更改选定的模型(ckpt)", + "Send seed when sending prompt or image to other interface": "将提示词或者图片传送到其他界面的时候同时也把随机种子传送过去", "Font for image grids that have text": "有文字的概览图使用的字体", "Enable full page image viewer": "启用整页图像查看器", "Show images zoomed in by default in full page image viewer": "在整页图像查看器中默认放大显示图像", @@ -478,6 +530,7 @@ "Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部", "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变", "This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行", + "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于内补绘制专用的模型。 决定了蒙版在内补绘制以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽,这是默认行为。 0.0 表示完全不屏蔽。 较低的值将有助于保持图像的整体构图,但很难遇到较大的变化。", "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用", "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. 
You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么", "Enable Autocomplete": "开启Tag补全", -- cgit v1.2.1 From 44ab954fabb9c1273366ebdca47f8da394d61aab Mon Sep 17 00:00:00 2001 From: random_thoughtss Date: Sat, 29 Oct 2022 10:02:56 -0700 Subject: Fix latent upscale highres fix #3888 --- modules/processing.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 548eec29..f18b7db2 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -653,6 +653,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if opts.use_scale_latent_for_hires_fix: samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") + image_conditioning = self.txt2img_image_conditioning(samples) else: decoded_samples = decode_first_stage(self.sd_model, samples) @@ -674,6 +675,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples)) + image_conditioning = self.img2img_image_conditioning( + decoded_samples, + samples, + decoded_samples.new_ones(decoded_samples.shape[0], 1, decoded_samples.shape[2], decoded_samples.shape[3]) + ) + shared.state.nextjob() self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model) @@ -684,11 +691,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = None devices.torch_gc() - image_conditioning = self.img2img_image_conditioning( - decoded_samples, - samples, - decoded_samples.new_ones(decoded_samples.shape[0], 1, decoded_samples.shape[2], decoded_samples.shape[3]) - ) samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=image_conditioning) return samples -- cgit v1.2.1 From 6e2ce4e735db64afcd0fe637327ca4ec78335706 Mon Sep 17 00:00:00 2001 From: random_thoughtss Date: Sat, 29 Oct 2022 10:35:51 -0700 Subject: Added image conditioning to latent upscale. Only comuted if the mask weight is not 1.0 to avoid extra memory. Also includes some code cleanup. --- modules/processing.py | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index f18b7db2..ee0e9e34 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -134,11 +134,7 @@ class StableDiffusionProcessing(): # Dummy zero conditioning if we're not using inpainting model. # Still takes up a bit of memory, but no encoder call. # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size. - return torch.zeros( - x.shape[0], 5, 1, 1, - dtype=x.dtype, - device=x.device - ) + return x.new_zeros(x.shape[0], 5, 1, 1) height = height or self.height width = width or self.width @@ -156,11 +152,7 @@ class StableDiffusionProcessing(): def img2img_image_conditioning(self, source_image, latent_image, image_mask = None): if self.sampler.conditioning_key not in {'hybrid', 'concat'}: # Dummy zero conditioning if we're not using inpainting model. 
- return torch.zeros( - latent_image.shape[0], 5, 1, 1, - dtype=latent_image.dtype, - device=latent_image.device - ) + return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1) # Handle the different mask inputs if image_mask is not None: @@ -174,11 +166,10 @@ class StableDiffusionProcessing(): # Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0 conditioning_mask = torch.round(conditioning_mask) else: - conditioning_mask = torch.ones(1, 1, *source_image.shape[-2:]) + conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:]) # Create another latent image, this time with a masked version of the original input. # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter. - conditioning_mask = conditioning_mask.to(source_image.device) conditioning_image = torch.lerp( source_image, source_image * (1.0 - conditioning_mask), @@ -653,7 +644,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if opts.use_scale_latent_for_hires_fix: samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") - image_conditioning = self.txt2img_image_conditioning(samples) + + # Avoid making the inpainting conditioning unless necessary as + # this does need some extra compute to decode / encode the image again. + if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0: + image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples) + else: + image_conditioning = self.txt2img_image_conditioning(samples) else: decoded_samples = decode_first_stage(self.sd_model, samples) @@ -675,11 +672,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples)) - image_conditioning = self.img2img_image_conditioning( - decoded_samples, - samples, - decoded_samples.new_ones(decoded_samples.shape[0], 1, decoded_samples.shape[2], decoded_samples.shape[3]) - ) + image_conditioning = self.img2img_image_conditioning(decoded_samples, samples) shared.state.nextjob() -- cgit v1.2.1 From 39f55c3c35873bc7dd9792cb2155746a1c3d4292 Mon Sep 17 00:00:00 2001 From: random_thoughtss Date: Sat, 29 Oct 2022 14:13:02 -0700 Subject: Re-add explicit device move --- modules/processing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/processing.py b/modules/processing.py index ee0e9e34..d07e3db9 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -170,6 +170,7 @@ class StableDiffusionProcessing(): # Create another latent image, this time with a masked version of the original input. # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter. 
+ conditioning_mask = conditioning_mask.to(source_image.device).to(source_image.dtype) conditioning_image = torch.lerp( source_image, source_image * (1.0 - conditioning_mask), -- cgit v1.2.1 From 2f125b0a97fe1d4fbd8e4c922615d2c8dfd723fb Mon Sep 17 00:00:00 2001 From: batvbs Date: Sun, 30 Oct 2022 13:07:25 +0800 Subject: =?UTF-8?q?=E6=9B=B4=E6=96=B0=20zh=5FCN.json?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- localizations/zh_CN.json | 102 +++++++++++++++++++++++------------------------ 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json index 94907ada..b3d83707 100644 --- a/localizations/zh_CN.json +++ b/localizations/zh_CN.json @@ -12,16 +12,16 @@ "Stable Diffusion checkpoint": "Stable Diffusion 模型(ckpt)", "txt2img": "文生图", "img2img": "图生图", - "Extras": "后处理", - "PNG Info": "PNG 信息", - "Checkpoint Merger": "模型(ckpt)合并工具", + "Extras": "更多", + "PNG Info": "图片信息", + "Checkpoint Merger": "模型(ckpt)合并", "Train": "训练", - "Create aesthetic embedding": "生成美术风格 embedding", + "Create aesthetic embedding": "生成美术风格", "Image Browser": "图库浏览器", "History": "历史记录", "Settings": "设置", "Prompt": "提示词", - "Negative prompt": "反向提示词", + "Negative prompt": "负面提示词", "Run": "运行", "Skip": "跳过", "Interrupt": "中止", @@ -61,10 +61,10 @@ "Firstpass height": "第一遍的高度", "Denoising strength": "去噪强度", "Batch count": "批次", - "Batch size": "批量", + "Batch size": "数量", "CFG Scale": "提示词相关性(CFG Scale)", "Seed": "随机种子", - "Extra": "额外参数", + "Extra": "▼", "Variation seed": "差异随机种子", "Variation strength": "差异强度", "Resize seed from width": "自宽度缩放随机种子", @@ -81,7 +81,7 @@ "Slerp angle": "Slerp 角度", "Is negative text": "是反向提示词", "Script": "脚本", - "Embedding to Shareable PNG": "将 Embedding 转换为可分享的 PNG", + "Embedding to Shareable PNG": "将 Embedding 转换为可分享的 PNG 图片文件", "Prompt matrix": "提示词矩阵", "Prompts from file or textbox": "从文本框或文件载入提示词", "X/Y plot": "X/Y 图表", @@ -123,7 +123,7 @@ "Save": "保存", "Send to img2img": ">> 图生图", "Send to inpaint": ">> 内补绘制", - "Send to extras": ">> 后处理", + "Send to extras": ">> 更多", "Make Zip when Save?": "保存时生成zip压缩文件?", "Textbox": "文本框", "Interrogate\nCLIP": "CLIP\n反推提示词", @@ -153,9 +153,9 @@ "Input directory": "输入目录", "Output directory": "输出目录", "Resize mode": "缩放模式", - "Just resize": "只缩放", - "Crop and resize": "缩放并剪裁", - "Resize and fill": "缩放并填充", + "Just resize": "拉伸", + "Crop and resize": "裁剪", + "Resize and fill": "填充", "img2img alternative test": "图生图的另一种测试", "Loopback": "回送", "Outpainting mk2": "外补绘制第二版", @@ -185,7 +185,6 @@ "Color variation": "色彩变化", "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "将图像放大到两倍尺寸; 使用宽度和高度滑块设置图块尺寸(tile size)", "Tile overlap": "图块重叠的像素(Tile overlap)", - "Upscaler": "放大算法", "Lanczos": "Lanczos", "LDSR": "LDSR", "BSRGAN 4x": "BSRGAN 4x", @@ -203,6 +202,7 @@ "Scale to": "指定尺寸缩放", "Resize": "缩放", "Crop to fit": "裁剪以适应", + "Upscaler 1": "放大算法 1", "Upscaler 2": "放大算法 2", "Upscaler 2 visibility": "放大算法 2 可见度", "GFPGAN visibility": "GFPGAN 可见度", @@ -293,11 +293,11 @@ "Focal point face weight": "焦点面部权重", "Focal point entropy weight": "焦点熵权重", "Focal point edges weight": "焦点线条权重", - "Create debug image": "生成除错图片", + "Create debug image": "生成调试(debug)图片", "Preprocess": "预处理", "Train an embedding; must specify a directory with a set of 1:1 ratio images": "训练 embedding; 必须指定一组具有 1:1 比例图像的目录", "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "训练 embedding 或者 hypernetwork; 
必须指定一组具有 1:1 比例图像的目录", - "[wiki]": "[wiki]", + "[wiki]": "[帮助]", "Embedding": "Embedding", "Embedding Learning rate": "Embedding 学习率", "Hypernetwork Learning rate": "Hypernetwork 学习率", @@ -308,7 +308,7 @@ "Max steps": "最大迭代步数", "Save an image to log directory every N steps, 0 to disable": "每 N 步保存一个图像到日志目录,0 表示禁用", "Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步将 embedding 的副本保存到日志目录,0 表示禁用", - "Save images with embedding in PNG chunks": "保存图像并在 PNG 文件中嵌入 embedding 文件", + "Save images with embedding in PNG chunks": "保存图像并在 PNG 图片文件中嵌入 embedding 文件", "Read parameters (prompt, etc...) from txt2img tab when making previews": "进行预览时从文生图选项卡中读取参数(提示词等)", "Train Hypernetwork": "训练 Hypernetwork", "Train Embedding": "训练 Embedding", @@ -316,10 +316,10 @@ "Create images embedding": "生成图集 embedding", "txt2img history": "文生图历史记录", "img2img history": "图生图历史记录", - "extras history": "后处理历史记录", + "extras history": "更多选项卡的历史记录", "Renew Page": "刷新页面", - "extras": "后处理", - "favorites": "收藏夹", + "extras": "更多", + "favorites": "已保存", "others": "其他", "custom fold": "自定义文件夹", "Load": "载入", @@ -341,7 +341,7 @@ "keyword": "关键词", "Generate Info": "生成信息", "File Name": "文件名", - "Collect": "收藏", + "Collect": "保存", "Refresh page": "刷新页面", "Date to": "日期至", "Number": "数量", @@ -350,37 +350,37 @@ "turn_page_switch": "翻页开关", "Checkbox": "勾选框", "Apply settings": "保存设置", - "Saving images/grids": "保存图像/概览图", + "Saving images/grids": "保存图像/宫格图", "Always save all generated images": "始终保存所有生成的图像", "File format for images": "图像的文件格式", "Images filename pattern": "图像文件名格式", "Add number to filename when saving": "储存的时候在文件名里添加数字", - "Always save all generated image grids": "始终保存所有生成的概览图", - "File format for grids": "概览图的文件格式", - "Add extended info (seed, prompt) to filename when saving grid": "保存概览时将扩展信息(随机种子、提示词)添加到文件名", - "Do not save grids consisting of one picture": "只有一张图片时不要保存概览图", - "Prevent empty spots in grid (when set to autodetect)": "(在自动检测时)防止概览图中出现空位", - "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "概览行数; 使用 -1 进行自动检测,使用 0 使其与批量大小相同", - "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息作为块保存到 png 文件中", + "Always save all generated image grids": "始终保存所有生成的宫格图", + "File format for grids": "宫格图的文件格式", + "Add extended info (seed, prompt) to filename when saving grid": "保存宫格图时将扩展信息(随机种子、提示词)添加到文件名", + "Do not save grids consisting of one picture": "只有一张图片时不要保存宫格图", + "Prevent empty spots in grid (when set to autodetect)": "(在自动检测时)防止宫格图中出现空位", + "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "宫格图行数; 使用 -1 进行自动检测,使用 0 使其与批量大小相同", + "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息作为块保存到 png 图片文件中", "Create a text file next to every image with generation parameters.": "保存图像时在每个图像旁边创建一个文本文件储存生成参数", "Save a copy of image before doing face restoration.": "在进行面部修复之前保存图像副本", "Quality for saved jpeg images": "保存的 jpeg 图像的质量", - "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000,则缩小并保存副本为 JPG", - "Use original name for output filename during batch process in extras tab": "在后处理选项卡中的批量处理过程中使用原始名称作为输出文件名", + "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000,则缩小并保存副本为 JPG 图片", + "Use original name for output filename during batch process in extras tab": "在更多选项卡中的批量处理过程中使用原始名称作为输出文件名", "When using 'Save' 
button, only save a single selected image": "使用“保存”按钮时,只保存一个选定的图像", "Do not add watermark to images": "不要给图像加水印", "Paths for saving": "保存路径", "Output directory for images; if empty, defaults to three directories below": "图像的输出目录; 如果为空,则默认为以下三个目录", "Output directory for txt2img images": "文生图的输出目录", "Output directory for img2img images": "图生图的输出目录", - "Output directory for images from extras tab": "后处理的输出目录", - "Output directory for grids; if empty, defaults to two directories below": "概览图的输出目录; 如果为空,则默认为以下两个目录", - "Output directory for txt2img grids": "文生图概览的输出目录", - "Output directory for img2img grids": "图生图概览的输出目录", + "Output directory for images from extras tab": "更多选项卡的输出目录", + "Output directory for grids; if empty, defaults to two directories below": "宫格图的输出目录; 如果为空,则默认为以下两个目录", + "Output directory for txt2img grids": "文生图宫格的输出目录", + "Output directory for img2img grids": "图生图宫格的输出目录", "Directory for saving images using the Save button": "使用“保存”按钮保存图像的目录", "Saving to a directory": "保存到目录", "Save images to a subdirectory": "将图像保存到子目录", - "Save grids to a subdirectory": "将概览图保存到子目录", + "Save grids to a subdirectory": "将宫格图保存到子目录", "When using \"Save\" button, save images to a subdirectory": "使用“保存”按钮时,将图像保存到子目录", "Directory name pattern": "目录名称格式", "Max prompt words for [prompt_words] pattern": "[prompt_words] 格式的最大提示词数量", @@ -405,7 +405,7 @@ "Filename word regex": "文件名用词的正则表达式", "Filename join string": "文件名连接用字符串", "Number of repeats for a single input image per epoch; used only for displaying epoch number": "每个 epoch 中单个输入图像的重复次数; 仅用于显示 epoch 数", - "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步保存一个包含 loss 的 csv 到日志目录,0 表示禁用", + "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步保存一个包含 loss 的 csv 表格到日志目录,0 表示禁用", "Stable Diffusion": "Stable Diffusion", "Checkpoints to cache in RAM": "缓存在内存(RAM)中的模型(ckpt)", "Hypernetwork strength": "Hypernetwork 强度", @@ -418,7 +418,7 @@ "Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用旧的强调符实现。可用于复现旧随机种子", "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器批量生成与生成单个图像时产出相同的图像", "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "当使用超过 75 个 token 时,通过从 n 个 token 中的最后一个逗号填补来提高一致性", - "Filter NSFW content": "过滤成人内容", + "Filter NSFW content": "过滤成人内容(NSFW)", "Stop At last layers of CLIP model": "在 CLIP 模型的最后哪一层停下", "Interrogate Options": "反推提示词选项", "Interrogate: keep models in VRAM": "反推: 将模型保存在显存(VRAM)中", @@ -436,18 +436,18 @@ "Show progressbar": "显示进度条", "Show image creation progress every N sampling steps. 
Set 0 to disable.": "每 N 个采样迭代步数显示图像生成进度。设置 0 禁用", "Show previews of all images generated in a batch as a grid": "以网格的形式预览所有批量生成出来的图像", - "Show grid in results for web": "在网页的结果中显示概览图", + "Show grid in results for web": "在网页的结果中显示宫格图", "Do not show any images in results for web": "不在网页的结果中显示任何图像", "Add model hash to generation information": "将模型的哈希值添加到生成信息", "Add model name to generation information": "将模型名称添加到生成信息", - "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "当从文本读取生成参数到 UI(从 PNG 信息或粘贴文本)时,不要更改选定的模型(ckpt)", + "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "从文本读取生成参数到 UI(从 PNG 图片信息或粘贴文本)时,不要更改选定的模型(ckpt)", "Send seed when sending prompt or image to other interface": "将提示词或者图片传送到其他界面的时候同时也把随机种子传送过去", - "Font for image grids that have text": "有文字的概览图使用的字体", + "Font for image grids that have text": "有文字的宫格图使用的字体", "Enable full page image viewer": "启用整页图像查看器", "Show images zoomed in by default in full page image viewer": "在整页图像查看器中默认放大显示图像", "Show generation progress in window title.": "在窗口标题中显示生成进度", "Quicksettings list": "快速设置列表", - "Localization (requires restart)": "本地化(需要重新启动)", + "Localization (requires restart)": "本地化翻译(需要保存设置,并重启Gradio)", "Sampler parameters": "采样器参数", "Hide samplers in user interface (requires restart)": "在用户界面中隐藏采样器(需要重新启动)", "eta (noise multiplier) for DDIM": "DDIM 的 eta (噪声乘数) ", @@ -472,8 +472,8 @@ "Download localization template": "下载本地化模板", "Reload custom script bodies (No ui updates, No restart)": "重新加载自定义脚本主体(无 ui 更新,无重启)", "Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重启 Gradio 及刷新组件(仅限自定义脚本、ui.py、js 和 css)", - "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)", - "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)", + "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nPrompt", + "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nNegative prompt", "Add a random artist to the prompt.": "随机添加一个艺术家到提示词中", "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面", "Save style": "储存为模版风格", @@ -500,14 +500,14 @@ "This text is used to rotate the feature space of the imgs embs": "此文本用于旋转图集 embeddings 的特征空间", "Separate values for X axis using commas.": "使用逗号分隔 X 轴的值", "Separate values for Y axis using commas.": "使用逗号分隔 Y 轴的值", - "Write image to a directory (default - log/images) and generation parameters into csv file.": "将图像写入目录(默认 - log/images)并将生成参数写入 csv 文件", + "Write image to a directory (default - log/images) and generation parameters into csv file.": "将图像写入目录(默认 - log/images)并将生成参数写入 csv 表格文件", "Open images output directory": "打开图像输出目录", "How much to blur the mask before processing, in pixels.": "处理前要对蒙版进行多强的模糊,以像素为单位", "What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么", - "fill it with colors of the image": "用图像的颜色填充它", - "keep whatever was there originally": "保留原来的东西", - "fill it with latent space noise": "用潜空间的噪声填充它", - "fill it with latent space zeroes": "用潜空间的零填充它", + "fill it with colors of the image": "用图像的颜色(模糊/马赛克)填充它", + "keep whatever was there originally": "保留原来的图像,不进行预处理", + "fill it with latent space noise": 
"用潜空间的噪声(随机彩色噪点)填充它", + "fill it with latent space zeroes": "用潜空间的零(灰色)填充它", "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做内补绘制,缩小后粘贴到原始图像中", "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比", "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分", @@ -526,7 +526,7 @@ "Input images directory": "输入图像目录", "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空", "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "如果启用此选项,水印将不会添加到生成出来的图像中。警告:如果你不添加水印,你的行为可能是不符合专业操守的", - "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和概览图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空", + "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和宫格图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空", "Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部", "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变", "This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行", @@ -536,6 +536,6 @@ "Enable Autocomplete": "开启Tag补全", "Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别", "Roll three": "抽三位出来", - "Generate forever": "不停地生成", - "Cancel generate forever": "取消不停地生成" + "Generate forever": "无限生成", + "Cancel generate forever": "停止无限生成" } -- cgit v1.2.1 From 71571e3f055237d71ba2d47756846ad1d73be00c Mon Sep 17 00:00:00 2001 From: random_thoughtss Date: Sun, 30 Oct 2022 00:35:40 -0700 Subject: Replaced master branch fix with updated fix. 
--- modules/processing.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 3dd44d3a..512c484f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -688,8 +688,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) - image_conditioning = self.txt2img_image_conditioning(x) - # GC now before running the next img2img to prevent running out of memory x = None devices.torch_gc() -- cgit v1.2.1 From be27fd4690b1eb6c74da1e31c9696a0f1901fbba Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 17:01:01 +0800 Subject: fix broken progress api by previous rework --- modules/shared.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/shared.py b/modules/shared.py index e4f163c1..2c7d28a5 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -4,6 +4,7 @@ import json import os import sys from collections import OrderedDict +import time import gradio as gr import tqdm @@ -132,6 +133,7 @@ class State: current_image = None current_image_sampling_step = 0 textinfo = None + time_start = None def skip(self): self.skipped = True @@ -168,6 +170,7 @@ class State: self.skipped = False self.interrupted = False self.textinfo = None + self.time_start = time.time() devices.torch_gc() -- cgit v1.2.1 From 1a4ff2de6a835cd8cc1590bbc1a8dedb5ad37e5b Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 17:02:47 +0800 Subject: fix current image in progress api when parallel processing enabled --- modules/api/api.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 6c06d449..97497f3f 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -3,10 +3,9 @@ import uvicorn from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image from fastapi import APIRouter, Depends, HTTPException import modules.shared as shared -from modules import devices from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images -from modules.sd_samplers import all_samplers +from modules.sd_samplers import all_samplers, sample_to_image, samples_to_image_grid from modules.extras import run_extras, run_pnginfo @@ -170,6 +169,16 @@ class Api: progress = min(progress, 1) + # copy from check_progress_call of ui.py + + if shared.parallel_processing_allowed: + if shared.state.sampling_step - shared.state.current_image_sampling_step >= shared.opts.show_progress_every_n_steps and shared.state.current_latent is not None: + if shared.opts.show_progress_grid: + shared.state.current_image = samples_to_image_grid(shared.state.current_latent) + else: + shared.state.current_image = sample_to_image(shared.state.current_latent) + shared.state.current_image_sampling_step = shared.state.sampling_step + current_image = None if shared.state.current_image and not req.skip_current_image: current_image = encode_pil_to_base64(shared.state.current_image) -- cgit v1.2.1 From b5e21e3348163f4a17d4a2e6f97af2c370edc9b3 Mon Sep 17 00:00:00 2001 From: batvbs <60730393+batvbs@users.noreply.github.com> Date: Sun, 30 Oct 2022 17:49:17 +0800 Subject: Update zh_CN.json --- localizations/zh_CN.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/localizations/zh_CN.json b/localizations/zh_CN.json index b3d83707..d07c03a7 100644 --- a/localizations/zh_CN.json +++ b/localizations/zh_CN.json @@ -21,7 +21,7 @@ "History": "历史记录", "Settings": "设置", "Prompt": "提示词", - "Negative prompt": "负面提示词", + "Negative prompt": "反向提示词", "Run": "运行", "Skip": "跳过", "Interrupt": "中止", -- cgit v1.2.1 From 34c86c12b0a9d650d4e7c5be478bca34ad8ed048 Mon Sep 17 00:00:00 2001 From: Martin Cairns <4314538+MartinCairnsSQL@users.noreply.github.com> Date: Sun, 30 Oct 2022 11:04:27 +0000 Subject: Include PLMS in adjust steps as it also can fail in the same way --- modules/sd_samplers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index aca014e8..8772db56 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -208,7 +208,7 @@ class VanillaStableDiffusionSampler: def adjust_steps_if_invalid(self, p, num_steps): - if self.config.name == 'DDIM' and p.ddim_discretize == 'uniform': + if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'): valid_step = 999 / (1000 // num_steps) if valid_step == floor(valid_step): return int(valid_step) + 1 -- cgit v1.2.1 From 99c4e8d65357ebd9e3eab95e7c6de4a86f125c1a Mon Sep 17 00:00:00 2001 From: batvbs Date: Sun, 30 Oct 2022 19:36:01 +0800 Subject: =?UTF-8?q?=E9=95=BF=E6=96=87=E6=9C=AC=E6=B7=BB=E5=8A=A0=E9=80=97?= =?UTF-8?q?=E5=8F=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- localizations/zh_CN.json | 50 ++++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json index d07c03a7..5472535e 100644 --- a/localizations/zh_CN.json +++ b/localizations/zh_CN.json @@ -60,8 +60,8 @@ "Firstpass width": "第一遍的宽度", "Firstpass height": "第一遍的高度", "Denoising strength": "去噪强度", - "Batch count": "批次", - "Batch size": "数量", + "Batch count": "生成批次", + "Batch size": "每批数量", "CFG Scale": "提示词相关性(CFG Scale)", "Seed": "随机种子", "Extra": "▼", @@ -224,7 +224,7 @@ "Add difference": "添加差分", "Save as float16": "以 float16 储存", "See": "查看", - "wiki": "wiki", + "wiki": "帮助", "for detailed explanation.": "以了解详细说明", "Create embedding": "生成 embedding", "Create aesthetic images embedding": "生成美术风格图集 embedding", @@ -308,8 +308,8 @@ "Max steps": "最大迭代步数", "Save an image to log directory every N steps, 0 to disable": "每 N 步保存一个图像到日志目录,0 表示禁用", "Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步将 embedding 的副本保存到日志目录,0 表示禁用", - "Save images with embedding in PNG chunks": "保存图像并在 PNG 图片文件中嵌入 embedding 文件", - "Read parameters (prompt, etc...) from txt2img tab when making previews": "进行预览时从文生图选项卡中读取参数(提示词等)", + "Save images with embedding in PNG chunks": "保存图像,并在 PNG 图片文件中嵌入 embedding 文件", + "Read parameters (prompt, etc...) 
from txt2img tab when making previews": "进行预览时,从文生图选项卡中读取参数(提示词等)", "Train Hypernetwork": "训练 Hypernetwork", "Train Embedding": "训练 Embedding", "Create an aesthetic embedding out of any number of images": "从任意数量的图像中创建美术风格 embedding", @@ -357,16 +357,16 @@ "Add number to filename when saving": "储存的时候在文件名里添加数字", "Always save all generated image grids": "始终保存所有生成的宫格图", "File format for grids": "宫格图的文件格式", - "Add extended info (seed, prompt) to filename when saving grid": "保存宫格图时将扩展信息(随机种子、提示词)添加到文件名", + "Add extended info (seed, prompt) to filename when saving grid": "保存宫格图时,将扩展信息(随机种子、提示词)添加到文件名", "Do not save grids consisting of one picture": "只有一张图片时不要保存宫格图", - "Prevent empty spots in grid (when set to autodetect)": "(在自动检测时)防止宫格图中出现空位", - "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "宫格图行数; 使用 -1 进行自动检测,使用 0 使其与批量大小相同", - "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息作为块保存到 png 图片文件中", - "Create a text file next to every image with generation parameters.": "保存图像时在每个图像旁边创建一个文本文件储存生成参数", + "Prevent empty spots in grid (when set to autodetect)": "(启用自动检测时)防止宫格图中出现空位", + "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "宫格图行数; 使用 -1 进行自动检测,使用 0 使其与每批数量相同", + "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息,作为块保存到 png 图片文件中", + "Create a text file next to every image with generation parameters.": "保存图像时,在每个图像旁边创建一个文本文件储存生成参数", "Save a copy of image before doing face restoration.": "在进行面部修复之前保存图像副本", "Quality for saved jpeg images": "保存的 jpeg 图像的质量", "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000,则缩小并保存副本为 JPG 图片", - "Use original name for output filename during batch process in extras tab": "在更多选项卡中的批量处理过程中使用原始名称作为输出文件名", + "Use original name for output filename during batch process in extras tab": "在更多选项卡中的批量处理过程中,使用原始名称作为输出文件名", "When using 'Save' button, only save a single selected image": "使用“保存”按钮时,只保存一个选定的图像", "Do not add watermark to images": "不要给图像加水印", "Paths for saving": "保存路径", @@ -391,12 +391,12 @@ "Tile overlap, in pixels for SwinIR. Low values = visible seam.": "SwinIR 的图块重叠(Tile overlap)像素。低值 = 可见接缝", "LDSR processing steps. Lower = faster": "LDSR 处理迭代步数。更低 = 更快", "Upscaler for img2img": "图生图的放大算法", - "Upscale latent space image when doing hires. fix": "做高分辨率修复时也放大潜空间图像", + "Upscale latent space image when doing hires. fix": "做高分辨率修复时,也放大潜空间图像", "Face restoration": "面部修复", "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 权重参数; 0 = 最大效果; 1 = 最小效果", - "Move face restoration model from VRAM into RAM after processing": "面部修复处理完成后将面部修复模型从显存(VRAM)移至内存(RAM)", + "Move face restoration model from VRAM into RAM after processing": "面部修复处理完成后,将面部修复模型从显存(VRAM)移至内存(RAM)", "System": "系统", - "VRAM usage polls per second during generation. Set to 0 to disable.": "生成图像时每秒轮询显存(VRAM)使用情况的次数。设置为 0 以禁用", + "VRAM usage polls per second during generation. Set to 0 to disable.": "生成图像时,每秒轮询显存(VRAM)使用情况的次数。设置为 0 以禁用", "Always print all generation info to standard output": "始终将所有生成信息输出到 standard output (一般为控制台)", "Add a second progress bar to the console that shows progress for an entire job.": "向控制台添加第二个进度条,显示整个作业的进度", "Training": "训练", @@ -416,7 +416,7 @@ "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. 
Requires restart to apply.": "在 K 采样器中启用量化以获得更清晰、更清晰的结果。这可能会改变现有的随机种子。需要重新启动才能应用", "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "强调符:使用 (文字) 使模型更关注该文本,使用 [文字] 使其减少关注", "Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用旧的强调符实现。可用于复现旧随机种子", - "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器批量生成与生成单个图像时产出相同的图像", + "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器 批量生成与生成单个图像时,产出相同的图像", "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "当使用超过 75 个 token 时,通过从 n 个 token 中的最后一个逗号填补来提高一致性", "Filter NSFW content": "过滤成人内容(NSFW)", "Stop At last layers of CLIP model": "在 CLIP 模型的最后哪一层停下", @@ -435,16 +435,16 @@ "User interface": "用户界面", "Show progressbar": "显示进度条", "Show image creation progress every N sampling steps. Set 0 to disable.": "每 N 个采样迭代步数显示图像生成进度。设置 0 禁用", - "Show previews of all images generated in a batch as a grid": "以网格的形式预览所有批量生成出来的图像", + "Show previews of all images generated in a batch as a grid": "以网格的形式,预览批量生成的所有图像", "Show grid in results for web": "在网页的结果中显示宫格图", "Do not show any images in results for web": "不在网页的结果中显示任何图像", "Add model hash to generation information": "将模型的哈希值添加到生成信息", "Add model name to generation information": "将模型名称添加到生成信息", "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "从文本读取生成参数到 UI(从 PNG 图片信息或粘贴文本)时,不要更改选定的模型(ckpt)", - "Send seed when sending prompt or image to other interface": "将提示词或者图片传送到其他界面的时候同时也把随机种子传送过去", + "Send seed when sending prompt or image to other interface": "将提示词或者图片发送到 >> 其他界面时,把随机种子也传送过去", "Font for image grids that have text": "有文字的宫格图使用的字体", "Enable full page image viewer": "启用整页图像查看器", - "Show images zoomed in by default in full page image viewer": "在整页图像查看器中默认放大显示图像", + "Show images zoomed in by default in full page image viewer": "在整页图像查看器中,默认放大显示图像", "Show generation progress in window title.": "在窗口标题中显示生成进度", "Quicksettings list": "快速设置列表", "Localization (requires restart)": "本地化翻译(需要保存设置,并重启Gradio)", @@ -460,7 +460,7 @@ "sigma noise": "sigma 噪声", "Eta noise seed delta": "Eta 噪声种子偏移(noise seed delta)", "Images Browser": "图库浏览器", - "Preload images at startup": "在启动时预载图像", + "Preload images at startup": "在启动时预加载图像", "Number of columns on the page": "每页列数", "Number of rows on the page": "每页行数", "Number of pictures displayed on each page": "每页显示的图像数量", @@ -478,15 +478,15 @@ "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面", "Save style": "储存为模版风格", "Apply selected styles to current prompt": "将所选样式应用于当前提示", - "Stop processing current image and continue processing.": "停止处理当前图像并继续处理下一个", - "Stop processing images and return any results accumulated so far.": "停止处理图像并返回迄今为止累积的任何结果", + "Stop processing current image and continue processing.": "停止处理当前图像,并继续处理下一个", + "Stop processing images and return any results accumulated so far.": "停止处理图像,并返回迄今为止累积的任何结果", "Style to apply; styles have components for both positive and negative prompts and apply to both": "要应用的模版风格; 模版风格包含正向和反向提示词,并应用于两者", "Do not do anything special": "什么都不做", "Which algorithm to use to produce the image": "使用哪种算法生成图像", "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher 
than 30-40 does not help": "Euler Ancestral - 非常有创意,可以根据迭代步数获得完全不同的图像,将迭代步数设置为高于 30-40 不会有正面作用", "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅长内补绘制", "Produce an image that can be tiled.": "生成可用于平铺(tiled)的图像", - "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用两步处理的时候以较小的分辨率生成初步图像、接着放大图像,然后在不更改构图的情况下改进其中的细节", + "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用两步处理的时候,以较小的分辨率生成初步图像、接着放大图像,然后在不更改构图的情况下改进其中的细节", "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "决定算法对图像内容的影响程度。设置 0 时,什么都不会改变,而在 1 时,你将获得不相关的图像。值低于 1.0 时,处理的迭代步数将少于“采样迭代步数”滑块指定的步数", "How many batches of images to create": "创建多少批次的图像", "How many image to create in a single batch": "每批创建多少图像", @@ -504,10 +504,10 @@ "Open images output directory": "打开图像输出目录", "How much to blur the mask before processing, in pixels.": "处理前要对蒙版进行多强的模糊,以像素为单位", "What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么", - "fill it with colors of the image": "用图像的颜色(模糊/马赛克)填充它", + "fill it with colors of the image": "用图像的颜色(≈模糊/马赛克)填充它", "keep whatever was there originally": "保留原来的图像,不进行预处理", - "fill it with latent space noise": "用潜空间的噪声(随机彩色噪点)填充它", - "fill it with latent space zeroes": "用潜空间的零(灰色)填充它", + "fill it with latent space noise": "用潜空间的噪声(≈随机彩色噪点)填充它", + "fill it with latent space zeroes": "用潜空间的零(≈灰色)填充它", "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做内补绘制,缩小后粘贴到原始图像中", "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比", "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分", -- cgit v1.2.1 From 4b8a192f680101de247dca79e48974b53bf961fe Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Sat, 29 Oct 2022 16:36:43 +0900 Subject: add optimizer save option to shared.opts --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared.py b/modules/shared.py index e4f163c1..065b893d 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -286,6 +286,7 @@ options_templates.update(options_section(('system', "System"), { options_templates.update(options_section(('training', "Training"), { "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM."), + "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. 
This will cause file size to increase VERY much."), "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), -- cgit v1.2.1 From 20194fd9752a280306fb66b57b258609b0918c46 Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Sat, 29 Oct 2022 16:56:42 +0900 Subject: We have duplicate linear now --- modules/hypernetworks/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index aad09ffc..c2d4b51c 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -9,7 +9,7 @@ from modules import devices, sd_hijack, shared from modules.hypernetworks import hypernetwork not_available = ["hardswish", "multiheadattention"] -keys = ["linear"] + list(x for x in hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available) +keys = list(x for x in hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available) def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False): # Remove illegal characters from name. -- cgit v1.2.1 From 9d96d7d0a0aa0a966a9aefd24342345eb65952ed Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Sun, 30 Oct 2022 20:39:04 +0900 Subject: resolve conflicts --- modules/hypernetworks/hypernetwork.py | 44 ++++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index a11e01d6..8f74cdea 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -21,6 +21,7 @@ from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_norm from collections import defaultdict, deque from statistics import stdev, mean +optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"} class HypernetworkModule(torch.nn.Module): multiplier = 1.0 @@ -139,6 +140,8 @@ class Hypernetwork: self.weight_init = weight_init self.add_layer_norm = add_layer_norm self.use_dropout = use_dropout + self.optimizer_name = None + self.optimizer_state_dict = None for size in enable_sizes or []: self.layers[size] = ( @@ -171,6 +174,10 @@ class Hypernetwork: state_dict['use_dropout'] = self.use_dropout state_dict['sd_checkpoint'] = self.sd_checkpoint state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name + if self.optimizer_name is not None: + state_dict['optimizer_name'] = self.optimizer_name + if self.optimizer_state_dict: + state_dict['optimizer_state_dict'] = self.optimizer_state_dict torch.save(state_dict, filename) @@ -190,7 +197,14 @@ class Hypernetwork: self.add_layer_norm = state_dict.get('is_layer_norm', False) print(f"Layer norm is set to {self.add_layer_norm}") self.use_dropout = state_dict.get('use_dropout', False) - print(f"Dropout usage is set to {self.use_dropout}" ) + print(f"Dropout usage is set to {self.use_dropout}") + self.optimizer_name = state_dict.get('optimizer_name', 'AdamW') + print(f"Optimizer name is {self.optimizer_name}") + self.optimizer_state_dict = 
state_dict.get('optimizer_state_dict', None) + if self.optimizer_state_dict: + print("Loaded existing optimizer from checkpoint") + else: + print("No saved optimizer exists in checkpoint") for size, sd in state_dict.items(): if type(size) == int: @@ -392,8 +406,19 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log weights = hypernetwork.weights() for weight in weights: weight.requires_grad = True - # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc... - optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) + # Here we use optimizer from saved HN, or we can specify as UI option. + if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict: + optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate) + else: + print(f"Optimizer type {optimizer_name} is not defined!") + optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate) + optimizer_name = 'AdamW' + if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer. + try: + optimizer.load_state_dict(hypernetwork.optimizer_state_dict) + except RuntimeError as e: + print("Cannot resume from saved optimizer!") + print(e) steps_without_grad = 0 @@ -455,8 +480,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log # Before saving, change name to match current checkpoint. hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}' last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt') + hypernetwork.optimizer_name = optimizer_name + if shared.opts.save_optimizer_state: + hypernetwork.optimizer_state_dict = optimizer.state_dict() save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file) - + hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { "loss": f"{previous_mean_loss:.7f}", "learn_rate": scheduler.learn_rate @@ -514,14 +542,18 @@ Last saved hypernetwork: {html.escape(last_saved_file)}
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" - report_statistics(loss_dict) filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + hypernetwork.optimizer_name = optimizer_name + if shared.opts.save_optimizer_state: + hypernetwork.optimizer_state_dict = optimizer.state_dict() save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename) - + del optimizer + hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. return hypernetwork, filename + def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename): old_hypernetwork_name = hypernetwork.name old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None -- cgit v1.2.1 From c9bb33dd43dbb9479ff1b70351df14508c89ac60 Mon Sep 17 00:00:00 2001 From: victorca25 Date: Sun, 30 Oct 2022 12:52:50 +0100 Subject: add resrgan 8x, allow use 1x and up to 8x extra models, move BSRGAN model, add nearest --- modules/esrgan_model.py | 17 +++++++++++++---- modules/modelloader.py | 3 +++ modules/ui.py | 2 +- modules/upscaler.py | 17 ++++++++++++++++- 4 files changed, 33 insertions(+), 6 deletions(-) diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index a13cf6ac..c61669b4 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -50,6 +50,7 @@ def mod2normal(state_dict): def resrgan2normal(state_dict, nb=23): # this code is copied from https://github.com/victorca25/iNNfer if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict: + re8x = 0 crt_net = {} items = [] for k, v in state_dict.items(): @@ -75,10 +76,18 @@ def resrgan2normal(state_dict, nb=23): crt_net['model.3.bias'] = state_dict['conv_up1.bias'] crt_net['model.6.weight'] = state_dict['conv_up2.weight'] crt_net['model.6.bias'] = state_dict['conv_up2.bias'] - crt_net['model.8.weight'] = state_dict['conv_hr.weight'] - crt_net['model.8.bias'] = state_dict['conv_hr.bias'] - crt_net['model.10.weight'] = state_dict['conv_last.weight'] - crt_net['model.10.bias'] = state_dict['conv_last.bias'] + + if 'conv_up3.weight' in state_dict: + # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py + re8x = 3 + crt_net['model.9.weight'] = state_dict['conv_up3.weight'] + crt_net['model.9.bias'] = state_dict['conv_up3.bias'] + + crt_net[f'model.{8+re8x}.weight'] = state_dict['conv_hr.weight'] + crt_net[f'model.{8+re8x}.bias'] = state_dict['conv_hr.bias'] + crt_net[f'model.{10+re8x}.weight'] = state_dict['conv_last.weight'] + crt_net[f'model.{10+re8x}.bias'] = state_dict['conv_last.bias'] + state_dict = crt_net return state_dict diff --git a/modules/modelloader.py b/modules/modelloader.py index b0f2f33d..e4a6f8ac 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -85,6 +85,9 @@ def cleanup_models(): src_path = os.path.join(root_path, "ESRGAN") dest_path = os.path.join(models_path, "ESRGAN") move_files(src_path, dest_path) + src_path = os.path.join(models_path, "BSRGAN") + dest_path = os.path.join(models_path, "ESRGAN") + move_files(src_path, dest_path, ".pth") src_path = os.path.join(root_path, "gfpgan") dest_path = os.path.join(models_path, "GFPGAN") move_files(src_path, dest_path) diff --git a/modules/ui.py b/modules/ui.py index 5055ca64..47610f5c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1059,7 +1059,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.Tabs(elem_id="extras_resize_mode"): with gr.TabItem('Scale by'): - upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2) + 
upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4) with gr.TabItem('Scale to'): with gr.Group(): with gr.Row(): diff --git a/modules/upscaler.py b/modules/upscaler.py index 6ab2fb40..83fde7ca 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -10,6 +10,7 @@ import modules.shared from modules import modelloader, shared LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) +NEAREST = (Image.Resampling.NEAREST if hasattr(Image, 'Resampling') else Image.NEAREST) from modules.paths import models_path @@ -57,7 +58,7 @@ class Upscaler: dest_w = img.width * scale dest_h = img.height * scale for i in range(3): - if img.width >= dest_w and img.height >= dest_h: + if img.width > dest_w and img.height > dest_h: break img = self.do_upscale(img, selected_model) if img.width != dest_w or img.height != dest_h: @@ -120,3 +121,17 @@ class UpscalerLanczos(Upscaler): self.name = "Lanczos" self.scalers = [UpscalerData("Lanczos", None, self)] + +class UpscalerNearest(Upscaler): + scalers = [] + + def do_upscale(self, img, selected_model=None): + return img.resize((int(img.width * self.scale), int(img.height * self.scale)), resample=NEAREST) + + def load_model(self, _): + pass + + def __init__(self, dirname=None): + super().__init__(False) + self.name = "Nearest" + self.scalers = [UpscalerData("Nearest", None, self)] \ No newline at end of file -- cgit v1.2.1 From 5d69f75e5bc5e8908cb6c590055157f8c7d4bb3b Mon Sep 17 00:00:00 2001 From: batvbs Date: Sun, 30 Oct 2022 21:24:28 +0800 Subject: =?UTF-8?q?=E6=9B=B4=E6=96=B0=20zh=5FCN.json?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- localizations/zh_CN.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json index 5472535e..b796c625 100644 --- a/localizations/zh_CN.json +++ b/localizations/zh_CN.json @@ -524,9 +524,9 @@ "Path to directory with input images": "带有输入图像的路径", "Path to directory where to write outputs": "进行输出的路径", "Input images directory": "输入图像目录", - "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空", + "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime