Diffstat (limited to 'modules')
-rw-r--r--  modules/api/api.py                   | 33
-rw-r--r--  modules/cmd_args.py                  |  2
-rw-r--r--  modules/images.py                    |  5
-rw-r--r--  modules/options.py                   | 19
-rwxr-xr-x  modules/processing.py                |  2
-rw-r--r--  modules/sd_models.py                 | 13
-rw-r--r--  modules/sd_samplers_cfg_denoiser.py  |  4
-rw-r--r--  modules/sd_vae.py                    |  4
-rw-r--r--  modules/shared.py                    |  2
-rw-r--r--  modules/shared_options.py            |  9
-rw-r--r--  modules/ui.py                        |  6
-rw-r--r--  modules/ui_components.py             | 12
12 files changed, 90 insertions, 21 deletions
diff --git a/modules/api/api.py b/modules/api/api.py
index 6e8d21a3..e6edffe7 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -4,6 +4,8 @@ import os
import time
import datetime
import uvicorn
+import ipaddress
+import requests
import gradio as gr
from threading import Lock
from io import BytesIO
@@ -55,10 +57,35 @@ def setUpscalers(req: dict):
return reqDict
+def verify_url(url):
+ """Returns True if the url refers to a global resource."""
+
+ import socket
+ from urllib.parse import urlparse
+ try:
+ parsed_url = urlparse(url)
+ domain_name = parsed_url.netloc
+ host = socket.gethostbyname_ex(domain_name)
+ for ip in host[2]:
+ ip_addr = ipaddress.ip_address(ip)
+ if not ip_addr.is_global:
+ return False
+ except Exception:
+ return False
+
+ return True
+
+
def decode_base64_to_image(encoding):
if encoding.startswith("http://") or encoding.startswith("https://"):
- import requests
- response = requests.get(encoding, timeout=30, headers={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'})
+ if not opts.api_enable_requests:
+ raise HTTPException(status_code=500, detail="Requests not allowed")
+
+ if opts.api_forbid_local_requests and not verify_url(encoding):
+ raise HTTPException(status_code=500, detail="Request to local resource not allowed")
+
+ headers = {'user-agent': opts.api_useragent} if opts.api_useragent else {}
+ response = requests.get(encoding, timeout=30, headers=headers)
try:
image = Image.open(BytesIO(response.content))
return image
@@ -543,7 +570,7 @@ class Api:
raise RuntimeError(f"model {checkpoint_name!r} not found")
for k, v in req.items():
- shared.opts.set(k, v)
+ shared.opts.set(k, v, is_api=True)
shared.opts.save(shared.config_filename)
return
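Note: the verify_url helper added above is an SSRF guard. Before the API fetches a user-supplied image URL, it resolves the hostname and only proceeds if every resolved address is globally routable, so loopback, private-range and link-local targets are refused. A minimal standalone sketch of the same check (the name is_global_url and the example URLs are illustrative, not part of the patch):

import ipaddress
import socket
from urllib.parse import urlparse

def is_global_url(url: str) -> bool:
    """Return True only if every address the host resolves to is globally routable."""
    try:
        host = urlparse(url).hostname  # hostname, unlike netloc, strips any :port suffix
        if host is None:
            return False
        _, _, addresses = socket.gethostbyname_ex(host)
        return all(ipaddress.ip_address(ip).is_global for ip in addresses)
    except (OSError, ValueError):
        return False

print(is_global_url("http://127.0.0.1:7860/x.png"))    # False - loopback
print(is_global_url("http://192.168.1.10/image.png"))  # False - RFC 1918 private range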
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index f360f484..9f8e5b30 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -37,7 +37,7 @@ parser.add_argument("--allow-code", action='store_true', help="allow custom scri
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
-parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
+parser.add_argument("--always-batch-cond-uncond", action='store_true', help="does not do anything")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
diff --git a/modules/images.py b/modules/images.py
index a6b4fb1e..eb644733 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -600,6 +600,11 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
"""
namegen = FilenameGenerator(p, seed, prompt, image)
+ # WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively. switch to PNG which has a much higher limit
+ if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp":
+ print('Image dimensions too large; saving as PNG')
+ extension = ".png"
+
if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
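Note: the dimension fallback above exists because JPEG cannot encode images larger than 65535 px per side and WebP tops out at 16383 px. A hedged restatement of the rule as a standalone function (pick_extension is a hypothetical name, not from the patch):

def pick_extension(width: int, height: int, extension: str) -> str:
    ext = extension.lower().lstrip(".")
    too_big_for_jpeg = ext in ("jpg", "jpeg") and max(width, height) > 65535
    too_big_for_webp = ext == "webp" and max(width, height) > 16383
    return ".png" if too_big_for_jpeg or too_big_for_webp else extension

print(pick_extension(20000, 512, "webp"))  # .png - exceeds WebP's 16383 px limit
print(pick_extension(20000, 512, "jpg"))   # jpg  - still within JPEG's 65535 px limit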
diff --git a/modules/options.py b/modules/options.py
index db1fb157..758b1ce5 100644
--- a/modules/options.py
+++ b/modules/options.py
@@ -8,7 +8,7 @@ from modules.shared_cmd_options import cmd_opts
class OptionInfo:
- def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None):
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False):
self.default = default
self.label = label
self.component = component
@@ -26,6 +26,9 @@ class OptionInfo:
self.infotext = infotext
+ self.restrict_api = restrict_api
+ """If True, the setting will not be accessible via API"""
+
def link(self, label, url):
self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]"
return self
@@ -71,7 +74,7 @@ options_builtin_fields = {"data_labels", "data", "restricted_opts", "typemap"}
class Options:
typemap = {int: float}
- def __init__(self, data_labels, restricted_opts):
+ def __init__(self, data_labels: dict[str, OptionInfo], restricted_opts):
self.data_labels = data_labels
self.data = {k: v.default for k, v in self.data_labels.items()}
self.restricted_opts = restricted_opts
@@ -113,14 +116,18 @@ class Options:
return super(Options, self).__getattribute__(item)
- def set(self, key, value):
+ def set(self, key, value, is_api=False, run_callbacks=True):
"""sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
oldval = self.data.get(key, None)
if oldval == value:
return False
- if self.data_labels[key].do_not_save:
+ option = self.data_labels[key]
+ if option.do_not_save:
+ return False
+
+ if is_api and option.restrict_api:
return False
try:
@@ -128,9 +135,9 @@ class Options:
except RuntimeError:
return False
- if self.data_labels[key].onchange is not None:
+ if run_callbacks and option.onchange is not None:
try:
- self.data_labels[key].onchange()
+ option.onchange()
except Exception as e:
errors.display(e, f"changing setting {key} to {value}")
setattr(self, key, oldval)
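Note: taken together, restrict_api and the new set() keywords make the caller declare where a write comes from: API writes to a restricted option are silently dropped (set returns False), UI/internal writes go through, and run_callbacks=False applies a value without firing its onchange handler. A usage sketch, assuming the webui modules are importable; the checkpoint filename is a placeholder:

from modules import shared

# Rejected: api_enable_requests is declared with restrict_api=True.
print(shared.opts.set("api_enable_requests", False, is_api=True))   # False

# Accepted: same option, written from the UI / internal code path.
print(shared.opts.set("api_enable_requests", False))                # True

# Applied without firing onchange, as processing.py now does for override_settings,
# so a checkpoint reload can be triggered explicitly afterwards.
shared.opts.set("sd_model_checkpoint", "example.safetensors", is_api=True, run_callbacks=False)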
diff --git a/modules/processing.py b/modules/processing.py
index d4926524..e60cc92b 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -709,7 +709,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
sd_models.reload_model_weights()
for k, v in p.override_settings.items():
- setattr(opts, k, v)
+ opts.set(k, v, is_api=True, run_callbacks=False)
if k == 'sd_model_checkpoint':
sd_models.reload_model_weights()
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 685585b1..27d15e66 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -485,8 +485,12 @@ class SdModelData:
return self.sd_model
- def set_sd_model(self, v):
+ def set_sd_model(self, v, already_loaded=False):
self.sd_model = v
+ if already_loaded:
+ sd_vae.base_vae = getattr(v, "base_vae", None)
+ sd_vae.loaded_vae_file = getattr(v, "loaded_vae_file", None)
+ sd_vae.checkpoint_info = v.sd_checkpoint_info
try:
self.loaded_sd_models.remove(v)
@@ -660,13 +664,14 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
send_model_to_device(already_loaded)
timer.record("send model to device")
- model_data.set_sd_model(already_loaded)
+ model_data.set_sd_model(already_loaded, already_loaded=True)
if not SkipWritingToConfig.skip:
shared.opts.data["sd_model_checkpoint"] = already_loaded.sd_checkpoint_info.title
shared.opts.data["sd_checkpoint_hash"] = already_loaded.sd_checkpoint_info.sha256
print(f"Using already loaded model {already_loaded.sd_checkpoint_info.title}: done in {timer.summary()}")
+ sd_vae.reload_vae_weights(already_loaded)
return model_data.sd_model
elif shared.opts.sd_checkpoints_limit > 1 and len(model_data.loaded_sd_models) < shared.opts.sd_checkpoints_limit:
print(f"Loading model {checkpoint_info.title} ({len(model_data.loaded_sd_models) + 1} out of {shared.opts.sd_checkpoints_limit})")
@@ -678,6 +683,10 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
sd_model = model_data.loaded_sd_models.pop()
model_data.sd_model = sd_model
+ sd_vae.base_vae = getattr(sd_model, "base_vae", None)
+ sd_vae.loaded_vae_file = getattr(sd_model, "loaded_vae_file", None)
+ sd_vae.checkpoint_info = sd_model.sd_checkpoint_info
+
print(f"Reusing loaded model {sd_model.sd_checkpoint_info.title} to load {checkpoint_info.title}")
return sd_model
else:
diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py
index bc9b97e4..b8101d38 100644
--- a/modules/sd_samplers_cfg_denoiser.py
+++ b/modules/sd_samplers_cfg_denoiser.py
@@ -165,7 +165,7 @@ class CFGDenoiser(torch.nn.Module):
else:
cond_in = catenate_conds([tensor, uncond])
- if shared.batch_cond_uncond:
+ if shared.opts.batch_cond_uncond:
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
else:
x_out = torch.zeros_like(x_in)
@@ -175,7 +175,7 @@ class CFGDenoiser(torch.nn.Module):
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
else:
x_out = torch.zeros_like(x_in)
- batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
+ batch_size = batch_size*2 if shared.opts.batch_cond_uncond else batch_size
for batch_offset in range(0, tensor.shape[0], batch_size):
a = batch_offset
b = min(a + batch_size, tensor.shape[0])
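Note: batch_cond_uncond, now a setting rather than a value derived from --medvram/--lowvram at startup, controls whether the conditional and unconditional passes share one model call. A hedged illustration of the chunking loop it toggles (run_chunked is illustrative, not code from the patch):

import torch

def run_chunked(model, x_in, batch_size, batch_cond_uncond=True):
    # With batching on, cond and uncond halves go through in one call per chunk
    # (twice the effective batch); with it off, chunks stay small to cap peak VRAM.
    step = batch_size * 2 if batch_cond_uncond else batch_size
    out = torch.zeros_like(x_in)
    for a in range(0, x_in.shape[0], step):
        b = min(a + step, x_in.shape[0])
        out[a:b] = model(x_in[a:b])
    return out

out = run_chunked(lambda t: t * 2, torch.randn(8, 4), batch_size=2, batch_cond_uncond=False)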
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index dbade067..ee118656 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -192,7 +192,7 @@ def load_vae_dict(filename, map_location):
def load_vae(model, vae_file=None, vae_source="from unknown source"):
- global vae_dict, loaded_vae_file
+ global vae_dict, base_vae, loaded_vae_file
# save_settings = False
cache_enabled = shared.opts.sd_vae_checkpoint_cache > 0
@@ -230,6 +230,8 @@ def load_vae(model, vae_file=None, vae_source="from unknown source"):
restore_base_vae(model)
loaded_vae_file = vae_file
+ model.base_vae = base_vae
+ model.loaded_vae_file = loaded_vae_file
# don't call this from outside
diff --git a/modules/shared.py b/modules/shared.py
index d9d01484..0c57b712 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -10,7 +10,7 @@ from modules import util
cmd_opts = shared_cmd_options.cmd_opts
parser = shared_cmd_options.parser
-batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
+batch_cond_uncond = True # old field, unused now in favor of shared.opts.batch_cond_uncond
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
styles_filename = cmd_opts.styles_file
config_filename = cmd_opts.ui_settings_file
diff --git a/modules/shared_options.py b/modules/shared_options.py
index 8630d474..095cf479 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -111,6 +111,12 @@ options_templates.update(options_section(('system', "System"), {
"hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."),
}))
+options_templates.update(options_section(('API', "API"), {
+ "api_enable_requests": OptionInfo(True, "Allow http:// and https:// URLs for input images in API", restrict_api=True),
+ "api_forbid_local_requests": OptionInfo(True, "Forbid URLs to local resources", restrict_api=True),
+ "api_useragent": OptionInfo("", "User agent for requests", restrict_api=True),
+}))
+
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
@@ -186,7 +192,8 @@ options_templates.update(options_section(('optimizations', "Optimizations"), {
"token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
"token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"),
"pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
- "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"),
+ "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"),
+ "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
}))
options_templates.update(options_section(('compatibility', "Compatibility"), {
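Note: on the client side, the new API options mean URL inputs keep working by default but can be locked down per deployment: with api_enable_requests off the server refuses URL-form images outright, and with api_forbid_local_requests on it refuses URLs that resolve to non-global addresses (HTTP 500 in both cases). A hedged client sketch against the existing img2img endpoint; host, port, image URL and prompt are placeholders:

import requests

payload = {
    "init_images": ["https://example.com/input.png"],  # fetched server-side via decode_base64_to_image
    "prompt": "a watercolor landscape",
    "steps": 20,
}
r = requests.post("http://127.0.0.1:7860/sdapi/v1/img2img", json=payload, timeout=120)
r.raise_for_status()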
diff --git a/modules/ui.py b/modules/ui.py
index 01f77849..2b6a13cb 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -13,7 +13,7 @@ from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_grad
from modules import gradio_extensons # noqa: F401
from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks
-from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion
+from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion, ResizeHandleRow
from modules.paths import script_path
from modules.ui_common import create_refresh_button
from modules.ui_gradio_extensions import reload_javascript
@@ -333,7 +333,7 @@ def create_ui():
extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs")
extra_tabs.__enter__()
- with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row(equal_height=False):
+ with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, ResizeHandleRow(equal_height=False):
with gr.Column(variant='compact', elem_id="txt2img_settings"):
scripts.scripts_txt2img.prepare_ui()
@@ -549,7 +549,7 @@ def create_ui():
extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs")
extra_tabs.__enter__()
- with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow(equal_height=False):
+ with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False):
with gr.Column(variant='compact', elem_id="img2img_settings"):
copy_image_buttons = []
copy_image_destinations = {}
diff --git a/modules/ui_components.py b/modules/ui_components.py
index d08b2b99..55979f62 100644
--- a/modules/ui_components.py
+++ b/modules/ui_components.py
@@ -20,6 +20,18 @@ class ToolButton(FormComponent, gr.Button):
return "button"
+class ResizeHandleRow(gr.Row):
+ """Same as gr.Row but fits inside gradio forms"""
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ self.elem_classes.append("resize-handle-row")
+
+ def get_block_name(self):
+ return "row"
+
+
class FormRow(FormComponent, gr.Row):
"""Same as gr.Row but fits inside gradio forms"""