-rw-r--r--  javascript/badScaleChecker.js | 108
-rw-r--r--  modules/api/models.py         |   6
-rw-r--r--  modules/extensions.py         |   2
-rw-r--r--  modules/processing.py         |  63
-rw-r--r--  modules/sd_hijack_clip.py     |   9
5 files changed, 35 insertions(+), 153 deletions(-)
diff --git a/javascript/badScaleChecker.js b/javascript/badScaleChecker.js
deleted file mode 100644
index 625ad309..00000000
--- a/javascript/badScaleChecker.js
+++ /dev/null
@@ -1,108 +0,0 @@
-(function() {
- var ignore = localStorage.getItem("bad-scale-ignore-it") == "ignore-it";
-
- function getScale() {
- var ratio = 0,
- screen = window.screen,
- ua = navigator.userAgent.toLowerCase();
-
- if (window.devicePixelRatio !== undefined) {
- ratio = window.devicePixelRatio;
- } else if (~ua.indexOf('msie')) {
- if (screen.deviceXDPI && screen.logicalXDPI) {
- ratio = screen.deviceXDPI / screen.logicalXDPI;
- }
- } else if (window.outerWidth !== undefined && window.innerWidth !== undefined) {
- ratio = window.outerWidth / window.innerWidth;
- }
-
- return ratio == 0 ? 0 : Math.round(ratio * 100);
- }
-
- var showing = false;
-
- var div = document.createElement("div");
- div.style.position = "fixed";
- div.style.top = "0px";
- div.style.left = "0px";
- div.style.width = "100vw";
- div.style.backgroundColor = "firebrick";
- div.style.textAlign = "center";
- div.style.zIndex = 99;
-
- var b = document.createElement("b");
- b.innerHTML = 'Bad Scale: ??% ';
-
- div.appendChild(b);
-
- var note1 = document.createElement("p");
- note1.innerHTML = "Change your browser or your computer settings!";
- note1.title = 'Just make sure "computer-scale" * "browser-scale" = 100% ,\n' +
- "you can keep your computer-scale and only change this page's scale,\n" +
- "for example: your computer-scale is 125%, just use [\"CTRL\"+\"-\"] to make your browser-scale of this page to 80%.";
- div.appendChild(note1);
-
- var note2 = document.createElement("p");
- note2.innerHTML = " Otherwise, it will cause this page to not function properly!";
- note2.title = "When you click \"Copy image to: [inpaint sketch]\" in some img2img's tab,\n" +
- "if scale<100% the canvas will be invisible,\n" +
- "else if scale>100% this page will take large amount of memory and CPU performance.";
- div.appendChild(note2);
-
- var btn = document.createElement("button");
- btn.innerHTML = "Click here to ignore";
-
- div.appendChild(btn);
-
- function tryShowTopBar(scale) {
- if (showing) return;
-
- b.innerHTML = 'Bad Scale: ' + scale + '% ';
-
- var updateScaleTimer = setInterval(function() {
- var newScale = getScale();
- b.innerHTML = 'Bad Scale: ' + newScale + '% ';
- if (newScale == 100) {
- var p = div.parentNode;
- if (p != null) p.removeChild(div);
- showing = false;
- clearInterval(updateScaleTimer);
- check();
- }
- }, 999);
-
- btn.onclick = function() {
- clearInterval(updateScaleTimer);
- var p = div.parentNode;
- if (p != null) p.removeChild(div);
- ignore = true;
- showing = false;
- localStorage.setItem("bad-scale-ignore-it", "ignore-it");
- };
-
- document.body.appendChild(div);
- }
-
- function check() {
- if (!ignore) {
- var timer = setInterval(function() {
- var scale = getScale();
- if (scale != 100 && !ignore) {
- tryShowTopBar(scale);
- clearInterval(timer);
- }
- if (ignore) {
- clearInterval(timer);
- }
- }, 999);
- }
- }
-
- if (document.readyState != "complete") {
- document.onreadystatechange = function() {
- if (document.readyState != "complete") check();
- };
- } else {
- check();
- }
-})();
diff --git a/modules/api/models.py b/modules/api/models.py
index bf97b1a3..800c9b93 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -208,11 +208,9 @@ class PreprocessResponse(BaseModel):
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
- optType = opts.typemap.get(type(metadata.default), type(metadata.default))
+ optType = opts.typemap.get(type(metadata.default), type(metadata.default)) if metadata.default else Any
- if metadata.default is None:
- pass
- elif metadata is not None:
+ if metadata is not None:
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
else:
fields.update({key: (Optional[optType], Field())})
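Note on the models.py hunk above: when an option has no default value its concrete type cannot be inferred, so the field type now falls back to typing.Any instead of being special-cased and skipped. A minimal sketch of that pattern, using made-up option entries and pydantic.create_model rather than the webui's own opts object:

    from typing import Any, Optional
    from pydantic import Field, create_model

    # hypothetical option metadata: key -> (default, label)
    data_labels = {
        "sd_model_checkpoint": (None, "Checkpoint name"),  # no default -> fall back to Any
        "CLIP_stop_at_last_layers": (1, "Clip skip"),       # concrete default -> use its type
    }

    fields = {}
    for key, (default, label) in data_labels.items():
        opt_type = type(default) if default else Any
        fields[key] = (Optional[opt_type], Field(default=default, description=label))

    OptionsModel = create_model("OptionsModel", **fields)
    print(OptionsModel(sd_model_checkpoint="v1-5-pruned.safetensors"))
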
diff --git a/modules/extensions.py b/modules/extensions.py
index 09d1e550..3ad5ed53 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -61,7 +61,7 @@ class Extension:
self.from_dict(d)
except FileNotFoundError:
pass
- self.status = 'unknown'
+ self.status = 'unknown' if self.status == '' else self.status
def do_read_info_from_repo(self):
repo = None
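The extensions.py change keeps a status that was already populated (for example, restored by the from_dict call in the surrounding try block) instead of unconditionally resetting it to 'unknown'. A tiny sketch of the intended flow, with an assumed cache-dict shape used purely for illustration:

    class Extension:
        def __init__(self):
            self.status = ''

        def from_dict(self, d):
            self.status = d.get('status', '')

        def read_info(self, cached=None):
            if cached is not None:
                self.from_dict(cached)
            # only fall back to 'unknown' when nothing set the status earlier
            self.status = 'unknown' if self.status == '' else self.status

    ext = Extension()
    ext.read_info(cached={'status': 'latest'})
    print(ext.status)  # 'latest' is preserved instead of being clobbered to 'unknown'
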
diff --git a/modules/processing.py b/modules/processing.py
index 6dc178e1..b0992ee1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -600,8 +600,12 @@ def program_version():
return res
-def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False):
- index = position_in_batch + iteration * p.batch_size
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
+ if index is None:
+ index = position_in_batch + iteration * p.batch_size
+
+ if all_negative_prompts is None:
+ all_negative_prompts = p.all_negative_prompts
clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
enable_hr = getattr(p, 'enable_hr', False)
@@ -617,12 +621,12 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Sampler": p.sampler_name,
"CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
- "Seed": all_seeds[index],
+ "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
- "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
+ "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
@@ -642,7 +646,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
prompt_text = p.prompt if use_main_prompt else all_prompts[index]
- negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else ""
+ negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""
return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
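With the added keyword arguments, create_infotext can be driven in two ways: the original (iteration, position_in_batch) pair, from which the absolute index is still derived, or an explicit index plus a caller-supplied negative prompt list. A hedged usage sketch (p stands for the StableDiffusionProcessing object; n and i are illustrative loop indices):

    # old style: absolute index derived as position_in_batch + iteration * p.batch_size
    text = create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds,
                           iteration=n, position_in_batch=i)

    # new style: pass the in-batch index and the batch-local negative prompts directly,
    # so callers no longer have to patch p.all_negative_prompts around the call
    text = create_infotext(p, p.prompts, p.seeds, p.subseeds,
                           index=i, all_negative_prompts=p.negative_prompts)
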
@@ -716,29 +720,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
else:
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
- def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
- all_prompts = p.all_prompts[:]
- all_negative_prompts = p.all_negative_prompts[:]
- all_seeds = p.all_seeds[:]
- all_subseeds = p.all_subseeds[:]
-
- # apply changes to generation data
- all_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.prompts
- all_negative_prompts[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.negative_prompts
- all_seeds[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.seeds
- all_subseeds[iteration * p.batch_size:(iteration + 1) * p.batch_size] = p.subseeds
-
- # update p.all_negative_prompts in case extensions changed the size of the batch
- # create_infotext below uses it
- old_negative_prompts = p.all_negative_prompts
- p.all_negative_prompts = all_negative_prompts
-
- try:
- return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch, use_main_prompt)
- finally:
- # restore p.all_negative_prompts in case extensions changed the size of the batch
- p.all_negative_prompts = old_negative_prompts
-
if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
model_hijack.embedding_db.load_textual_inversion_embeddings()
@@ -826,9 +807,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None:
p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)
- postprocess_batch_list_args = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
- p.scripts.postprocess_batch_list(p, postprocess_batch_list_args, batch_number=n)
- x_samples_ddim = postprocess_batch_list_args.images
+ p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+ p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+
+ batch_params = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
+ p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
+ x_samples_ddim = batch_params.images
+
+ def infotext(index=0, use_main_prompt=False):
+ return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)
for i, x_sample in enumerate(x_samples_ddim):
p.batch_index = i
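The p.prompts / p.negative_prompts assignments above take the slice of the full lists that belongs to batch n, and the new local infotext() helper then indexes into those batch-local lists. A small worked example of the slice arithmetic:

    batch_size = 2
    all_prompts = ["p0", "p1", "p2", "p3", "p4", "p5"]
    n = 1  # second batch
    prompts = all_prompts[n * batch_size:(n + 1) * batch_size]
    print(prompts)  # ['p2', 'p3'] -- the items handed to postprocess_batch_list and infotext()
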
@@ -838,7 +825,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.restore_faces:
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
- images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
+ images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")
devices.torch_gc()
@@ -855,15 +842,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
- images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
+ images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
image = apply_color_correction(p.color_corrections[i], image)
image = apply_overlay(image, p.paste_to, i, p.overlay_images)
if opts.samples_save and not p.do_not_save_samples:
- images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p)
+ images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)
- text = infotext(n, i)
+ text = infotext(i)
infotexts.append(text)
if opts.enable_pnginfo:
image.info["parameters"] = text
@@ -874,10 +861,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
if opts.save_mask:
- images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")
+ images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
if opts.save_mask_composite:
- images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")
+ images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
if opts.return_mask:
output_images.append(image_mask)
@@ -918,7 +905,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
p,
images_list=output_images,
seed=p.all_seeds[0],
- info=infotext(),
+ info=infotexts[0],
comments="".join(f"{comment}\n" for comment in comments),
subseed=p.all_subseeds[0],
index_of_first_image=index_of_first_image,
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 5443e609..16a5500e 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -270,12 +270,17 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
z = self.encode_with_transformers(tokens)
+ pooled = getattr(z, 'pooled', None)
+
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
batch_multipliers = torch.asarray(batch_multipliers).to(devices.device)
original_mean = z.mean()
- z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
+ z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
new_mean = z.mean()
- z *= (original_mean / new_mean)
+ z = z * (original_mean / new_mean)
+
+ if pooled is not None:
+ z.pooled = pooled
return z
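The sd_hijack_clip.py hunk switches from in-place `*=` to out-of-place multiplication; an out-of-place op returns a new tensor object, which silently drops any extra Python attribute attached to the old one (here assumed to carry a pooled text embedding, as the attribute name suggests), hence saving it beforehand and re-attaching it afterwards. A minimal sketch of that behavior with plain torch tensors:

    import torch

    z = torch.ones(2, 77, 768)
    z.pooled = torch.zeros(2, 1280)   # extra attribute attached to the tensor object

    pooled = getattr(z, 'pooled', None)
    z = z * 1.5                       # returns a new tensor...
    print(hasattr(z, 'pooled'))       # ...so the attribute is gone: False

    if pooled is not None:
        z.pooled = pooled             # re-attach, as the hunk above does
    print(hasattr(z, 'pooled'))       # True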