author    AUTOMATIC <16777216c@gmail.com>    2022-09-17 15:39:30 +0300
committer AUTOMATIC <16777216c@gmail.com>    2022-09-17 15:39:30 +0300
commit  77dcb21688a121e4b9f93cd614546daad90f5a6c (patch)
tree    272dd43fc3c71e7281d1879035caab60331a709f
parent  2f18823e69ec1dd7622f652561e197a576dc3b80 (diff)
parent  d94b41472e0e799ab0059fd711f52f1fba954be6 (diff)
Merge remote-tracking branch 'origin/master'
-rw-r--r--  modules/extras.py             |  5
-rw-r--r--  modules/processing.py         |  9
-rw-r--r--  script.js                     | 65
-rw-r--r--  scripts/prompts_from_file.py  | 36
-rw-r--r--  style.css                     | 37
5 files changed, 136 insertions, 16 deletions
diff --git a/modules/extras.py b/modules/extras.py
index 38d6ec48..3d9d9f7a 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -111,8 +111,9 @@ def run_pnginfo(image):
items['exif comment'] = exif_comment
- for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif']:
- del items[field]
+ for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
+ 'loop', 'background', 'timestamp', 'duration']:
+ items.pop(field, None)
info = ''
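
Note on the extras.py hunk above: "del items[field]" raises KeyError whenever one of the listed fields is absent from the metadata, while "items.pop(field, None)" removes the key if present and silently does nothing otherwise, so the longer field list stays safe. A minimal sketch of the difference, using a made-up metadata dict rather than real PNG info:

    items = {'jfif': 257, 'dpi': (96, 96)}   # hypothetical metadata

    # del requires the key to exist:
    #   del items['loop']   ->   KeyError: 'loop'

    # pop with a default never raises:
    items.pop('loop', None)   # absent: returns None, dict unchanged
    items.pop('dpi', None)    # present: removes the key, returns (96, 96)
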
diff --git a/modules/processing.py b/modules/processing.py
index 3a4ff224..6a99d383 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -188,7 +188,11 @@ def fix_seed(p):
def process_images(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
- assert p.prompt is not None
+ if isinstance(p.prompt, list):
+ assert len(p.prompt) > 0
+ else:
+ assert p.prompt is not None
+
devices.torch_gc()
fix_seed(p)
@@ -265,6 +269,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
seeds = all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
subseeds = all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
+ if len(prompts) == 0:
+ break
+
#uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt])
#c = p.sd_model.get_learned_conditioning(prompts)
uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps)
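
Note on the processing.py hunks above: now that p.prompt may be a list of per-image prompts (e.g. from the prompts-from-file script), the per-batch slice can come up empty once the list is exhausted, which is what the new "if len(prompts) == 0: break" guard handles. A small standalone sketch of that slicing pattern, with made-up values rather than the module's own objects:

    all_prompts = ['a', 'b', 'c', 'd', 'e']   # hypothetical prompt list
    batch_size = 2

    for n in range(4):   # deliberately one iteration too many
        prompts = all_prompts[n * batch_size:(n + 1) * batch_size]
        if len(prompts) == 0:
            break                    # n == 3: slice past the end is empty
        print(n, prompts)            # 0 ['a','b'] / 1 ['c','d'] / 2 ['e']
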
diff --git a/script.js b/script.js
index e63e0695..a016eb4e 100644
--- a/script.js
+++ b/script.js
@@ -76,6 +76,41 @@ function gradioApp(){
global_progressbar = null
+function closeModal() {
+ gradioApp().getElementById("lightboxModal").style.display = "none";
+}
+
+function showModal(elem) {
+ gradioApp().getElementById("modalImage").src = elem.src
+ gradioApp().getElementById("lightboxModal").style.display = "block";
+}
+
+function showGalleryImage(){
+ setTimeout(function() {
+ fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain')
+
+ if(fullImg_preview != null){
+ fullImg_preview.forEach(function(e) {
+ if(e && e.parentElement.tagName == 'DIV'){
+ e.style.cursor='pointer'
+ e.onclick = function(){showModal(e)};
+ }
+ });
+ }
+
+ }, 100);
+}
+
+function galleryImageHandler(e){
+ if(e && e.parentElement.tagName == 'BUTTON'){
+ e.onclick = showGalleryImage;
+ }
+}
+
function addTitles(root){
root.querySelectorAll('span, button, select').forEach(function(span){
tooltip = titles[span.textContent];
@@ -117,13 +152,18 @@ function addTitles(root){
img2img_preview.style.width = img2img_gallery.clientWidth + "px"
img2img_preview.style.height = img2img_gallery.clientHeight + "px"
}
-
-
+
window.setTimeout(requestProgress, 500)
});
mutationObserver.observe( progressbar, { childList:true, subtree:true })
}
+
+ fullImg_preview = gradioApp().querySelectorAll('img.w-full')
+ if(fullImg_preview != null){
+ fullImg_preview.forEach(galleryImageHandler);
+ }
+
}
document.addEventListener("DOMContentLoaded", function() {
@@ -131,6 +171,27 @@ document.addEventListener("DOMContentLoaded", function() {
addTitles(gradioApp());
});
mutationObserver.observe( gradioApp(), { childList:true, subtree:true })
+
+ const modalFragment = document.createDocumentFragment();
+ const modal = document.createElement('div')
+ modal.onclick = closeModal;
+
+ const modalClose = document.createElement('span')
+ modalClose.className = 'modalClose cursor';
+ modalClose.innerHTML = '&times;'
+ modalClose.onclick = closeModal;
+ modal.id = "lightboxModal";
+ modal.appendChild(modalClose)
+
+ const modalImage = document.createElement('img')
+ modalImage.id = 'modalImage';
+ modalImage.onclick = closeModal;
+ modal.appendChild(modalImage)
+
+ modalFragment.appendChild(modal)
+
+ gradioApp().getRootNode().appendChild(modalFragment)
+
});
function selected_gallery_index(){
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index d9b01c81..513d9a1c 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -13,28 +13,42 @@ from modules.shared import opts, cmd_opts, state
class Script(scripts.Script):
def title(self):
- return "Prompts from file"
+ return "Prompts from file or textbox"
def ui(self, is_img2img):
+ # This checkbox would look nicer as two tabs, but there are two problems:
+ # 1) There is a bug in Gradio 3.3 that prevents visibility from working on Tabs
+ # 2) Even with Gradio 3.3.1, returning a control (like Tabs) that can't be used as input
+ raises an AttributeError ('Tabs' object has no attribute 'preprocess'),
+ # due to the way Script assumes all controls returned can be used as inputs.
+ # Therefore, there's no good way to use grouping components right now,
+ # so we will use a checkbox! :)
+ checkbox_txt = gr.Checkbox(label="Show Textbox", value=False)
file = gr.File(label="File with inputs", type='bytes')
-
- return [file]
-
- def run(self, p, data: bytes):
- lines = [x.strip() for x in data.decode('utf8', errors='ignore').split("\n")]
+ prompt_txt = gr.TextArea(label="Prompts")
+ checkbox_txt.change(fn=lambda x: [gr.File.update(visible=not x), gr.TextArea.update(visible=x)], inputs=[checkbox_txt], outputs=[file, prompt_txt])
+ return [checkbox_txt, file, prompt_txt]
+
+ def run(self, p, checkbox_txt, data: bytes, prompt_txt: str):
+ if checkbox_txt:
+ lines = [x.strip() for x in prompt_txt.splitlines()]
+ else:
+ lines = [x.strip() for x in data.decode('utf8', errors='ignore').split("\n")]
lines = [x for x in lines if len(x) > 0]
- batch_count = math.ceil(len(lines) / p.batch_size)
- print(f"Will process {len(lines) * p.n_iter} images in {batch_count * p.n_iter} batches.")
+ img_count = len(lines) * p.n_iter
+ batch_count = math.ceil(img_count / p.batch_size)
+ loop_count = math.ceil(batch_count / p.n_iter)
+ print(f"Will process {img_count} images in {batch_count} batches.")
p.do_not_save_grid = True
state.job_count = batch_count
images = []
- for batch_no in range(batch_count):
- state.job = f"{batch_no + 1} out of {batch_count * p.n_iter}"
- p.prompt = lines[batch_no*p.batch_size:(batch_no+1)*p.batch_size] * p.n_iter
+ for loop_no in range(loop_count):
+ state.job = f"{loop_no + 1} out of {loop_count}"
+ p.prompt = lines[loop_no*p.batch_size:(loop_no+1)*p.batch_size] * p.n_iter
proc = process_images(p)
images += proc.images
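
Note on the counting rework above: each loop iteration submits one slice of batch_size lines repeated n_iter times, so the number of loops is the batch count divided by n_iter, rounded up. A worked example with made-up numbers (not values from the script):

    import math

    num_lines, n_iter, batch_size = 5, 2, 2   # hypothetical settings

    img_count   = num_lines * n_iter                   # 10 images
    batch_count = math.ceil(img_count / batch_size)    # 5 batches
    loop_count  = math.ceil(batch_count / n_iter)      # 3 process_images calls

    # loop 0: lines[0:2] * 2  ->  4 images (2 batches)
    # loop 1: lines[2:4] * 2  ->  4 images (2 batches)
    # loop 2: lines[4:6] * 2  ->  2 images (1 batch); totals agree
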
diff --git a/style.css b/style.css
index 752d2cf4..2bdd1e0e 100644
--- a/style.css
+++ b/style.css
@@ -196,3 +196,40 @@ input[type="range"]{
border-radius: 8px;
}
+#lightboxModal{
+ display: none;
+ position: fixed;
+ z-index: 900;
+ padding-top: 100px;
+ left: 0;
+ top: 0;
+ width: 100%;
+ height: 100%;
+ overflow: auto;
+ background-color: rgba(20, 20, 20, 0.95);
+}
+
+.modalClose {
+ color: white;
+ position: absolute;
+ top: 10px;
+ right: 25px;
+ font-size: 35px;
+ font-weight: bold;
+}
+
+.modalClose:hover,
+.modalClose:focus {
+ color: #999;
+ text-decoration: none;
+ cursor: pointer;
+}
+
+#modalImage {
+ display: block;
+ margin-left: auto;
+ margin-right: auto;
+ margin-top: auto;
+ width: auto;
+}
+