 javascript/progressbar.js    |  30
 localizations/de_DE.json     |   4
 localizations/it_IT.json     | 126
 localizations/ko_KR.json     |   1
 localizations/pt_BR.json     |  53
 modules/api/api.py           |   5
 modules/esrgan_model.py      |  17
 modules/img2img.py           |   5
 modules/modelloader.py       |   3
 modules/processing.py        |  33
 modules/script_callbacks.py  |  41
 modules/sd_samplers.py       |  11
 modules/shared.py            |  20
 modules/txt2img.py           |   2
 modules/ui.py                |  20
 modules/upscaler.py          |  17
 requirements.txt             |   2
 requirements_versions.txt    |   2
 scripts/img2imgalt.py        |   3
 scripts/prompts_from_file.py |   9
 style.css                    |  10
 webui.py                     |   2
 22 files changed, 305 insertions(+), 111 deletions(-)
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index 7a05726e..671fde34 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -3,8 +3,21 @@ global_progressbars = {}
galleries = {}
galleryObservers = {}
+// this tracks launches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running
+timeoutIds = {}
+
function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip, id_interrupt, id_preview, id_gallery){
- var progressbar = gradioApp().getElementById(id_progressbar)
+ // gradio 3.8's enlightened approach creates two nested div elements with the same id
+ // every time you use gr.HTML(elem_id='xxx'), so we handle this here
+ var progressbar = gradioApp().querySelector("#"+id_progressbar+" #"+id_progressbar)
+ var progressbarParent
+ if(progressbar){
+ progressbarParent = gradioApp().querySelector("#"+id_progressbar)
+ } else{
+ progressbar = gradioApp().getElementById(id_progressbar)
+ progressbarParent = null
+ }
+
var skip = id_skip ? gradioApp().getElementById(id_skip) : null
var interrupt = gradioApp().getElementById(id_interrupt)
@@ -26,18 +39,26 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip
global_progressbars[id_progressbar] = progressbar
var mutationObserver = new MutationObserver(function(m){
+ if(timeoutIds[id_part]) return;
+
preview = gradioApp().getElementById(id_preview)
gallery = gradioApp().getElementById(id_gallery)
if(preview != null && gallery != null){
preview.style.width = gallery.clientWidth + "px"
preview.style.height = gallery.clientHeight + "px"
+ if(progressbarParent) progressbar.style.width = progressbarParent.clientWidth + "px"
//only watch gallery if there is a generation process going on
check_gallery(id_gallery);
var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0;
- if(!progressDiv){
+ if(progressDiv){
+ timeoutIds[id_part] = window.setTimeout(function() {
+ timeoutIds[id_part] = null
+ requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt)
+ }, 500)
+ } else{
if (skip) {
skip.style.display = "none"
}
@@ -47,13 +68,10 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip
if (galleryObservers[id_gallery]) {
galleryObservers[id_gallery].disconnect();
galleries[id_gallery] = null;
- }
+ }
}
-
-
}
- window.setTimeout(function() { requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt) }, 500)
});
mutationObserver.observe( progressbar, { childList:true, subtree:true })
}
diff --git a/localizations/de_DE.json b/localizations/de_DE.json
index 56d54b54..5e254446 100644
--- a/localizations/de_DE.json
+++ b/localizations/de_DE.json
@@ -70,7 +70,7 @@
"None": "Nichts",
"Prompt matrix": "Promptmatrix",
"Prompts from file or textbox": "Prompts aus Datei oder Textfeld",
- "X/Y plot": "X/Y Graf",
+ "X/Y plot": "X/Y Graph",
"Put variable parts at start of prompt": "Variable teile am start des Prompt setzen",
"Iterate seed every line": "Iterate seed every line",
"List of prompt inputs": "List of prompt inputs",
@@ -455,4 +455,4 @@
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Gilt nur für Inpainting-Modelle. Legt fest, wie stark das Originalbild für Inpainting und img2img maskiert werden soll. 1.0 bedeutet vollständig maskiert, was das Standardverhalten ist. 0.0 bedeutet eine vollständig unmaskierte Konditionierung. Niedrigere Werte tragen dazu bei, die Gesamtkomposition des Bildes zu erhalten, sind aber bei großen Änderungen problematisch.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Liste von Einstellungsnamen, getrennt durch Kommas, für Einstellungen, die in der Schnellzugriffsleiste oben erscheinen sollen, anstatt in dem üblichen Einstellungs-Tab. Siehe modules/shared.py für Einstellungsnamen. Erfordert einen Neustart zur Anwendung.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Wenn dieser Wert ungleich Null ist, wird er zum Seed addiert und zur Initialisierung des RNG für Noise bei der Verwendung von Samplern mit Eta verwendet. Dies kann verwendet werden, um noch mehr Variationen von Bildern zu erzeugen, oder um Bilder von anderer Software zu erzeugen, wenn Sie wissen, was Sie tun."
-}
\ No newline at end of file
+}
diff --git a/localizations/it_IT.json b/localizations/it_IT.json
index 0a216e22..49489f40 100644
--- a/localizations/it_IT.json
+++ b/localizations/it_IT.json
@@ -94,7 +94,6 @@
"Animator v6": "Animator v6",
"Asymmetric tiling": "Piastrellatura asimmetrica",
"Custom code": "Codice personalizzato",
- "Dynamic Prompting v0.2": "Prompt dinamici v0.2",
"Embedding to Shareable PNG": "Incorporamento convertito in PNG condivisibile",
"Force symmetry": "Forza la simmetria",
"Prompts interpolation": "Interpola Prompt",
@@ -107,6 +106,7 @@
"Text to Vector Graphics": "Da testo a grafica vettoriale",
"X/Y plot": "Grafico X/Y",
"X/Y/Z plot": "Grafico X/Y/Z",
+ "Dynamic Prompting v0.13.6": "Prompt dinamici v0.13.6",
"Create inspiration images": "Crea immagini di ispirazione",
"step1 min/max": "Passi min(o max)",
"step2 min/max": "Passi max (o min)",
@@ -195,7 +195,7 @@
"Beta distribution (VP only)": "Distribuzione Beta (Solo CV)",
"Beta min (VP only)": "Beta min (Solo CV)",
"Epsilon (VP only)": "Epsilon (Solo CV)",
- "Running in txt2img mode:": "Running in txt2img mode:",
+ "Running in txt2img mode:": "Esecuzione in modalità txt2img:",
"Render these video formats:": "Renderizza in questi formati:",
"GIF": "GIF",
"MP4": "MP4",
@@ -203,11 +203,11 @@
"Animation Parameters": "Parametri animazione",
"Total Animation Length (s)": "Durata totale dell'animazione (s)",
"Framerate": "Frequenza dei fotogrammi",
- "Smoothing_Frames": "Smoothing_Frames",
+ "Smoothing_Frames": "Fotogrammi da appianare",
"Add_Noise": "Aggiungi rumore",
"Noise Strength": "Intensità del rumore",
"Initial Parameters": "Parametri iniziali",
- "Denoising Strength (overrides img2img slider)": "Intensità di riduzione del rumore (sovrascrive il cursore img2img)",
+ "Denoising Strength": "Intensità di riduzione del rumore",
"Seed_March": "Seed_March",
"Zoom Factor (scale/s)": "Fattore di ingrandimento (scala/s)",
"X Pixel Shift (pixels/s)": "Sposta i Pixel sull'asse X (pixel/s)",
@@ -216,8 +216,8 @@
"Prompt Template, applied to each keyframe below": "Modello di prompt, applicato a ciascun fotogramma chiave qui di seguito",
"Positive Prompts": "Prompt positivi",
"Negative Prompts": "Prompt negativi",
- "Props, Stamps": "Props, Stamps",
- "Poper_Folder:": "Cartella Poper:",
+ "Props, Stamps": "Immagini Clipart da diffondere (prop), o da applicare in post elaborazione e non essere diffuse (stamp).",
+ "Poper_Folder:": "Cartella Immagini Clipart (PNG trasparenti):",
"Supported Keyframes:": "Fotogrammi chiave supportati:",
"time_s | source | video, images, img2img | path": "time_s | source | video, images, img2img | path",
"time_s | prompt | positive_prompts | negative_prompts": "time_s | prompt | positive_prompts | negative_prompts",
@@ -234,26 +234,11 @@
"time_s | col_set": "time_s | col_set",
"time_s | col_clear": "time_s | col_clear",
"time_s | model | model": "time_s | model | model",
- "img2img_mode": "img2img_mode",
+ "img2img_mode": "Modalità img2img",
"Keyframes:": "Fotogrammi chiave:",
"Tile X": "Piastrella asse X",
"Tile Y": "Piastrella asse Y",
"Python code": "Codice Python",
- "Combinatorial generation": "Generazione combinatoria",
- "Combinations": "Combinazioni",
- "Choose a number of terms from a list, in this case we choose two artists": "Scegli un numero di termini da un elenco, in questo caso scegliamo due artisti",
- "{2$$artist1|artist2|artist3}": "{2$$artist1|artist2|artist3}",
- "If $$ is not provided, then 1$$ is assumed.\n\n A range can be provided:": "Se $$ non viene fornito, si presume 1$$.\n\n È possibile fornire un intervallo di valori:",
- "{1-3$$artist1|artist2|artist3}": "{1-3$$artist1|artist2|artist3}",
- "In this case, a random number of artists between 1 and 3 is chosen.": "In questo caso viene scelto un numero casuale di artisti compreso tra 1 e 3.",
- "Wildcards": "Termini jolly",
- "If the groups wont drop down click": "Se i gruppi non vengono visualizzati, clicca",
- "here": "qui",
- "to fix the issue.": "per correggere il problema.",
- "WILDCARD_DIR: scripts/wildcards": "WILDCARD_DIR: scripts/wildcards",
- "You can add more wildcards by creating a text file with one term per line and name is mywildcards.txt. Place it in scripts/wildcards.": "Puoi aggiungere termini jolly creando un file di testo con un termine per riga e nominandolo, per esempio, mywildcards.txt. Inseriscilo in scripts/wildcards.",
- "__<folder>/mywildcards__": "__<folder>/mywildcards__",
- "will then become available.": "diverrà quindi disponibile.",
"Source embedding to convert": "Incorporamento sorgente da convertire",
"Embedding token": "Token Incorporamento",
"Output directory": "Cartella di output",
@@ -324,12 +309,30 @@
"Eta": "ETA",
"Clip skip": "Salta CLIP",
"Denoising": "Riduzione del rumore",
- "Cond. Image Mask Weight": "Cond. Image Mask Weight",
+ "Cond. Image Mask Weight": "Peso maschera immagine condizionale",
"X values": "Valori per X",
"Y type": "Parametro asse Y",
"Y values": "Valori per Y",
"Z type": "Parametro asse Z",
"Z values": "Valori per Z",
+ "Combinatorial generation": "Generazione combinatoria",
+ "Combinatorial batches": "Lotti combinatori",
+ "Magic prompt": "Prompt magico",
+ "Fixed seed": "Seme fisso",
+ "Combinations": "Combinazioni",
+ "Choose a number of terms from a list, in this case we choose two artists": "Scegli un numero di termini da un elenco, in questo caso scegliamo due artisti",
+ "{{2$artist1|artist2|artist3}}": "{{2$artist1|artist2|artist3}}",
+ "If $ is not provided, then 1$ is assumed.\n\n A range can be provided:": "Se $ non viene fornito, si presume 1$.\n\n È possibile fornire un intervallo di valori:",
+ "{{1-3$artist1|artist2|artist3}}": "{{1-3$artist1|artist2|artist3}}",
+ "In this case, a random number of artists between 1 and 3 is chosen.": "In questo caso viene scelto un numero casuale di artisti compreso tra 1 e 3.",
+ "Wildcards": "Termini jolly",
+ "If the groups wont drop down click": "Se i gruppi non vengono visualizzati, clicca",
+ "here": "qui",
+ "to fix the issue.": "per correggere il problema.",
+ "WILDCARD_DIR: C:\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards": "WILDCARD_DIR: C:\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards",
+ "You can add more wildcards by creating a text file with one term per line and name is mywildcards.txt. Place it in scripts/wildcards.": "Puoi aggiungere termini jolly creando un file di testo con un termine per riga e nominandolo, per esempio, mywildcards.txt. Inseriscilo in scripts/wildcards.",
+ "__<folder>/mywildcards__": "__<cartella>/mywildcards__",
+ "will then become available.": "diverrà quindi disponibile.",
"Artist or styles name list. '.txt' files with one name per line": "Elenco nomi di artisti o stili. File '.txt' con un nome per riga",
"Prompt words before artist or style name": "Parole chiave prima del nome dell'artista o dello stile",
"Prompt words after artist or style name": "Parole chiave dopo il nome dell'artista o dello stile",
@@ -439,7 +442,7 @@
"Reuse seed": "Riusa il seme",
"CFG decay factor": "Fattore di decadimento CFG",
"CFG target": "CFG di destinazione",
- "Show/Hide Canvas": "Mostra/Nascondi Tela",
+ "Show/Hide AlphaCanvas": "Mostra/Nascondi AlphaCanvas",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Impostazioni consigliate: Passi di campionamento: 80-100, Campionatore: Euler a, Intensità denoising: 0.8",
"Pixels to expand": "Pixel da espandere",
"Outpainting direction": "Direzione di Outpainting",
@@ -535,7 +538,7 @@
"Modules": "Moduli",
"Enter hypernetwork layer structure": "Immettere la struttura del livello della Iperrete",
"Select activation function of hypernetwork": "Selezionare la funzione di attivazione della Iperrete",
- "linear": "linear",
+ "linear": "lineare",
"relu": "relu",
"leakyrelu": "leakyrelu",
"elu": "elu",
@@ -563,9 +566,9 @@
"softshrink": "softshrink",
"softsign": "softsign",
"tanhshrink": "tanhshrink",
- "threshold": "threshold",
+ "threshold": "soglia",
"Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Seleziona inizializzazione dei pesi dei livelli. relu-like - Kaiming, Si consiglia sigmoid-like - Xavier",
- "Normal": "Normal",
+ "Normal": "Normale",
"KaimingUniform": "KaimingUniform",
"KaimingNormal": "KaimingNormal",
"XavierUniform": "XavierUniform",
@@ -764,8 +767,8 @@
"animation_prompts": "Prompt animazione",
"Init settings": "Impostazioni iniziali",
"use_init": "Usa le impostazioni iniziali",
- "from_img2img_instead_of_link": "from_img2img_instead_of_link",
- "strength_0_no_init": "strength_0_no_init",
+ "from_img2img_instead_of_link": "da img2img invece che da link",
+ "strength_0_no_init": "Intensità 0 nessuna inizializzazione",
"strength": "Intensità",
"init_image": "Immagine di inizializzazione",
"use_mask": "Usa maschera",
@@ -791,8 +794,8 @@
"skip_video_for_run_all": "Salta il video per eseguire tutto",
"fps": "FPS",
"output_format": "Formato di uscita",
- "PIL gif": "PIL gif",
- "FFMPEG mp4": "FFMPEG mp4",
+ "PIL gif": "PIL GIF",
+ "FFMPEG mp4": "FFMPEG MP4",
"ffmpeg_location": "Percorso ffmpeg",
"add_soundtrack": "Aggiungi colonna sonora",
"soundtrack_path": "Percorso colonna sonora",
@@ -916,7 +919,7 @@
"Generate Info": "Genera Info",
"File Name": "Nome del file",
"Move to favorites": "Aggiungi ai preferiti",
- "Renew page": "Aggiorna la pagina",
+ "Renew Page": "Aggiorna la pagina",
"Number": "Numero",
"set_index": "Imposta indice",
"load_switch": "load_switch",
@@ -990,8 +993,8 @@
"Add a second progress bar to the console that shows progress for an entire job.": "Aggiungi una seconda barra di avanzamento alla console che mostra l'avanzamento complessivo del lavoro.",
"Training": "Addestramento",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Sposta VAE e CLIP nella RAM durante l'addestramento di Iperreti. Risparmia VRAM.",
- "Filename word regex": "Filename word regex",
- "Filename join string": "Filename join string",
+ "Filename word regex": "Espressione regolare per estrarre parole dal nome del file",
+ "Filename join string": "Stringa per unire le parole estratte dal nome del file",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Numero di ripetizioni per una singola immagine di input per epoca; utilizzato solo per visualizzare il numero di epoca",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Salva un file CSV contenente la perdita nella cartella di registrazione ogni N passaggi, 0 per disabilitare",
"Stable Diffusion": "Stable Diffusion",
@@ -1059,11 +1062,17 @@
"sigma tmin": "sigma tmin",
"sigma noise": "sigma noise",
"Eta noise seed delta": "ETA del delta del seme del rumore",
+ "Number of columns on image gallery": "Numero di colonne nella galleria di immagini",
"Aesthetic Image Scorer": "Punteggio delle immagini estetiche",
"Save score as EXIF or PNG Info Chunk": "Salva il punteggio come info EXIF o PNG",
- "Save score as tag (Windows Only)": "Salva punteggio come etichetta (solo Windows)",
+ "aesthetic_score": "Punteggio estetico",
+ "cfg_scale": "Scala CFG",
+ "sd_model_hash": "Hash del modello SD",
+ "hash": "Hash",
+ "Save tags (Windows only)": "Salva etichette (solo Windows)",
+ "Save category (Windows only)": "Salva categoria (solo Windows)",
+ "Save generation params text": "Salva testo parametri di generazione",
"Force CPU (Requires Custom Script Reload)": "Forza CPU (richiede il ricaricamento dello script personalizzato)",
- "Number of columns on image gallery": "Numero di colonne nella galleria di immagini",
"Images Browser": "Galleria immagini",
"Preload images at startup": "Precarica le immagini all'avvio",
"Number of columns on the page": "Numero di colonne nella pagina",
@@ -1076,6 +1085,7 @@
"Reload custom script bodies (No ui updates, No restart)": "Ricarica gli script personalizzati (nessun aggiornamento dell'interfaccia utente, nessun riavvio)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Riavvia Gradio e aggiorna i componenti (solo script personalizzati, ui.py, js e css)",
"Installed": "Installato",
+ "Available": "Disponibile",
"Install from URL": "Installa da URL",
"Apply and restart UI": "Applica e riavvia l'interfaccia utente",
"Check for updates": "Controlla aggiornamenti",
@@ -1085,12 +1095,14 @@
"aesthetic-gradients": "Gradienti Estetici (CLIP)",
"https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients",
"unknown": "sconosciuto",
- "aesthetic-image-scorer": "Punteggio delle immagini estetiche",
- "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer": "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer",
"dataset-tag-editor": "Dataset Tag Editor",
"https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor.git": "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor.git",
- "deforum": "Deforum",
- "https://github.com/deforum-art/deforum-for-automatic1111-webui/": "https://github.com/deforum-art/deforum-for-automatic1111-webui/",
+ "deforum-for-automatic1111-webui": "Deforum",
+ "https://github.com/deforum-art/deforum-for-automatic1111-webui": "https://github.com/deforum-art/deforum-for-automatic1111-webui",
+ "sd-dynamic-prompts": "Prompt dinamici",
+ "https://github.com/adieyal/sd-dynamic-prompts": "https://github.com/adieyal/sd-dynamic-prompts",
+ "stable-diffusion-webui-aesthetic-image-scorer": "Punteggio immagini estetiche",
+ "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer": "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer",
"stable-diffusion-webui-artists-to-study": "Artisti per studiare",
"https://github.com/camenduru/stable-diffusion-webui-artists-to-study": "https://github.com/camenduru/stable-diffusion-webui-artists-to-study",
"stable-diffusion-webui-images-browser": "Galleria immagini",
@@ -1101,6 +1113,8 @@
"https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git": "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git",
"wildcards": "Termini Jolly",
"https://github.com/AUTOMATIC1111/stable-diffusion-webui-wildcards.git": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-wildcards.git",
+ "Load from:": "Carica da:",
+ "Extension index URL": "URL dell'indice delle Estensioni",
"URL for extension's git repository": "URL del repository GIT dell'estensione",
"Local directory name": "Nome cartella locale",
"Install": "Installa",
@@ -1169,5 +1183,35 @@
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Si applica solo ai modelli di pittura. Determina con quale forza mascherare l'immagine originale per inpainting e img2img. 1.0 significa completamente mascherato, che è il comportamento predefinito. 0.0 significa un condizionamento completamente non mascherato. Valori più bassi aiuteranno a preservare la composizione generale dell'immagine, ma avranno difficoltà con grandi cambiamenti.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Elenco dei nomi delle impostazioni, separati da virgole, per le impostazioni che dovrebbero essere visualizzate nella barra di accesso rapido in alto, anziché nella normale scheda delle impostazioni. Vedi modules/shared.py per impostare i nomi. Richiede il riavvio per applicare.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se questo valore è diverso da zero, verrà aggiunto al seed e utilizzato per inizializzare il generatore di numeri casuali per il rumore quando si utilizzano campionatori con ETA. Puoi usarlo per produrre ancora più variazioni di immagini, oppure puoi usarlo per abbinare le immagini di altri software se sai cosa stai facendo.",
- "Leave empty for auto": "Lasciare vuoto per automatico"
+ "Leave empty for auto": "Lasciare vuoto per automatico",
+ "Autocomplete options": "Opzioni di autocompletamento",
+ "Enable Autocomplete": "Abilita autocompletamento",
+ "Append commas": "Aggiungi virgole",
+ "AlphaCanvas": "AlphaCanvas",
+ "Close": "Chiudi",
+ "Grab Results": "Ottieni risultati",
+ "Apply Patch": "Applica Patch",
+ "Hue:0": "Hue:0",
+ "S:0": "S:0",
+ "L:0": "L:0",
+ "Load Canvas": "Carica Tela",
+ "saveCanvas": "Salva Tela",
+ "latest": "aggiornato",
+ "behind": "da aggiornare",
+ "Description": "Descrizione",
+ "Action": "Azione",
+ "Aesthetic Gradients": "Gradienti estetici",
+ "Create an embedding from one or few pictures and use it to apply their style to generated images.": "Crea un incorporamento da una o poche immagini e usalo per applicare il loro stile alle immagini generate.",
+ "Sample extension. Allows you to use __name__ syntax in your prompt to get a random line from a file named name.txt in the wildcards directory. Also see Dynamic Prompts for similar functionality.": "Estensione del campione. Consente di utilizzare la sintassi __name__ nel prompt per ottenere una riga casuale da un file denominato name.txt nella cartella dei termini jolly. Vedi anche 'Prompt dinamici' per funzionalità simili.",
+ "Dynamic Prompts": "Prompt dinamici",
+ "Implements an expressive template language for random or combinatorial prompt generation along with features to support deep wildcard directory structures.": "Implementa un modello di linguaggio espressivo per la generazione di prompt casuale o combinatoria insieme a funzionalità per supportare cartelle strutturate contenenti termini jolly.",
+ "Image browser": "Galleria immagini",
+ "Provides an interface to browse created images in the web browser.": "Fornisce un'interfaccia nel browser web per sfogliare le immagini create.",
+ "Randomly display the pictures of the artist's or artistic genres typical style, more pictures of this artist or genre is displayed after selecting. So you don't have to worry about how hard it is to choose the right style of art when you create.": "Visualizza in modo casuale le immagini dello stile tipico dell'artista o dei generi artistici, dopo la selezione vengono visualizzate più immagini di questo artista o genere. Così non dovete preoccuparvi della difficoltà di scegliere lo stile artistico giusto quando create.",
+ "The official port of Deforum, an extensive script for 2D and 3D animations, supporting keyframable sequences, dynamic math parameters (even inside the prompts), dynamic masking, depth estimation and warping.": "Il porting ufficiale di Deforum, uno script completo per animazioni 2D e 3D, che supporta sequenze di fotogrammi chiave, parametri matematici dinamici (anche all'interno dei prompt), mascheramento dinamico, stima della profondità e warping.",
+ "Artists to study": "Artisti per studiare",
+ "Shows a gallery of generated pictures by artists separated into categories.": "Mostra una galleria di immagini generate dagli artisti suddivise in categorie.",
+ "Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Calcola il punteggio estetico per le immagini generate utilizzando il predittore del punteggio estetico CLIP+MLP basato su Chad Scorer",
+ "Lets you edit captions in training datasets.": "Consente di modificare i sottotitoli nei set di dati di addestramento.",
+ "Time taken:": "Tempo impiegato:"
}
diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json
index d152e575..7bb15ea6 100644
--- a/localizations/ko_KR.json
+++ b/localizations/ko_KR.json
@@ -547,6 +547,7 @@
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지와 그리드의 하위 디렉토리명의 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]. 비워두면 기본값으로 설정됩니다.",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "옛 방식의 강조 구현을 사용합니다. 옛 시드를 재현하는 데 효과적일 수 있습니다.",
"Use original name for output filename during batch process in extras tab": "부가기능 탭에서 이미지를 여러장 처리 시 결과물 파일명에 기존 파일명 사용하기",
+ "Use same random seed for all lines": "모든 줄에 동일한 시드 사용",
"Use same seed for each image": "각 이미지에 동일한 시드 사용",
"use spaces for tags in deepbooru": "deepbooru에서 태그에 공백 사용",
"User interface": "사용자 인터페이스",
diff --git a/localizations/pt_BR.json b/localizations/pt_BR.json
index 56281105..d869170a 100644
--- a/localizations/pt_BR.json
+++ b/localizations/pt_BR.json
@@ -17,6 +17,7 @@
"Checkpoint Merger": "Fusão de Checkpoint",
"Train": "Treinar",
"Settings": "Configurações",
+ "Extensions": "Extensions",
"Prompt": "Prompt",
"Negative prompt": "Prompt negativo",
"Run": "Executar",
@@ -93,13 +94,13 @@
"Eta": "Tempo estimado",
"Clip skip": "Pular Clip",
"Denoising": "Denoising",
+ "Cond. Image Mask Weight": "Peso da Máscara Condicional de Imagem",
"X values": "Valores de X",
"Y type": "Tipo de Y",
"Y values": "Valores de Y",
"Draw legend": "Desenhar a legenda",
"Include Separate Images": "Incluir Imagens Separadas",
"Keep -1 for seeds": "Manter em -1 para seeds",
- "Drop Image Here": "Solte a imagem aqui",
"Save": "Salvar",
"Send to img2img": "Mandar para img2img",
"Send to inpaint": "Mandar para inpaint",
@@ -110,6 +111,7 @@
"Inpaint": "Inpaint",
"Batch img2img": "Lote img2img",
"Image for img2img": "Imagem para img2img",
+ "Drop Image Here": "Solte a imagem aqui",
"Image for inpainting with mask": "Imagem para inpainting com máscara",
"Mask": "Máscara",
"Mask blur": "Desfoque da máscara",
@@ -166,16 +168,10 @@
"Upscaler": "Ampliador",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
- "4x_foolhardy_Remacri": "4x_foolhardy_Remacri",
- "Put ESRGAN models here": "Coloque modelos ESRGAN aqui",
- "R-ESRGAN General 4xV3": "R-ESRGAN General 4xV3",
- "R-ESRGAN AnimeVideo": "R-ESRGAN AnimeVideo",
- "R-ESRGAN 4x+": "R-ESRGAN 4x+",
- "R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B",
- "R-ESRGAN 2x+": "R-ESRGAN 2x+",
- "ScuNET": "ScuNET",
+ "ESRGAN_4x": "ESRGAN_4x",
+ "ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
- "put_swinir_models_here": "put_swinir_models_here",
+ "SwinIR 4x": "SwinIR 4x",
"Single Image": "Uma imagem",
"Batch Process": "Processo em lote",
"Batch from Directory": "Lote apartir de diretório",
@@ -189,7 +185,7 @@
"GFPGAN visibility": "Visibilidade GFPGAN",
"CodeFormer visibility": "Visibilidade CodeFormer",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Peso do CodeFormer (0 = efeito máximo, 1 = efeito mínimo)",
- "Open output directory": "Abrir diretório de saída",
+ "Upscale Before Restoring Faces": "Ampliar Antes de Refinar Rostos",
"Send to txt2img": "Mandar para txt2img",
"A merger of the two checkpoints will be generated in your": "Uma fusão dos dois checkpoints será gerada em seu",
"checkpoint": "checkpoint",
@@ -216,6 +212,7 @@
"Modules": "Módulos",
"Enter hypernetwork layer structure": "Entrar na estrutura de camadas da hypernetwork",
"Select activation function of hypernetwork": "Selecionar a função de ativação de hypernetwork",
+ "linear": "linear",
"relu": "relu",
"leakyrelu": "leakyrelu",
"elu": "elu",
@@ -227,12 +224,10 @@
"glu": "glu",
"hardshrink": "hardshrink",
"hardsigmoid": "hardsigmoid",
- "hardswish": "hardswish",
"hardtanh": "hardtanh",
"logsigmoid": "logsigmoid",
"logsoftmax": "logsoftmax",
"mish": "mish",
- "multiheadattention": "multiheadattention",
"prelu": "prelu",
"rrelu": "rrelu",
"relu6": "relu6",
@@ -274,9 +269,9 @@
"Focal point edges weight": "Peso de ponto focal para bordas",
"Create debug image": "Criar imagem de depuração",
"Preprocess": "Pré-processar",
- "Train an embedding; must specify a directory with a set of 1:1 ratio images": "Treinar um embedding; precisa especificar um diretório com imagens de proporção 1:1",
+ "Train an embedding; must specify a directory with a set of 1:1 ratio images": "Treinar uma incorporação; precisa especificar um diretório com imagens de proporção 1:1",
"[wiki]": "[wiki]",
- "Embedding": "Embedding",
+ "Embedding": "Incorporação",
"Embedding Learning rate": "Taxa de aprendizagem da incorporação",
"Hypernetwork Learning rate": "Taxa de aprendizagem de Hypernetwork",
"Dataset directory": "Diretório de Dataset",
@@ -345,9 +340,11 @@
"Filename join string": "Nome de arquivo join string",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Número de repetições para entrada única de imagens por época; serve apenas para mostrar o número de época",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Salvar um csv com as perdas para o diretório de log a cada N steps, 0 para desativar",
+ "Use cross attention optimizations while training": "Usar otimizações de atenção cruzada enquanto treinando",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "Checkpoints para manter no cache da RAM",
"Hypernetwork strength": "Força da Hypernetwork",
+ "Inpainting conditioning mask strength": "Força do inpaint para máscaras condicioniais",
"Apply color correction to img2img results to match original colors.": "Aplicar correção de cor nas imagens geradas em img2img, usando a imagem original como base.",
"Save a copy of image before applying color correction to img2img results": "Salvar uma cópia das imagens geradas em img2img antes de aplicar a correção de cor",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Durante gerações img2img, fazer examente o número de steps definidos na barra (normalmente você faz menos steps com denoising menor).",
@@ -379,6 +376,7 @@
"Add model hash to generation information": "Adicionar hash do modelo para informação de geração",
"Add model name to generation information": "Adicionar nome do modelo para informação de geração",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Quando ler parâmetros de texto para a interface (de informações de PNG ou texto copiado), não alterar o modelo/intervalo selecionado.",
+ "Send seed when sending prompt or image to other interface": "Enviar seed quando enviar prompt ou imagem para outra interface",
"Font for image grids that have text": "Fonte para grade de imagens que têm texto",
"Enable full page image viewer": "Ativar visualizador de página inteira",
"Show images zoomed in by default in full page image viewer": "Mostrar imagens com zoom por definição no visualizador de página inteira",
@@ -386,13 +384,17 @@
"Quicksettings list": "Lista de configurações rapidas",
"Localization (requires restart)": "Localização (precisa reiniciar)",
"ar_AR": "ar_AR",
+ "de_DE": "de_DE",
"es_ES": "es_ES",
- "fr-FR": "fr-FR",
+ "fr_FR": "fr_FR",
+ "it_IT": "it_IT",
"ja_JP": "ja_JP",
"ko_KR": "ko_KR",
+ "pt_BR": "pt_BR",
"ru_RU": "ru_RU",
"tr_TR": "tr_TR",
"zh_CN": "zh_CN",
+ "zh_TW": "zh_TW",
"Sampler parameters": "Parâmetros de Amostragem",
"Hide samplers in user interface (requires restart)": "Esconder amostragens na interface de usuário (precisa reiniciar)",
"eta (noise multiplier) for DDIM": "tempo estimado (multiplicador de ruído) para DDIM",
@@ -408,6 +410,19 @@
"Download localization template": "Baixar arquivo modelo de localização",
"Reload custom script bodies (No ui updates, No restart)": "Recarregar scripts personalizados (Sem atualizar a interface, Sem reiniciar)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Reiniciar Gradio e atualizar componentes (Scripts personalizados, ui.py, js e css)",
+ "Installed": "Instalado",
+ "Available": "Disponível",
+ "Install from URL": "Instalado de URL",
+ "Apply and restart UI": "Apicar e reiniciar a interface",
+ "Check for updates": "Procurar por atualizações",
+ "Extension": "Extensão",
+ "URL": "URL",
+ "Update": "Atualização",
+ "Load from:": "Carregar de:",
+ "Extension index URL": "Índice de extensão URL",
+ "URL for extension's git repository": "URL para repositório git da extensão",
+ "Local directory name": "Nome do diretório local",
+ "Install": "Instalar",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (apertar Ctrl+Enter ou Alt+Enter para gerar)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt Negativo (apertar Ctrl+Enter ou Alt+Enter para gerar)",
"Add a random artist to the prompt.": "Adicionar um artista aleatório para o prompt.",
@@ -420,7 +435,7 @@
"Do not do anything special": "Não faça nada de especial",
"Which algorithm to use to produce the image": "O tipo de algoritmo para gerar imagens.",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - cria mais variações para as imagens em diferentes passos. Mais que 40 passos cancela o efeito.",
- "Denoising Diffusion Implicit Models - Funciona melhor para inpainting.": "Denoising Diffusion Implicit Models - Funciona melhor para inpainting.",
+ "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - Funciona melhor para inpainting.",
"Produce an image that can be tiled.": "Produz uma imagem que pode ser ladrilhada.",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Cria um processo em duas etapas, com uma imagem em baixa qualidade primeiro, aumenta a imagem e refina os detalhes sem alterar a composição da imagem",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Quanto o algoritmo deve manter da imagem original. Em 0, nada muda. Em 1 o algoritmo ignora a imagem original. Valores menores que 1.0 demoram mais.",
@@ -438,7 +453,7 @@
"Write image to a directory (default - log/images) and generation parameters into csv file.": "Salva a imagem no diretório padrão ou escolhido e cria um arquivo csv com os parâmetros da geração.",
"Open images output directory": "Abre o diretório de saída de imagens.",
"How much to blur the mask before processing, in pixels.": "Transição do contorno da máscara, em pixels.",
- "What to put inside the masked area before processing it with Stable Diffusion.": "O que vai dentro da máscara antes de processar.",
+ "What to put inside the masked area before processing it with Stable Diffusion.": "O que vai dentro da máscara antes de processá-la com Stable Diffusion.",
"fill it with colors of the image": "Preenche usando as cores da imagem.",
"keep whatever was there originally": "manter usando o que estava lá originalmente",
"fill it with latent space noise": "Preenche com ruídos do espaço latente.",
@@ -463,6 +478,8 @@
"Restore low quality faces using GFPGAN neural network": "Restaurar rostos de baixa qualidade usando a rede neural GFPGAN",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Esta expressão regular vai retirar palavras do nome do arquivo e serão juntadas via regex usando a opção abaixo em etiquetas usadas em treinamento. Não mexer para manter os nomes como estão.",
"This string will be used to join split words into a single line if the option above is enabled.": "Esta string será usada para unir palavras divididas em uma única linha se a opção acima estiver habilitada.",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Aplicável somente para modelos de inpaint. Determina quanto deve mascarar da imagem original para inpaint e img2img. 1.0 significa totalmente mascarado, que é o comportamento padrão. 0.0 significa uma condição totalmente não mascarada. Valores baixos ajudam a preservar a composição geral da imagem, mas vai encontrar dificuldades com grandes mudanças.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Lista de nomes de configurações, separados por vírgulas, para configurações que devem ir para a barra de acesso rápido na parte superior, em vez da guia de configuração usual. Veja modules/shared.py para nomes de configuração. Necessita reinicialização para aplicar.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se este valor for diferente de zero, ele será adicionado à seed e usado para inicializar o RNG para ruídos ao usar amostragens com Tempo Estimado. Você pode usar isso para produzir ainda mais variações de imagens ou pode usar isso para combinar imagens de outro software se souber o que está fazendo."
+ "Leave empty for auto": "Deixar desmarcado para automático"
}
diff --git a/modules/api/api.py b/modules/api/api.py
index bb87d795..71c9c160 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -5,10 +5,9 @@ import uvicorn
from gradio.processing_utils import decode_base64_to_file, decode_base64_to_image
from fastapi import APIRouter, Depends, HTTPException
import modules.shared as shared
-from modules import devices
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.sd_samplers import all_samplers
+from modules.sd_samplers import all_samplers, sample_to_image, samples_to_image_grid
from modules.extras import run_extras, run_pnginfo
@@ -179,6 +178,8 @@ class Api:
progress = min(progress, 1)
+ shared.state.set_current_image()
+
current_image = None
if shared.state.current_image and not req.skip_current_image:
current_image = encode_pil_to_base64(shared.state.current_image)
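
The hunk above makes the progress handler call shared.state.set_current_image() before reading shared.state.current_image, so API clients get live previews without the Gradio UI polling loop. A minimal client-side sketch; the /sdapi/v1/progress route and the "progress"/"current_image" response fields are assumptions about the surrounding API code, not something this hunk defines:

    # Hypothetical polling client; route name and response fields are assumptions.
    import base64, io
    import requests
    from PIL import Image

    def fetch_preview(base_url="http://127.0.0.1:7860"):
        data = requests.get(f"{base_url}/sdapi/v1/progress", timeout=5).json()
        print(f"progress: {data.get('progress', 0.0):.0%}")
        img_b64 = data.get("current_image")
        if not img_b64:
            return None
        img_b64 = img_b64.split(",", 1)[-1]  # tolerate an optional data-URI prefix
        # current_image is produced by State.set_current_image() and base64-encoded
        # by the handler shown above
        return Image.open(io.BytesIO(base64.b64decode(img_b64)))
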
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index a13cf6ac..c61669b4 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -50,6 +50,7 @@ def mod2normal(state_dict):
def resrgan2normal(state_dict, nb=23):
# this code is copied from https://github.com/victorca25/iNNfer
if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
+ re8x = 0
crt_net = {}
items = []
for k, v in state_dict.items():
@@ -75,10 +76,18 @@ def resrgan2normal(state_dict, nb=23):
crt_net['model.3.bias'] = state_dict['conv_up1.bias']
crt_net['model.6.weight'] = state_dict['conv_up2.weight']
crt_net['model.6.bias'] = state_dict['conv_up2.bias']
- crt_net['model.8.weight'] = state_dict['conv_hr.weight']
- crt_net['model.8.bias'] = state_dict['conv_hr.bias']
- crt_net['model.10.weight'] = state_dict['conv_last.weight']
- crt_net['model.10.bias'] = state_dict['conv_last.bias']
+
+ if 'conv_up3.weight' in state_dict:
+ # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py
+ re8x = 3
+ crt_net['model.9.weight'] = state_dict['conv_up3.weight']
+ crt_net['model.9.bias'] = state_dict['conv_up3.bias']
+
+ crt_net[f'model.{8+re8x}.weight'] = state_dict['conv_hr.weight']
+ crt_net[f'model.{8+re8x}.bias'] = state_dict['conv_hr.bias']
+ crt_net[f'model.{10+re8x}.weight'] = state_dict['conv_last.weight']
+ crt_net[f'model.{10+re8x}.bias'] = state_dict['conv_last.bias']
+
state_dict = crt_net
return state_dict
diff --git a/modules/img2img.py b/modules/img2img.py
index 35c5df9b..be9f3653 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -81,7 +81,8 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
mask = None
# Use the EXIF orientation of photos taken by smartphones.
- image = ImageOps.exif_transpose(image)
+ if image is not None:
+ image = ImageOps.exif_transpose(image)
assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
@@ -137,6 +138,8 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
if processed is None:
processed = process_images(p)
+ p.close()
+
shared.total_tqdm.clear()
generation_info_js = processed.js()
diff --git a/modules/modelloader.py b/modules/modelloader.py
index b0f2f33d..e4a6f8ac 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -85,6 +85,9 @@ def cleanup_models():
src_path = os.path.join(root_path, "ESRGAN")
dest_path = os.path.join(models_path, "ESRGAN")
move_files(src_path, dest_path)
+ src_path = os.path.join(models_path, "BSRGAN")
+ dest_path = os.path.join(models_path, "ESRGAN")
+ move_files(src_path, dest_path, ".pth")
src_path = os.path.join(root_path, "gfpgan")
dest_path = os.path.join(models_path, "GFPGAN")
move_files(src_path, dest_path)
diff --git a/modules/processing.py b/modules/processing.py
index 57d3a523..3a364b5f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -199,9 +199,13 @@ class StableDiffusionProcessing():
def init(self, all_prompts, all_seeds, all_subseeds):
pass
- def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
raise NotImplementedError()
+ def close(self):
+ self.sd_model = None
+ self.sampler = None
+
class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
@@ -517,7 +521,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast():
- samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
+ samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
samples_ddim = samples_ddim.to(devices.dtype_vae)
x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim)
@@ -597,9 +601,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None:
p.scripts.postprocess(p, res)
- p.sd_model = None
- p.sampler = None
-
return res
@@ -648,7 +649,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
- def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr:
@@ -661,9 +662,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
+ """saves image before applying hires fix, if enabled in options; takes as an arguyment either an image or batch with latent space images"""
+ def save_intermediate(image, index):
+ if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
+ return
+
+ if not isinstance(image, Image.Image):
+ image = sd_samplers.sample_to_image(image, index)
+
+ images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
+
if opts.use_scale_latent_for_hires_fix:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+ for i in range(samples.shape[0]):
+ save_intermediate(samples, i)
else:
decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
@@ -673,6 +686,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample)
+
+ save_intermediate(image, i)
+
image = images.resize_image(0, image, self.width, self.height)
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
@@ -830,8 +846,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, self.image_mask)
-
- def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
+ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
@@ -842,4 +857,4 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
del x
devices.torch_gc()
- return samples
\ No newline at end of file
+ return samples
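
Two related changes in processing.py: sample() now also receives the batch prompts (used by save_intermediate above to save the pre-highres image with the matching seed and prompt), and the sd_model/sampler cleanup moved out of process_images_inner() into an explicit close() that callers invoke — see the matching p.close() calls added in img2img.py above and txt2img.py below. A minimal caller-side sketch under those assumptions; the constructor keywords are placeholders:

    # Sketch of the new caller-side lifecycle; keyword values are placeholders.
    from modules import processing, shared

    p = processing.StableDiffusionProcessingTxt2Img(
        sd_model=shared.sd_model,
        prompt="example prompt",
        steps=20,
        do_not_save_samples=True,  # keep the sketch from touching output directories
    )
    try:
        # process_images() now calls p.sample(..., prompts=prompts) internally
        processed = processing.process_images(p)
    finally:
        p.close()  # releases p.sd_model / p.sampler; no longer done inside process_images_inner
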
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index ce264690..c28e220e 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -2,6 +2,7 @@ import sys
import traceback
from collections import namedtuple
import inspect
+from typing import Optional
from fastapi import FastAPI
from gradio import Blocks
@@ -26,6 +27,24 @@ class ImageSaveParams:
"""dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'"""
+class CFGDenoiserParams:
+ def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps):
+ self.x = x
+ """Latent image representation in the process of being denoised"""
+
+ self.image_cond = image_cond
+ """Conditioning image"""
+
+ self.sigma = sigma
+ """Current sigma noise step value"""
+
+ self.sampling_step = sampling_step
+ """Current Sampling step number"""
+
+ self.total_sampling_steps = total_sampling_steps
+ """Total number of sampling steps planned"""
+
+
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
callbacks_app_started = []
callbacks_model_loaded = []
@@ -33,6 +52,7 @@ callbacks_ui_tabs = []
callbacks_ui_settings = []
callbacks_before_image_saved = []
callbacks_image_saved = []
+callbacks_cfg_denoiser = []
def clear_callbacks():
@@ -41,9 +61,9 @@ def clear_callbacks():
callbacks_ui_settings.clear()
callbacks_before_image_saved.clear()
callbacks_image_saved.clear()
+ callbacks_cfg_denoiser.clear()
-
-def app_started_callback(demo: Blocks, app: FastAPI):
+def app_started_callback(demo: Optional[Blocks], app: FastAPI):
for c in callbacks_app_started:
try:
c.callback(demo, app)
@@ -95,6 +115,14 @@ def image_saved_callback(params: ImageSaveParams):
report_exception(c, 'image_saved_callback')
+def cfg_denoiser_callback(params: CFGDenoiserParams):
+ for c in callbacks_cfg_denoiser:
+ try:
+ c.callback(params)
+ except Exception:
+ report_exception(c, 'cfg_denoiser_callback')
+
+
def add_callback(callbacks, fun):
stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if len(stack) > 0 else 'unknown file'
@@ -147,3 +175,12 @@ def on_image_saved(callback):
- params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
"""
add_callback(callbacks_image_saved, callback)
+
+
+def on_cfg_denoiser(callback):
+ """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
+ The callback is called with one argument:
+ - params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
+ """
+ add_callback(callbacks_cfg_denoiser, callback)
+
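
The new on_cfg_denoiser hook lets an extension inspect or modify the tensors CFGDenoiser assembles on each step; the sd_samplers.py hunk below writes params.x, params.image_cond and params.sigma back after the callback runs. A minimal extension-side sketch (the noise perturbation is purely illustrative, not something this commit does):

    import torch
    from modules import script_callbacks

    def my_denoiser_hook(params: script_callbacks.CFGDenoiserParams):
        # params.x, params.image_cond and params.sigma are read back by the sampler,
        # so edits made here affect the current denoising step
        if params.sampling_step == 0:
            print(f"denoising over {params.total_sampling_steps} steps, sigma={float(params.sigma[0]):.3f}")
        params.x = params.x + torch.randn_like(params.x) * 0.001  # tiny illustrative perturbation

    script_callbacks.on_cfg_denoiser(my_denoiser_hook)
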
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 8772db56..c7c414ef 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -12,6 +12,7 @@ from modules import prompt_parser, devices, processing, images
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
+from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
@@ -92,8 +93,8 @@ def single_sample_to_image(sample):
return Image.fromarray(x_sample)
-def sample_to_image(samples):
- return single_sample_to_image(samples[0])
+def sample_to_image(samples, index=0):
+ return single_sample_to_image(samples[index])
def samples_to_image_grid(samples):
@@ -280,6 +281,12 @@ class CFGDenoiser(torch.nn.Module):
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
+ denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
+ cfg_denoiser_callback(denoiser_params)
+ x_in = denoiser_params.x
+ image_cond_in = denoiser_params.image_cond
+ sigma_in = denoiser_params.sigma
+
if tensor.shape[1] == uncond.shape[1]:
cond_in = torch.cat([tensor, uncond])
diff --git a/modules/shared.py b/modules/shared.py
index cbef5c43..d8e99f85 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -4,6 +4,7 @@ import json
import os
import sys
from collections import OrderedDict
+import time
import gradio as gr
import tqdm
@@ -135,6 +136,7 @@ class State:
current_image = None
current_image_sampling_step = 0
textinfo = None
+ time_start = None
need_restart = False
def skip(self):
@@ -172,6 +174,7 @@ class State:
self.skipped = False
self.interrupted = False
self.textinfo = None
+ self.time_start = time.time()
devices.torch_gc()
@@ -181,6 +184,20 @@ class State:
devices.torch_gc()
+ """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
+ def set_current_image(self):
+ if not parallel_processing_allowed:
+ return
+
+ if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and self.current_latent is not None:
+ if opts.show_progress_grid:
+ self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
+ else:
+ self.current_image = sd_samplers.sample_to_image(self.current_latent)
+
+ self.current_image_sampling_step = self.sampling_step
+
+
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
@@ -238,6 +255,8 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
+ "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
+ "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
@@ -305,7 +324,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
- "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
diff --git a/modules/txt2img.py b/modules/txt2img.py
index c9d5a090..8e4e8677 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -47,6 +47,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
if processed is None:
processed = process_images(p)
+ p.close()
+
shared.total_tqdm.clear()
generation_info_js = processed.js()
diff --git a/modules/ui.py b/modules/ui.py
index 2c15abb7..2609857e 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -277,15 +277,7 @@ def check_progress_call(id_part):
preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0:
- if shared.parallel_processing_allowed:
-
- if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
- if opts.show_progress_grid:
- shared.state.current_image = modules.sd_samplers.samples_to_image_grid(shared.state.current_latent)
- else:
- shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
- shared.state.current_image_sampling_step = shared.state.sampling_step
-
+ shared.state.set_current_image()
image = shared.state.current_image
if image is None:
@@ -671,6 +663,8 @@ def create_ui(wrap_gradio_gpu_call):
import modules.img2img
import modules.txt2img
+ reload_javascript()
+
parameters_copypaste.reset()
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
@@ -1060,7 +1054,7 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Tabs(elem_id="extras_resize_mode"):
with gr.TabItem('Scale by'):
- upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
+ upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4)
with gr.TabItem('Scale to'):
with gr.Group():
with gr.Row():
@@ -1570,8 +1564,7 @@ def create_ui(wrap_gradio_gpu_call):
reload_script_bodies.click(
fn=reload_scripts,
inputs=[],
- outputs=[],
- _js='function(){}'
+ outputs=[]
)
def request_restart():
@@ -1583,7 +1576,7 @@ def create_ui(wrap_gradio_gpu_call):
fn=request_restart,
inputs=[],
outputs=[],
- _js='function(){restart_reload()}'
+ _js='restart_reload'
)
if column is not None:
@@ -1782,4 +1775,3 @@ def load_javascript(raw_response):
reload_javascript = partial(load_javascript, gradio.routes.templates.TemplateResponse)
-reload_javascript()
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 6ab2fb40..83fde7ca 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -10,6 +10,7 @@ import modules.shared
from modules import modelloader, shared
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
+NEAREST = (Image.Resampling.NEAREST if hasattr(Image, 'Resampling') else Image.NEAREST)
from modules.paths import models_path
@@ -57,7 +58,7 @@ class Upscaler:
dest_w = img.width * scale
dest_h = img.height * scale
for i in range(3):
- if img.width >= dest_w and img.height >= dest_h:
+ if img.width > dest_w and img.height > dest_h:
break
img = self.do_upscale(img, selected_model)
if img.width != dest_w or img.height != dest_h:
@@ -120,3 +121,17 @@ class UpscalerLanczos(Upscaler):
self.name = "Lanczos"
self.scalers = [UpscalerData("Lanczos", None, self)]
+
+class UpscalerNearest(Upscaler):
+ scalers = []
+
+ def do_upscale(self, img, selected_model=None):
+ return img.resize((int(img.width * self.scale), int(img.height * self.scale)), resample=NEAREST)
+
+ def load_model(self, _):
+ pass
+
+ def __init__(self, dirname=None):
+ super().__init__(False)
+ self.name = "Nearest"
+ self.scalers = [UpscalerData("Nearest", None, self)] \ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index a53522f3..79e8b7c6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,7 +4,7 @@ fairscale==0.4.4
fonts
font-roboto
gfpgan
-gradio==3.5
+gradio==3.8
invisible-watermark
numpy
omegaconf
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 41f2501f..7bd16712 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -2,7 +2,7 @@ transformers==4.19.2
diffusers==0.3.0
basicsr==1.4.2
gfpgan==1.3.8
-gradio==3.5
+gradio==3.8
numpy==1.23.3
Pillow==9.2.0
realesrgan==0.3.0
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index 88abc093..964b75c7 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -166,8 +166,7 @@ class Script(scripts.Script):
if override_strength:
p.denoising_strength = 1.0
-
- def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
+ def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
lat = (p.init_latent.cpu().numpy() * 10).astype(int)
same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 1be22960..d187cd9c 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -96,6 +96,7 @@ class Script(scripts.Script):
def ui(self, is_img2img):
checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False)
+ checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False)
prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1)
file = gr.File(label="Upload prompt inputs", type='bytes')
@@ -106,9 +107,9 @@ class Script(scripts.Script):
# We don't shrink back to 1, because that causes the control to ignore [enter], and it may
# be unclear to the user that shift-enter is needed.
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])
- return [checkbox_iterate, file, prompt_txt]
+ return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt]
- def run(self, p, checkbox_iterate, file, prompt_txt: str):
+ def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str):
lines = [x.strip() for x in prompt_txt.splitlines()]
lines = [x for x in lines if len(x) > 0]
@@ -137,7 +138,7 @@ class Script(scripts.Script):
jobs.append(args)
print(f"Will process {len(lines)} lines in {job_count} jobs.")
- if (checkbox_iterate and p.seed == -1):
+ if (checkbox_iterate or checkbox_iterate_batch) and p.seed == -1:
p.seed = int(random.randrange(4294967294))
state.job_count = job_count
@@ -153,7 +154,7 @@ class Script(scripts.Script):
proc = process_images(copy_p)
images += proc.images
- if (checkbox_iterate):
+ if checkbox_iterate:
p.seed = p.seed + (p.batch_size * p.n_iter)
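
The two checkboxes interact as follows: either one forces a concrete random seed when p.seed is -1, but only "Iterate seed every line" then advances it per prompt line. A small illustration with made-up numbers:

    # Made-up numbers to illustrate the seed handling added above.
    batch_size, n_iter, seed = 2, 1, 1000          # seed as drawn once when p.seed == -1
    lines = ["a cat", "a dog", "a bird"]

    iterate_every_line = True                      # the "Iterate seed every line" checkbox
    seeds = []
    for _ in lines:
        seeds.append(seed)
        if iterate_every_line:
            seed += batch_size * n_iter            # mirrors p.seed += p.batch_size * p.n_iter
    print(seeds)   # [1000, 1002, 1004]; with the checkbox off, every line reuses 1000
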
diff --git a/style.css b/style.css
index 36c5e2d8..a0382a8c 100644
--- a/style.css
+++ b/style.css
@@ -260,6 +260,16 @@ input[type="range"]{
#txt2img_negative_prompt, #img2img_negative_prompt{
}
+/* gradio 3.8 adds opacity to progressbar which makes it blink; disable it here */
+.transition.opacity-20 {
+ opacity: 1 !important;
+}
+
+/* more gradio's garbage cleanup */
+.min-h-\[4rem\] {
+ min-height: unset !important;
+}
+
#txt2img_progressbar, #img2img_progressbar, #ti_progressbar{
position: absolute;
z-index: 1000;
diff --git a/webui.py b/webui.py
index 034777a2..3b21c071 100644
--- a/webui.py
+++ b/webui.py
@@ -117,6 +117,8 @@ def api_only():
app.add_middleware(GZipMiddleware, minimum_size=1000)
api = create_api(app)
+ modules.script_callbacks.app_started_callback(None, app)
+
api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861)
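
With this change app_started_callback also fires in API-only mode, passing demo=None (hence the Optional[Blocks] annotation added in script_callbacks.py). A sketch of a callback that tolerates the missing Blocks instance; the on_app_started registration helper and the example route are assumptions not shown in this diff:

    # Sketch of an app_started callback that works in both UI and API-only mode.
    # on_app_started is assumed to exist in modules.script_callbacks; the
    # /my-extension/ping route is purely illustrative.
    from fastapi import FastAPI
    from modules import script_callbacks

    def on_started(demo, app: FastAPI):
        if demo is None:
            print("running in API-only mode; no Gradio Blocks to attach to")

        @app.get("/my-extension/ping")
        def ping():
            return {"ok": True}

    script_callbacks.on_app_started(on_started)
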