author     AUTOMATIC1111 <16777216c@gmail.com>  2022-11-04 10:54:17 +0300
committer  GitHub <noreply@github.com>          2022-11-04 10:54:17 +0300
commit     24fc05cf576c80976773d09b341b9c9b2274a492 (patch)
tree       89829bec0144ac0da7c0ae340cd8435427efb01e
parent     a613fbc05e037622e1f20667d428be3f72894f16 (diff)
parent     352b33106a64a8c34d1f5d79fcb377a73b02e39c (diff)
Merge branch 'master' into fix-ckpt-cache
-rw-r--r--  README.md                                 7
-rw-r--r--  javascript/edit-attention.js              3
-rw-r--r--  localizations/it_IT.json                 28
-rw-r--r--  localizations/zh_CN.json                416
-rw-r--r--  localizations/zh_TW.json                282
-rw-r--r--  modules/api/api.py                       85
-rw-r--r--  modules/api/models.py                    71
-rw-r--r--  modules/hypernetworks/hypernetwork.py    36
-rw-r--r--  modules/masking.py                        2
-rw-r--r--  modules/processing.py                    35
-rw-r--r--  modules/scripts.py                       34
-rw-r--r--  modules/sd_models.py                      5
-rw-r--r--  modules/shared.py                        14
-rw-r--r--  modules/ui.py                            37
-rw-r--r--  scripts/custom_code.py                    2
-rw-r--r--  scripts/outpainting_mk_2.py               2
-rw-r--r--  scripts/poor_mans_outpainting.py          4
-rw-r--r--  scripts/prompts_from_file.py             10
-rw-r--r--  scripts/sd_upscale.py                     4
-rw-r--r--  scripts/xy_grid.py                        8
-rw-r--r--  test/utils_test.py                       63
-rw-r--r--  webui.py                                  6
22 files changed, 820 insertions, 334 deletions
diff --git a/README.md b/README.md
index 55c050d5..33508f31 100644
--- a/README.md
+++ b/README.md
@@ -155,14 +155,15 @@ The documentation was moved from this README over to the project's [wiki](https:
- Swin2SR - https://github.com/mv-lab/swin2sr
- LDSR - https://github.com/Hafiidz/latent-diffusion
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
-- Doggettx - Cross Attention layer optimization - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
-- InvokeAI, lstein - Cross Attention layer optimization - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
-- Rinon Gal - Textual Inversion - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
+- Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
+- Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
+- Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
- Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch
- xformers - https://github.com/facebookresearch/xformers
- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
+- Security advice - RyotaK
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
- (You)
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js
index c0d29a74..b947cbec 100644
--- a/javascript/edit-attention.js
+++ b/javascript/edit-attention.js
@@ -1,7 +1,6 @@
addEventListener('keydown', (event) => {
let target = event.originalTarget || event.composedPath()[0];
- if (!target.hasAttribute("placeholder")) return;
- if (!target.placeholder.toLowerCase().includes("prompt")) return;
+ if (!target.matches("#toprow textarea.gr-text-input[placeholder]")) return;
if (! (event.metaKey || event.ctrlKey)) return;
diff --git a/localizations/it_IT.json b/localizations/it_IT.json
index 49489f40..83d0ccce 100644
--- a/localizations/it_IT.json
+++ b/localizations/it_IT.json
@@ -104,6 +104,7 @@
"Seed travel": "Interpolazione semi",
"Shift attention": "Sposta l'attenzione",
"Text to Vector Graphics": "Da testo a grafica vettoriale",
+ "Unprompted": "Unprompted",
"X/Y plot": "Grafico X/Y",
"X/Y/Z plot": "Grafico X/Y/Z",
"Dynamic Prompting v0.13.6": "Prompt dinamici v0.13.6",
@@ -259,6 +260,7 @@
"Save results as video": "Salva i risultati come video",
"Frames per second": "Fotogrammi al secondo",
"Iterate seed every line": "Iterare il seme per ogni riga",
+ "Use same random seed for all lines": "Usa lo stesso seme casuale per tutte le righe",
"List of prompt inputs": "Elenco di prompt di input",
"Upload prompt inputs": "Carica un file contenente i prompt di input",
"n": "Esegui n volte",
@@ -294,6 +296,13 @@
"Transparent PNG": "PNG trasparente",
"Noise Tolerance": "Tolleranza al rumore",
"Quantize": "Quantizzare",
+ "Dry Run": "Esecuzione a vuoto (Debug)",
+ "NEW!": "NUOVO!",
+ "Premium Fantasy Card Template": "Premium Fantasy Card Template",
+ "is now available.": "è ora disponibile.",
+ "Generate a wide variety of creatures and characters in the style of a fantasy card game. Perfect for heroes, animals, monsters, and even crazy hybrids.": "Genera un'ampia varietà di creature e personaggi nello stile di un gioco di carte fantasy. Perfetto per eroi, animali, mostri e persino ibridi incredibili.",
+ "Learn More ➜": "Per saperne di più ➜",
+ "Purchases help fund the continued development of Unprompted. Thank you for your support!": "Gli acquisti aiutano a finanziare il continuo sviluppo di Unprompted. Grazie per il vostro sostegno!",
"X type": "Parametro asse X",
"Nothing": "Niente",
"Var. seed": "Seme della variazione",
@@ -424,6 +433,7 @@
"Sigma adjustment for finding noise for image": "Regolazione Sigma per trovare il rumore per l'immagine",
"Tile size": "Dimensione piastrella",
"Tile overlap": "Sovrapposizione piastrella",
+ "New seed for each tile": "Nuovo seme per ogni piastrella",
"alternate img2img imgage": "Immagine alternativa per img2img",
"interpolation values": "Valori di interpolazione",
"Refinement loops": "Cicli di affinamento",
@@ -455,8 +465,9 @@
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Aumenterà l'immagine al doppio delle dimensioni; utilizzare i cursori di larghezza e altezza per impostare la dimensione della piastrella",
"Upscaler": "Ampliamento immagine",
"Lanczos": "Lanczos",
+ "Nearest": "Nearest",
"LDSR": "LDSR",
- "ESRGAN_4x": "ESRGAN_4x",
+ "BSRGAN": "BSRGAN",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
"SwinIR 4x": "SwinIR 4x",
@@ -808,6 +819,7 @@
"image_path": "Percorso immagine",
"mp4_path": "Percorso MP4",
"Click here after the generation to show the video": "Clicca qui dopo la generazione per mostrare il video",
+ "NOTE: If the 'Generate' button doesn't work, go in Settings and click 'Restart Gradio and Refresh...'.": "NOTA: se il pulsante 'Genera' non funziona, vai in Impostazioni e fai clic su 'Riavvia Gradio e Aggiorna...'.",
"Save Settings": "Salva le impostazioni",
"Load Settings": "Carica le impostazioni",
"Path relative to the webui folder." : "Percorso relativo alla cartella webui.",
@@ -922,8 +934,8 @@
"Renew Page": "Aggiorna la pagina",
"Number": "Numero",
"set_index": "Imposta indice",
- "load_switch": "load_switch",
- "turn_page_switch": "turn_page_switch",
+ "load_switch": "Carica",
+ "turn_page_switch": "Volta pagina",
"Checkbox": "Casella di controllo",
"Checkbox Group": "Seleziona immagini per",
"artists": "Artisti",
@@ -956,6 +968,8 @@
"Save text information about generation parameters as chunks to png files": "Salva le informazioni di testo dei parametri di generazione come blocchi nel file png",
"Create a text file next to every image with generation parameters.": "Crea un file di testo assieme a ogni immagine con i parametri di generazione.",
"Save a copy of image before doing face restoration.": "Salva una copia dell'immagine prima di eseguire il restauro dei volti.",
+ "Save a copy of image before applying highres fix.": "Salva una copia dell'immagine prima di applicare la correzione ad alta risoluzione.",
+ "Save a copy of image before applying color correction to img2img results": "Salva una copia dell'immagine prima di applicare la correzione del colore ai risultati di img2img",
"Quality for saved jpeg images": "Qualità delle immagini salvate in formato JPEG",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Se l'immagine PNG è più grande di 4 MB o qualsiasi dimensione è maggiore di 4000, ridimensiona e salva la copia come JPG",
"Use original name for output filename during batch process in extras tab": "Usa il nome originale per il nome del file di output durante l'elaborazione a lotti nella scheda 'Extra'",
@@ -997,12 +1011,14 @@
"Filename join string": "Stringa per unire le parole estratte dal nome del file",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Numero di ripetizioni per una singola immagine di input per epoca; utilizzato solo per visualizzare il numero di epoca",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Salva un file CSV contenente la perdita nella cartella di registrazione ogni N passaggi, 0 per disabilitare",
+ "Use cross attention optimizations while training": "Usa le ottimizzazioni di controllo dell'attenzione incrociato durante l'allenamento",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "Checkpoint da memorizzare nella RAM",
+ "SD VAE": "SD VAE",
+ "auto": "auto",
"Hypernetwork strength": "Forza della Iperrete",
"Inpainting conditioning mask strength": "Forza della maschera di condizionamento del Inpainting",
"Apply color correction to img2img results to match original colors.": "Applica la correzione del colore ai risultati di img2img in modo che corrispondano ai colori originali.",
- "Save a copy of image before applying color correction to img2img results": "Salva una copia dell'immagine prima di applicare la correzione del colore ai risultati di img2img",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Con img2img, esegue esattamente la quantità di passi specificata dalla barra di scorrimento (normalmente se ne effettuano di meno con meno riduzione del rumore).",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Abilita la quantizzazione nei campionatori K per risultati più nitidi e puliti. Questo può cambiare i semi esistenti. Richiede il riavvio per applicare la modifica.",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Enfasi: utilizzare (testo) per fare in modo che il modello presti maggiore attenzione al testo e [testo] per fargli prestare meno attenzione",
@@ -1194,8 +1210,8 @@
"Hue:0": "Hue:0",
"S:0": "S:0",
"L:0": "L:0",
- "Load Canvas": "Carica Tela",
- "saveCanvas": "Salva Tela",
+ "Load Canvas": "Carica Canvas",
+ "Save Canvas": "Salva Canvas",
"latest": "aggiornato",
"behind": "da aggiornare",
"Description": "Descrizione",
diff --git a/localizations/zh_CN.json b/localizations/zh_CN.json
index e453f5e3..56c8980e 100644
--- a/localizations/zh_CN.json
+++ b/localizations/zh_CN.json
@@ -7,19 +7,19 @@
"Loading...": "载入中...",
"view": "查看",
"api": "api",
- "•": "•",
+ "•": " • ",
"built with gradio": "基于 Gradio 构建",
"Stable Diffusion checkpoint": "Stable Diffusion 模型(ckpt)",
"txt2img": "文生图",
"img2img": "图生图",
- "Extras": "后处理",
- "PNG Info": "PNG 信息",
- "Checkpoint Merger": "模型(ckpt)合并工具",
+ "Extras": "更多",
+ "PNG Info": "图片信息",
+ "Checkpoint Merger": "模型(ckpt)合并",
"Train": "训练",
- "Create aesthetic embedding": "生成美术风格 embedding",
+ "Create aesthetic embedding": "生成美术风格",
"Image Browser": "图库浏览器",
- "History": "历史记录",
"Settings": "设置",
+ "Extensions": "扩展",
"Prompt": "提示词",
"Negative prompt": "反向提示词",
"Run": "运行",
@@ -37,8 +37,8 @@
"Image": "图像",
"Check progress": "查看进度",
"Check progress (first)": "(首次)查看进度",
- "Sampling Steps": "采样迭代步数",
- "Sampling method": "采样方法",
+ "Sampling Steps": "采样迭代步数 (Steps)",
+ "Sampling method": "采样方法 (Sampler)",
"Euler a": "Euler a",
"Euler": "Euler",
"LMS": "LMS",
@@ -59,38 +59,39 @@
"Highres. fix": "高分辨率修复",
"Firstpass width": "第一遍的宽度",
"Firstpass height": "第一遍的高度",
- "Denoising strength": "去噪强度",
- "Batch count": "批次",
- "Batch size": "批量",
+ "Denoising strength": "重绘幅度(Denoising strength)",
+ "Batch count": "生成批次",
+ "Batch size": "每批数量",
"CFG Scale": "提示词相关性(CFG Scale)",
- "Seed": "随机种子",
- "Extra": "额外参数",
+ "Seed": "随机种子(seed)",
+ "Extra": "▼",
"Variation seed": "差异随机种子",
"Variation strength": "差异强度",
"Resize seed from width": "自宽度缩放随机种子",
"Resize seed from height": "自高度缩放随机种子",
- "Open for Clip Aesthetic!": "打开美术风格 Clip!",
- "▼": "▼",
+ "Open for Clip Aesthetic!": "打开以调整 Clip 的美术风格!",
"Aesthetic weight": "美术风格权重",
"Aesthetic steps": "美术风格迭代步数",
"Aesthetic learning rate": "美术风格学习率",
- "Slerp interpolation": "Slerp 插值",
+ "Slerp interpolation": "球面线性插值",
"Aesthetic imgs embedding": "美术风格图集 embedding",
"None": "无",
"Aesthetic text for imgs": "该图集的美术风格描述",
- "Slerp angle": "Slerp 角度",
+ "Slerp angle": "球面线性插值角度",
"Is negative text": "是反向提示词",
"Script": "脚本",
- "Embedding to Shareable PNG": "将 Embedding 转换为可分享的 PNG",
+ "Embedding to Shareable PNG": "将 Embedding 转换为可分享的 PNG 图片文件",
"Prompt matrix": "提示词矩阵",
"Prompts from file or textbox": "从文本框或文件载入提示词",
"X/Y plot": "X/Y 图表",
"Source embedding to convert": "用于转换的源 Embedding",
"Embedding token": "Embedding 的 token (关键词)",
+ "Output directory": "输出目录",
"Put variable parts at start of prompt": "把变量部分放在提示词文本的开头",
- "Show Textbox": "显示文本框",
- "File with inputs": "含输入内容的文件",
- "Prompts": "提示词",
+ "Iterate seed every line": "每行输入都换一个种子",
+ "Use same random seed for all lines": "每行输入都使用同一个随机种子",
+ "List of prompt inputs": "提示词输入列表",
+ "Upload prompt inputs": "上传提示词输入文件",
"X type": "X轴类型",
"Nothing": "无",
"Var. seed": "差异随机种子",
@@ -100,8 +101,8 @@
"Prompt order": "提示词顺序",
"Sampler": "采样器",
"Checkpoint name": "模型(ckpt)名",
- "Hypernetwork": "Hypernetwork",
- "Hypernet str.": "Hypernetwork 强度",
+ "Hypernetwork": "超网络(Hypernetwork)",
+ "Hypernet str.": "超网络(Hypernetwork) 强度",
"Sigma Churn": "Sigma Churn",
"Sigma min": "最小 Sigma",
"Sigma max": "最大 Sigma",
@@ -109,53 +110,52 @@
"Eta": "Eta",
"Clip skip": "Clip 跳过",
"Denoising": "去噪",
+ "Cond. Image Mask Weight": "图像调节屏蔽度",
"X values": "X轴数值",
"Y type": "Y轴类型",
"Y values": "Y轴数值",
"Draw legend": "在图表中包括轴标题",
"Include Separate Images": "包括独立的图像",
"Keep -1 for seeds": "保持随机种子为-1",
- "Drop Image Here": "拖拽图像到此",
"Save": "保存",
"Send to img2img": ">> 图生图",
- "Send to inpaint": ">> 内补绘制",
- "Send to extras": ">> 后处理",
+ "Send to inpaint": ">> 局部重绘",
+ "Send to extras": ">> 更多",
"Make Zip when Save?": "保存时生成zip压缩文件?",
"Textbox": "文本框",
"Interrogate\nCLIP": "CLIP\n反推提示词",
"Interrogate\nDeepBooru": "DeepBooru\n反推提示词",
- "Inpaint": "内补绘制",
+ "Inpaint": "局部重绘",
"Batch img2img": "批量图生图",
"Image for img2img": "图生图的图像",
- "Image for inpainting with mask": "用于内补绘制蒙版内容的图像",
+ "Drop Image Here": "拖拽图像到此",
+ "Image for inpainting with mask": "用于局部重绘并手动画蒙版的图像",
"Mask": "蒙版",
"Mask blur": "蒙版模糊",
"Mask mode": "蒙版模式",
"Draw mask": "绘制蒙版",
"Upload mask": "上传蒙版",
"Masking mode": "蒙版模式",
- "Inpaint masked": "内补绘制蒙版内容",
- "Inpaint not masked": "内补绘制非蒙版内容",
+ "Inpaint masked": "重绘蒙版内容",
+ "Inpaint not masked": "重绘非蒙版内容",
"Masked content": "蒙版蒙住的内容",
"fill": "填充",
"original": "原图",
"latent noise": "潜空间噪声",
"latent nothing": "潜空间数值零",
- "Inpaint at full resolution": "以完整分辨率进行内补绘制",
- "Inpaint at full resolution padding, pixels": "以完整分辨率进行内补绘制 - 填补像素",
- "Process images in a directory on the same machine where the server is running.": "在服务器主机上的目录中处理图像",
- "Use an empty output directory to save pictures normally instead of writing to the output directory.": "指定一个空的文件夹为输出目录而非默认的 output 文件夹为输出目录",
- "Disabled when launched with --hide-ui-dir-config.": "启动 --hide-ui-dir-config 时禁用",
+ "Inpaint at full resolution": "全分辨率局部重绘",
+ "Inpaint at full resolution padding, pixels": "预留像素",
+ "Process images in a directory on the same machine where the server is running.": "使用服务器主机上的一个目录,作为输入目录处理图像",
+ "Use an empty output directory to save pictures normally instead of writing to the output directory.": "使用一个空的文件夹作为输出目录,而不是使用默认的 output 文件夹作为输出目录",
"Input directory": "输入目录",
- "Output directory": "输出目录",
"Resize mode": "缩放模式",
- "Just resize": "只缩放",
- "Crop and resize": "缩放并剪裁",
- "Resize and fill": "缩放并填充",
+ "Just resize": "拉伸",
+ "Crop and resize": "裁剪",
+ "Resize and fill": "填充",
"img2img alternative test": "图生图的另一种测试",
"Loopback": "回送",
- "Outpainting mk2": "外补绘制第二版",
- "Poor man's outpainting": "效果稍差的外补绘制",
+ "Outpainting mk2": "向外绘制第二版",
+ "Poor man's outpainting": "效果稍差的向外绘制",
"SD upscale": "使用 SD 放大(SD upscale)",
"should be 2 or lower.": "必须小于等于2",
"Override `Sampling method` to Euler?(this method is built for it)": "覆写 `采样方法` 为 Euler?(这个方法就是为这样做设计的)",
@@ -164,15 +164,15 @@
"Original negative prompt": "初始反向提示词",
"Override `Sampling Steps` to the same value as `Decode steps`?": "覆写 `采样迭代步数` 为 `解码迭代步数`?",
"Decode steps": "解码迭代步数",
- "Override `Denoising strength` to 1?": "覆写 `去噪强度` 为 1?",
+ "Override `Denoising strength` to 1?": "覆写 `重绘幅度` 为 1?",
"Decode CFG scale": "解码提示词相关性(CFG scale)",
"Randomness": "随机度",
"Sigma adjustment for finding noise for image": "为寻找图中噪点的 Sigma 调整",
"Loops": "迭代次数",
- "Denoising strength change factor": "去噪强度的调整系数",
- "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推荐设置:采样迭代步数:80-100,采样器:Euler a,去噪强度:0.8",
+ "Denoising strength change factor": "重绘幅度的调整系数",
+ "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推荐设置:采样迭代步数:80-100,采样器:Euler a,重绘幅度:0.8",
"Pixels to expand": "拓展的像素数",
- "Outpainting direction": "外补绘制的方向",
+ "Outpainting direction": "向外绘制的方向",
"left": "左",
"right": "右",
"up": "上",
@@ -183,6 +183,7 @@
"Tile overlap": "图块重叠的像素(Tile overlap)",
"Upscaler": "放大算法",
"Lanczos": "Lanczos",
+ "Nearest": "最邻近(整数缩放)",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
"ESRGAN_4x": "ESRGAN_4x",
@@ -199,12 +200,11 @@
"Scale to": "指定尺寸缩放",
"Resize": "缩放",
"Crop to fit": "裁剪以适应",
- "Upscaler 2": "放大算法 2",
- "Upscaler 2 visibility": "放大算法 2 可见度",
+ "Upscaler 2 visibility": "放大算法 2 (Upscaler 2) 可见度",
"GFPGAN visibility": "GFPGAN 可见度",
"CodeFormer visibility": "CodeFormer 可见度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 权重 (0 = 最大效果, 1 = 最小效果)",
- "Open output directory": "打开输出目录",
+ "Upscale Before Restoring Faces": "放大后再进行面部修复",
"Send to txt2img": ">> 文生图",
"A merger of the two checkpoints will be generated in your": "合并后的模型(ckpt)会生成在你的",
"checkpoint": "模型(ckpt)",
@@ -219,10 +219,9 @@
"Add difference": "添加差分",
"Save as float16": "以 float16 储存",
"See": "查看",
- "wiki": "wiki",
+ "wiki": "wiki文档",
"for detailed explanation.": "以了解详细说明",
"Create embedding": "生成 embedding",
- "Create aesthetic images embedding": "生成美术风格图集 embedding",
"Create hypernetwork": "生成 hypernetwork",
"Preprocess images": "图像预处理",
"Name": "名称",
@@ -237,6 +236,36 @@
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
+ "tanh": "tanh",
+ "sigmoid": "sigmoid",
+ "celu": "celu",
+ "gelu": "gelu",
+ "glu": "glu",
+ "hardshrink": "hardshrink",
+ "hardsigmoid": "hardsigmoid",
+ "hardtanh": "hardtanh",
+ "logsigmoid": "logsigmoid",
+ "logsoftmax": "logsoftmax",
+ "mish": "mish",
+ "prelu": "prelu",
+ "rrelu": "rrelu",
+ "relu6": "relu6",
+ "selu": "selu",
+ "silu": "silu",
+ "softmax": "softmax",
+ "softmax2d": "softmax2d",
+ "softmin": "softmin",
+ "softplus": "softplus",
+ "softshrink": "softshrink",
+ "softsign": "softsign",
+ "tanhshrink": "tanhshrink",
+ "threshold": "阈值",
+ "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "选择初始化层权重的方案. 类relu - Kaiming, 类sigmoid - Xavier 都是比较推荐的选项",
+ "Normal": "正态",
+ "KaimingUniform": "Kaiming 均匀",
+ "KaimingNormal": "Kaiming 正态",
+ "XavierUniform": "Xavier 均匀",
+ "XavierNormal": "Xavier 正态",
"Add layer normalization": "添加层标准化",
"Use dropout": "采用 dropout 防止过拟合",
"Overwrite Old Hypernetwork": "覆写旧的 Hypernetwork",
@@ -248,89 +277,92 @@
"prepend": "放前面",
"append": "放后面",
"Create flipped copies": "生成镜像副本",
- "Split oversized images into two": "将过大的图像分为两份",
"Split oversized images": "分割过大的图像",
+ "Auto focal point crop": "自动焦点裁切",
"Use BLIP for caption": "使用 BLIP 生成说明文字(自然语言描述)",
"Use deepbooru for caption": "使用 deepbooru 生成说明文字(tags)",
"Split image threshold": "图像分割阈值",
"Split image overlap ratio": "分割图像重叠的比率",
+ "Focal point face weight": "焦点面部权重",
+ "Focal point entropy weight": "焦点熵权重",
+ "Focal point edges weight": "焦点线条权重",
+ "Create debug image": "生成调试(debug)图片",
"Preprocess": "预处理",
- "Train an embedding; must specify a directory with a set of 1:1 ratio images": "训练 embedding; 必须指定一组具有 1:1 比例图像的目录",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "训练 embedding 或者 hypernetwork; 必须指定一组具有 1:1 比例图像的目录",
- "[wiki]": "[wiki]",
+ "[wiki]": "[wiki文档]",
"Embedding": "Embedding",
"Embedding Learning rate": "Embedding 学习率",
"Hypernetwork Learning rate": "Hypernetwork 学习率",
- "Learning rate": "学习率",
"Dataset directory": "数据集目录",
"Log directory": "日志目录",
"Prompt template file": "提示词模版文件",
"Max steps": "最大迭代步数",
"Save an image to log directory every N steps, 0 to disable": "每 N 步保存一个图像到日志目录,0 表示禁用",
"Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步将 embedding 的副本保存到日志目录,0 表示禁用",
- "Save images with embedding in PNG chunks": "保存图像并在 PNG 文件中嵌入 embedding 文件",
- "Read parameters (prompt, etc...) from txt2img tab when making previews": "进行预览时从文生图选项卡中读取参数(提示词等)",
+ "Save images with embedding in PNG chunks": "保存图像,并在 PNG 图片文件中嵌入 embedding 文件",
+ "Read parameters (prompt, etc...) from txt2img tab when making previews": "进行预览时,从文生图选项卡中读取参数(提示词等)",
"Train Hypernetwork": "训练 Hypernetwork",
"Train Embedding": "训练 Embedding",
"Create an aesthetic embedding out of any number of images": "从任意数量的图像中创建美术风格 embedding",
"Create images embedding": "生成图集 embedding",
- "txt2img history": "文生图历史记录",
- "img2img history": "图生图历史记录",
- "extras history": "后处理历史记录",
- "Renew Page": "刷新页面",
- "extras": "后处理",
- "favorites": "收藏夹",
- "custom fold": "自定义文件夹",
- "Load": "载入",
+ "Favorites": "收藏夹(已保存)",
+ "Others": "其他",
"Images directory": "图像目录",
- "Prev batch": "上一批",
- "Next batch": "下一批",
+ "Dropdown": "下拉列表",
"First Page": "首页",
"Prev Page": "上一页",
"Page Index": "页数",
"Next Page": "下一页",
"End Page": "尾页",
- "number of images to delete consecutively next": "接下来要连续删除的图像数",
+ "delete next": "删除下一张",
"Delete": "删除",
+ "sort by": "排序方式",
+ "path name": "路径名",
+ "date": "日期",
+ "keyword": "搜索",
"Generate Info": "生成信息",
"File Name": "文件名",
- "Collect": "收藏",
- "Refresh page": "刷新页面",
- "Date to": "日期至",
+ "Move to favorites": "移动到收藏夹(保存)",
+ "Renew Page": "刷新页面",
"Number": "数量",
"set_index": "设置索引",
+ "load_switch": "载入开关",
+ "turn_page_switch": "翻页开关",
"Checkbox": "勾选框",
"Apply settings": "保存设置",
- "Saving images/grids": "保存图像/概览图",
+ "Saving images/grids": "保存图像/宫格图",
"Always save all generated images": "始终保存所有生成的图像",
"File format for images": "图像的文件格式",
"Images filename pattern": "图像文件名格式",
- "Always save all generated image grids": "始终保存所有生成的概览图",
- "File format for grids": "概览图的文件格式",
- "Add extended info (seed, prompt) to filename when saving grid": "保存概览时将扩展信息(随机种子、提示词)添加到文件名",
- "Do not save grids consisting of one picture": "只有一张图片时不要保存概览图",
- "Prevent empty spots in grid (when set to autodetect)": "(在自动检测时)防止概览图中出现空位",
- "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "概览行数; 使用 -1 进行自动检测,使用 0 使其与批量大小相同",
- "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息作为块保存到 png 文件中",
- "Create a text file next to every image with generation parameters.": "保存图像时在每个图像旁边创建一个文本文件储存生成参数",
+ "Add number to filename when saving": "储存的时候在文件名里添加数字",
+ "Always save all generated image grids": "始终保存所有生成的宫格图",
+ "File format for grids": "宫格图的文件格式",
+ "Add extended info (seed, prompt) to filename when saving grid": "保存宫格图时,将扩展信息(随机种子、提示词)添加到文件名",
+ "Do not save grids consisting of one picture": "只有一张图片时不要保存宫格图",
+ "Prevent empty spots in grid (when set to autodetect)": "(启用自动检测时)防止宫格图中出现空位",
+ "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "宫格图行数; 使用 -1 进行自动检测,使用 0 使其与每批数量相同",
+ "Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息,作为块保存到 png 图片文件中",
+ "Create a text file next to every image with generation parameters.": "保存图像时,在每个图像旁边创建一个文本文件储存生成参数",
"Save a copy of image before doing face restoration.": "在进行面部修复之前保存图像副本",
+ "Save a copy of image before applying highres fix.": "在做高分辨率修复之前保存初始图像副本",
+ "Save a copy of image before applying color correction to img2img results": "在对图生图结果应用颜色校正之前保存图像副本",
"Quality for saved jpeg images": "保存的 jpeg 图像的质量",
- "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000,则缩小并保存副本为 JPG",
- "Use original name for output filename during batch process in extras tab": "在后处理选项卡中的批量处理过程中使用原始名称作为输出文件名",
+ "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000,则缩小并保存副本为 JPG 图片",
+ "Use original name for output filename during batch process in extras tab": "在更多选项卡中的批量处理过程中,使用原始名称作为输出文件名",
"When using 'Save' button, only save a single selected image": "使用“保存”按钮时,只保存一个选定的图像",
"Do not add watermark to images": "不要给图像加水印",
"Paths for saving": "保存路径",
"Output directory for images; if empty, defaults to three directories below": "图像的输出目录; 如果为空,则默认为以下三个目录",
"Output directory for txt2img images": "文生图的输出目录",
"Output directory for img2img images": "图生图的输出目录",
- "Output directory for images from extras tab": "后处理的输出目录",
- "Output directory for grids; if empty, defaults to two directories below": "概览图的输出目录; 如果为空,则默认为以下两个目录",
- "Output directory for txt2img grids": "文生图概览的输出目录",
- "Output directory for img2img grids": "图生图概览的输出目录",
+ "Output directory for images from extras tab": "更多选项卡的输出目录",
+ "Output directory for grids; if empty, defaults to two directories below": "宫格图的输出目录; 如果为空,则默认为以下两个目录",
+ "Output directory for txt2img grids": "文生图宫格的输出目录",
+ "Output directory for img2img grids": "图生图宫格的输出目录",
"Directory for saving images using the Save button": "使用“保存”按钮保存图像的目录",
"Saving to a directory": "保存到目录",
"Save images to a subdirectory": "将图像保存到子目录",
- "Save grids to a subdirectory": "将概览图保存到子目录",
+ "Save grids to a subdirectory": "将宫格图保存到子目录",
"When using \"Save\" button, save images to a subdirectory": "使用“保存”按钮时,将图像保存到子目录",
"Directory name pattern": "目录名称格式",
"Max prompt words for [prompt_words] pattern": "[prompt_words] 格式的最大提示词数量",
@@ -341,34 +373,36 @@
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "SwinIR 的图块重叠(Tile overlap)像素。低值 = 可见接缝",
"LDSR processing steps. Lower = faster": "LDSR 处理迭代步数。更低 = 更快",
"Upscaler for img2img": "图生图的放大算法",
- "Upscale latent space image when doing hires. fix": "做高分辨率修复时也放大潜空间图像",
+ "Upscale latent space image when doing hires. fix": "做高分辨率修复时,也放大潜空间图像",
"Face restoration": "面部修复",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 权重参数; 0 = 最大效果; 1 = 最小效果",
- "Move face restoration model from VRAM into RAM after processing": "面部修复处理完成后将面部修复模型从显存(VRAM)移至内存(RAM)",
+ "Move face restoration model from VRAM into RAM after processing": "面部修复处理完成后,将面部修复模型从显存(VRAM)移至内存(RAM)",
"System": "系统",
- "VRAM usage polls per second during generation. Set to 0 to disable.": "生成图像时每秒轮询显存(VRAM)使用情况的次数。设置为 0 以禁用",
+ "VRAM usage polls per second during generation. Set to 0 to disable.": "生成图像时,每秒轮询显存(VRAM)使用情况的次数。设置为 0 以禁用",
"Always print all generation info to standard output": "始终将所有生成信息输出到 standard output (一般为控制台)",
"Add a second progress bar to the console that shows progress for an entire job.": "向控制台添加第二个进度条,显示整个作业的进度",
"Training": "训练",
- "Unload VAE and CLIP from VRAM when training": "训练时从显存(VRAM)中取消 VAE 和 CLIP 的加载",
- "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
+ "Move VAE and CLIP to RAM when training if possible. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM)如果可行的话,节省显存(VRAM)",
"Filename word regex": "文件名用词的正则表达式",
"Filename join string": "文件名连接用字符串",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "每个 epoch 中单个输入图像的重复次数; 仅用于显示 epoch 数",
- "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步保存一个包含 loss 的 csv 到日志目录,0 表示禁用",
+ "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步保存一个包含 loss 的 csv 表格到日志目录,0 表示禁用",
+ "Use cross attention optimizations while training": "训练时开启 cross attention 优化",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "缓存在内存(RAM)中的模型(ckpt)",
+ "SD VAE": "模型的 VAE (SD VAE)",
+ "auto": "自动",
"Hypernetwork strength": "Hypernetwork 强度",
+ "Inpainting conditioning mask strength": "局部重绘时图像调节的蒙版屏蔽强度",
"Apply color correction to img2img results to match original colors.": "对图生图结果应用颜色校正以匹配原始颜色",
- "Save a copy of image before applying color correction to img2img results": "在对图生图结果应用颜色校正之前保存图像副本",
- "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的去噪需要更少的迭代步数)",
+ "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的重绘幅度需要更少的迭代步数)",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "在 K 采样器中启用量化以获得更清晰、更清晰的结果。这可能会改变现有的随机种子。需要重新启动才能应用",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "强调符:使用 (文字) 使模型更关注该文本,使用 [文字] 使其减少关注",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用旧的强调符实现。可用于复现旧随机种子",
- "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器批量生成与生成单个图像时产出相同的图像",
+ "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器 批量生成与生成单个图像时,产出相同的图像",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "当使用超过 75 个 token 时,通过从 n 个 token 中的最后一个逗号填补来提高一致性",
- "Filter NSFW content": "过滤成人内容",
- "Stop At last layers of CLIP model": "在 CLIP 模型的最后哪一层停下",
+ "Filter NSFW content": "过滤成人内容(NSFW)",
+ "Stop At last layers of CLIP model": "在 CLIP 模型的最后哪一层停下 (Clip skip)",
"Interrogate Options": "反推提示词选项",
"Interrogate: keep models in VRAM": "反推: 将模型保存在显存(VRAM)中",
"Interrogate: use artists from artists.csv": "反推: 使用 artists.csv 中的艺术家",
@@ -384,105 +418,207 @@
"User interface": "用户界面",
"Show progressbar": "显示进度条",
"Show image creation progress every N sampling steps. Set 0 to disable.": "每 N 个采样迭代步数显示图像生成进度。设置 0 禁用",
- "Show previews of all images generated in a batch as a grid": "以网格的形式预览所有批量生成出来的图像",
- "Show grid in results for web": "在网页的结果中显示概览图",
+ "Show previews of all images generated in a batch as a grid": "以网格的形式,预览批量生成的所有图像",
+ "Show grid in results for web": "在网页的结果中显示宫格图",
"Do not show any images in results for web": "不在网页的结果中显示任何图像",
"Add model hash to generation information": "将模型的哈希值添加到生成信息",
"Add model name to generation information": "将模型名称添加到生成信息",
- "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "当从文本读取生成参数到 UI(从 PNG 信息或粘贴文本)时,不要更改选定的模型(ckpt)",
- "Font for image grids that have text": "有文字的概览图使用的字体",
+ "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "从文本读取生成参数到用户界面(从 PNG 图片信息或粘贴文本)时,不要更改选定的模型(ckpt)",
+ "Send seed when sending prompt or image to other interface": "将提示词或者图片发送到 >> 其他界面时,把随机种子也传送过去",
+ "Font for image grids that have text": "有文字的宫格图使用的字体",
"Enable full page image viewer": "启用整页图像查看器",
- "Show images zoomed in by default in full page image viewer": "在整页图像查看器中默认放大显示图像",
+ "Show images zoomed in by default in full page image viewer": "在整页图像查看器中,默认放大显示图像",
"Show generation progress in window title.": "在窗口标题中显示生成进度",
"Quicksettings list": "快速设置列表",
- "Localization (requires restart)": "本地化(需要重新启动)",
+ "Localization (requires restart)": "本地化翻译(需要保存设置,并重启Gradio)",
"Sampler parameters": "采样器参数",
"Hide samplers in user interface (requires restart)": "在用户界面中隐藏采样器(需要重新启动)",
"eta (noise multiplier) for DDIM": "DDIM 的 eta (噪声乘数) ",
"eta (noise multiplier) for ancestral samplers": "ancestral 采样器的 eta (噪声乘数)",
"img2img DDIM discretize": "图生图 DDIM 离散化",
- "uniform": "均勻",
+ "uniform": "均匀",
"quad": "二阶",
"sigma churn": "sigma churn",
"sigma tmin": "最小(tmin) sigma",
"sigma noise": "sigma 噪声",
- "Eta noise seed delta": "Eta 噪声种子偏移(noise seed delta)",
+ "Eta noise seed delta": "Eta 噪声种子偏移(ENSD - Eta noise seed delta)",
"Images Browser": "图库浏览器",
- "Preload images at startup": "在启动时预载图像",
+ "Preload images at startup": "在启动时预加载图像",
"Number of columns on the page": "每页列数",
"Number of rows on the page": "每页行数",
- "Number of pictures displayed on each page": "每页显示的图像数量",
"Minimum number of pages per load": "每次加载的最小页数",
- "Number of grids in each row": "每行显示多少格",
- "Wildcards": "通配符",
"Use same seed for all images": "为所有图像使用同一个随机种子",
"Request browser notifications": "请求浏览器通知",
"Download localization template": "下载本地化模板",
- "Reload custom script bodies (No ui updates, No restart)": "重新加载自定义脚本主体(无 ui 更新,无重启)",
+ "Reload custom script bodies (No ui updates, No restart)": "重新加载自定义脚本主体(无用户界面更新,无重启)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重启 Gradio 及刷新组件(仅限自定义脚本、ui.py、js 和 css)",
- "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)",
- "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)",
- "Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
- "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
- "Save style": "储存为模版风格",
- "Apply selected styles to current prompt": "将所选样式应用于当前提示",
- "Stop processing current image and continue processing.": "停止处理当前图像并继续处理下一个",
- "Stop processing images and return any results accumulated so far.": "停止处理图像并返回迄今为止累积的任何结果",
- "Style to apply; styles have components for both positive and negative prompts and apply to both": "要应用的模版风格; 模版风格包含正向和反向提示词,并应用于两者",
+ "Available": "可用",
+ "Install from URL": "从网址安装",
+ "Apply and restart UI": "应用并重启用户界面",
+ "Check for updates": "检查更新",
+ "Extension": "扩展",
+ "URL": "网址",
+ "Update": "更新",
+ "a1111-sd-webui-tagcomplete": "Tag自动补全",
+ "unknown": "未知",
+ "deforum-for-automatic1111-webui": "Deforum",
+ "sd-dynamic-prompting": "动态提示词",
+ "stable-diffusion-webui-aesthetic-gradients": "美术风格梯度",
+ "stable-diffusion-webui-aesthetic-image-scorer": "美术风格评分",
+ "stable-diffusion-webui-artists-to-study": "艺术家图库",
+ "stable-diffusion-webui-dataset-tag-editor": "数据集标签编辑器",
+ "stable-diffusion-webui-images-browser": "图库浏览器",
+ "stable-diffusion-webui-inspiration": "灵感",
+ "stable-diffusion-webui-wildcards": "通配符",
+ "Load from:": "加载自",
+ "Extension index URL": "扩展列表链接",
+ "URL for extension's git repository": "扩展的 git 仓库链接",
+ "Local directory name": "本地路径名",
+ "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nPrompt",
+ "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)\nNegative prompt",
+ "Stop processing current image and continue processing.": "停止处理当前图像,并继续处理下一个",
+ "Stop processing images and return any results accumulated so far.": "停止处理图像,并返回迄今为止累积的任何结果",
"Do not do anything special": "什么都不做",
"Which algorithm to use to produce the image": "使用哪种算法生成图像",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 非常有创意,可以根据迭代步数获得完全不同的图像,将迭代步数设置为高于 30-40 不会有正面作用",
- "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅长内补绘制",
+ "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅长局部重绘",
"Produce an image that can be tiled.": "生成可用于平铺(tiled)的图像",
- "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用两步处理的时候以较小的分辨率生成初步图像、接着放大图像,然后在不更改构图的情况下改进其中的细节",
- "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "决定算法对图像内容的影响程度。设置 0 时,什么都不会改变,而在 1 时,你将获得不相关的图像。值低于 1.0 时,处理的迭代步数将少于“采样迭代步数”滑块指定的步数",
+ "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用两步处理的时候,以较小的分辨率生成初步图像、接着放大图像,然后在不更改构图的情况下改进其中的细节",
+ "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "决定算法对图像内容的影响程度。设置 0 时,什么都不会改变,而在 1 时,你将获得不相关的图像。\n值低于 1.0 时,处理的迭代步数将少于“采样迭代步数”滑块指定的步数",
"How many batches of images to create": "创建多少批次的图像",
"How many image to create in a single batch": "每批创建多少图像",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - 图像应在多大程度上服从提示词 - 较低的值会产生更有创意的结果",
- "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "一个固定随机数生成器输出的值 - 以相同参数和随机种子生成的图像会得到相同的结果",
- "Set seed to -1, which will cause a new random number to be used every time": "将随机种子设置为-1,则每次都会使用一个新的随机数",
- "Reuse seed from last generation, mostly useful if it was randomed": "重用上一次使用的随机种子,如果想要固定结果就会很有用",
"Seed of a different picture to be mixed into the generation.": "将要参与生成的另一张图的随机种子",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "想要产生多强烈的变化。设为 0 时,将没有效果。设为 1 时,你将获得完全产自差异随机种子的图像(ancestral 采样器除外,你只是单纯地生成了一些东西)",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "尝试生成与在指定分辨率下使用相同随机种子生成的图像相似的图片",
"This text is used to rotate the feature space of the imgs embs": "此文本用于旋转图集 embeddings 的特征空间",
"Separate values for X axis using commas.": "使用逗号分隔 X 轴的值",
"Separate values for Y axis using commas.": "使用逗号分隔 Y 轴的值",
- "Write image to a directory (default - log/images) and generation parameters into csv file.": "将图像写入目录(默认 - log/images)并将生成参数写入 csv 文件",
- "Open images output directory": "打开图像输出目录",
+ "Write image to a directory (default - log/images) and generation parameters into csv file.": "将图像写入目录(默认 - log/images)并将生成参数写入 csv 表格文件",
"How much to blur the mask before processing, in pixels.": "处理前要对蒙版进行多强的模糊,以像素为单位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么",
- "fill it with colors of the image": "用图像的颜色填充它",
- "keep whatever was there originally": "保留原来的东西",
- "fill it with latent space noise": "用潜空间的噪声填充它",
- "fill it with latent space zeroes": "用潜空间的零填充它",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做内补绘制,缩小后粘贴到原始图像中",
+ "fill it with colors of the image": "用图像的颜色(高强度模糊)填充它",
+ "keep whatever was there originally": "保留原来的图像,不进行预处理",
+ "fill it with latent space noise": "于潜空间填充噪声",
+ "fill it with latent space zeroes": "于潜空间填零",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域(包括预留像素长度的缓冲区域)放大到目标分辨率,进行局部重绘。\n然后缩小并粘贴回原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "调整图像大小,使整个图像在目标分辨率内。用图像的颜色填充空白区域",
"How many times to repeat processing an image and using it as input for the next iteration": "重复处理图像并用作下次迭代输入的次数",
- "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每个循环中,去噪强度都会乘以该值。<1 表示减少多样性,因此你的这一组图将集中在固定的图像上。>1 意味着增加多样性,因此你的这一组图将变得越来越混乱",
+ "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每个循环中,重绘幅度都会乘以该值。<1 表示减少多样性,因此你的这一组图将集中在固定的图像上。>1 意味着增加多样性,因此你的这一组图将变得越来越混乱",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "使用 SD 放大(SD upscale)时,图块(Tiles)之间应该有多少像素重叠。图块(Tiles)之间需要重叠才可以让它们在合并回一张图像时,没有清晰可见的接缝",
"A directory on the same machine where the server is running.": "与服务器主机上的目录",
"Leave blank to save images to the default path.": "留空以将图像保存到默认路径",
"Result = A * (1 - M) + B * M": "结果 = A * (1 - M) + B * M",
"Result = A + (B - C) * M": "结果 = A + (B - C) * M",
"1st and last digit must be 1. ex:'1, 2, 1'": "第一个和最后一个数字必须是 1。例:'1, 2, 1'",
- "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
"Path to directory with input images": "带有输入图像的路径",
"Path to directory where to write outputs": "进行输出的路径",
"Input images directory": "输入图像目录",
- "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
+ "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; 默认请留空",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "如果启用此选项,水印将不会添加到生成出来的图像中。警告:如果你不添加水印,你的行为可能是不符合专业操守的",
- "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和概览图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
+ "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和宫格图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; 默认请留空",
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变",
"This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行",
- "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "仅适用于局部重绘专用的模型(模型后缀为 inpainting.ckpt 的模型)。决定了蒙版在局部重绘以及图生图中屏蔽原图内容的强度。 1.0 表示完全屏蔽原图,这是默认行为。0.0 表示完全不屏蔽让原图进行图像调节。较低的值将有助于保持原图的整体构图,但很难遇到较大的变化",
+ "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置项名称的列表,以逗号分隔,该设置会移动到顶部的快速访问栏,而不是默认的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
+ "Leave empty for auto": "留空时自动生成",
+ "Autocomplete options": "自动补全选项",
"Enable Autocomplete": "开启Tag补全",
+ "Append commas": "附加逗号",
+ "latest": "最新",
+ "behind": "落后",
+ "Roll three": "抽三位出来",
+ "Generate forever": "无限生成",
+ "Cancel generate forever": "停止无限生成",
+ "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding(如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding)\n\n你可以使用以下语法设置单个数值或多个学习率:\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
+ "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
+ "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results": "迭代改进生成的图像多少次;更高的值需要更长的时间;非常低的值会产生不好的结果",
+ "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt": "在图像上画一个蒙版,脚本会根据提示重新生成蒙版区域的内容",
+ "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back": "正常放大图像,将结果分割成图块(tiles),用图生图改进每个图块(tiles),最后将整个图像合并回来",
+ "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "创建一个网格,图像将有不同的参数。使用下面的输入来指定哪些参数将由列和行共享",
+ "Run Python code. Advanced user only. Must run program with --allow-code for this to work": "运行 Python 代码。仅限老手使用。必须以 --allow-code 来开启程序,才能使其运行",
+ "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others": "以逗号分割的单词列表,第一个单词将被用作关键词:脚本将在提示词中搜索这个单词,并用其他单词替换它",
+ "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "以逗号分割的单词列表,脚本会排列出这些单词的所有排列方式,并加入提示词各生成一次",
+ "Reconstruct prompt from existing image and put it into the prompt field.": "从现有的图像中重构出提示词,并将其放入提示词的输入文本框",
+ "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle": "设置在[prompt_words]选项中要使用的最大字数;注意:如果字数太长,可能会超过系统可处理的文件路径的最大长度",
+ "Process an image, use it as an input, repeat.": "处理一张图像,将其作为输入,并重复",
+ "Insert selected styles into prompt fields": "在提示词中插入选定的模版风格",
+ "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.": "将当前的提示词保存为模版风格。如果你在文本中添加{prompt}标记,那么将来你使用该模版风格时,你现有的提示词会替换模版风格中的{prompt}",
+ "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在生成图像之前从模型(ckpt)中加载权重。你可以使用哈希值或文件名的一部分(如设置中所示)作为模型(ckpt)名称。建议用在Y轴上以减少过程中模型的切换",
+ "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成过程中,Torch使用的显存(VRAM)峰值,不包括缓存的数据。\nTorch reserved: Torch 分配的显存(VRAM)的峰值量,包括所有活动和缓存数据。\nSys VRAM: 所有应用程序分配的显存(VRAM)的峰值量 / GPU 的总显存(VRAM)(峰值利用率%)",
+ "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潜空间中的图像。而另一种方法是,从潜变量表达中直接解码并生成完整的图像,接着放大它,然后再将其编码回潜空间",
+ "Start drawing": "开始绘制",
+ "Description": "描述",
+ "Action": "行动",
+ "Aesthetic Gradients": "美术风格梯度",
+ "aesthetic-gradients": "美术风格梯度",
+ "Wildcards": "通配符",
+ "Dynamic Prompts": "动态提示词",
+ "Image browser": "图库浏览器",
+ "images-browser": "图库浏览器",
+ "Inspiration": "灵感",
+ "Deforum": "Deforum",
+ "Artists to study": "艺术家图库",
+ "Aesthetic Image Scorer": "美术风格评分",
+ "Dataset Tag Editor": "数据集标签编辑器",
+
+
+ "----not work----": "----以下内容无法被翻译,Bug----",
+ "Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
+ "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
+ "Save style": "储存为模版风格",
+ "Apply selected styles to current prompt": "将所选模板风格,应用于当前提示词",
+ "Set seed to -1, which will cause a new random number to be used every time": "将随机种子设置为-1,则每次都会使用一个新的随机数",
+ "Reuse seed from last generation, mostly useful if it was randomed": "重用上一次使用的随机种子,如果想要固定结果就会很有用",
+ "Open images output directory": "打开图像输出目录",
+ "Upscaler 1": "放大算法 1",
+ "Upscaler 2": "放大算法 2",
+ "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "用竖线分隔符(|)将提示词分成若干部分,脚本将为它们的每一个组合创建一幅图片(除了被分割的第一部分,所有的组合都会包含这部分)",
+ "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "选择哪些 Real-ESRGAN 模型显示在网页用户界面。(需要重新启动)",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
- "Roll three": "抽三位出來",
- "Generate forever": "不停地生成",
- "Cancel generate forever": "取消不停地生成"
+ "Face restoration model": "面部修复模型",
+ "Install": "安装",
+ "Installing...": "安装中...",
+ "Installed": "已安装",
+
+ "Style to apply; styles have components for both positive and negative prompts and apply to both": "要使用的模版风格; 模版风格包含正向和反向提示词,并应用于两者\n\ud83c\udfa8 随机添加一个艺术家到提示词中\n \u2199\ufe0f 从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面\n\ud83d\udcbe 将当前的提示词保存为模版风格(保存在styles.csv)\n\ud83d\udccb 将所选模板风格,应用于当前提示词\n如果你在文本中添加{prompt}标记,并保存为模版风格\n那么将来你使用该模版风格时,你现有的提示词会替换模版风格中的{prompt}",
+ "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "一个固定随机数生成器输出的值 - 以相同参数和随机种子生成的图像会得到相同的结果\n\ud83c\udfb2 将随机种子设置为-1,则每次都会使用一个新的随机数\n\u267b\ufe0f 重用上一次使用的随机种子,如果想要固定输出结果就会很有用",
+
+
+ "----deprecated----": "----以下内容在webui新版本已移除----",
+ "▼": "▼",
+ "History": "历史记录",
+ "Show Textbox": "显示文本框",
+ "File with inputs": "含输入内容的文件",
+ "Prompts": "提示词",
+ "Disabled when launched with --hide-ui-dir-config.": "启动 --hide-ui-dir-config 时禁用",
+ "Open output directory": "打开输出目录",
+ "Create aesthetic images embedding": "生成美术风格图集 embedding",
+ "Split oversized images into two": "将过大的图像分为两份",
+ "Train an embedding; must specify a directory with a set of 1:1 ratio images": "训练 embedding; 必须指定一组具有 1:1 比例图像的目录",
+ "Learning rate": "学习率",
+ "txt2img history": "文生图历史记录",
+ "img2img history": "图生图历史记录",
+ "extras history": "更多选项卡的历史记录",
+ "extras": "更多",
+ "custom fold": "自定义文件夹",
+ "Load": "载入",
+ "Prev batch": "上一批",
+ "Next batch": "下一批",
+ "number of images to delete consecutively next": "接下来要连续删除的图像数",
+ "Date to": "日期至",
+ "Refresh page": "刷新页面",
+ "Unload VAE and CLIP from VRAM when training": "训练时从显存(VRAM)中取消 VAE 和 CLIP 的加载",
+ "Number of pictures displayed on each page": "每页显示的图像数量",
+ "Number of grids in each row": "每行显示多少格",
+ "favorites": "收藏夹(已保存)",
+ "others": "其他",
+ "Collect": "收藏(保存)",
+
+
+ "--------": "--------"
}
diff --git a/localizations/zh_TW.json b/localizations/zh_TW.json
index 724df1ac..4e6dac44 100644
--- a/localizations/zh_TW.json
+++ b/localizations/zh_TW.json
@@ -12,14 +12,15 @@
"Stable Diffusion checkpoint": "Stable Diffusion 模型權重存檔點",
"txt2img": "文生圖",
"img2img": "圖生圖",
- "Extras": "後處理",
- "PNG Info": "PNG 資訊",
- "Checkpoint Merger": "模型權重存檔點合併工具",
+ "Extras": "更多",
+ "PNG Info": "圖片資訊",
+ "Checkpoint Merger": "模型權重存檔點合併",
"Train": "訓練",
- "Create aesthetic embedding": "生成美術風格 embedding",
+ "Create aesthetic embedding": "生成美術風格",
"Image Browser": "圖庫瀏覽器",
"History": "歷史記錄",
"Settings": "設定",
+ "Extensions": "擴充",
"Prompt": "提示詞",
"Negative prompt": "反向提示詞",
"Run": "執行",
@@ -28,7 +29,7 @@
"Generate": "生成",
"Style 1": "模版風格 1",
"Style 2": "模版風格 2",
- "Label": "標籤",
+ "Label": "標記",
"File": "檔案",
"Drop File Here": "拖曳檔案到此",
"-": "-",
@@ -59,12 +60,12 @@
"Highres. fix": "高解析度修復",
"Firstpass width": "第一遍的寬度",
"Firstpass height": "第一遍的高度",
- "Denoising strength": "去噪強度",
- "Batch count": "批次",
- "Batch size": "批量",
+ "Denoising strength": "重繪幅度",
+ "Batch count": "生成批次",
+ "Batch size": "每批數量",
"CFG Scale": "提示詞相關性(CFG)",
"Seed": "隨機種子",
- "Extra": "額外參數",
+ "Extra": "▼",
"Variation seed": "差異隨機種子",
"Variation strength": "差異強度",
"Resize seed from width": "自寬度縮放隨機種子",
@@ -81,7 +82,7 @@
"Slerp angle": "Slerp 角度",
"Is negative text": "是反向提示詞",
"Script": "指令碼",
- "Embedding to Shareable PNG": "將 Embedding 轉換為可分享的 PNG",
+ "Embedding to Shareable PNG": "將 Embedding 轉換為可分享的 PNG 圖片檔案",
"Prompt matrix": "提示詞矩陣",
"Prompts from file or textbox": "從文字方塊或檔案載入提示詞",
"X/Y plot": "X/Y 圖表",
@@ -91,6 +92,10 @@
"Show Textbox": "顯示文字方塊",
"File with inputs": "含輸入內容的檔案",
"Prompts": "提示詞",
+ "Iterate seed every line": "每行輸入都換一個種子",
+ "Use same random seed for all lines": "每行輸入都使用同一個隨機種子",
+ "List of prompt inputs": "提示詞輸入列表",
+ "Upload prompt inputs": "上傳提示詞輸入檔案",
"X type": "X軸類型",
"Nothing": "無",
"Var. seed": "差異隨機種子",
@@ -100,8 +105,8 @@
"Prompt order": "提示詞順序",
"Sampler": "採樣器",
"Checkpoint name": "模型權重存檔點的名稱",
- "Hypernetwork": "Hypernetwork",
- "Hypernet str.": "Hypernetwork 強度",
+ "Hypernetwork": "超網路",
+ "Hypernet str.": "超網路強度",
"Sigma Churn": "Sigma Churn",
"Sigma min": "最小 Sigma",
"Sigma max": "最大 Sigma",
@@ -109,6 +114,7 @@
"Eta": "Eta",
"Clip skip": "Clip 跳過",
"Denoising": "去噪",
+ "Cond. Image Mask Weight": "圖像調節屏蔽度",
"X values": "X軸數值",
"Y type": "Y軸類型",
"Y values": "Y軸數值",
@@ -118,44 +124,44 @@
"Drop Image Here": "拖曳圖像到此",
"Save": "儲存",
"Send to img2img": ">> 圖生圖",
- "Send to inpaint": ">> 內補繪製",
- "Send to extras": ">> 後處理",
+ "Send to inpaint": ">> 局部重繪",
+ "Send to extras": ">> 更多",
"Make Zip when Save?": "儲存時生成ZIP壓縮檔案?",
"Textbox": "文字方塊",
"Interrogate\nCLIP": "CLIP\n反推提示詞",
"Interrogate\nDeepBooru": "DeepBooru\n反推提示詞",
- "Inpaint": "內補繪製",
+ "Inpaint": "局部重繪",
"Batch img2img": "批量圖生圖",
"Image for img2img": "圖生圖的圖像",
- "Image for inpainting with mask": "用於內補繪製蒙版內容的圖像",
+ "Image for inpainting with mask": "用於局部重繪並手動畫蒙版的圖像",
"Mask": "蒙版",
"Mask blur": "蒙版模糊",
"Mask mode": "蒙版模式",
"Draw mask": "繪製蒙版",
"Upload mask": "上傳蒙版",
"Masking mode": "蒙版模式",
- "Inpaint masked": "內補繪製蒙版內容",
- "Inpaint not masked": "內補繪製非蒙版內容",
+ "Inpaint masked": "重繪蒙版內容",
+ "Inpaint not masked": "重繪非蒙版內容",
"Masked content": "蒙版蒙住的內容",
"fill": "填充",
"original": "原圖",
"latent noise": "潛空間噪聲",
"latent nothing": "潛空間數值零",
- "Inpaint at full resolution": "以完整解析度進行內補繪製",
- "Inpaint at full resolution padding, pixels": "以完整解析度進行內補繪製 — 填補畫素",
- "Process images in a directory on the same machine where the server is running.": "在伺服器主機上的目錄中處理圖像",
- "Use an empty output directory to save pictures normally instead of writing to the output directory.": "指定一個空的資料夾為輸出目錄而非預設的 output 資料夾為輸出目錄",
+ "Inpaint at full resolution": "全解析度局部重繪",
+ "Inpaint at full resolution padding, pixels": "預留畫素",
+ "Process images in a directory on the same machine where the server is running.": "使用伺服器主機上的一個目錄,作為輸入目錄處理圖像",
+ "Use an empty output directory to save pictures normally instead of writing to the output directory.": "使用一個空的資料夾作為輸出目錄,而不是使用預設的 output 資料夾作為輸出目錄",
"Disabled when launched with --hide-ui-dir-config.": "啟動 --hide-ui-dir-config 時禁用",
"Input directory": "輸入目錄",
"Output directory": "輸出目錄",
"Resize mode": "縮放模式",
- "Just resize": "只縮放",
- "Crop and resize": "縮放並剪裁",
- "Resize and fill": "縮放並填充",
+ "Just resize": "拉伸",
+ "Crop and resize": "裁剪",
+ "Resize and fill": "填充",
"img2img alternative test": "圖生圖的另一種測試",
"Loopback": "回送",
- "Outpainting mk2": "外補繪製第二版",
- "Poor man's outpainting": "效果稍差的外補繪製",
+ "Outpainting mk2": "向外繪製第二版",
+ "Poor man's outpainting": "效果稍差的向外繪製",
"SD upscale": "使用 SD 放大",
"should be 2 or lower.": "必須小於等於2",
"Override `Sampling method` to Euler?(this method is built for it)": "覆寫「採樣方法」為 Euler?(這個方法就是為這樣做設計的)",
@@ -164,15 +170,15 @@
"Original negative prompt": "初始反向提示詞",
"Override `Sampling Steps` to the same value as `Decode steps`?": "覆寫「採樣疊代步數」為「解碼疊代步數」?",
"Decode steps": "解碼疊代步數",
- "Override `Denoising strength` to 1?": "覆寫「去噪強度」為1?",
+ "Override `Denoising strength` to 1?": "覆寫「重繪幅度」為1?",
"Decode CFG scale": "解碼提示詞相關性(CFG)",
"Randomness": "隨機度",
"Sigma adjustment for finding noise for image": "為尋找圖中噪點的 Sigma 調整",
"Loops": "疊代次數",
- "Denoising strength change factor": "去噪強度的調整係數",
- "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推薦設定:採樣疊代步數:80-100,採樣器:Euler a,去噪強度:0.8",
+ "Denoising strength change factor": "重繪幅度的調整係數",
+ "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推薦設定:採樣疊代步數:80-100,採樣器:Euler a,重繪幅度:0.8",
"Pixels to expand": "拓展的畫素數",
- "Outpainting direction": "外補繪製的方向",
+ "Outpainting direction": "向外繪製的方向",
"left": "左",
"right": "右",
"up": "上",
@@ -205,6 +211,7 @@
"CodeFormer visibility": "CodeFormer 可見度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 權重 (0 = 最大效果, 1 = 最小效果)",
"Open output directory": "打開輸出目錄",
+ "Upscale Before Restoring Faces": "放大後再進行面部修復",
"Send to txt2img": ">> 文生圖",
"A merger of the two checkpoints will be generated in your": "合併後的模型權重存檔點會生成在你的",
"checkpoint": "模型權重存檔點",
@@ -219,7 +226,7 @@
"Add difference": "加入差分",
"Save as float16": "以 float16 儲存",
"See": "檢視",
- "wiki": "wiki",
+ "wiki": "wiki文件",
"for detailed explanation.": "以了解詳細說明",
"Create embedding": "生成 embedding",
"Create aesthetic images embedding": "生成美術風格圖集 embedding",
@@ -237,6 +244,36 @@
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
+ "tanh": "tanh",
+ "sigmoid": "sigmoid",
+ "celu": "celu",
+ "gelu": "gelu",
+ "glu": "glu",
+ "hardshrink": "hardshrink",
+ "hardsigmoid": "hardsigmoid",
+ "hardtanh": "hardtanh",
+ "logsigmoid": "logsigmoid",
+ "logsoftmax": "logsoftmax",
+ "mish": "mish",
+ "prelu": "prelu",
+ "rrelu": "rrelu",
+ "relu6": "relu6",
+ "selu": "selu",
+ "silu": "silu",
+ "softmax": "softmax",
+ "softmax2d": "softmax2d",
+ "softmin": "softmin",
+ "softplus": "softplus",
+ "softshrink": "softshrink",
+ "softsign": "softsign",
+ "tanhshrink": "tanhshrink",
+ "threshold": "閾值",
+ "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "挑選初始化層權重的方案. 類relu - Kaiming, 類sigmoid - Xavier 都是比較推薦的選項",
+ "Normal": "正態",
+ "KaimingUniform": "Kaiming 均勻",
+ "KaimingNormal": "Kaiming 正態",
+ "XavierUniform": "Xavier 均勻",
+ "XavierNormal": "Xavier 正態",
"Add layer normalization": "加入層標準化",
"Use dropout": "採用 dropout 防止過擬合",
"Overwrite Old Hypernetwork": "覆寫舊的 Hypernetwork",
@@ -250,10 +287,15 @@
"Create flipped copies": "生成鏡像副本",
"Split oversized images into two": "將過大的圖像分為兩份",
"Split oversized images": "分割過大的圖像",
+ "Auto focal point crop": "自動焦點裁切",
"Use BLIP for caption": "使用 BLIP 生成說明文字(自然語言描述)",
- "Use deepbooru for caption": "使用 deepbooru 生成說明文字(標籤)",
+ "Use deepbooru for caption": "使用 deepbooru 生成說明文字(標記)",
"Split image threshold": "圖像分割閾值",
"Split image overlap ratio": "分割圖像重疊的比率",
+ "Focal point face weight": "焦點面部權重",
+ "Focal point entropy weight": "焦點熵權重",
+ "Focal point edges weight": "焦點線條權重",
+ "Create debug image": "生成除錯圖片",
"Preprocess": "預處理",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "訓練 embedding; 必須指定一組具有 1:1 比例圖像的目錄",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "訓練 embedding 或者 hypernetwork; 必須指定一組具有 1:1 比例圖像的目錄",
@@ -268,8 +310,8 @@
"Max steps": "最大疊代步數",
"Save an image to log directory every N steps, 0 to disable": "每 N 步儲存一個圖像到日誌目錄,0 表示禁用",
"Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步將 embedding 的副本儲存到日誌目錄,0 表示禁用",
- "Save images with embedding in PNG chunks": "儲存圖像並在 PNG 檔案中嵌入 embedding 檔案",
- "Read parameters (prompt, etc...) from txt2img tab when making previews": "進行預覽時從文生圖頁籤中讀取參數(提示詞等)",
+ "Save images with embedding in PNG chunks": "儲存圖像,並在 PNG 圖片檔案中嵌入 embedding 檔案",
+ "Read parameters (prompt, etc...) from txt2img tab when making previews": "進行預覽時,從文生圖頁籤中讀取參數(提示詞等)",
"Train Hypernetwork": "訓練 Hypernetwork",
"Train Embedding": "訓練 Embedding",
"Create an aesthetic embedding out of any number of images": "從任意數量的圖像中建立美術風格 embedding",
@@ -277,60 +319,74 @@
"txt2img history": "文生圖歷史記錄",
"img2img history": "圖生圖歷史記錄",
"extras history": "後處理歷史記錄",
- "Renew Page": "刷新頁面",
"extras": "後處理",
"favorites": "收藏夾",
+ "Favorites": "收藏夾",
+ "Others": "其他",
"custom fold": "自訂資料夾",
"Load": "載入",
"Images directory": "圖像目錄",
"Prev batch": "上一批",
"Next batch": "下一批",
+ "Dropdown": "下拉式清單",
"First Page": "首頁",
"Prev Page": "上一頁",
"Page Index": "頁數",
"Next Page": "下一頁",
"End Page": "尾頁",
"number of images to delete consecutively next": "接下來要連續刪除的圖像數",
+ "delete next": "刪除下一張",
"Delete": "刪除",
+ "sort by": "排序方式",
+ "path name": "路徑名",
+ "date": "日期",
+ "keyword": "搜尋",
"Generate Info": "生成資訊",
"File Name": "檔案名",
"Collect": "收藏",
"Refresh page": "刷新頁面",
"Date to": "日期至",
+ "Move to favorites": "移動到收藏夾",
+ "Renew Page": "刷新頁面",
"Number": "數量",
"set_index": "設定索引",
+ "load_switch": "載入開關",
+ "turn_page_switch": "翻頁開關",
"Checkbox": "核取方塊",
"Apply settings": "儲存設定",
- "Saving images/grids": "儲存圖像/概覽圖",
+ "Saving images/grids": "儲存圖像/宮格圖",
"Always save all generated images": "始終儲存所有生成的圖像",
"File format for images": "圖像的檔案格式",
"Images filename pattern": "圖像檔案名格式",
- "Always save all generated image grids": "始終儲存所有生成的概覽圖",
- "File format for grids": "概覽圖的檔案格式",
- "Add extended info (seed, prompt) to filename when saving grid": "儲存概覽時將擴展資訊(隨機種子,提示詞)加入到檔案名",
- "Do not save grids consisting of one picture": "只有一張圖片時不要儲存概覽圖",
- "Prevent empty spots in grid (when set to autodetect)": "(在自動檢測時)防止概覽圖中出現空位",
- "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "概覽行數; 使用 -1 進行自動檢測,使用 0 使其與批量大小相同",
- "Save text information about generation parameters as chunks to png files": "將有關生成參數的文本資訊作為塊儲存到PNG檔案中",
- "Create a text file next to every image with generation parameters.": "儲存圖像時在每個圖像旁邊建立一個文本檔案儲存生成參數",
+ "Add number to filename when saving": "儲存的時候在檔案名里加入數字",
+ "Always save all generated image grids": "始終儲存所有生成的宮格圖",
+ "File format for grids": "宮格圖的檔案格式",
+ "Add extended info (seed, prompt) to filename when saving grid": "儲存宮格圖時,將擴展資訊(隨機種子,提示詞)加入到檔案名",
+ "Do not save grids consisting of one picture": "只有一張圖片時不要儲存宮格圖",
+ "Prevent empty spots in grid (when set to autodetect)": "(啟用自動偵測時)防止宮格圖中出現空位",
+ "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "宮格圖行數; 使用 -1 進行自動檢測,使用 0 使其與每批數量相同",
+ "Save text information about generation parameters as chunks to png files": "將有關生成參數的文本資訊,作為塊儲存到PNG圖片檔案中",
+ "Create a text file next to every image with generation parameters.": "儲存圖像時,在每個圖像旁邊建立一個文本檔案儲存生成參數",
"Save a copy of image before doing face restoration.": "在進行面部修復之前儲存圖像副本",
+ "Save a copy of image before applying highres fix.": "在做高解析度修復之前儲存初始圖像副本",
+ "Save a copy of image before applying color correction to img2img results": "在對圖生圖結果套用顏色校正之前儲存圖像副本",
"Quality for saved jpeg images": "儲存的JPEG圖像的品質",
- "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 圖像大於 4MB 或寬高大於 4000,則縮小並儲存副本為 JPG",
- "Use original name for output filename during batch process in extras tab": "在後處理頁籤中的批量處理過程中使用原始名稱作為輸出檔案名",
+ "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 圖像大於 4MB 或寬高大於 4000,則縮小並儲存副本為 JPG 圖片",
+ "Use original name for output filename during batch process in extras tab": "在更多頁籤中的批量處理過程中,使用原始名稱作為輸出檔案名",
"When using 'Save' button, only save a single selected image": "使用「儲存」按鈕時,只儲存一個選定的圖像",
"Do not add watermark to images": "不要給圖像加浮水印",
"Paths for saving": "儲存路徑",
"Output directory for images; if empty, defaults to three directories below": "圖像的輸出目錄; 如果為空,則預設為以下三個目錄",
"Output directory for txt2img images": "文生圖的輸出目錄",
"Output directory for img2img images": "圖生圖的輸出目錄",
- "Output directory for images from extras tab": "後處理的輸出目錄",
- "Output directory for grids; if empty, defaults to two directories below": "概覽圖的輸出目錄; 如果為空,則預設為以下兩個目錄",
- "Output directory for txt2img grids": "文生圖概覽的輸出目錄",
- "Output directory for img2img grids": "圖生圖概覽的輸出目錄",
+ "Output directory for images from extras tab": "更多頁籤的輸出目錄",
+ "Output directory for grids; if empty, defaults to two directories below": "宮格圖的輸出目錄; 如果為空,則預設為以下兩個目錄",
+ "Output directory for txt2img grids": "文生圖宮格的輸出目錄",
+ "Output directory for img2img grids": "圖生圖宮格的輸出目錄",
"Directory for saving images using the Save button": "使用「儲存」按鈕儲存圖像的目錄",
"Saving to a directory": "儲存到目錄",
"Save images to a subdirectory": "將圖像儲存到子目錄",
- "Save grids to a subdirectory": "將概覽圖儲存到子目錄",
+ "Save grids to a subdirectory": "將宮格圖儲存到子目錄",
"When using \"Save\" button, save images to a subdirectory": "使用「儲存」按鈕時,將圖像儲存到子目錄",
"Directory name pattern": "目錄名稱格式",
"Max prompt words for [prompt_words] pattern": "[prompt_words] 格式的最大提示詞數量",
@@ -341,10 +397,10 @@
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "SwinIR 的圖塊重疊畫素。低值 = 可見接縫",
"LDSR processing steps. Lower = faster": "LDSR 處理疊代步數。更低 = 更快",
"Upscaler for img2img": "圖生圖的放大演算法",
- "Upscale latent space image when doing hires. fix": "做高解析度修復時也放大潛空間圖像",
+ "Upscale latent space image when doing hires. fix": "做高解析度修復時,也放大潛空間圖像",
"Face restoration": "面部修復",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 權重參數; 0 = 最大效果; 1 = 最小效果",
- "Move face restoration model from VRAM into RAM after processing": "面部修復處理完成後將面部修復模型從顯存(VRAM)移至內存(RAM)",
+ "Move face restoration model from VRAM into RAM after processing": "面部修復處理完成後,將面部修復模型從顯存(VRAM)移至內存(RAM)",
"System": "系統",
"VRAM usage polls per second during generation. Set to 0 to disable.": "生成圖像時每秒輪詢顯存(VRAM)使用情況的次數。設定為 0 以禁用",
"Always print all generation info to standard output": "始終將所有生成資訊輸出到 standard output (一般為控制台)",
@@ -352,50 +408,55 @@
"Training": "訓練",
"Unload VAE and CLIP from VRAM when training": "訓練時從顯存(VRAM)中取消 VAE 和 CLIP 的載入",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "訓練時將 VAE 和 CLIP 從顯存(VRAM)移放到內存(RAM),節省顯存(VRAM)",
+ "Move VAE and CLIP to RAM when training if possible. Saves VRAM.": "訓練時將 VAE 和 CLIP 從顯存(VRAM)移放到內存(RAM)如果可行的話,節省顯存(VRAM)",
"Filename word regex": "檔案名用詞的正則表達式",
"Filename join string": "檔案名連接用字串",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "每個 epoch 中單個輸入圖像的重複次數; 僅用於顯示 epoch 數",
- "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步儲存一個包含 loss 的CSV到日誌目錄,0 表示禁用",
+ "Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步儲存一個包含 loss 的CSV表格到日誌目錄,0 表示禁用",
+ "Use cross attention optimizations while training": "訓練時開啟 cross attention 最佳化",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "快取在內存(RAM)中的模型權重存檔點",
+ "SD VAE": "模型的VAE",
+ "auto": "自動",
"Hypernetwork strength": "Hypernetwork 強度",
+ "Inpainting conditioning mask strength": "局部重繪時圖像調節的蒙版屏蔽強度",
"Apply color correction to img2img results to match original colors.": "對圖生圖結果套用顏色校正以匹配原始顏色",
- "Save a copy of image before applying color correction to img2img results": "在對圖生圖結果套用顏色校正之前儲存圖像副本",
- "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在進行圖生圖的時候,確切地執行滑塊指定的疊代步數(正常情況下更弱的去噪需要更少的疊代步數)",
+ "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在進行圖生圖的時候,確切地執行滑塊指定的疊代步數(正常情況下更弱的重繪幅度需要更少的疊代步數)",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "在 K 採樣器中啟用量化以獲得更清晰,更清晰的結果。這可能會改變現有的隨機種子。需要重新啟動才能套用",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "強調符:使用 (文字) 使模型更關注該文本,使用 [文字] 使其減少關注",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用舊的強調符實作。可用於復現舊隨機種子",
- "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 採樣器批量生成與生成單個圖像時產出相同的圖像",
+ "Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 採樣器批量生成與生成單個圖像時,產出相同的圖像",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "當使用超過 75 個 token 時,通過從 n 個 token 中的最後一個逗號填補來提高一致性",
"Filter NSFW content": "過濾成人內容",
"Stop At last layers of CLIP model": "在 CLIP 模型的最後哪一層停下",
"Interrogate Options": "反推提示詞選項",
"Interrogate: keep models in VRAM": "反推: 將模型儲存在顯存(VRAM)中",
"Interrogate: use artists from artists.csv": "反推: 使用 artists.csv 中的藝術家",
- "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "反推: 在生成結果中包含與模型標籤相匹配的等級(對基於生成自然語言描述的反推沒有影響)",
+ "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "反推: 在生成結果中包含與模型標記相匹配的等級(對基於生成自然語言描述的反推沒有影響)",
"Interrogate: num_beams for BLIP": "反推: BLIP 的 num_beams",
- "Interrogate: minimum description length (excluding artists, etc..)": "反推: 最小描述長度(不包括藝術家, 等…)",
+ "Interrogate: minimum description length (excluding artists, etc..)": "反推: 最小描述長度(不包括藝術家,等…)",
"Interrogate: maximum description length": "反推: 最大描述長度",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: 文本檔案中的最大行數(0 = 無限制)",
"Interrogate: deepbooru score threshold": "反推: deepbooru 分數閾值",
"Interrogate: deepbooru sort alphabetically": "反推: deepbooru 按字母順序排序",
- "use spaces for tags in deepbooru": "在 deepbooru 中為標籤使用空格",
+ "use spaces for tags in deepbooru": "在 deepbooru 中為標記使用空格",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "在 deepbooru 中使用轉義 (\\) 括號(因此它們用作文字括號而不是強調符號)",
"User interface": "使用者介面",
"Show progressbar": "顯示進度列",
"Show image creation progress every N sampling steps. Set 0 to disable.": "每 N 個採樣疊代步數顯示圖像生成進度。設定 0 禁用",
- "Show previews of all images generated in a batch as a grid": "以網格的形式預覽所有批量生成出來的圖像",
- "Show grid in results for web": "在網頁的結果中顯示概覽圖",
+ "Show previews of all images generated in a batch as a grid": "以網格的形式,預覽批量生成的所有圖像",
+ "Show grid in results for web": "在網頁的結果中顯示宮格圖",
"Do not show any images in results for web": "不在網頁的結果中顯示任何圖像",
"Add model hash to generation information": "將模型的雜湊值加入到生成資訊",
"Add model name to generation information": "將模型名稱加入到生成資訊",
- "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "當從文本讀取生成參數到 UI(從 PNG 資訊或粘貼文本)時,不要更改選定的模型權重存檔點",
- "Font for image grids that have text": "有文字的概覽圖使用的字體",
+ "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "從文本讀取生成參數到使用者介面(從 PNG 圖片資訊或粘貼文本)時,不要更改選定的模型權重存檔點",
+ "Send seed when sending prompt or image to other interface": "將提示詞或者圖片發送到 >> 其他界面時,把隨機種子也傳送過去",
+ "Font for image grids that have text": "有文字的宮格圖使用的字體",
"Enable full page image viewer": "啟用整頁圖像檢視器",
- "Show images zoomed in by default in full page image viewer": "在整頁圖像檢視器中預設放大顯示圖像",
+ "Show images zoomed in by default in full page image viewer": "在整頁圖像檢視器中,預設放大顯示圖像",
"Show generation progress in window title.": "在視窗標題中顯示生成進度",
"Quicksettings list": "快速設定列表",
- "Localization (requires restart)": "本地化(需要重新啟動)",
+ "Localization (requires restart)": "本地化翻譯(需要儲存設定,並重啟Gradio)",
"Sampler parameters": "採樣器參數",
"Hide samplers in user interface (requires restart)": "在使用者介面中隱藏採樣器(需要重新啟動)",
"eta (noise multiplier) for DDIM": "DDIM 的 eta (噪聲乘數)",
@@ -406,9 +467,9 @@
"sigma churn": "sigma churn",
"sigma tmin": "最小(tmin) sigma",
"sigma noise": "sigma 噪聲",
- "Eta noise seed delta": "Eta 噪聲種子偏移(noise seed delta)",
+ "Eta noise seed delta": "Eta 噪聲種子偏移(ENSD)",
"Images Browser": "圖庫瀏覽器",
- "Preload images at startup": "在啟動時預載圖像",
+ "Preload images at startup": "在啟動時預加載圖像",
"Number of columns on the page": "每頁列數",
"Number of rows on the page": "每頁行數",
"Number of pictures displayed on each page": "每頁顯示的圖像數量",
@@ -418,24 +479,36 @@
"Use same seed for all images": "為所有圖像使用同一個隨機種子",
"Request browser notifications": "請求瀏覽器通知",
"Download localization template": "下載本地化模板",
- "Reload custom script bodies (No ui updates, No restart)": "重新載入自訂指令碼主體(無UI更新,無重啟)",
+ "Reload custom script bodies (No ui updates, No restart)": "重新載入自訂指令碼主體(無使用者介面更新,無重啟)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重啟 Gradio 及刷新組件(僅限自訂指令碼,ui.py,JS 和 CSS)",
+ "Available": "可用",
+ "Install from URL": "從網址安裝",
+ "Apply and restart UI": "應用並重啟使用者介面",
+ "Check for updates": "檢查更新",
+ "Extension": "擴充",
+ "URL": "網址",
+ "Update": "更新",
+ "unknown": "未知",
+ "Load from:": "載入自",
+ "Extension index URL": "擴充清單連結",
+ "URL for extension's git repository": "擴充的 git 倉庫連結",
+ "Local directory name": "本地路徑名",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示詞(按 Ctrl+Enter 或 Alt+Enter 生成)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示詞(按 Ctrl+Enter 或 Alt+Enter 生成)",
"Add a random artist to the prompt.": "隨機加入一個藝術家到提示詞中",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "從提示詞中讀取生成參數,如果提示詞為空,則讀取上一次的生成參數到使用者介面",
"Save style": "存儲為模板風格",
"Apply selected styles to current prompt": "將所選樣式套用於當前提示",
- "Stop processing current image and continue processing.": "停止處理當前圖像並繼續處理下一個",
- "Stop processing images and return any results accumulated so far.": "停止處理圖像並返回迄今為止累積的任何結果",
+ "Stop processing current image and continue processing.": "停止處理當前圖像,並繼續處理下一個",
+ "Stop processing images and return any results accumulated so far.": "停止處理圖像,並返回迄今為止累積的任何結果",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要套用的模版風格; 模版風格包含正向和反向提示詞,並套用於兩者",
"Do not do anything special": "什麼都不做",
"Which algorithm to use to produce the image": "使用哪種演算法生成圖像",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 非常有創意,可以根據疊代步數獲得完全不同的圖像,將疊代步數設定為高於 30-40 不會有正面作用",
- "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅長內補繪製",
+ "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅長局部重繪",
"Produce an image that can be tiled.": "生成可用於平舖的圖像",
- "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用兩步處理的時候以較小的解析度生成初步圖像,接著放大圖像,然後在不更改構圖的情況下改進其中的細節",
- "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "決定演算法對圖像內容的影響程度。設定 0 時,什麼都不會改變,而在 1 時,你將獲得不相關的圖像。值低於 1.0 時,處理的疊代步數將少於「採樣疊代步數」滑塊指定的步數",
+ "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用兩步處理的時候,以較小的解析度生成初步圖像,接著放大圖像,然後在不更改構圖的情況下改進其中的細節",
+ "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "決定演算法對圖像內容的影響程度。設定 0 時,什麼都不會改變,而在 1 時,你將獲得不相關的圖像。\n值低於 1.0 時,處理的疊代步數將少於「採樣疊代步數」滑塊指定的步數",
"How many batches of images to create": "建立多少批次的圖像",
"How many image to create in a single batch": "每批建立多少圖像",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - 圖像應在多大程度上服從提示詞 - 較低的值會產生更有創意的結果",
@@ -448,20 +521,20 @@
"This text is used to rotate the feature space of the imgs embs": "此文本用於旋轉圖集 embeddings 的特徵空間",
"Separate values for X axis using commas.": "使用逗號分隔 X 軸的值",
"Separate values for Y axis using commas.": "使用逗號分隔 Y 軸的值",
- "Write image to a directory (default - log/images) and generation parameters into csv file.": "將圖像寫入目錄(預設 — log/images)並將生成參數寫入CSV檔案",
+ "Write image to a directory (default - log/images) and generation parameters into csv file.": "將圖像寫入目錄(預設 — log/images)並將生成參數寫入CSV表格檔案",
"Open images output directory": "打開圖像輸出目錄",
"How much to blur the mask before processing, in pixels.": "處理前要對蒙版進行多強的模糊,以畫素為單位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 處理蒙版區域之前要在蒙版區域內放置什麼",
- "fill it with colors of the image": "用圖像的顏色填充它",
- "keep whatever was there originally": "保留原來的内容",
+ "fill it with colors of the image": "用圖像的顏色(高強度模糊)填充它",
+ "keep whatever was there originally": "保留原來的圖像,不進行預處理",
"fill it with latent space noise": "用潛空間的噪聲填充它",
"fill it with latent space zeroes": "用潛空間的零填充它",
- "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "將蒙版區域放大到目標解析度,做內補繪製,縮小後粘貼到原始圖像中",
+ "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "將蒙版區域(包括預留畫素長度的緩衝區域)放大到目標解析度,進行局部重繪。\n然後縮小並粘貼回原始圖像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "將圖像大小調整為目標解析度。除非高度和寬度匹配,否則你將獲得不正確的縱橫比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "調整圖像大小,使整個目標解析度都被圖像填充。裁剪多出來的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "調整圖像大小,使整個圖像在目標解析度內。用圖像的顏色填充空白區域",
"How many times to repeat processing an image and using it as input for the next iteration": "重複處理圖像並用作下次疊代輸入的次數",
- "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每個循環中,去噪強度都會乘以該值。<1 表示減少多樣性,因此你的這一組圖將集中在固定的圖像上。>1 意味著增加多樣性,因此你的這一組圖將變得越來越混亂",
+ "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每個循環中,重繪幅度都會乘以該值。<1 表示減少多樣性,因此你的這一組圖將集中在固定的圖像上。>1 意味著增加多樣性,因此你的這一組圖將變得越來越混亂",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "使用 SD 放大時,圖塊之間應該有多少畫素重疊。圖塊之間需要重疊才可以讓它們在合併回一張圖像時,沒有清晰可見的接縫",
"A directory on the same machine where the server is running.": "與伺服器主機上的目錄",
"Leave blank to save images to the default path.": "留空以將圖像儲存到預設路徑",
@@ -472,17 +545,54 @@
"Path to directory with input images": "帶有輸入圖像的路徑",
"Path to directory where to write outputs": "進行輸出的路徑",
"Input images directory": "輸入圖像目錄",
- "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下標籤定義如何選擇圖像的檔案名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 預設請留空",
+ "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "使用以下標記定義如何選擇圖像的檔案名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; 預設請留空",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "如果啟用此選項,浮水印將不會加入到生成出來的圖像中。警告:如果你不加入浮水印,你的行為可能是不符合道德操守的",
- "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下標籤定義如何選擇圖像和概覽圖的子目錄: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 預設請留空",
+ "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "使用以下標記定義如何選擇圖像和宮格圖的子目錄: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; 預設請留空",
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神經網路修復低品質面部",
- "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正則表達式將用於從檔案名中提取單詞,並將使用以下選項將它們接合到用於訓練的標籤文本中。留空以保持檔案名文本不變",
+ "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正則表達式將用於從檔案名中提取單詞,並將使用以下選項將它們接合到用於訓練的標記文本中。留空以保持檔案名文本不變",
"This string will be used to join split words into a single line if the option above is enabled.": "如果啟用了上述選項,則此處的字元會用於將拆分的單詞接合為同一行",
- "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "設定名稱列表,以逗號分隔,設定應轉到頂部的快速存取列,而不是通常的設定頁籤。有關設定名稱,請參見 modules/shared.py。需要重新啟動才能套用",
+ "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "僅適用於局部重繪專用的模型(模型後綴為 inpainting.ckpt 的模型)。決定了蒙版在局部重繪以及圖生圖中屏蔽原圖內容的強度。 1.0 表示完全屏蔽原圖,這是預設行為。 0.0 表示完全不屏蔽讓原圖進行圖像調節。較低的值將有助於保持原圖的整體構圖,但很難遇到較大的變化",
+ "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "設定項名稱列表,以逗號分隔,該設定會移動到頂部的快速存取列,而不是預設的設定頁籤。有關設定名稱,請參見 modules/shared.py。需要重新啟動才能套用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果這個值不為零,它將被加入到隨機種子中,並在使用帶有 Eta 的採樣器時用於初始化隨機噪聲。你可以使用它來產生更多的圖像變化,或者你可以使用它來模仿其他軟體生成的圖像,如果你知道你在做什麼",
+ "Leave empty for auto": "留空時自動生成",
+ "Autocomplete options": "自動補全選項",
"Enable Autocomplete": "開啟Tag補全",
+ "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "選擇哪些 Real-ESRGAN 模型顯示在網頁使用者介面。(需要重新啟動)",
"Allowed categories for random artists selection when using the Roll button": "使用抽選藝術家按鈕時將會隨機的藝術家類別",
+ "Append commas": "附加逗號",
"Roll three": "抽三位出來",
- "Generate forever": "不停地生成",
- "Cancel generate forever": "取消不停地生成"
+ "Generate forever": "無限生成",
+ "Cancel generate forever": "停止無限生成",
+ "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results": "疊代改進生成的圖像多少次;更高的值需要更長的時間;非常低的值會產生不好的結果",
+ "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt": "在圖像上畫一個蒙版,指令碼會根據提示重新生成蒙版區域的內容",
+ "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back": "正常放大圖像,將結果分割成圖塊,用圖生圖改進每個圖塊,最後將整個圖像合併回來",
+ "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "創建一個網格,圖像將有不同的參數。使用下面的輸入來指定哪些參數將由列和行共享",
+ "Run Python code. Advanced user only. Must run program with --allow-code for this to work": "執行 Python 程式碼。僅限老手使用。必須以 --allow-code 來開啟程式,才能使其執行",
+ "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others": "以逗號分割的單詞列表,第一個單詞將被用作關鍵詞:指令碼將在提示詞中搜尋這個單詞,並用其他單詞替換它",
+ "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order": "以逗號分割的單詞列表,指令碼會排列出這些單詞的所有排列方式,並加入提示詞各生成一次",
+ "Reconstruct prompt from existing image and put it into the prompt field.": "從現有的圖像中重構出提示詞,並將其放入提示詞的輸入文字方塊",
+ "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle": "設定在[prompt_words]選項中要使用的最大字數;注意:如果字數太長,可能會超過系統可處理的檔案路徑的最大長度",
+ "Process an image, use it as an input, repeat.": "處理一張圖像,將其作為輸入,並重複",
+ "Insert selected styles into prompt fields": "在提示詞中插入選定的模版風格",
+ "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.": "將當前的提示詞儲存為模版風格。如果你在文本中加入{prompt}標記,那麼將來你使用該模版風格時,你現有的提示詞會替換模版風格中的{prompt}",
+ "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在生成圖像之前從模型權重存檔點中載入權重。你可以使用哈希值或檔案名的一部分(如設定中所示)作為模型權重存檔點名稱。建議用在Y軸上以減少過程中模型的切換",
+ "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).": "Torch active: 在生成過程中,Torch使用的顯存(VRAM)峰值,不包括快取的數據。\nTorch reserved: Torch 分配的顯存(VRAM)的峰值量,包括所有活動和快取數據。\nSys VRAM: 所有應用程式分配的顯存(VRAM)的峰值量 / GPU 的總顯存(VRAM)(峰值利用率%)",
+ "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.": "放大潛空間中的圖像。而另一種方法是,從潛變量表達中直接解碼並生成完整的圖像,接著放大它,然後再將其編碼回潛空間",
+ "Start drawing": "開始繪製",
+ "Description": "描述",
+ "Action": "行動",
+ "Aesthetic Gradients": "美術風格",
+ "aesthetic-gradients": "美術風格",
+ "stable-diffusion-webui-wildcards": "萬用字元",
+ "Dynamic Prompts": "動態提示",
+ "images-browser": "圖庫瀏覽器",
+ "Inspiration": "靈感",
+ "Deforum": "Deforum",
+ "Artists to study": "藝術家圖庫",
+ "Aesthetic Image Scorer": "美術風格評分",
+ "Dataset Tag Editor": "數據集標記編輯器",
+ "Face restoration model": "面部修復模型",
+ "Install": "安裝",
+ "Installing...": "安裝中…",
+ "Installed": "已安裝"
}
diff --git a/modules/api/api.py b/modules/api/api.py
index 71c9c160..a49f3755 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -2,14 +2,17 @@ import base64
import io
import time
import uvicorn
-from gradio.processing_utils import decode_base64_to_file, decode_base64_to_image
-from fastapi import APIRouter, Depends, HTTPException
+from threading import Lock
+from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
+from fastapi import APIRouter, Depends, FastAPI, HTTPException
import modules.shared as shared
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.sd_samplers import all_samplers, sample_to_image, samples_to_image_grid
+from modules.sd_samplers import all_samplers
from modules.extras import run_extras, run_pnginfo
-
+from modules.sd_models import checkpoints_list
+from modules.realesrgan_model import get_realesrgan_models
+from typing import List
def upscaler_to_index(name: str):
try:
@@ -37,7 +40,7 @@ def encode_pil_to_base64(image):
class Api:
- def __init__(self, app, queue_lock):
+ def __init__(self, app: FastAPI, queue_lock: Lock):
self.router = APIRouter()
self.app = app
self.queue_lock = queue_lock
@@ -48,6 +51,18 @@ class Api:
self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
self.app.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
+ self.app.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel)
+ self.app.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
+ self.app.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel)
+ self.app.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem])
+ self.app.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem])
+ self.app.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem])
+ self.app.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem])
+ self.app.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
+ self.app.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
+ self.app.add_api_route("/sdapi/v1/prompt-styles", self.get_promp_styles, methods=["GET"], response_model=List[PromptStyleItem])
+ self.app.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str])
+ self.app.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem])
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
sampler_index = sampler_to_index(txt2imgreq.sampler_index)
@@ -190,6 +205,66 @@ class Api:
shared.state.interrupt()
return {}
+
+ def get_config(self):
+ options = {}
+ for key in shared.opts.data.keys():
+ metadata = shared.opts.data_labels.get(key)
+ if(metadata is not None):
+ options.update({key: shared.opts.data.get(key, shared.opts.data_labels.get(key).default)})
+ else:
+ options.update({key: shared.opts.data.get(key, None)})
+
+ return options
+
+ def set_config(self, req: OptionsModel):
+ reqDict = vars(req)
+ for o in reqDict:
+ setattr(shared.opts, o, reqDict[o])
+
+ shared.opts.save(shared.config_filename)
+ return
+
+ def get_cmd_flags(self):
+ return vars(shared.cmd_opts)
+
+ def get_samplers(self):
+ return [{"name":sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in all_samplers]
+
+ def get_upscalers(self):
+ upscalers = []
+
+ for upscaler in shared.sd_upscalers:
+ u = upscaler.scaler
+ upscalers.append({"name":u.name, "model_name":u.model_name, "model_path":u.model_path, "model_url":u.model_url})
+
+ return upscalers
+
+ def get_sd_models(self):
+ return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": x.config} for x in checkpoints_list.values()]
+
+ def get_hypernetworks(self):
+ return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
+
+ def get_face_restorers(self):
+ return [{"name":x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers]
+
+ def get_realesrgan_models(self):
+ return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)]
+
+ def get_promp_styles(self):
+ styleList = []
+ for k in shared.prompt_styles.styles:
+ style = shared.prompt_styles.styles[k]
+            styleList.append({"name":style[0], "prompt": style[1], "negative_prompt": style[2]})
+
+ return styleList
+
+ def get_artists_categories(self):
+ return shared.artist_db.cats
+
+ def get_artists(self):
+ return [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists]
def launch(self, server_name, port):
self.app.include_router(self.router)
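
The routes registered above expose read-only metadata (samplers, upscalers, checkpoints, hypernetworks, face restorers, Real-ESRGAN models, prompt styles, artist categories and artists) alongside GET/POST /sdapi/v1/options and GET /sdapi/v1/cmd-flags. A minimal client sketch, assuming a local instance started with the API enabled on the default port 7860 and the requests package (both are illustrative assumptions, not part of this commit):

    import requests

    BASE = "http://127.0.0.1:7860"

    # List available samplers: each item carries name, aliases and options.
    samplers = requests.get(f"{BASE}/sdapi/v1/samplers").json()
    print([s["name"] for s in samplers])

    # Read the full options object and the parsed command-line flags.
    options = requests.get(f"{BASE}/sdapi/v1/options").json()
    flags = requests.get(f"{BASE}/sdapi/v1/cmd-flags").json()
    print(options.get("samples_format"), flags.get("listen"))
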
diff --git a/modules/api/models.py b/modules/api/models.py
index 9ee42a17..8933e183 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,11 +1,10 @@
import inspect
-from click import prompt
from pydantic import BaseModel, Field, create_model
-from typing import Any, Optional
+from typing import Any, Optional, Union
from typing_extensions import Literal
from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
-from modules.shared import sd_upscalers
+from modules.shared import sd_upscalers, opts, parser
API_NOT_ALLOWED = [
"self",
@@ -131,6 +130,7 @@ class ExtrasBaseRequest(BaseModel):
upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
+ upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?")
class ExtraBaseResponse(BaseModel):
html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")
@@ -165,3 +165,68 @@ class ProgressResponse(BaseModel):
eta_relative: float = Field(title="ETA in secs")
state: dict = Field(title="State", description="The current state snapshot")
current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
+
+fields = {}
+for key, value in opts.data.items():
+ metadata = opts.data_labels.get(key)
+ optType = opts.typemap.get(type(value), type(value))
+
+ if (metadata is not None):
+ fields.update({key: (Optional[optType], Field(
+ default=metadata.default ,description=metadata.label))})
+ else:
+ fields.update({key: (Optional[optType], Field())})
+
+OptionsModel = create_model("Options", **fields)
+
+flags = {}
+_options = vars(parser)['_option_string_actions']
+for key in _options:
+ if(_options[key].dest != 'help'):
+ flag = _options[key]
+ _type = str
+ if(_options[key].default != None): _type = type(_options[key].default)
+ flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))})
+
+FlagsModel = create_model("Flags", **flags)
+
+class SamplerItem(BaseModel):
+ name: str = Field(title="Name")
+ aliases: list[str] = Field(title="Aliases")
+ options: dict[str, str] = Field(title="Options")
+
+class UpscalerItem(BaseModel):
+ name: str = Field(title="Name")
+ model_name: str | None = Field(title="Model Name")
+ model_path: str | None = Field(title="Path")
+ model_url: str | None = Field(title="URL")
+
+class SDModelItem(BaseModel):
+ title: str = Field(title="Title")
+ model_name: str = Field(title="Model Name")
+ hash: str = Field(title="Hash")
+ filename: str = Field(title="Filename")
+ config: str = Field(title="Config file")
+
+class HypernetworkItem(BaseModel):
+ name: str = Field(title="Name")
+ path: str | None = Field(title="Path")
+
+class FaceRestorerItem(BaseModel):
+ name: str = Field(title="Name")
+ cmd_dir: str | None = Field(title="Path")
+
+class RealesrganItem(BaseModel):
+ name: str = Field(title="Name")
+ path: str | None = Field(title="Path")
+ scale: int | None = Field(title="Scale")
+
+class PromptStyleItem(BaseModel):
+ name: str = Field(title="Name")
+ prompt: str | None = Field(title="Prompt")
+ negative_prompt: str | None = Field(title="Negative Prompt")
+
+class ArtistItem(BaseModel):
+ name: str = Field(title="Name")
+ score: float = Field(title="Score")
+ category: str = Field(title="Category") \ No newline at end of file
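
OptionsModel and FlagsModel above are not written out by hand; they are generated with pydantic's create_model from shared.opts.data_labels and the argparse parser. A self-contained sketch of that pattern, using a small stand-in settings dict instead of the real webui options:

    from typing import Optional
    from pydantic import Field, create_model

    # Illustrative settings; the real code iterates shared.opts.data / data_labels.
    settings = {"samples_format": "png", "jpeg_quality": 80, "do_not_add_watermark": False}

    fields = {
        key: (Optional[type(value)], Field(default=value, description=key))
        for key, value in settings.items()
    }
    ExampleOptions = create_model("ExampleOptions", **fields)

    print(ExampleOptions().dict())                     # defaults for every field
    print(ExampleOptions(jpeg_quality=95).jpeg_quality)
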
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index a11e01d6..6e1a10cf 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -35,7 +35,8 @@ class HypernetworkModule(torch.nn.Module):
}
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
- def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False):
+ def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
+ add_layer_norm=False, use_dropout=False, activate_output=False, last_layer_dropout=True):
super().__init__()
assert layer_structure is not None, "layer_structure must not be None"
@@ -48,8 +49,8 @@ class HypernetworkModule(torch.nn.Module):
# Add a fully-connected layer
linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
- # Add an activation func
- if activation_func == "linear" or activation_func is None:
+ # Add an activation func except last layer
+ if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output):
pass
elif activation_func in self.activation_dict:
linears.append(self.activation_dict[activation_func]())
@@ -60,8 +61,8 @@ class HypernetworkModule(torch.nn.Module):
if add_layer_norm:
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
- # Add dropout expect last layer
- if use_dropout and i < len(layer_structure) - 3:
+ # Add dropout except last layer
+ if use_dropout and (i < len(layer_structure) - 3 or last_layer_dropout and i < len(layer_structure) - 2):
linears.append(torch.nn.Dropout(p=0.3))
self.linear = torch.nn.Sequential(*linears)
@@ -75,7 +76,7 @@ class HypernetworkModule(torch.nn.Module):
w, b = layer.weight.data, layer.bias.data
if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
normal_(w, mean=0.0, std=0.01)
- normal_(b, mean=0.0, std=0.005)
+ normal_(b, mean=0.0, std=0)
elif weight_init == 'XavierUniform':
xavier_uniform_(w)
zeros_(b)
@@ -127,7 +128,7 @@ class Hypernetwork:
filename = None
name = None
- def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
+ def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs):
self.filename = None
self.name = name
self.layers = {}
@@ -139,11 +140,15 @@ class Hypernetwork:
self.weight_init = weight_init
self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout
+ self.activate_output = activate_output
+ self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True
for size in enable_sizes or []:
self.layers[size] = (
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
- HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
)
def weights(self):
@@ -171,7 +176,9 @@ class Hypernetwork:
state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
-
+ state_dict['activate_output'] = self.activate_output
+ state_dict['last_layer_dropout'] = self.last_layer_dropout
+
torch.save(state_dict, filename)
def load(self, filename):
@@ -191,12 +198,17 @@ class Hypernetwork:
print(f"Layer norm is set to {self.add_layer_norm}")
self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}" )
+ self.activate_output = state_dict.get('activate_output', True)
+ print(f"Activate last layer is set to {self.activate_output}")
+ self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
for size, sd in state_dict.items():
if type(size) == int:
self.layers[size] = (
- HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
- HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+ HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
+ HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
+ self.add_layer_norm, self.use_dropout, self.activate_output, last_layer_dropout=self.last_layer_dropout),
)
self.name = state_dict.get('name', self.name)
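
The hypernetwork changes add two switches: activate_output (when False, the activation function is skipped on the output layer) and last_layer_dropout (whether dropout is still applied before that output layer). A toy walk-through mirroring the conditions above, assuming a layer_structure of [1, 2, 1] and a non-linear activation:

    # Illustrative only: prints which extra modules each Linear layer would receive.
    layer_structure = [1, 2, 1]
    activate_output = False
    use_dropout = True
    last_layer_dropout = True

    for i in range(len(layer_structure) - 1):
        print(f"Linear {layer_structure[i]}x -> {layer_structure[i+1]}x")
        # Activation on every layer except the last, unless activate_output is set.
        if not (i >= len(layer_structure) - 2 and not activate_output):
            print("  + activation")
        # Dropout skips the output layer; last_layer_dropout keeps it on the layer before it.
        if use_dropout and (i < len(layer_structure) - 3 or last_layer_dropout and i < len(layer_structure) - 2):
            print("  + Dropout(p=0.3)")
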
diff --git a/modules/masking.py b/modules/masking.py
index fd8d9241..a5c4d2da 100644
--- a/modules/masking.py
+++ b/modules/masking.py
@@ -49,7 +49,7 @@ def expand_crop_region(crop_region, processing_width, processing_height, image_w
ratio_processing = processing_width / processing_height
if ratio_crop_region > ratio_processing:
- desired_height = (x2 - x1) * ratio_processing
+ desired_height = (x2 - x1) / ratio_processing
desired_height_diff = int(desired_height - (y2-y1))
y1 -= desired_height_diff//2
y2 += desired_height_diff - desired_height_diff//2
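
The masking.py change is a one-character aspect-ratio fix: when the crop region is proportionally wider than the processing resolution, its height has to grow to width / ratio_processing rather than width * ratio_processing. A worked check with illustrative numbers:

    processing_width, processing_height = 512, 768            # target ratio 2:3
    x1, x2, y1, y2 = 0, 600, 0, 300                           # crop is 600x300, wider than 2:3

    ratio_processing = processing_width / processing_height   # 0.666...
    desired_height = (x2 - x1) / ratio_processing             # 600 / 0.666... = 900
    print(desired_height)  # 900.0 -> a 600x900 crop matches the 2:3 target
    # The old multiplication gave 600 * 0.666... = 400, which does not.
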
diff --git a/modules/processing.py b/modules/processing.py
index 3a364b5f..7a2fc218 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -134,11 +134,7 @@ class StableDiffusionProcessing():
# Dummy zero conditioning if we're not using inpainting model.
# Still takes up a bit of memory, but no encoder call.
# Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
- return torch.zeros(
- x.shape[0], 5, 1, 1,
- dtype=x.dtype,
- device=x.device
- )
+ return x.new_zeros(x.shape[0], 5, 1, 1)
height = height or self.height
width = width or self.width
@@ -156,11 +152,7 @@ class StableDiffusionProcessing():
def img2img_image_conditioning(self, source_image, latent_image, image_mask = None):
if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
# Dummy zero conditioning if we're not using inpainting model.
- return torch.zeros(
- latent_image.shape[0], 5, 1, 1,
- dtype=latent_image.dtype,
- device=latent_image.device
- )
+ return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
# Handle the different mask inputs
if image_mask is not None:
@@ -174,11 +166,11 @@ class StableDiffusionProcessing():
# Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
conditioning_mask = torch.round(conditioning_mask)
else:
- conditioning_mask = torch.ones(1, 1, *source_image.shape[-2:])
+ conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])
# Create another latent image, this time with a masked version of the original input.
# Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
- conditioning_mask = conditioning_mask.to(source_image.device)
+ conditioning_mask = conditioning_mask.to(source_image.device).to(source_image.dtype)
conditioning_image = torch.lerp(
source_image,
source_image * (1.0 - conditioning_mask),
@@ -426,13 +418,13 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
try:
for k, v in p.override_settings.items():
- opts.data[k] = v # we don't call onchange for simplicity which makes changing model, hypernet impossible
+ setattr(opts, k, v) # we don't call onchange for simplicity which makes changing model, hypernet impossible
res = process_images_inner(p)
finally:
for k, v in stored_opts.items():
- opts.data[k] = v
+ setattr(opts, k, v)
return res
@@ -673,10 +665,17 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
if opts.use_scale_latent_for_hires_fix:
- samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
-
for i in range(samples.shape[0]):
save_intermediate(samples, i)
+
+ samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+
+ # Avoid making the inpainting conditioning unless necessary as
+ # this does need some extra compute to decode / encode the image again.
+ if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
+ image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
+ else:
+ image_conditioning = self.txt2img_image_conditioning(samples)
else:
decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
@@ -700,14 +699,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+ image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)
+
shared.state.nextjob()
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
- image_conditioning = self.txt2img_image_conditioning(x)
-
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
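
Two patterns recur in the processing.py hunk: dummy conditioning tensors are now built with new_zeros/new_ones so they inherit dtype and device from the source tensor (which matters for fp16 latents), and override_settings now goes through setattr(opts, ...) so the new guards in Options.__setattr__ apply. A tiny sketch of the tensor part, assuming only torch:

    import torch

    x = torch.randn(2, 4, 64, 64).half()                 # stand-in for a half-precision latent
    cond = x.new_zeros(x.shape[0], 5, 1, 1)              # same dtype and device as x
    print(cond.dtype, cond.device, tuple(cond.shape))    # torch.float16 cpu (2, 5, 1, 1)
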
diff --git a/modules/scripts.py b/modules/scripts.py
index 533db45c..28ce07f4 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -18,6 +18,9 @@ class Script:
args_to = None
alwayson = False
+ """A gr.Group component that has all script's UI inside it"""
+ group = None
+
infotext_fields = None
"""if set in ui(), this is a list of pairs of gradio component + text; the text will be used when
parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example
@@ -218,8 +221,6 @@ class ScriptRunner:
for control in controls:
control.custom_script_source = os.path.basename(script.filename)
- if not script.alwayson:
- control.visible = False
if script.infotext_fields is not None:
self.infotext_fields += script.infotext_fields
@@ -229,40 +230,41 @@ class ScriptRunner:
script.args_to = len(inputs)
for script in self.alwayson_scripts:
- with gr.Group():
+ with gr.Group() as group:
create_script_ui(script, inputs, inputs_alwayson)
+ script.group = group
+
dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index")
dropdown.save_to_config = True
inputs[0] = dropdown
for script in self.selectable_scripts:
- create_script_ui(script, inputs, inputs_alwayson)
+ with gr.Group(visible=False) as group:
+ create_script_ui(script, inputs, inputs_alwayson)
+
+ script.group = group
def select_script(script_index):
- if 0 < script_index <= len(self.selectable_scripts):
- script = self.selectable_scripts[script_index-1]
- args_from = script.args_from
- args_to = script.args_to
- else:
- args_from = 0
- args_to = 0
+ selected_script = self.selectable_scripts[script_index - 1] if script_index>0 else None
- return [ui.gr_show(True if i == 0 else args_from <= i < args_to or is_alwayson) for i, is_alwayson in enumerate(inputs_alwayson)]
+ return [gr.update(visible=selected_script == s) for s in self.selectable_scripts]
def init_field(title):
+ """called when an initial value is set from ui-config.json to show script's UI components"""
+
if title == 'None':
return
+
script_index = self.titles.index(title)
- script = self.selectable_scripts[script_index]
- for i in range(script.args_from, script.args_to):
- inputs[i].visible = True
+ self.selectable_scripts[script_index].group.visible = True
dropdown.init_field = init_field
+
dropdown.change(
fn=select_script,
inputs=[dropdown],
- outputs=inputs
+ outputs=[script.group for script in self.selectable_scripts]
)
return inputs
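
In scripts.py each script's controls now live inside a gr.Group, and select_script returns one gr.update(visible=...) per group instead of toggling individual components. A stripped-down sketch of that pattern, assuming gradio and made-up component names:

    import gradio as gr

    with gr.Blocks() as demo:
        dropdown = gr.Dropdown(choices=["None", "A", "B"], value="None", label="Script")
        with gr.Group(visible=False) as group_a:
            gr.Slider(label="Option for A")
        with gr.Group(visible=False) as group_b:
            gr.Textbox(label="Option for B")

        def select_script(name):
            # One visibility update per group, in the same order as the outputs list.
            return [gr.update(visible=name == "A"), gr.update(visible=name == "B")]

        dropdown.change(fn=select_script, inputs=[dropdown], outputs=[group_a, group_b])

    # demo.launch()
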
diff --git a/modules/sd_models.py b/modules/sd_models.py
index cf7b79ce..63e07a12 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -198,8 +198,9 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
model.first_stage_model.to(devices.dtype_vae)
else:
- vae_name = sd_vae.get_filename(vae_file)
- print(f"Loading weights [{sd_model_hash}] with {vae_name} VAE from cache")
+ vae_name = sd_vae.get_filename(vae_file) if vae_file else None
+ vae_message = f" with {vae_name} VAE" if vae_name else ""
+ print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
model.load_state_dict(checkpoints_loaded[checkpoint_info])
if shared.opts.sd_checkpoint_cache > 0:
diff --git a/modules/shared.py b/modules/shared.py
index d8e99f85..0a39cdf2 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -44,6 +44,7 @@ parser.add_argument("--precision", type=str, help="evaluate at this precision",
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
+parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
@@ -99,7 +100,7 @@ restricted_opts = {
"outdir_save",
}
-cmd_opts.disable_extension_access = cmd_opts.share or cmd_opts.listen
+cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen) and not cmd_opts.enable_insecure_extension_access
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
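The new flag is a plain override of the existing exposure check; sketched outside the diff for clarity (the function name is illustrative, not the repo's):

    # mirrors the gating line above: exposing the UI blocks the extensions tab
    # unless --enable-insecure-extension-access is passed explicitly
    def extension_access_disabled(share, listen, enable_insecure):
        return (share or listen) and not enable_insecure

    assert extension_access_disabled(True, False, False)        # --share alone: blocked
    assert extension_access_disabled(False, True, False)        # --listen alone: blocked
    assert not extension_access_disabled(True, False, True)     # opt-in flag restores access
    assert not extension_access_disabled(False, False, False)   # local-only: never blocked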
@@ -396,6 +397,15 @@ class Options:
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data or key in self.data_labels:
+ assert not cmd_opts.freeze_settings, "changing settings is disabled"
+
+ comp_args = opts.data_labels[key].component_args
+ if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+ if cmd_opts.hide_ui_dir_config and key in restricted_opts:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
self.data[key] = value
return
@@ -412,6 +422,8 @@ class Options:
return super(Options, self).__getattribute__(item)
def save(self, filename):
+ assert not cmd_opts.freeze_settings, "saving settings is disabled"
+
with open(filename, "w", encoding="utf8") as file:
json.dump(self.data, file, indent=4)
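Moving the checks into Options.__setattr__ means every assignment path (UI handlers, the API, quicksettings) hits the same policy. A simplified sketch of the idea, not the repo's full Options class:

    class Options:
        def __init__(self, labels, freeze=False, restricted=()):
            # write internal state through __dict__ so it bypasses __setattr__
            self.__dict__["data"] = {}
            self.__dict__["labels"] = set(labels)
            self.__dict__["freeze"] = freeze
            self.__dict__["restricted"] = set(restricted)

        def __setattr__(self, key, value):
            if key in self.labels:
                assert not self.freeze, "changing settings is disabled"
                if key in self.restricted:
                    raise RuntimeError(f"not possible to set {key} because it is restricted")
                self.data[key] = value
                return
            super().__setattr__(key, value)

    opts = Options(labels={"send_seed", "outdir_save"}, restricted={"outdir_save"})
    opts.send_seed = False       # allowed, stored in opts.data
    # opts.outdir_save = "/tmp"  # would raise RuntimeError: restricted

The real class additionally consults each setting's component_args so that options whose UI component is hidden are refused, as the hunk above shows.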
diff --git a/modules/ui.py b/modules/ui.py
index 2609857e..3ac7540c 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1052,6 +1052,8 @@ def create_ui(wrap_gradio_gpu_call):
extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.")
show_extras_results = gr.Checkbox(label='Show result images', value=True)
+ submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
+
with gr.Tabs(elem_id="extras_resize_mode"):
with gr.TabItem('Scale by'):
upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4)
@@ -1079,8 +1081,6 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Group():
upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False)
- submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
-
result_images, html_info_x, html_info = create_output_panel("extras", opts.outdir_extras_samples)
submit.click(
@@ -1182,8 +1182,8 @@ def create_ui(wrap_gradio_gpu_call):
new_hypernetwork_name = gr.Textbox(label="Name")
new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"])
new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'")
- new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork", choices=modules.hypernetworks.ui.keys)
- new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])
+ new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys)
+ new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])
new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization")
new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout")
overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork")
@@ -1438,25 +1438,16 @@ def create_ui(wrap_gradio_gpu_call):
def run_settings(*args):
changed = 0
- assert not shared.cmd_opts.freeze_settings, "changing settings is disabled"
-
for key, value, comp in zip(opts.data_labels.keys(), args, components):
- if comp != dummy_component and not opts.same_type(value, opts.data_labels[key].default):
- return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}", opts.dumpjson()
+ assert comp == dummy_component or opts.same_type(value, opts.data_labels[key].default), f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
for key, value, comp in zip(opts.data_labels.keys(), args, components):
if comp == dummy_component:
continue
- comp_args = opts.data_labels[key].component_args
- if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
- continue
-
- if cmd_opts.hide_ui_dir_config and key in restricted_opts:
- continue
-
oldval = opts.data.get(key, None)
- opts.data[key] = value
+
+ setattr(opts, key, value)
if oldval != value:
if opts.data_labels[key].onchange is not None:
@@ -1466,20 +1457,18 @@ def create_ui(wrap_gradio_gpu_call):
opts.save(shared.config_filename)
- return f'{changed} settings changed.', opts.dumpjson()
+ return opts.dumpjson(), f'{changed} settings changed.'
def run_settings_single(value, key):
- assert not shared.cmd_opts.freeze_settings, "changing settings is disabled"
-
if not opts.same_type(value, opts.data_labels[key].default):
return gr.update(visible=True), opts.dumpjson()
oldval = opts.data.get(key, None)
- if cmd_opts.hide_ui_dir_config and key in restricted_opts:
+ try:
+ setattr(opts, key, value)
+ except Exception:
return gr.update(value=oldval), opts.dumpjson()
- opts.data[key] = value
-
if oldval != value:
if opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
@@ -1632,9 +1621,9 @@ def create_ui(wrap_gradio_gpu_call):
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
- fn=run_settings,
+ fn=wrap_gradio_call(run_settings, extra_outputs=[gr.update()]),
inputs=components,
- outputs=[result, text_settings],
+ outputs=[text_settings, result],
)
for i, k, item in quicksettings_list:
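With the policy centralised in Options.__setattr__, the settings handlers reduce to "assign and catch". A rough restatement of the run_settings_single flow, using the simplified opts object sketched earlier:

    def apply_setting(opts, key, value):
        # Options.__setattr__ raises if the key is frozen, hidden, or restricted
        oldval = opts.data.get(key, None)
        try:
            setattr(opts, key, value)
        except Exception:
            return oldval          # keep showing the previous value in the UI
        return value

The swapped output order in settings_submit.click ([text_settings, result] instead of [result, text_settings]) lines up with wrap_gradio_call, which on failure returns the extra_outputs followed by an error message, so result now receives the error text when run_settings raises.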
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index a9b10c09..22e7b77a 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -14,7 +14,7 @@ class Script(scripts.Script):
return cmd_opts.allow_code
def ui(self, is_img2img):
- code = gr.Textbox(label="Python code", visible=False, lines=1)
+ code = gr.Textbox(label="Python code", lines=1)
return [code]
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index 2afd4aa5..cf71cb92 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -132,7 +132,7 @@ class Script(scripts.Script):
info = gr.HTML("<p style=\"margin-bottom:0.75em\">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>")
pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, visible=False)
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8)
direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0)
color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05)
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index b0469110..ea45beb0 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -22,8 +22,8 @@ class Script(scripts.Script):
return None
pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
return [pixels, mask_blur, inpainting_fill, direction]
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index d187cd9c..3388bc77 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -83,13 +83,14 @@ def cmdargs(line):
def load_prompt_file(file):
- if (file is None):
+ if file is None:
lines = []
else:
lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")]
return None, "\n".join(lines), gr.update(lines=7)
+
class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
@@ -107,9 +108,9 @@ class Script(scripts.Script):
# We don't shrink back to 1, because that causes the control to ignore [enter], and it may
# be unclear to the user that shift-enter is needed.
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])
- return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt]
+ return [checkbox_iterate, checkbox_iterate_batch, prompt_txt]
- def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str):
+ def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str):
lines = [x.strip() for x in prompt_txt.splitlines()]
lines = [x for x in lines if len(x) > 0]
@@ -157,5 +158,4 @@ class Script(scripts.Script):
if checkbox_iterate:
p.seed = p.seed + (p.batch_size * p.n_iter)
-
-        return Processed(p, images, p.seed, "")
\ No newline at end of file
+ return Processed(p, images, p.seed, "")
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index cb37ff7e..01074291 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -18,8 +18,8 @@ class Script(scripts.Script):
def ui(self, is_img2img):
info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image to twice the dimensions; use width and height sliders to set tile size</p>")
- overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
- upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)
+ overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
+ upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
return [info, overlap, upscaler_index]
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index f5255786..417ed0d4 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -263,12 +263,12 @@ class Script(scripts.Script):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
with gr.Row():
- x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, visible=False, type="index", elem_id="x_type")
- x_values = gr.Textbox(label="X values", visible=False, lines=1)
+ x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type")
+ x_values = gr.Textbox(label="X values", lines=1)
with gr.Row():
- y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, visible=False, type="index", elem_id="y_type")
- y_values = gr.Textbox(label="Y values", visible=False, lines=1)
+ y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type")
+ y_values = gr.Textbox(label="Y values", lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
include_lone_images = gr.Checkbox(label='Include Separate Images', value=False)
diff --git a/test/utils_test.py b/test/utils_test.py
new file mode 100644
index 00000000..65d3d177
--- /dev/null
+++ b/test/utils_test.py
@@ -0,0 +1,63 @@
+import unittest
+import requests
+
+class UtilsTests(unittest.TestCase):
+ def setUp(self):
+ self.url_options = "http://localhost:7860/sdapi/v1/options"
+ self.url_cmd_flags = "http://localhost:7860/sdapi/v1/cmd-flags"
+ self.url_samplers = "http://localhost:7860/sdapi/v1/samplers"
+ self.url_upscalers = "http://localhost:7860/sdapi/v1/upscalers"
+ self.url_sd_models = "http://localhost:7860/sdapi/v1/sd-models"
+ self.url_hypernetworks = "http://localhost:7860/sdapi/v1/hypernetworks"
+ self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers"
+ self.url_realesrgan_models = "http://localhost:7860/sdapi/v1/realesrgan-models"
+ self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles"
+ self.url_artist_categories = "http://localhost:7860/sdapi/v1/artist-categories"
+ self.url_artists = "http://localhost:7860/sdapi/v1/artists"
+
+ def test_options_get(self):
+ self.assertEqual(requests.get(self.url_options).status_code, 200)
+
+ def test_options_write(self):
+ response = requests.get(self.url_options)
+ self.assertEqual(response.status_code, 200)
+
+ pre_value = response.json()["send_seed"]
+
+ self.assertEqual(requests.post(self.url_options, json={"send_seed":not pre_value}).status_code, 200)
+
+ response = requests.get(self.url_options)
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(response.json()["send_seed"], not pre_value)
+
+ requests.post(self.url_options, json={"send_seed": pre_value})
+
+ def test_cmd_flags(self):
+ self.assertEqual(requests.get(self.url_cmd_flags).status_code, 200)
+
+ def test_samplers(self):
+ self.assertEqual(requests.get(self.url_samplers).status_code, 200)
+
+ def test_upscalers(self):
+ self.assertEqual(requests.get(self.url_upscalers).status_code, 200)
+
+ def test_sd_models(self):
+ self.assertEqual(requests.get(self.url_sd_models).status_code, 200)
+
+ def test_hypernetworks(self):
+ self.assertEqual(requests.get(self.url_hypernetworks).status_code, 200)
+
+ def test_face_restorers(self):
+ self.assertEqual(requests.get(self.url_face_restorers).status_code, 200)
+
+ def test_realesrgan_models(self):
+ self.assertEqual(requests.get(self.url_realesrgan_models).status_code, 200)
+
+ def test_prompt_styles(self):
+ self.assertEqual(requests.get(self.url_prompt_styles).status_code, 200)
+
+ def test_artist_categories(self):
+ self.assertEqual(requests.get(self.url_artist_categories).status_code, 200)
+
+ def test_artists(self):
+        self.assertEqual(requests.get(self.url_artists).status_code, 200)
\ No newline at end of file
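A note on running the new checks: they assume a webui instance is already serving the API at http://localhost:7860 (for example, launched with --api). With that running, the standard library runner is enough:

    # discover and run test/utils_test.py against the live API
    import unittest

    suite = unittest.defaultTestLoader.discover("test", pattern="utils_test.py")
    unittest.TextTestRunner(verbosity=2).run(suite)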
diff --git a/webui.py b/webui.py
index 3b21c071..81df09dd 100644
--- a/webui.py
+++ b/webui.py
@@ -141,6 +141,12 @@ def webui():
# after initial launch, disable --autolaunch for subsequent restarts
cmd_opts.autolaunch = False
+ # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
+ # an attacker to trick the user into opening a malicious HTML page that makes requests to the
+ # running web ui and does whatever the attacker wants, including installing an extension and
+ # running its code. We disable this here. Suggested by RyotaK.
+ app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
+
app.add_middleware(GZipMiddleware, minimum_size=1000)
if launch_api:
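The same middleware-stripping trick applies to any FastAPI/Starlette app, since the final middleware stack is built from app.user_middleware when the server starts. A minimal sketch outside the webui's own startup code:

    from fastapi import FastAPI
    from fastapi.middleware.cors import CORSMiddleware
    from fastapi.middleware.gzip import GZipMiddleware

    app = FastAPI()
    app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])

    # drop the permissive CORS middleware again before the app starts serving requests
    app.user_middleware = [m for m in app.user_middleware if m.cls.__name__ != "CORSMiddleware"]

    app.add_middleware(GZipMiddleware, minimum_size=1000)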