author    AUTOMATIC1111 <16777216c@gmail.com>  2023-12-16 10:40:20 +0300
committer AUTOMATIC1111 <16777216c@gmail.com>  2023-12-16 10:40:20 +0300
commit    cd9ce2e31c4a264d7cde17c54d24f8ad94c9cf2c (patch)
tree      621f2cc61914d0aa09a38344b2574f9c13b46324 /modules
parent    c121f8c31587a21020e8670664977f6f76e68905 (diff)
Use radio for FP8 mode selection
Diffstat (limited to 'modules')
-rw-r--r--  modules/shared_options.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared_options.py b/modules/shared_options.py
index d470eb8f..fa542ba8 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -206,7 +206,7 @@ options_templates.update(options_section(('optimizations', "Optimizations", "sd"
"pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
"persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"),
"batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
- "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Dropdown, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."),
+ "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Radio, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."),
"cache_fp16_weight": OptionInfo(False, "Cache FP16 weight for LoRA").info("Cache fp16 weight when enabling FP8, will increase the quality of LoRA. Use more system ram."),
}))
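
The option itself is unchanged by this commit; only the Gradio component used to render it differs. With three short, mutually exclusive choices, gr.Radio shows every mode at a glance, while gr.Dropdown hides them behind an extra click. Below is a minimal standalone sketch of the two components side by side; it is plain Gradio, not webui code, and the output textbox and handler are illustrative assumptions.

import gradio as gr

# The three FP8 modes defined in modules/shared_options.py.
FP8_CHOICES = ["Disable", "Enable for SDXL", "Enable"]

with gr.Blocks() as demo:
    # After this commit: all three modes visible at once.
    fp8_radio = gr.Radio(choices=FP8_CHOICES, value="Disable",
                         label="FP8 weight (gr.Radio)")
    # Before this commit: same choices, hidden behind a dropdown click.
    fp8_dropdown = gr.Dropdown(choices=FP8_CHOICES, value="Disable",
                               label="FP8 weight (gr.Dropdown)")
    # Illustrative output box (an assumption, not part of the webui UI).
    selected = gr.Textbox(label="Selected FP8 mode")
    fp8_radio.change(lambda mode: mode, inputs=fp8_radio, outputs=selected)

if __name__ == "__main__":
    demo.launch()

As the one-line diff suggests, OptionInfo takes the component class and its constructor kwargs as separate arguments, so swapping gr.Dropdown for gr.Radio requires no other changes.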