author     Takuma Mori <takuma104@gmail.com>  2023-01-21 17:42:04 +0900
committer  Takuma Mori <takuma104@gmail.com>  2023-01-21 17:42:04 +0900
commit     3262e825cc542ff634e6ba2e3a162eafdc6c1bba (patch)
tree       08f1be595157904a51ac206f5362ee4e0b51111d /modules/shared.py
parent     184e23eb89c198b42f351a4d5ff862ee64917619 (diff)
add --xformers-flash-attention option & impl
Diffstat (limited to 'modules/shared.py')
-rw-r--r--  modules/shared.py  1
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index 72fb1934..23328adf 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -57,6 +57,7 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
+parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization")