author     AUTOMATIC1111 <16777216c@gmail.com>  2022-10-06 08:50:06 +0300
committer  GitHub <noreply@github.com>          2022-10-06 08:50:06 +0300
commit     0e92c36707a8a5bda03ddd7261c40f52cf1e6aee (patch)
tree       bbdad3261a6d6b273e9198b9d9b30d3bf4a65ce9 /modules/prompt_parser.py
parent     55400c981b7c1389482057a35ed6ea11f08da194 (diff)
parent     af02ee1297b212e053c0f44bd457e90b7ed49eb9 (diff)
Merge pull request #1755 from AUTOMATIC1111/use-typing-list
use typing.List in prompt_parser.py for wider Python version support
Diffstat (limited to 'modules/prompt_parser.py')
-rw-r--r--  modules/prompt_parser.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
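
Background for the PR title above, offered as a hedged sketch rather than repository code: inline annotations such as the ones added in this diff are evaluated when the enclosing def or class statement runs, so the built-in generic form list[...] (PEP 585, available only from Python 3.9) would raise a TypeError on the older 3.x interpreters the webui still supported, whereas typing.List can be subscripted on all of them. The Schedule class and reconstruct function below are hypothetical stand-ins for illustration, not names from prompt_parser.py:

# Illustrative sketch only (not from the repository): why typing.List
# rather than the built-in list[...] generic.
from typing import List


class Schedule:
    """Hypothetical stand-in for ScheduledPromptConditioning."""


def reconstruct(c: List[List[Schedule]]) -> int:
    # typing.List subscription works on every Python 3 version in use here.
    return len(c)


# The built-in generic form fails at definition time before Python 3.9:
#   def reconstruct(c: list[list[Schedule]]) -> int:
#       ...
#   TypeError: 'type' object is not subscriptable
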
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index ee4c5d02..a7a6aa31 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -1,6 +1,6 @@
 import re
 from collections import namedtuple
-
+from typing import List
 import lark
 
 # a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]"
@@ -175,15 +175,14 @@ def get_multicond_prompt_list(prompts):
 
 class ComposableScheduledPromptConditioning:
     def __init__(self, schedules, weight=1.0):
-        self.schedules = schedules  # : list[ScheduledPromptConditioning]
+        self.schedules: List[ScheduledPromptConditioning] = schedules
         self.weight: float = weight
 
 
 class MulticondLearnedConditioning:
     def __init__(self, shape, batch):
         self.shape: tuple = shape  # the shape field is needed to send this object to DDIM/PLMS
-        self.batch = batch  # : list[list[ComposableScheduledPromptConditioning]]
-
+        self.batch: List[List[ComposableScheduledPromptConditioning]] = batch
 
 def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning:
     """same as get_learned_conditioning, but returns a list of ScheduledPromptConditioning along with the weight objects for each prompt.
@@ -203,7 +202,7 @@ def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning:
     return MulticondLearnedConditioning(shape=(len(prompts),), batch=res)
 
 
-def reconstruct_cond_batch(c, current_step):  # c: list[list[ScheduledPromptConditioning]]
+def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step):
     param = c[0][0].cond
     res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
     for i, cond_schedule in enumerate(c):