From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 08:43:42 +0300
Subject: imports cleanup for ruff

---
 extensions-builtin/Lora/lora.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'extensions-builtin/Lora')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index ba1293df..0ab43229 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -1,4 +1,3 @@
-import glob
 import os
 import re
 import torch
--
cgit v1.2.1


From 028d3f6425d85f122027c127fba8bcbf4f66ee75 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 11:05:02 +0300
Subject: ruff auto fixes

---
 extensions-builtin/Lora/lora.py                | 12 ++++++------
 extensions-builtin/Lora/scripts/lora_script.py |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'extensions-builtin/Lora')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 0ab43229..9795540f 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -172,7 +172,7 @@ def load_lora(name, filename):
         else:
             print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
             continue
-            assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
+            raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}")

         with torch.no_grad():
             module.weight.copy_(weight)
@@ -184,7 +184,7 @@ def load_lora(name, filename):
         elif lora_key == "lora_down.weight":
             lora_module.down = module
         else:
-            assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha'
+            raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")

     if len(keys_failed_to_match) > 0:
         print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")
@@ -202,7 +202,7 @@ def load_loras(names, multipliers=None):
         loaded_loras.clear()

     loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
-    if any([x is None for x in loras_on_disk]):
+    if any(x is None for x in loras_on_disk):
         list_available_loras()

         loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
@@ -309,7 +309,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu

             print(f'failed to calculate lora weights for layer {lora_layer_name}')

-    setattr(self, "lora_current_names", wanted_names)
+    self.lora_current_names = wanted_names


 def lora_forward(module, input, original_forward):
@@ -343,8 +343,8 @@ def lora_forward(module, input, original_forward):


 def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
-    setattr(self, "lora_current_names", ())
-    setattr(self, "lora_weights_backup", None)
+    self.lora_current_names = ()
+    self.lora_weights_backup = None


 def lora_Linear_forward(self, input):

diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 7db971fd..b70e2de7 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted)


 shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
-    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
+    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(lora.available_loras)}, refresh=lora.list_available_loras),
 }))
--
cgit v1.2.1
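A note on the autofix patterns above, with a minimal standalone sketch (illustrative, not part of the patch series): any() with a generator expression short-circuits without materializing a list, plain attribute assignment replaces setattr() with a constant name, and raise AssertionError(...) replaces `assert False, ...`, which the interpreter silently strips when run with python -O.

    # Sketch of the three ruff autofixes applied above (illustrative names).
    loras_on_disk = [None, "a.safetensors", "b.safetensors"]

    # Generator expression: any() stops at the first None instead of first
    # building the full list that [x is None for x in ...] would create.
    assert any(x is None for x in loras_on_disk)

    class Layer:
        pass

    layer = Layer()
    # Constant attribute names are assigned directly rather than via setattr().
    layer.lora_current_names = ()

    def check(module):
        if module is None:
            # Unlike `assert False, "..."`, this still fires under python -O.
            raise AssertionError("unsupported layer type")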
From a5121e7a0623db328a9462d340d389ed6737374a Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 11:37:18 +0300
Subject: fixes for B007

---
 extensions-builtin/Lora/lora.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 9795540f..7b56136f 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -418,7 +418,7 @@ def infotext_pasted(infotext, params):

     added = []

-    for k, v in params.items():
+    for k in params:
         if not k.startswith("AddNet Model "):
             continue

--
cgit v1.2.1


From 3ec7b705c78b7aca9569c92a419837352c7a4ec6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 10 May 2023 21:21:32 +0300
Subject: suggestions and fixes from the PR

---
 extensions-builtin/Lora/scripts/lora_script.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora')

diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index b70e2de7..13d297d7 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted)


 shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
-    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(lora.available_loras)}, refresh=lora.list_available_loras),
+    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras),
 }))
--
cgit v1.2.1


From 44c37f94e176667ccdfeb74916e4640fa9dc586d Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 18 May 2023 16:36:30 +0300
Subject: add messages about Loras that failed to load to UI

---
 extensions-builtin/Lora/lora.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora')

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index 1308c48b..fa57d466 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -3,7 +3,7 @@
 import re
 import torch
 from typing import Union

-from modules import shared, devices, sd_models, errors, scripts
+from modules import shared, devices, sd_models, errors, scripts, sd_hijack

 metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}

@@ -211,6 +211,8 @@ def load_loras(names, multipliers=None):

     loras_on_disk = [available_lora_aliases.get(name, None) for name in names]

+    failed_to_load_loras = []
+
     for i, name in enumerate(names):
         lora = already_loaded.get(name, None)

@@ -224,12 +226,16 @@ def load_loras(names, multipliers=None):
                 continue

         if lora is None:
+            failed_to_load_loras.append(name)
             print(f"Couldn't find Lora with name {name}")
             continue

         lora.multiplier = multipliers[i] if multipliers else 1.0
         loaded_loras.append(lora)

+    if len(failed_to_load_loras) > 0:
+        sd_hijack.model_hijack.comments.append("Failed to find Loras: " + ", ".join(failed_to_load_loras))
+

 def lora_calc_updown(lora, module, target):
     with torch.no_grad():
--
cgit v1.2.1
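Two small patterns above deserve a note. B007 flags loop variables that are never used, so `for k, v in params.items()` becomes `for k in params` when only the key matters. The failed-Lora change applies a collect-then-report pattern: failures accumulate during the loop and are surfaced once at the end. A minimal sketch under assumed names (`comments` stands in for sd_hijack.model_hijack.comments, which routes messages to the UI):

    # Collect-then-report, as in load_loras above; `comments` is a stand-in
    # for the webui's sd_hijack.model_hijack.comments list.
    comments = []

    def load_items(names, available):
        failed = []
        for name in names:                 # B007-clean: no unused loop variable
            if name not in available:
                failed.append(name)
                continue
            # ... load the item here ...
        if failed:
            comments.append("Failed to find Loras: " + ", ".join(failed))

    load_items(["good", "missing"], available={"good"})
    print(comments)  # ['Failed to find Loras: missing']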
From 4dd55591622019db050c2dde55a049c0d966fc3e Mon Sep 17 00:00:00 2001
From: ryankashi
Date: Thu, 18 May 2023 14:12:01 -0700
Subject: Added the refresh-loras post request

---
 extensions-builtin/Lora/scripts/lora_script.py | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'extensions-builtin/Lora')

diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 060bda05..0042cbec 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -76,6 +76,10 @@ def api_loras(_: gr.Blocks, app: FastAPI):
     @app.get("/sdapi/v1/loras")
     async def get_loras():
         return [create_lora_json(obj) for obj in lora.available_loras.values()]
+
+    @app.post("/sdapi/v1/refresh-loras")
+    async def refresh_loras():
+        return lora.list_available_loras()


 script_callbacks.on_app_started(api_loras)
--
cgit v1.2.1


From 2725dfd8a66decd1b70a415f96d386668d5659c3 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Fri, 19 May 2023 12:37:34 +0300
Subject: Fix ruff lint

---
 extensions-builtin/Lora/scripts/lora_script.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'extensions-builtin/Lora')

diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 5eafbe86..a6b340ee 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -76,7 +76,7 @@ def api_loras(_: gr.Blocks, app: FastAPI):
     @app.get("/sdapi/v1/loras")
     async def get_loras():
         return [create_lora_json(obj) for obj in lora.available_loras.values()]
-    
+
     @app.post("/sdapi/v1/refresh-loras")
     async def refresh_loras():
         return lora.list_available_loras()
--
cgit v1.2.1
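The new endpoint rescans the Lora directory on demand, so API clients no longer need to restart the webui after dropping a file into the models folder. A hedged usage sketch (the base URL assumes a local webui started with --api on the default port; the `name` field follows what create_lora_json exposes):

    # Refresh the on-disk Lora list, then enumerate it over the API.
    import requests

    base = "http://127.0.0.1:7860"

    requests.post(f"{base}/sdapi/v1/refresh-loras")           # rescan lora_dir
    for entry in requests.get(f"{base}/sdapi/v1/loras").json():
        print(entry["name"])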
From 39ec4f06ffb2c26e1298b2c5d80874dc3fd693ac Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 19 May 2023 22:59:29 +0300
Subject: calculate hashes for Lora

add lora hashes to infotext

when pasting infotext, use infotext's lora hashes to find local loras for
entries whose hashes match loras the user has

---
 extensions-builtin/Lora/extra_networks_lora.py    | 18 +++++++
 extensions-builtin/Lora/lora.py                   | 59 ++++++++++++++++++-----
 extensions-builtin/Lora/scripts/lora_script.py    | 32 +++++++++++-
 extensions-builtin/Lora/ui_extra_networks_lora.py |  5 +-
 4 files changed, 98 insertions(+), 16 deletions(-)

(limited to 'extensions-builtin/Lora')

diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index ccb249ac..b5fea4d2 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -23,5 +23,23 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):

         lora.load_loras(names, multipliers)

+        if shared.opts.lora_add_hashes_to_infotext:
+            lora_hashes = []
+            for item in lora.loaded_loras:
+                shorthash = item.lora_on_disk.shorthash
+                if not shorthash:
+                    continue
+
+                alias = item.mentioned_name
+                if not alias:
+                    continue
+
+                alias = alias.replace(":", "").replace(",", "")
+
+                lora_hashes.append(f"{alias}: {shorthash}")
+
+            if lora_hashes:
+                p.extra_generation_params["Lora hashes"] = ", ".join(lora_hashes)
+
     def deactivate(self, p):
         pass

diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index fa57d466..eec14712 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -3,7 +3,7 @@
 import re
 import torch
 from typing import Union

-from modules import shared, devices, sd_models, errors, scripts, sd_hijack
+from modules import shared, devices, sd_models, errors, scripts, sd_hijack, hashes

 metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}

@@ -76,9 +76,9 @@ class LoraOnDisk:
         self.name = name
         self.filename = filename
         self.metadata = {}
+        self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"

-        _, ext = os.path.splitext(filename)
-        if ext.lower() == ".safetensors":
+        if self.is_safetensors:
             try:
                 self.metadata = sd_models.read_metadata_from_safetensors(filename)
             except Exception as e:
@@ -94,14 +94,43 @@ class LoraOnDisk:
         self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None)  # those are cover images and they are too big to display in UI as text
         self.alias = self.metadata.get('ss_output_name', self.name)

+        self.hash = None
+        self.shorthash = None
+        self.set_hash(
+            self.metadata.get('sshs_model_hash') or
+            hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or
+            ''
+        )
+
+    def set_hash(self, v):
+        self.hash = v
+        self.shorthash = self.hash[0:12]
+
+        if self.shorthash:
+            available_lora_hash_lookup[self.shorthash] = self
+
+    def read_hash(self):
+        if not self.hash:
+            self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')
+
+    def get_alias(self):
+        if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in forbidden_lora_aliases:
+            return self.name
+        else:
+            return self.alias
+

 class LoraModule:
-    def __init__(self, name):
+    def __init__(self, name, lora_on_disk: LoraOnDisk):
         self.name = name
+        self.lora_on_disk = lora_on_disk
         self.multiplier = 1.0
         self.modules = {}
         self.mtime = None

+        self.mentioned_name = None
+        """the text that was used to add lora to prompt - can be either name or an alias"""
+

 class LoraUpDownModule:
     def __init__(self):
@@ -126,11 +155,11 @@ def assign_lora_names_to_compvis_modules(sd_model):
     sd_model.lora_layer_mapping = lora_layer_mapping


-def load_lora(name, filename):
-    lora = LoraModule(name)
-    lora.mtime = os.path.getmtime(filename)
+def load_lora(name, lora_on_disk):
+    lora = LoraModule(name, lora_on_disk)
+    lora.mtime = os.path.getmtime(lora_on_disk.filename)

-    sd = sd_models.read_state_dict(filename)
+    sd = sd_models.read_state_dict(lora_on_disk.filename)

     # this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0
     if not hasattr(shared.sd_model, 'lora_layer_mapping'):
@@ -191,7 +220,7 @@ def load_lora(name, lora_on_disk):
             raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")

     if len(keys_failed_to_match) > 0:
-        print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")
+        print(f"Failed to match keys when loading Lora {lora_on_disk.filename}: {keys_failed_to_match}")

     return lora

@@ -217,14 +246,19 @@ def load_loras(names, multipliers=None):
         lora = already_loaded.get(name, None)

         lora_on_disk = loras_on_disk[i]
+
         if lora_on_disk is not None:
             if lora is None or os.path.getmtime(lora_on_disk.filename) > lora.mtime:
                 try:
-                    lora = load_lora(name, lora_on_disk.filename)
+                    lora = load_lora(name, lora_on_disk)
                 except Exception as e:
                     errors.display(e, f"loading Lora {lora_on_disk.filename}")
                     continue

+            lora.mentioned_name = name
+
+            lora_on_disk.read_hash()
+
         if lora is None:
             failed_to_load_loras.append(name)
name {name}") @@ -403,7 +437,8 @@ def list_available_loras(): available_loras.clear() available_lora_aliases.clear() forbidden_lora_aliases.clear() - forbidden_lora_aliases.update({"none": 1}) + available_lora_hash_lookup.clear() + forbidden_lora_aliases.update({"none": 1, "Addams": 1}) os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True) @@ -457,8 +492,10 @@ def infotext_pasted(infotext, params): if added: params["Prompt"] += "\n" + "".join(added) + available_loras = {} available_lora_aliases = {} +available_lora_hash_lookup = {} forbidden_lora_aliases = {} loaded_loras = [] diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index a6b340ee..e650f469 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -1,3 +1,5 @@ +import re + import torch import gradio as gr from fastapi import FastAPI @@ -54,7 +56,8 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted) shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), { "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras), - "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}), + "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}), + "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"), })) @@ -84,3 +87,30 @@ def api_loras(_: gr.Blocks, app: FastAPI): script_callbacks.on_app_started(api_loras) +re_lora = re.compile("