about summary refs log tree commit diff
diff options
context:
space:
mode:
author    d8ahazard <d8ahazard@gmail.com>  2022-09-26 10:27:18 -0500
committer d8ahazard <d8ahazard@gmail.com>  2022-09-26 10:27:18 -0500
commit    7d5c29b674bacc5654f8613af134632b7cbdb158 (patch)
tree      08d8d267adf313ea15fdcfeae67f45ff430d4520
parent    740070ea9cdb254209f66417418f2a4af8b099d6 (diff)
Cleanup existing directories, fixes
-rw-r--r--  .gitignore                    1
-rw-r--r--  modules/codeformer_model.py  15
-rw-r--r--  modules/esrgan_model.py       4
-rw-r--r--  modules/ldsr_model.py         2
-rw-r--r--  modules/modelloader.py       38
-rw-r--r--  webui.py                      2
6 files changed, 48 insertions, 14 deletions
diff --git a/.gitignore b/.gitignore
index 9d78853a..57fa2506 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@ __pycache__
/venv
/tmp
/model.ckpt
+/models/**/*
/models/*.ckpt
/GFPGANv1.3.pth
/gfpgan/weights/*.pth
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index dc0a5eee..efd881eb 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -5,14 +5,13 @@ import traceback
import cv2
import torch
+import modules.face_restoration
+import modules.shared
from modules import shared, devices, modelloader
from modules.paths import script_path, models_path
-import modules.shared
-import modules.face_restoration
-from importlib import reload
-# codeformer people made a choice to include modified basicsr library to their project, which makes
-# it utterly impossible to use it alongside other libraries that also use basicsr, like GFPGAN.
+# codeformer people made a choice to include modified basicsr library to their project which makes
+# it utterly impossible to use it alongside with other libraries that also use basicsr, like GFPGAN.
# I am making a choice to include some files from codeformer to work around this issue.
model_dir = "Codeformer"
model_path = os.path.join(models_path, model_dir)
@@ -31,11 +30,6 @@ def setup_model(dirname):
if path is None:
return
-
- # both GFPGAN and CodeFormer use bascisr, one has it installed from pip the other uses its own
- #stored_sys_path = sys.path
- #sys.path = [path] + sys.path
-
try:
from torchvision.transforms.functional import normalize
from modules.codeformer.codeformer_arch import CodeFormer
@@ -67,7 +61,6 @@ def setup_model(dirname):
print("Unable to load codeformer model.")
return None, None
net = net_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(devices.device_codeformer)
- ckpt_path = load_file_from_url(url=pretrain_model_url, model_dir=os.path.join(path, 'weights/CodeFormer'), progress=True)
checkpoint = torch.load(ckpt_path)['params_ema']
net.load_state_dict(checkpoint)
net.eval()
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index dd0ee629..5e10c49c 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -18,7 +18,7 @@ from modules.shared import opts
model_dir = "ESRGAN"
model_path = os.path.join(models_path, model_dir)
model_url = "https://drive.google.com/u/0/uc?id=1TPrz5QKd8DHHt1k8SRtm6tMiPjz_Qene&export=download"
-model_name = "ESRGAN_x4.pth"
+model_name = "ESRGAN_x4"
def load_model(path: str, name: str):
@@ -27,7 +27,7 @@ def load_model(path: str, name: str):
global model_dir
global model_name
if "http" in path:
- filename = load_file_from_url(url=model_url, model_dir=model_path, file_name=model_name, progress=True)
+ filename = load_file_from_url(url=model_url, model_dir=model_path, file_name="%s.pth" % model_name, progress=True)
else:
filename = path
if not os.path.exists(filename) or filename is None:
diff --git a/modules/ldsr_model.py b/modules/ldsr_model.py
index e6e7ff74..4f9b1657 100644
--- a/modules/ldsr_model.py
+++ b/modules/ldsr_model.py
@@ -19,7 +19,7 @@ have_ldsr = False
LDSR_obj = None
-class UpscalerLDSR(modules.images.Upscaler):
+class UpscalerLDSR(images.Upscaler):
def __init__(self, steps):
self.steps = steps
self.name = "LDSR"
diff --git a/modules/modelloader.py b/modules/modelloader.py
index d59fbe05..9520a681 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -1,8 +1,11 @@
import os
+import shutil
from urllib.parse import urlparse
from basicsr.utils.download_util import load_file_from_url
+from modules.paths import script_path, models_path
+
def load_models(model_path: str, model_url: str = None, command_path: str = None, dl_name: str = None, existing=None,
ext_filter=None) -> list:
@@ -63,3 +66,38 @@ def friendly_name(file: str):
model_name, extension = os.path.splitext(file)
model_name = model_name.replace("_", " ").title()
return model_name
+
+
+def cleanup_models():
+ root_path = script_path
+ src_path = os.path.join(root_path, "ESRGAN")
+ dest_path = os.path.join(models_path, "ESRGAN")
+ move_files(src_path, dest_path)
+ src_path = os.path.join(root_path, "gfpgan")
+ dest_path = os.path.join(models_path, "GFPGAN")
+ move_files(src_path, dest_path)
+ src_path = os.path.join(root_path, "SwinIR")
+ dest_path = os.path.join(models_path, "SwinIR")
+ move_files(src_path, dest_path)
+ src_path = os.path.join(root_path, "repositories/latent-diffusion/experiments/pretrained_models/")
+ dest_path = os.path.join(models_path, "LDSR")
+ move_files(src_path, dest_path)
+
+
+def move_files(src_path: str, dest_path: str):
+ try:
+ if not os.path.exists(dest_path):
+ os.makedirs(dest_path)
+ if os.path.exists(src_path):
+ for file in os.listdir(src_path):
+ if os.path.isfile(file):
+ fullpath = os.path.join(src_path, file)
+ print("Moving file: %s to %s" % (fullpath, dest_path))
+ try:
+ shutil.move(fullpath, dest_path)
+ except:
+ pass
+ print("Removing folder: %s" % src_path)
+ shutil.rmtree(src_path, True)
+ except:
+ pass \ No newline at end of file
diff --git a/webui.py b/webui.py
index 7e0b3296..e71a217c 100644
--- a/webui.py
+++ b/webui.py
@@ -18,9 +18,11 @@ import modules.shared as shared
import modules.swinir_model as swinir
import modules.txt2img
import modules.ui
+from modules import modelloader
from modules.paths import script_path
from modules.shared import cmd_opts
+modelloader.cleanup_models()
codeformer.setup_model(cmd_opts.codeformer_models_path)
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
shared.face_restorers.append(modules.face_restoration.FaceRestoration())