about summary refs log tree commit diff
path: root/extensions-builtin
diff options
context:
space:
mode:
authorAarni Koskela <akx@iki.fi>2023-12-31 16:11:18 +0200
committerAarni Koskela <akx@iki.fi>2024-01-02 08:57:12 +0200
commitcf14a6a7aaf8ccb40552990785d5c9e400d93610 (patch)
treec7b86b0f44433edf8e9ca0c07f09090ae985cc88 /extensions-builtin
parent980970d39091e572500434c69660bc6eed22498d (diff)
Refactor upscale_2 helper out of ScuNET/SwinIR; make sure devices are right
Diffstat (limited to 'extensions-builtin')
-rw-r--r--extensions-builtin/ScuNET/scripts/scunet_model.py48
-rw-r--r--extensions-builtin/SwinIR/scripts/swinir_model.py62
2 files changed, 18 insertions, 92 deletions
diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py
index f799cb76..fe5e5a19 100644
--- a/extensions-builtin/ScuNET/scripts/scunet_model.py
+++ b/extensions-builtin/ScuNET/scripts/scunet_model.py
@@ -1,13 +1,9 @@
import sys
import PIL.Image
-import numpy as np
-import torch
import modules.upscaler
-from modules import devices, modelloader, script_callbacks, errors
-from modules.shared import opts
-from modules.upscaler_utils import tiled_upscale_2
+from modules import devices, errors, modelloader, script_callbacks, shared, upscaler_utils
class UpscalerScuNET(modules.upscaler.Upscaler):
@@ -40,46 +36,23 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
self.scalers = scalers
def do_upscale(self, img: PIL.Image.Image, selected_file):
-
devices.torch_gc()
-
try:
model = self.load_model(selected_file)
except Exception as e:
print(f"ScuNET: Unable to load model from {selected_file}: {e}", file=sys.stderr)
return img
- device = devices.get_device_for('scunet')
- tile = opts.SCUNET_tile
- h, w = img.height, img.width
- np_img = np.array(img)
- np_img = np_img[:, :, ::-1] # RGB to BGR
- np_img = np_img.transpose((2, 0, 1)) / 255 # HWC to CHW
- torch_img = torch.from_numpy(np_img).float().unsqueeze(0).to(device) # type: ignore
-
- if tile > h or tile > w:
- _img = torch.zeros(1, 3, max(h, tile), max(w, tile), dtype=torch_img.dtype, device=torch_img.device)
- _img[:, :, :h, :w] = torch_img # pad image
- torch_img = _img
-
- with torch.no_grad():
- torch_output = tiled_upscale_2(
- torch_img,
- model,
- tile_size=opts.SCUNET_tile,
- tile_overlap=opts.SCUNET_tile_overlap,
- scale=1,
- device=devices.get_device_for('scunet'),
- desc="ScuNET tiles",
- ).squeeze(0)
- torch_output = torch_output[:, :h * 1, :w * 1] # remove padding, if any
- np_output: np.ndarray = torch_output.float().cpu().clamp_(0, 1).numpy()
- del torch_img, torch_output
+ img = upscaler_utils.upscale_2(
+ img,
+ model,
+ tile_size=shared.opts.SCUNET_tile,
+ tile_overlap=shared.opts.SCUNET_tile_overlap,
+ scale=1, # ScuNET is a denoising model, not an upscaler
+ desc='ScuNET',
+ )
devices.torch_gc()
-
- output = np_output.transpose((1, 2, 0)) # CHW to HWC
- output = output[:, :, ::-1] # BGR to RGB
- return PIL.Image.fromarray((output * 255).astype(np.uint8))
+ return img
def load_model(self, path: str):
device = devices.get_device_for('scunet')
@@ -93,7 +66,6 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
def on_ui_settings():
import gradio as gr
- from modules import shared
shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling"))
shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam"))
diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py
index 8a555c79..bc427fea 100644
--- a/extensions-builtin/SwinIR/scripts/swinir_model.py
+++ b/extensions-builtin/SwinIR/scripts/swinir_model.py
@@ -1,14 +1,10 @@
import logging
import sys
-import numpy as np
-import torch
from PIL import Image
-from modules import modelloader, devices, script_callbacks, shared
-from modules.shared import opts
+from modules import devices, modelloader, script_callbacks, shared, upscaler_utils
from modules.upscaler import Upscaler, UpscalerData
-from modules.upscaler_utils import tiled_upscale_2
SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth"
@@ -36,9 +32,7 @@ class UpscalerSwinIR(Upscaler):
self.scalers = scalers
def do_upscale(self, img: Image.Image, model_file: str) -> Image.Image:
- current_config = (model_file, opts.SWIN_tile)
-
- device = self._get_device()
+ current_config = (model_file, shared.opts.SWIN_tile)
if self._cached_model_config == current_config:
model = self._cached_model
@@ -51,12 +45,13 @@ class UpscalerSwinIR(Upscaler):
self._cached_model = model
self._cached_model_config = current_config
- img = upscale(
+ img = upscaler_utils.upscale_2(
img,
model,
- tile=opts.SWIN_tile,
- tile_overlap=opts.SWIN_tile_overlap,
- device=device,
+ tile_size=shared.opts.SWIN_tile,
+ tile_overlap=shared.opts.SWIN_tile_overlap,
+ scale=4, # TODO: This was hard-coded before too...
+ desc="SwinIR",
)
devices.torch_gc()
return img
@@ -77,7 +72,7 @@ class UpscalerSwinIR(Upscaler):
dtype=devices.dtype,
expected_architecture="SwinIR",
)
- if getattr(opts, 'SWIN_torch_compile', False):
+ if getattr(shared.opts, 'SWIN_torch_compile', False):
try:
model_descriptor.model.compile()
except Exception:
@@ -88,47 +83,6 @@ class UpscalerSwinIR(Upscaler):
return devices.get_device_for('swinir')
-def upscale(
- img,
- model,
- *,
- tile: int,
- tile_overlap: int,
- window_size=8,
- scale=4,
- device,
-):
-
- img = np.array(img)
- img = img[:, :, ::-1]
- img = np.moveaxis(img, 2, 0) / 255
- img = torch.from_numpy(img).float()
- img = img.unsqueeze(0).to(device, dtype=devices.dtype)
- with torch.no_grad(), devices.autocast():
- _, _, h_old, w_old = img.size()
- h_pad = (h_old // window_size + 1) * window_size - h_old
- w_pad = (w_old // window_size + 1) * window_size - w_old
- img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, : h_old + h_pad, :]
- img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, : w_old + w_pad]
- output = tiled_upscale_2(
- img,
- model,
- tile_size=tile,
- tile_overlap=tile_overlap,
- scale=scale,
- device=device,
- desc="SwinIR tiles",
- )
- output = output[..., : h_old * scale, : w_old * scale]
- output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
- if output.ndim == 3:
- output = np.transpose(
- output[[2, 1, 0], :, :], (1, 2, 0)
- ) # CHW-RGB to HCW-BGR
- output = (output * 255.0).round().astype(np.uint8) # float32 to uint8
- return Image.fromarray(output, "RGB")
-
-
def on_ui_settings():
import gradio as gr