From 9d40212485febe05a662dd0346e6def83e456288 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 13 Sep 2022 21:49:58 +0300 Subject: first attempt to produce correct seeds in batch --- modules/devices.py | 10 ++++++++++ modules/processing.py | 18 ++++++++++++++++-- modules/sd_samplers.py | 25 +++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 2 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index e4430e1a..07bb2339 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -48,3 +48,13 @@ def randn(seed, shape): torch.manual_seed(seed) return torch.randn(shape, device=device) + +def randn_without_seed(shape): + # PyTorch currently doesn't handle setting randomness correctly when the metal backend is used. + if device.type == 'mps': + generator = torch.Generator(device=cpu) + noise = torch.randn(shape, generator=generator, device=cpu).to(device) + return noise + + return torch.randn(shape, device=device) + diff --git a/modules/processing.py b/modules/processing.py index f33560ee..aab72903 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -119,8 +119,14 @@ def slerp(val, low, high): return res -def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0): +def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None): xs = [] + + if p is not None and p.sampler is not None and len(seeds) > 1: + sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))] + else: + sampler_noises = None + for i, seed in enumerate(seeds): noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8) @@ -155,9 +161,17 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w] noise = x + if sampler_noises is not None: + cnt = p.sampler.number_of_needed_noises(p) + for j in range(cnt): + sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape))) xs.append(noise) + + if sampler_noises is not None: + p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises] + x = torch.stack(xs).to(shared.device) return x @@ -254,7 +268,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: comments += model_hijack.comments # we manually generate all input noises because each one should have a specific seed - x = create_random_tensors([opt_C, p.height // opt_f, p.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w) + x = create_random_tensors([opt_C, p.height // opt_f, p.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p) if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 7ef507f1..f77fe43f 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -93,6 +93,10 @@ class VanillaStableDiffusionSampler: self.mask = None self.nmask = None self.init_latent = None + self.sampler_noises = None + + def number_of_needed_noises(self, p): + return 0 def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning): t_enc = int(min(p.denoising_strength, 0.999) * p.steps) @@ -171,16 +175,37 @@ def
extended_trange(count, *args, **kwargs): shared.total_tqdm.update() +original_randn_like = torch.randn_like + class KDiffusionSampler: def __init__(self, funcname, sd_model): self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model) self.funcname = funcname self.func = getattr(k_diffusion.sampling, self.funcname) self.model_wrap_cfg = CFGDenoiser(self.model_wrap) + self.sampler_noises = None + self.sampler_noise_index = 0 + + k_diffusion.sampling.torch.randn_like = self.randn_like def callback_state(self, d): store_latent(d["denoised"]) + def number_of_needed_noises(self, p): + return p.steps + + def randn_like(self, x): + noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None + + if noise is not None and x.shape == noise.shape: + res = noise + else: + print('generating') + res = original_randn_like(x) + + self.sampler_noise_index += 1 + return res + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning): t_enc = int(min(p.denoising_strength, 0.999) * p.steps) sigmas = self.model_wrap.get_sigmas(p.steps) -- cgit v1.2.1 From b44ddcb44398fbe922fd7515f66d8b0c2344bc54 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 08:51:21 +0300 Subject: Prompt editing only applies to images in first batch of desired batch size when batch count > 1 #535 --- modules/sd_samplers.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index df3a6fe8..5d95bfe0 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -115,6 +115,7 @@ class VanillaStableDiffusionSampler: self.mask = p.mask self.nmask = p.nmask self.init_latent = p.init_latent + self.step = 0 samples = self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning) @@ -127,6 +128,7 @@ class VanillaStableDiffusionSampler: self.mask = None self.nmask = None self.init_latent = None + self.step = 0 # existing code fails with cetin step counts, like 9 try: @@ -206,6 +208,7 @@ class KDiffusionSampler: self.model_wrap_cfg.mask = p.mask self.model_wrap_cfg.nmask = p.nmask self.model_wrap_cfg.init_latent = p.init_latent + self.model_wrap.step = 0 if hasattr(k_diffusion.sampling, 'trange'): k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(*args, **kwargs) @@ -216,6 +219,8 @@ class KDiffusionSampler: sigmas = self.model_wrap.get_sigmas(p.steps) x = x * sigmas[0] + self.model_wrap_cfg.step = 0 + if hasattr(k_diffusion.sampling, 'trange'): k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(*args, **kwargs) -- cgit v1.2.1 From 87e8b9a2ab3f033e7fdadbb2fe258857915980ac Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 09:47:03 +0300 Subject: prevent replacing torch_randn globally (instead replacing k_diffusion.sampling.torch) and add a setting to disable this all --- modules/processing.py | 2 +- modules/sd_samplers.py | 25 ++++++++++++++++++++----- modules/shared.py | 3 ++- 3 files changed, 23 insertions(+), 7 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index aab72903..5abdfd7c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -122,7 +122,7 @@ def slerp(val, low, high): def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None): xs = [] - if p is not None and p.sampler is not 
None and len(seeds) > 1: + if p is not None and p.sampler is not None and len(seeds) > 1 and opts.enable_batch_seeds: sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))] else: sampler_noises = None diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index f77fe43f..d478c5bc 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -175,7 +175,19 @@ def extended_trange(count, *args, **kwargs): shared.total_tqdm.update() -original_randn_like = torch.randn_like +class TorchHijack: + def __init__(self, kdiff_sampler): + self.kdiff_sampler = kdiff_sampler + + def __getattr__(self, item): + if item == 'randn_like': + return self.kdiff_sampler.randn_like + + if hasattr(torch, item): + return getattr(torch, item) + + raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item)) + class KDiffusionSampler: def __init__(self, funcname, sd_model): @@ -186,8 +198,6 @@ class KDiffusionSampler: self.sampler_noises = None self.sampler_noise_index = 0 - k_diffusion.sampling.torch.randn_like = self.randn_like - def callback_state(self, d): store_latent(d["denoised"]) @@ -200,8 +210,7 @@ class KDiffusionSampler: if noise is not None and x.shape == noise.shape: res = noise else: - print('generating') - res = original_randn_like(x) + res = torch.randn_like(x) self.sampler_noise_index += 1 return res @@ -223,6 +232,9 @@ class KDiffusionSampler: if hasattr(k_diffusion.sampling, 'trange'): k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(*args, **kwargs) + if self.sampler_noises is not None: + k_diffusion.sampling.torch = TorchHijack(self) + return self.func(self.model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state) def sample(self, p, x, conditioning, unconditional_conditioning): @@ -232,6 +244,9 @@ class KDiffusionSampler: if hasattr(k_diffusion.sampling, 'trange'): k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(*args, **kwargs) + if self.sampler_noises is not None: + k_diffusion.sampling.torch = TorchHijack(self) + samples_ddim = self.func(self.model_wrap_cfg, x, sigmas, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state) return samples_ddim diff --git a/modules/shared.py b/modules/shared.py index bc39ad1c..ac870ec4 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -124,7 +124,8 @@ class Options: "add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"), "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), "font": OptionInfo("", "Font for image grids that have text"), - "enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text text and [text] to make it pay less attention"), + "enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"), + "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), "ESRGAN_tile": OptionInfo(192, "Tile size for upscaling. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for upscaling. 
Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}), -- cgit v1.2.1 From b8cf2ea8ea50da7084061895e5af7b22c37633c0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 10:04:07 +0300 Subject: add a bit of a comment about what's being done with tensor noise --- modules/processing.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/processing.py b/modules/processing.py index 798313ee..81c83f06 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -122,6 +122,10 @@ def slerp(val, low, high): def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None): xs = [] + # if we have multiple seeds, this means we are working with batch size>1; this then + # enables the generation of additional tensors with noise that the sampler will use during its processing. + # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to + # produce the same images as with two batches [100], [101]. if p is not None and p.sampler is not None and len(seeds) > 1 and opts.enable_batch_seeds: sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))] else: sampler_noises = None -- cgit v1.2.1 From 2aec11d263e98580787bc3f3130a09ab2d1cdfc9 Mon Sep 17 00:00:00 2001 From: Elias Sundqvist Date: Fri, 16 Sep 2022 06:40:43 +0000 Subject: Add randomness and denoising strength support to alternative img2img --- scripts/img2imgalt.py | 38 ++++++++++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index 7813bbcc..6581eaad 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -76,10 +76,10 @@ class Script(scripts.Script): original_prompt = gr.Textbox(label="Original prompt", lines=1) cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0) st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50) + randomness = gr.Slider(label="randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0) + return [original_prompt, cfg, st, randomness] - - return [original_prompt, cfg, st] - - def run(self, p, original_prompt, cfg, st): + def run(self, p, original_prompt, cfg, st, randomness): p.batch_size = 1 p.batch_count = 1 @@ -90,18 +90,40 @@ class Script(scripts.Script): same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100 if same_everything: - noise = self.cache.noise + rec_noise = self.cache.noise else: shared.state.job_count += 1 cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt]) uncond = p.sd_model.get_learned_conditioning(p.batch_size * [""]) - noise = find_noise_for_image(p, cond, uncond, cfg, st) - self.cache = Cached(noise, cfg, st, lat, original_prompt) + rec_noise = find_noise_for_image(p, cond, uncond, cfg, st) + self.cache = Cached(rec_noise, cfg, st, lat, original_prompt) + rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], [p.seed + x + 1 for x in range(p.init_latent.shape[0])]) + + combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5) + sampler = samplers[p.sampler_index].constructor(p.sd_model) - samples_ddim = sampler.sample(p, noise, conditioning, unconditional_conditioning) - return samples_ddim + sigmas = sampler.model_wrap.get_sigmas(p.steps) + + t_enc = int(min(p.denoising_strength, 0.999) * p.steps) + +
noise_dt = combined_noise - ( p.init_latent / sigmas[0] ) + noise_dt = noise_dt * sigmas[p.steps - t_enc - 1] + + noise = p.init_latent + noise_dt + + sigma_sched = sigmas[p.steps - t_enc - 1:] + + sampler.model_wrap_cfg.mask = p.mask + sampler.model_wrap_cfg.nmask = p.nmask + sampler.model_wrap_cfg.init_latent = p.init_latent + + if hasattr(K.sampling, 'trange'): + K.sampling.trange = lambda *args, **kwargs: sd_samplers.extended_trange(*args, **kwargs) + + p.seed = p.seed + 1 + return sampler.func(sampler.model_wrap_cfg, noise, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=sampler.callback_state) p.sample = sample_extra -- cgit v1.2.1 From a441cd563aa04c8ebfc4d02a4a004f649ad44f71 Mon Sep 17 00:00:00 2001 From: Elias Sundqvist Date: Fri, 16 Sep 2022 06:40:43 +0000 Subject: Reduce code duplication --- scripts/img2imgalt.py | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index 6581eaad..dbda3255 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -105,25 +105,13 @@ class Script(scripts.Script): sampler = samplers[p.sampler_index].constructor(p.sd_model) sigmas = sampler.model_wrap.get_sigmas(p.steps) - - t_enc = int(min(p.denoising_strength, 0.999) * p.steps) noise_dt = combined_noise - ( p.init_latent / sigmas[0] ) - noise_dt = noise_dt * sigmas[p.steps - t_enc - 1] - - noise = p.init_latent + noise_dt - - sigma_sched = sigmas[p.steps - t_enc - 1:] - - sampler.model_wrap_cfg.mask = p.mask - sampler.model_wrap_cfg.nmask = p.nmask - sampler.model_wrap_cfg.init_latent = p.init_latent - - if hasattr(K.sampling, 'trange'): - K.sampling.trange = lambda *args, **kwargs: sd_samplers.extended_trange(*args, **kwargs) - + p.seed = p.seed + 1 - return sampler.func(sampler.model_wrap_cfg, noise, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=sampler.callback_state) + + return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning) + p.sample = sample_extra -- cgit v1.2.1 From d8b427f8aa787e2ee21a63c1bea5e0eabaaf4979 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 10:21:59 +0300 Subject: remove the warning at startup related to previous PR with batch processing --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index b6d5dcd8..738ac945 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -649,7 +649,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): image = gr.Image(label="Source", source="upload", interactive=True, type="pil") with gr.TabItem('Batch Process'): - image_batch = gr.File(label="Batch Process", file_count="multiple", source="upload", interactive=True, type="file") + image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file") upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2) -- cgit v1.2.1 From 2288bc96fdd3cef87497a320b56df62aa4052fc9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 12:43:24 +0300 Subject: fix extras tab showing original images instead of upscales --- modules/extras.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/extras.py b/modules/extras.py index ffae7d67..38d6ec48 100644 --- a/modules/extras.py +++ 
b/modules/extras.py @@ -36,6 +36,7 @@ def run_extras(image, image_folder, gfpgan_visibility, codeformer_visibility, co outpath = opts.outdir_samples or opts.outdir_extras_samples + outputs = [] for image in imageArr: existing_pnginfo = image.info or {} @@ -91,7 +92,9 @@ def run_extras(image, image_folder, gfpgan_visibility, codeformer_visibility, co images.save_image(image, path=outpath, basename="", seed=None, prompt=None, extension=opts.samples_format, info=info, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo) - return imageArr, plaintext_to_html(info), '' + outputs.append(image) + + return outputs, plaintext_to_html(info), '' def run_pnginfo(image): -- cgit v1.2.1 From e49b1c5d73ede818adb624590934f051b94493ac Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 13:38:02 +0300 Subject: an option to do exactly the amount of specified steps in img2img --- modules/sd_samplers.py | 26 +++++++++++++++++++------- modules/shared.py | 1 + 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 02ffce0e..1b3dc302 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -38,6 +38,17 @@ samplers = [ samplers_for_img2img = [x for x in samplers if x.name != 'PLMS'] +def setup_img2img_steps(p): + if opts.img2img_fix_steps: + steps = int(p.steps / min(p.denoising_strength, 0.999)) + t_enc = p.steps - 1 + else: + steps = p.steps + t_enc = int(min(p.denoising_strength, 0.999) * steps) + + return steps, t_enc + + def sample_to_image(samples): x_sample = shared.sd_model.decode_first_stage(samples[0:1].type(shared.sd_model.dtype))[0] x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) @@ -105,13 +116,13 @@ class VanillaStableDiffusionSampler: return res def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning): - t_enc = int(min(p.denoising_strength, 0.999) * p.steps) + steps, t_enc = setup_img2img_steps(p) # existing code fails with cetain step counts, like 9 try: - self.sampler.make_schedule(ddim_num_steps=p.steps, verbose=False) + self.sampler.make_schedule(ddim_num_steps=steps, verbose=False) except Exception: - self.sampler.make_schedule(ddim_num_steps=p.steps+1, verbose=False) + self.sampler.make_schedule(ddim_num_steps=steps+1, verbose=False) x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise) @@ -230,14 +241,15 @@ class KDiffusionSampler: return res def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning): - t_enc = int(min(p.denoising_strength, 0.999) * p.steps) - sigmas = self.model_wrap.get_sigmas(p.steps) + steps, t_enc = setup_img2img_steps(p) + + sigmas = self.model_wrap.get_sigmas(steps) - noise = noise * sigmas[p.steps - t_enc - 1] + noise = noise * sigmas[steps - t_enc - 1] xi = x + noise - sigma_sched = sigmas[p.steps - t_enc - 1:] + sigma_sched = sigmas[steps - t_enc - 1:] self.model_wrap_cfg.mask = p.mask self.model_wrap_cfg.nmask = p.nmask diff --git a/modules/shared.py b/modules/shared.py index fa6a0e99..da56b6ae 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -125,6 +125,7 @@ class Options: "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), "add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"), "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original 
colors."), + "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normaly you'd do less with less denoising)."), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."), "font": OptionInfo("", "Font for image grids that have text"), "enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"), -- cgit v1.2.1 From b64994b973ad8f4268bf785f25f92b66c8dced40 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 19:24:48 +0300 Subject: added original negative prompt to img2img alt --- scripts/img2imgalt.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index dbda3255..7f1f53a7 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -59,7 +59,7 @@ def find_noise_for_image(p, cond, uncond, cfg_scale, steps): return x / x.std() -Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt"]) +Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt"]) class Script(scripts.Script): @@ -74,19 +74,20 @@ class Script(scripts.Script): def ui(self, is_img2img): original_prompt = gr.Textbox(label="Original prompt", lines=1) + original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1) cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0) st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50) randomness = gr.Slider(label="randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0) - return [original_prompt, cfg, st, randomness] + return [original_prompt, original_negative_prompt, cfg, st, randomness] - def run(self, p, original_prompt, cfg, st, randomness): + def run(self, p, original_prompt, original_negative_prompt, cfg, st, randomness): p.batch_size = 1 p.batch_count = 1 def sample_extra(x, conditioning, unconditional_conditioning): lat = (p.init_latent.cpu().numpy() * 10).astype(int) - same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st and self.cache.original_prompt == original_prompt + same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st and self.cache.original_prompt == original_prompt and self.cache.original_negative_prompt == original_negative_prompt same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100 if same_everything: @@ -94,9 +95,9 @@ class Script(scripts.Script): else: shared.state.job_count += 1 cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt]) - uncond = p.sd_model.get_learned_conditioning(p.batch_size * [""]) + uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt]) rec_noise = find_noise_for_image(p, cond, uncond, cfg, st) - self.cache = Cached(rec_noise, cfg, st, lat, original_prompt) + self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt) rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], [p.seed + x + 1 for x in range(p.init_latent.shape[0])]) -- cgit v1.2.1 From 2ee9fc8eb84d5e1864dbabd8a8c6b279a6ae21ac Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 22:18:30 +0300 
Subject: new outpainting script --- scripts/outpainting_mk_2.py | 290 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 290 insertions(+) create mode 100644 scripts/outpainting_mk_2.py diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py new file mode 100644 index 00000000..a42c1aed --- /dev/null +++ b/scripts/outpainting_mk_2.py @@ -0,0 +1,290 @@ +import math + +import numpy as np +import skimage + +import modules.scripts as scripts +import gradio as gr +from PIL import Image, ImageDraw + +from modules import images, processing, devices +from modules.processing import Processed, process_images +from modules.shared import opts, cmd_opts, state + + +def expand(x, dir, amount, power=0.75): + is_left = dir == 3 + is_right = dir == 1 + is_up = dir == 0 + is_down = dir == 2 + + if is_left or is_right: + noise = np.zeros((x.shape[0], amount, 3), dtype=float) + indexes = np.random.random((x.shape[0], amount)) ** power * (1 - np.arange(amount) / amount) + if is_right: + indexes = 1 - indexes + indexes = (indexes * (x.shape[1] - 1)).astype(int) + + for row in range(x.shape[0]): + if is_left: + noise[row] = x[row][indexes[row]] + else: + noise[row] = np.flip(x[row][indexes[row]], axis=0) + + x = np.concatenate([noise, x] if is_left else [x, noise], axis=1) + return x + + if is_up or is_down: + noise = np.zeros((amount, x.shape[1], 3), dtype=float) + indexes = np.random.random((x.shape[1], amount)) ** power * (1 - np.arange(amount) / amount) + if is_down: + indexes = 1 - indexes + indexes = (indexes * x.shape[0] - 1).astype(int) + + for row in range(x.shape[1]): + if is_up: + noise[:, row] = x[:, row][indexes[row]] + else: + noise[:, row] = np.flip(x[:, row][indexes[row]], axis=0) + + x = np.concatenate([noise, x] if is_up else [x, noise], axis=0) + return x + + +def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.05): + # helper fft routines that keep ortho normalization and auto-shift before and after fft + def _fft2(data): + if data.ndim > 2: # has channels + out_fft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128) + for c in range(data.shape[2]): + c_data = data[:, :, c] + out_fft[:, :, c] = np.fft.fft2(np.fft.fftshift(c_data), norm="ortho") + out_fft[:, :, c] = np.fft.ifftshift(out_fft[:, :, c]) + else: # one channel + out_fft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128) + out_fft[:, :] = np.fft.fft2(np.fft.fftshift(data), norm="ortho") + out_fft[:, :] = np.fft.ifftshift(out_fft[:, :]) + + return out_fft + + def _ifft2(data): + if data.ndim > 2: # has channels + out_ifft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128) + for c in range(data.shape[2]): + c_data = data[:, :, c] + out_ifft[:, :, c] = np.fft.ifft2(np.fft.fftshift(c_data), norm="ortho") + out_ifft[:, :, c] = np.fft.ifftshift(out_ifft[:, :, c]) + else: # one channel + out_ifft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128) + out_ifft[:, :] = np.fft.ifft2(np.fft.fftshift(data), norm="ortho") + out_ifft[:, :] = np.fft.ifftshift(out_ifft[:, :]) + + return out_ifft + + def _get_gaussian_window(width, height, std=3.14, mode=0): + window_scale_x = float(width / min(width, height)) + window_scale_y = float(height / min(width, height)) + + window = np.zeros((width, height)) + x = (np.arange(width) / width * 2. - 1.) * window_scale_x + for y in range(height): + fy = (y / height * 2. - 1.) 
* window_scale_y + if mode == 0: + window[:, y] = np.exp(-(x ** 2 + fy ** 2) * std) + else: + window[:, y] = (1 / ((x ** 2 + 1.) * (fy ** 2 + 1.))) ** (std / 3.14) # hey wait a minute that's not gaussian + + return window + + def _get_masked_window_rgb(np_mask_grey, hardness=1.): + np_mask_rgb = np.zeros((np_mask_grey.shape[0], np_mask_grey.shape[1], 3)) + if hardness != 1.: + hardened = np_mask_grey[:] ** hardness + else: + hardened = np_mask_grey[:] + for c in range(3): + np_mask_rgb[:, :, c] = hardened[:] + return np_mask_rgb + + width = _np_src_image.shape[0] + height = _np_src_image.shape[1] + num_channels = _np_src_image.shape[2] + + np_src_image = _np_src_image[:] * (1. - np_mask_rgb) + np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.) + img_mask = np_mask_grey > 1e-6 + ref_mask = np_mask_grey < 1e-3 + + windowed_image = _np_src_image * (1. - _get_masked_window_rgb(np_mask_grey)) + windowed_image /= np.max(windowed_image) + windowed_image += np.average(_np_src_image) * np_mask_rgb # / (1.-np.average(np_mask_rgb)) # rather than leave the masked area black, we get better results from fft by filling the average unmasked color + + src_fft = _fft2(windowed_image) # get feature statistics from masked src img + src_dist = np.absolute(src_fft) + src_phase = src_fft / src_dist + + noise_window = _get_gaussian_window(width, height, mode=1) # start with simple gaussian noise + noise_rgb = np.random.random_sample((width, height, num_channels)) + noise_grey = (np.sum(noise_rgb, axis=2) / 3.) + noise_rgb *= color_variation # the colorfulness of the starting noise is blended to greyscale with a parameter + for c in range(num_channels): + noise_rgb[:, :, c] += (1. - color_variation) * noise_grey + + noise_fft = _fft2(noise_rgb) + for c in range(num_channels): + noise_fft[:, :, c] *= noise_window + noise_rgb = np.real(_ifft2(noise_fft)) + shaped_noise_fft = _fft2(noise_rgb) + shaped_noise_fft[:, :, :] = np.absolute(shaped_noise_fft[:, :, :]) ** 2 * (src_dist ** noise_q) * src_phase # perform the actual shaping + + brightness_variation = 0. # color_variation # todo: temporarily tieing brightness variation to color variation for now + contrast_adjusted_np_src = _np_src_image[:] * (brightness_variation + 1.) - brightness_variation * 2. + + # scikit-image is used for histogram matching, very convenient! + shaped_noise = np.real(_ifft2(shaped_noise_fft)) + shaped_noise -= np.min(shaped_noise) + shaped_noise /= np.max(shaped_noise) + shaped_noise[img_mask, :] = skimage.exposure.match_histograms(shaped_noise[img_mask, :] ** 1., contrast_adjusted_np_src[ref_mask, :], channel_axis=1) + shaped_noise = _np_src_image[:] * (1. - np_mask_rgb) + shaped_noise * np_mask_rgb + + matched_noise = shaped_noise[:] + + return np.clip(matched_noise, 0., 1.) + + + +class Script(scripts.Script): + def title(self): + return "Outpainting mk2" + + def show(self, is_img2img): + return is_img2img + + def ui(self, is_img2img): + if not is_img2img: + return None + + info = gr.HTML("
<p style=\"margin-bottom:0.75em\">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>
") + + pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128) + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, visible=False) + direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down']) + noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0) + color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05) + + return [info, pixels, mask_blur, direction, noise_q, color_variation] + + def run(self, p, _, pixels, mask_blur, direction, noise_q, color_variation): + initial_seed_and_info = [None, None] + + process_width = p.width + process_height = p.height + + p.mask_blur = mask_blur*4 + p.inpaint_full_res = False + p.inpainting_fill = 1 + p.do_not_save_samples = True + p.do_not_save_grid = True + + left = pixels if "left" in direction else 0 + right = pixels if "right" in direction else 0 + up = pixels if "up" in direction else 0 + down = pixels if "down" in direction else 0 + + init_img = p.init_images[0] + target_w = math.ceil((init_img.width + left + right) / 64) * 64 + target_h = math.ceil((init_img.height + up + down) / 64) * 64 + + if left > 0: + left = left * (target_w - init_img.width) // (left + right) + if right > 0: + right = target_w - init_img.width - left + + if up > 0: + up = up * (target_h - init_img.height) // (up + down) + + if down > 0: + down = target_h - init_img.height - up + + init_image = p.init_images[0] + + state.job_count = (1 if left > 0 else 0) + (1 if right > 0 else 0)+ (1 if up > 0 else 0)+ (1 if down > 0 else 0) + + def expand(init, expand_pixels, is_left=False, is_right=False, is_top=False, is_bottom=False): + is_horiz = is_left or is_right + is_vert = is_top or is_bottom + pixels_horiz = expand_pixels if is_horiz else 0 + pixels_vert = expand_pixels if is_vert else 0 + + img = Image.new("RGB", (init.width + pixels_horiz, init.height + pixels_vert)) + img.paste(init, (pixels_horiz if is_left else 0, pixels_vert if is_top else 0)) + mask = Image.new("RGB", (init.width + pixels_horiz, init.height + pixels_vert), "white") + draw = ImageDraw.Draw(mask) + draw.rectangle(( + expand_pixels + mask_blur if is_left else 0, + expand_pixels + mask_blur if is_top else 0, + mask.width - expand_pixels - mask_blur if is_right else mask.width, + mask.height - expand_pixels - mask_blur if is_bottom else mask.height, + ), fill="black") + + np_image = (np.asarray(img) / 255.0).astype(np.float64) + np_mask = (np.asarray(mask) / 255.0).astype(np.float64) + noised = get_matched_noise(np_image, np_mask, noise_q, color_variation) + out = Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB") + + target_width = min(process_width, init.width + pixels_horiz) if is_horiz else img.width + target_height = min(process_height, init.height + pixels_vert) if is_vert else img.height + + crop_region = ( + 0 if is_left else out.width - target_width, + 0 if is_top else out.height - target_height, + target_width if is_left else out.width, + target_height if is_top else out.height, + ) + + image_to_process = out.crop(crop_region) + mask = mask.crop(crop_region) + + p.width = target_width if is_horiz else img.width + p.height = target_height if is_vert else img.height + p.init_images = [image_to_process] + p.image_mask = mask + + latent_mask = Image.new("RGB", (p.width, p.height), "white") + draw = ImageDraw.Draw(latent_mask) + 
draw.rectangle(( + expand_pixels + mask_blur * 2 if is_left else 0, + expand_pixels + mask_blur * 2 if is_top else 0, + mask.width - expand_pixels - mask_blur * 2 if is_right else mask.width, + mask.height - expand_pixels - mask_blur * 2 if is_bottom else mask.height, + ), fill="black") + p.latent_mask = latent_mask + + proc = process_images(p) + proc_img = proc.images[0] + + if initial_seed_and_info[0] is None: + initial_seed_and_info[0] = proc.seed + initial_seed_and_info[1] = proc.info + + out.paste(proc_img, (0 if is_left else out.width - proc_img.width, 0 if is_top else out.height - proc_img.height)) + return out + + img = init_image + + if left > 0: + img = expand(img, left, is_left=True) + if right > 0: + img = expand(img, right, is_right=True) + if up > 0: + img = expand(img, up, is_top=True) + if down > 0: + img = expand(img, down, is_bottom=True) + + res = Processed(p, [img], initial_seed_and_info[0], initial_seed_and_info[1]) + + if opts.samples_save: + images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p) + + return res + -- cgit v1.2.1 From 449719b2fc9e8fd2e61f219e0979deb83c05177c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 16 Sep 2022 23:17:10 +0300 Subject: added Noise generation for outpainting mk2 to credits --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 84a78da5..6c232e38 100644 --- a/README.md +++ b/README.md @@ -81,6 +81,7 @@ The documentation was moved from this README over to the project's [wiki](https: - Ideas for optimizations - https://github.com/basujindal/stable-diffusion - Doggettx - Cross Attention layer optimization - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing. - Idea for SD upscale - https://github.com/jquesnelle/txt2imghd +- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot - CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. - (You) -- cgit v1.2.1 From de5bfdf9177d7035c76a890c241f8a5a32455cad Mon Sep 17 00:00:00 2001 From: JJ Date: Sat, 17 Sep 2022 06:48:22 +1000 Subject: image info tab * handles exceptions if jpeg jfif data not present * removes further non-comment related exif data. 
--- modules/extras.py | 7 ++++--- modules/ui.py | 4 ++-- webui.py | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index 38d6ec48..64b4f2b6 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -97,7 +97,7 @@ def run_extras(image, image_folder, gfpgan_visibility, codeformer_visibility, co return outputs, plaintext_to_html(info), '' -def run_pnginfo(image): +def run_image_info(image): items = image.info if "exif" in image.info: @@ -111,8 +111,9 @@ def run_pnginfo(image): items['exif comment'] = exif_comment - for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif']: - del items[field] + for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif', + 'loop', 'background', 'timestamp', 'duration']: + items.pop(field, None) info = '' diff --git a/modules/ui.py b/modules/ui.py index 738ac945..0899490f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -295,7 +295,7 @@ def create_toprow(is_img2img): return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, check_progress -def create_ui(txt2img, img2img, run_extras, run_pnginfo): +def create_ui(txt2img, img2img, run_extras, run_image_info): with gr.Blocks(analytics_enabled=False) as txt2img_interface: txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, check_progress = create_toprow(is_img2img=False) @@ -697,7 +697,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): submit.click(**extras_args) pnginfo_interface = gr.Interface( - wrap_gradio_call(run_pnginfo), + wrap_gradio_call(run_image_info), inputs=[ gr.Image(label="Source", source="upload", interactive=True, type="pil"), ], diff --git a/webui.py b/webui.py index add72123..cbfb62f0 100644 --- a/webui.py +++ b/webui.py @@ -121,7 +121,7 @@ def webui(): txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img), img2img=wrap_gradio_gpu_call(modules.img2img.img2img), run_extras=wrap_gradio_gpu_call(modules.extras.run_extras), - run_pnginfo=modules.extras.run_pnginfo + run_image_info=modules.extras.run_image_info ) demo.launch( -- cgit v1.2.1 From 6168d09218073c369fd08b7208f93805f624ec05 Mon Sep 17 00:00:00 2001 From: uservar <63248296+uservar@users.noreply.github.com> Date: Fri, 16 Sep 2022 19:07:14 +0000 Subject: Prevent uploading previous output from javascript As it is currently, txt2img and img2img send back the previous output args (txt2img_gallery, generation_info, html_info) whenever you generate a new image. This can lead to uploading a huge gallery of previously generated images, which leads to an unnecessary delay between submitting and beginning to generate. 
--- script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script.js b/script.js index 0852e421..8b10b53d 100644 --- a/script.js +++ b/script.js @@ -177,7 +177,7 @@ function submit(){ window.setTimeout(requestProgress, 500) res = [] - for(var i=0;i Date: Sat, 17 Sep 2022 08:03:47 +0300 Subject: add a comment and some checks for the functionality of the last PR --- script.js | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/script.js b/script.js index 8b10b53d..4a70e51d 100644 --- a/script.js +++ b/script.js @@ -177,9 +177,18 @@ function submit(){ window.setTimeout(requestProgress, 500) res = [] - for(var i=0;i Date: Fri, 16 Sep 2022 07:28:57 +1000 Subject: image.save parameter fix * image.save takes exif as a parameter * piexif takes the bytes as a parameter, not the exif_bytes function itself * reduce calls to create_exif_bytes --- modules/images.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/images.py b/modules/images.py index f37f5f08..8cd7fe37 100644 --- a/modules/images.py +++ b/modules/images.py @@ -345,7 +345,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i if not os.path.exists(fullfn): break - def exif_bytes(): + def create_exif_bytes(): return piexif.dump({ "Exif": { piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode") @@ -353,7 +353,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i }) if extension.lower() in ("jpg", "jpeg", "webp"): - image.save(fullfn, quality=opts.jpeg_quality, exif_bytes=exif_bytes()) + exif_bytes = create_exif_bytes() + image.save(fullfn, quality=opts.jpeg_quality, exif=exif_bytes) else: image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo) @@ -370,7 +371,11 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i elif oversize: image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS) - image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality, exif_bytes=exif_bytes()) + if exif_bytes in locals(): + pass + else: + exif_bytes = create_exif_bytes() + image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality, exif=exif_bytes) if opts.save_txt and info is not None: with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file: -- cgit v1.2.1 From 3c665b8dd6da07c60af7783f0e0dd1dec714a9b4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 08:32:15 +0300 Subject: the last PR broke saving EXiF completely for me. I don't know if it was broken already or some condition changed, but it seems like the person who originally added EXIF said, saving it with PIL may not work. I switched to using piexif to add data after the file written. 
--- modules/images.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/modules/images.py b/modules/images.py index 8cd7fe37..7e1e506c 100644 --- a/modules/images.py +++ b/modules/images.py @@ -346,6 +346,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i break def create_exif_bytes(): + def exif_bytes(): return piexif.dump({ "Exif": { piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode") @@ -353,14 +354,12 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i }) if extension.lower() in ("jpg", "jpeg", "webp"): - exif_bytes = create_exif_bytes() - image.save(fullfn, quality=opts.jpeg_quality, exif=exif_bytes) + image.save(fullfn, quality=opts.jpeg_quality) + if opts.enable_pnginfo and info is not None: + piexif.insert(exif_bytes(), fullfn) else: image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo) - if extension.lower() == "webp": - piexif.insert(exif_bytes, fullfn) - target_side_length = 4000 oversize = image.width > target_side_length or image.height > target_side_length if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024): @@ -371,11 +370,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i elif oversize: image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS) - if exif_bytes in locals(): - pass - else: - exif_bytes = create_exif_bytes() - image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality, exif=exif_bytes) + image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality) + if opts.enable_pnginfo and info is not None: + piexif.insert(exif_bytes(), fullfn) if opts.save_txt and info is not None: with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file: -- cgit v1.2.1 From 1fc1c537c7303be88e0da93c3a632c48acb101e9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 09:01:10 +0300 Subject: fix --- modules/images.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index 7e1e506c..b62c48f8 100644 --- a/modules/images.py +++ b/modules/images.py @@ -345,7 +345,6 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i if not os.path.exists(fullfn): break - def create_exif_bytes(): def exif_bytes(): return piexif.dump({ "Exif": { -- cgit v1.2.1 From 047a623f7a5c585d308d25268763f76ea225f9a0 Mon Sep 17 00:00:00 2001 From: jjisnow Date: Sat, 17 Sep 2022 16:07:07 +1000 Subject: Restore run_pnginfo --- modules/extras.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/extras.py b/modules/extras.py index 64b4f2b6..3d9d9f7a 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -97,7 +97,7 @@ def run_extras(image, image_folder, gfpgan_visibility, codeformer_visibility, co return outputs, plaintext_to_html(info), '' -def run_image_info(image): +def run_pnginfo(image): items = image.info if "exif" in image.info: -- cgit v1.2.1 From 588d6de4a870a80862377d14c4f316ff13e5e818 Mon Sep 17 00:00:00 2001 From: jjisnow Date: Sat, 17 Sep 2022 16:08:56 +1000 Subject: Update ui.py Reverse run_pnginfo for compatibility reasons --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 0899490f..738ac945 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -295,7 +295,7 @@ def create_toprow(is_img2img): return prompt, roll, 
prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, check_progress -def create_ui(txt2img, img2img, run_extras, run_image_info): +def create_ui(txt2img, img2img, run_extras, run_pnginfo): with gr.Blocks(analytics_enabled=False) as txt2img_interface: txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, check_progress = create_toprow(is_img2img=False) @@ -697,7 +697,7 @@ def create_ui(txt2img, img2img, run_extras, run_image_info): submit.click(**extras_args) pnginfo_interface = gr.Interface( - wrap_gradio_call(run_image_info), + wrap_gradio_call(run_pnginfo), inputs=[ gr.Image(label="Source", source="upload", interactive=True, type="pil"), ], -- cgit v1.2.1 From b172cd56e820c2ee107a2a0bc4cfb45de34ede4b Mon Sep 17 00:00:00 2001 From: jjisnow Date: Sat, 17 Sep 2022 16:09:53 +1000 Subject: Update webui.py --- webui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index cbfb62f0..576b4a3d 100644 --- a/webui.py +++ b/webui.py @@ -9,7 +9,7 @@ from omegaconf import OmegaConf import signal from ldm.util import instantiate_from_config - +run_ from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.ui @@ -121,7 +121,7 @@ def webui(): txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img), img2img=wrap_gradio_gpu_call(modules.img2img.img2img), run_extras=wrap_gradio_gpu_call(modules.extras.run_extras), - run_image_info=modules.extras.run_image_info + run_pnginfo=modules.extras.run_pnginfo ) demo.launch( -- cgit v1.2.1 From ac61e4663c21ea0f51a4319162d3877e00554a2a Mon Sep 17 00:00:00 2001 From: jjisnow Date: Sat, 17 Sep 2022 16:10:46 +1000 Subject: Update webui.py --- webui.py | 1 - 1 file changed, 1 deletion(-) diff --git a/webui.py b/webui.py index 576b4a3d..1a6208b7 100644 --- a/webui.py +++ b/webui.py @@ -9,7 +9,6 @@ from omegaconf import OmegaConf import signal from ldm.util import instantiate_from_config -run_ from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.ui -- cgit v1.2.1 From ed6787ca2fe950f633a925ccb0467eafd4ec0f43 Mon Sep 17 00:00:00 2001 From: EyeDeck Date: Sat, 17 Sep 2022 00:49:31 -0400 Subject: Add VRAM monitoring --- modules/memmon.py | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ modules/shared.py | 5 ++++ modules/ui.py | 14 +++++++++- style.css | 18 ++++++++++++- 4 files changed, 112 insertions(+), 2 deletions(-) create mode 100644 modules/memmon.py diff --git a/modules/memmon.py b/modules/memmon.py new file mode 100644 index 00000000..f2cac841 --- /dev/null +++ b/modules/memmon.py @@ -0,0 +1,77 @@ +import threading +import time +from collections import defaultdict + +import torch + + +class MemUsageMonitor(threading.Thread): + run_flag = None + device = None + disabled = False + opts = None + data = None + + def __init__(self, name, device, opts): + threading.Thread.__init__(self) + self.name = name + self.device = device + self.opts = opts + + self.daemon = True + self.run_flag = threading.Event() + self.data = defaultdict(int) + + def run(self): + if self.disabled: + return + + while True: + self.run_flag.wait() + + torch.cuda.reset_peak_memory_stats() + self.data.clear() + + if self.opts.memmon_poll_rate <= 0: + self.run_flag.clear() + continue + + self.data["min_free"] = torch.cuda.mem_get_info()[0] + + while self.run_flag.is_set(): + free, total = torch.cuda.mem_get_info() # calling with 
self.device errors, torch bug? + self.data["min_free"] = min(self.data["min_free"], free) + + time.sleep(1 / self.opts.memmon_poll_rate) + + def dump_debug(self): + print(self, 'recorded data:') + for k, v in self.read().items(): + print(k, -(v // -(1024 ** 2))) + + print(self, 'raw torch memory stats:') + tm = torch.cuda.memory_stats(self.device) + for k, v in tm.items(): + if 'bytes' not in k: + continue + print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2))) + + print(torch.cuda.memory_summary()) + + def monitor(self): + self.run_flag.set() + + def read(self): + free, total = torch.cuda.mem_get_info() + self.data["total"] = total + + torch_stats = torch.cuda.memory_stats(self.device) + self.data["active_peak"] = torch_stats["active_bytes.all.peak"] + self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"] + self.data["system_peak"] = total - self.data["min_free"] + + return self.data + + def stop(self): + self.run_flag.clear() + return self.read() diff --git a/modules/shared.py b/modules/shared.py index da56b6ae..4f877036 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -12,6 +12,7 @@ from modules.paths import script_path, sd_path from modules.devices import get_optimal_device import modules.styles import modules.interrogate +import modules.memmon sd_model_file = os.path.join(script_path, 'model.ckpt') if not os.path.exists(sd_model_file): @@ -138,6 +139,7 @@ class Options: "show_progressbar": OptionInfo(True, "Show progressbar"), "show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}), "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."), + "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step":1}), "face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), @@ -217,3 +219,6 @@ class TotalTQDM: total_tqdm = TotalTQDM() + +mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts) +mem_mon.start() diff --git a/modules/ui.py b/modules/ui.py index 738ac945..01b2ba85 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -119,6 +119,7 @@ def save_files(js_data, images, index): def wrap_gradio_call(func): def f(*args, **kwargs): + shared.mem_mon.monitor() t = time.perf_counter() try: @@ -135,8 +136,19 @@ def wrap_gradio_call(func): elapsed = time.perf_counter() - t + mem_stats = {k:-(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()} + active_peak = mem_stats['active_peak'] + reserved_peak = mem_stats['reserved_peak'] + sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak'] + sys_total = mem_stats['total'] + sys_pct = '?' if opts.memmon_poll_rate <= 0 else round(sys_peak/sys_total * 100, 2) + vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data. " \ + "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data. 
" \ + "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)." + # last item is always HTML - res[-1] = res[-1] + f"
<p class='performance'>Time taken: {elapsed:.2f}s</p>
" + res[-1] += f"
<div class='performance'><p class='time'>Time taken: {elapsed:.2f}s</p>
" \ + f"
<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p></div>
" shared.state.interrupted = False diff --git a/style.css b/style.css index d41c098c..67ce8550 100644 --- a/style.css +++ b/style.css @@ -1,5 +1,21 @@ .output-html p {margin: 0 0.5em;} -.performance { font-size: 0.85em; color: #444; } + +.performance { + font-size: 0.85em; + color: #444; + display: flex; + justify-content: space-between; + white-space: nowrap; +} + +.performance .time { + margin-right: 0; +} + +.performance .vram { + margin-left: 0; + text-align: right; +} #generate{ min-height: 4.5em; -- cgit v1.2.1 From b8be33dad13d4937c6ef8fbb49715d843c3dd586 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 09:23:31 +0300 Subject: hide VRAM text if polling is disabled --- modules/ui.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 01b2ba85..437bce66 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -136,7 +136,7 @@ def wrap_gradio_call(func): elapsed = time.perf_counter() - t - mem_stats = {k:-(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()} + mem_stats = {k: -(v//-(1024*1024)) for k,v in shared.mem_mon.stop().items()} active_peak = mem_stats['active_peak'] reserved_peak = mem_stats['reserved_peak'] sys_peak = '?' if opts.memmon_poll_rate <= 0 else mem_stats['system_peak'] @@ -146,9 +146,10 @@ def wrap_gradio_call(func): "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data. " \ "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)." + vram_html = '' if opts.memmon_poll_rate == 0 else f"
<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>
" + # last item is always HTML - res[-1] += f"
<div class='performance'><p class='time'>Time taken: {elapsed:.2f}s</p>
" \ - f"
<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p></div>
" + res[-1] += f"
<div class='performance'><p class='time'>Time taken: {elapsed:.2f}s</p>{vram_html}</div>
" shared.state.interrupted = False -- cgit v1.2.1 From 247f58a5e740a7bd3980815961425b778d77ec28 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 12:05:04 +0300 Subject: add support for switching model checkpoints at runtime --- modules/images.py | 2 +- modules/processing.py | 2 +- modules/sd_models.py | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++ modules/shared.py | 19 +++++-- modules/ui.py | 5 ++ webui.py | 61 ++++----------------- 6 files changed, 179 insertions(+), 58 deletions(-) create mode 100644 modules/sd_models.py diff --git a/modules/images.py b/modules/images.py index b62c48f8..a3064333 100644 --- a/modules/images.py +++ b/modules/images.py @@ -274,7 +274,7 @@ def apply_filename_pattern(x, p, seed, prompt): x = x.replace("[height]", str(p.height)) x = x.replace("[sampler]", sd_samplers.samplers[p.sampler_index].name) - x = x.replace("[model_hash]", shared.sd_model_hash) + x = x.replace("[model_hash]", shared.sd_model.sd_model_hash) x = x.replace("[date]", datetime.date.today().isoformat()) if cmd_opts.hide_ui_dir_config: diff --git a/modules/processing.py b/modules/processing.py index 81c83f06..3a4ff224 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -227,7 +227,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: "Seed": all_seeds[index], "Face restoration": (opts.face_restoration_model if p.restore_faces else None), "Size": f"{p.width}x{p.height}", - "Model hash": (None if not opts.add_model_hash_to_info or not shared.sd_model_hash else shared.sd_model_hash), + "Model hash": (None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Batch size": (None if p.batch_size < 2 else p.batch_size), "Batch pos": (None if p.batch_size < 2 else position_in_batch), "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]), diff --git a/modules/sd_models.py b/modules/sd_models.py new file mode 100644 index 00000000..036af0e4 --- /dev/null +++ b/modules/sd_models.py @@ -0,0 +1,148 @@ +import glob +import os.path +import sys +from collections import namedtuple +import torch +from omegaconf import OmegaConf + + +from ldm.util import instantiate_from_config + +from modules import shared + +CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash']) +checkpoints_list = {} + +try: + # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start. 
+ + from transformers import logging + + logging.set_verbosity_error() +except Exception: + pass + + +def list_models(): + checkpoints_list.clear() + + model_dir = os.path.abspath(shared.cmd_opts.ckpt_dir) + + def modeltitle(path, h): + abspath = os.path.abspath(path) + + if abspath.startswith(model_dir): + name = abspath.replace(model_dir, '') + else: + name = os.path.basename(path) + + if name.startswith("\\") or name.startswith("/"): + name = name[1:] + + return f'{name} [{h}]' + + cmd_ckpt = shared.cmd_opts.ckpt + if os.path.exists(cmd_ckpt): + h = model_hash(cmd_ckpt) + title = modeltitle(cmd_ckpt, h) + checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h) + elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file: + print(f"Checkpoint in --ckpt argument not found: {cmd_ckpt}", file=sys.stderr) + + if os.path.exists(model_dir): + for filename in glob.glob(model_dir + '/**/*.ckpt', recursive=True): + h = model_hash(filename) + title = modeltitle(filename, h) + checkpoints_list[title] = CheckpointInfo(filename, title, h) + + +def model_hash(filename): + try: + with open(filename, "rb") as file: + import hashlib + m = hashlib.sha256() + + file.seek(0x100000) + m.update(file.read(0x10000)) + return m.hexdigest()[0:8] + except FileNotFoundError: + return 'NOFILE' + + +def select_checkpoint(): + model_checkpoint = shared.opts.sd_model_checkpoint + checkpoint_info = checkpoints_list.get(model_checkpoint, None) + if checkpoint_info is not None: + return checkpoint_info + + if len(checkpoints_list) == 0: + print(f"Checkpoint {model_checkpoint} not found and no other checkpoints found", file=sys.stderr) + return None + + checkpoint_info = next(iter(checkpoints_list.values())) + if model_checkpoint is not None: + print(f"Checkpoint {model_checkpoint} not found; loading fallback {checkpoint_info.title}", file=sys.stderr) + + return checkpoint_info + + +def load_model_weights(model, checkpoint_file, sd_model_hash): + print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}") + + pl_sd = torch.load(checkpoint_file, map_location="cpu") + if "global_step" in pl_sd: + print(f"Global Step: {pl_sd['global_step']}") + sd = pl_sd["state_dict"] + + model.load_state_dict(sd, strict=False) + + if shared.cmd_opts.opt_channelslast: + model.to(memory_format=torch.channels_last) + + if not shared.cmd_opts.no_half: + model.half() + + model.sd_model_hash = sd_model_hash + model.sd_model_checkpint = checkpoint_file + + +def load_model(): + from modules import lowvram, sd_hijack + checkpoint_info = select_checkpoint() + + sd_config = OmegaConf.load(shared.cmd_opts.config) + sd_model = instantiate_from_config(sd_config.model) + load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash) + + if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: + lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram) + else: + sd_model.to(shared.device) + + sd_hijack.model_hijack.hijack(sd_model) + + sd_model.eval() + + print(f"Model loaded.") + return sd_model + + +def reload_model_weights(sd_model): + from modules import lowvram, devices + checkpoint_info = select_checkpoint() + + if sd_model.sd_model_checkpint == checkpoint_info.filename: + return + + if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: + lowvram.send_everything_to_cpu() + else: + sd_model.to(devices.cpu) + + load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash) + + if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram: + sd_model.to(devices.device) + + print(f"Weights loaded.") 
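+ # (note on the sequence above: the model is moved to the CPU -- or handed to the lowvram manager -- before the new state dict is loaded, so two full sets of checkpoint weights are never resident in VRAM at once; the model returns to the GPU only after the swap)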
+ return sd_model diff --git a/modules/shared.py b/modules/shared.py index 4f877036..3c3aa9b6 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -13,14 +13,15 @@ from modules.devices import get_optimal_device import modules.styles import modules.interrogate import modules.memmon +import modules.sd_models sd_model_file = os.path.join(script_path, 'model.ckpt') -if not os.path.exists(sd_model_file): - sd_model_file = "models/ldm/stable-diffusion-v1/model.ckpt" +default_sd_model_file = sd_model_file parser = argparse.ArgumentParser() parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",) -parser.add_argument("--ckpt", type=str, default=os.path.join(sd_path, sd_model_file), help="path to checkpoint of model",) +parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",) +parser.add_argument("--ckpt-dir", type=str, default=os.path.join(script_path, 'models'), help="path to directory with stable diffusion checkpoints",) parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default='GFPGANv1.3.pth') parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats") @@ -88,13 +89,17 @@ interrogator = modules.interrogate.InterrogateModels("interrogate") face_restorers = [] +modules.sd_models.list_models() + + class Options: class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None): + def __init__(self, default=None, label="", component=None, component_args=None, onchange=None): self.default = default self.label = label self.component = component self.component_args = component_args + self.onchange = onchange data = None hide_dirs = {"visible": False} if cmd_opts.hide_ui_dir_config else None @@ -150,6 +155,7 @@ class Options: "interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), "interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), "interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"), + "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}), } def __init__(self): @@ -180,6 +186,10 @@ class Options: with open(filename, "r", encoding="utf8") as file: self.data = json.load(file) + def onchange(self, key, func): + item = self.data_labels.get(key) + item.onchange = func + opts = Options() if os.path.exists(config_filename): @@ -188,7 +198,6 @@ if os.path.exists(config_filename): sd_upscalers = [] sd_model = None -sd_model_hash = '' progress_print_out = sys.stdout diff --git a/modules/ui.py b/modules/ui.py index 437bce66..36e3c664 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -758,7 +758,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False: continue + oldval = opts.data.get(key, None) 
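+ # (the previous value is captured so the per-setting onchange callback below -- e.g. the checkpoint reload registered in webui.py -- fires only when the value actually changed)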
opts.data[key] = value + + if oldval != value and opts.data_labels[key].onchange is not None: + opts.data_labels[key].onchange() + up.append(comp.update(value=value)) opts.save(shared.config_filename) diff --git a/webui.py b/webui.py index add72123..ff8997db 100644 --- a/webui.py +++ b/webui.py @@ -3,13 +3,8 @@ import threading from modules.paths import script_path -import torch -from omegaconf import OmegaConf - import signal -from ldm.util import instantiate_from_config - from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.ui @@ -24,6 +19,7 @@ import modules.extras import modules.lowvram import modules.txt2img import modules.img2img +import modules.sd_models modules.codeformer_model.setup_codeformer() @@ -33,29 +29,17 @@ shared.face_restorers.append(modules.face_restoration.FaceRestoration()) esrgan.load_models(cmd_opts.esrgan_models_path) realesrgan.setup_realesrgan() +queue_lock = threading.Lock() -def load_model_from_config(config, ckpt, verbose=False): - print(f"Loading model [{shared.sd_model_hash}] from {ckpt}") - pl_sd = torch.load(ckpt, map_location="cpu") - if "global_step" in pl_sd: - print(f"Global Step: {pl_sd['global_step']}") - sd = pl_sd["state_dict"] - model = instantiate_from_config(config.model) - m, u = model.load_state_dict(sd, strict=False) - if len(m) > 0 and verbose: - print("missing keys:") - print(m) - if len(u) > 0 and verbose: - print("unexpected keys:") - print(u) - if cmd_opts.opt_channelslast: - model = model.to(memory_format=torch.channels_last) - model.eval() - return model +def wrap_queued_call(func): + def f(*args, **kwargs): + with queue_lock: + res = func(*args, **kwargs) + return res -queue_lock = threading.Lock() + return f def wrap_gradio_gpu_call(func): @@ -80,33 +64,8 @@ def wrap_gradio_gpu_call(func): modules.scripts.load_scripts(os.path.join(script_path, "scripts")) -try: - # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start. - - from transformers import logging - - logging.set_verbosity_error() -except Exception: - pass - -with open(cmd_opts.ckpt, "rb") as file: - import hashlib - m = hashlib.sha256() - - file.seek(0x100000) - m.update(file.read(0x10000)) - shared.sd_model_hash = m.hexdigest()[0:8] - -sd_config = OmegaConf.load(cmd_opts.config) -shared.sd_model = load_model_from_config(sd_config, cmd_opts.ckpt) -shared.sd_model = (shared.sd_model if cmd_opts.no_half else shared.sd_model.half()) - -if cmd_opts.lowvram or cmd_opts.medvram: - modules.lowvram.setup_for_low_vram(shared.sd_model, cmd_opts.medvram) -else: - shared.sd_model = shared.sd_model.to(shared.device) - -modules.sd_hijack.model_hijack.hijack(shared.sd_model) +shared.sd_model = modules.sd_models.load_model() +shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model))) def webui(): -- cgit v1.2.1 From f8f17e3b9e61f238dd32b6d1bab5db040c531559 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 12:12:55 +0300 Subject: updated readme to reflect new model location --- README.md | 2 +- models/Put Stable Diffusion checkpoints here.txt | 0 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 models/Put Stable Diffusion checkpoints here.txt diff --git a/README.md b/README.md index 6c232e38..d97ebc3f 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ Alternatively, use [Google Colab](https://colab.research.google.com/drive/1Iy-xW 1. 
Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH" 2. Install [git](https://git-scm.com/download/win). 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. -4. Place `model.ckpt` in the base directory, alongside `webui.py`. +4. Place `model.ckpt` in the `models` directory. 5. _*(Optional)*_ Place `GFPGANv1.3.pth` in the base directory, alongside `webui.py`. 6. Run `webui-user.bat` from Windows Explorer as a normal, non-administrator user. diff --git a/models/Put Stable Diffusion checkpoints here.txt b/models/Put Stable Diffusion checkpoints here.txt new file mode 100644 index 00000000..e69de29b -- cgit v1.2.1 From 99585b3514e2d7e987651d5c6a0806f933af012b Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 12:38:15 +0300 Subject: moved progressbar to top by request --- modules/ui.py | 7 ++++--- style.css | 6 ++++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 36e3c664..960f1e36 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -337,6 +337,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False) with gr.Column(variant='panel'): + progressbar = gr.HTML(elem_id="progressbar") + with gr.Group(): txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False) txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery').style(grid=4) @@ -349,8 +351,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): send_to_extras = gr.Button('Send to extras') interrupt = gr.Button('Interrupt') - progressbar = gr.HTML(elem_id="progressbar") - with gr.Group(): html_info = gr.HTML() generation_info = gr.Textbox(visible=False) @@ -474,6 +474,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True) with gr.Column(variant='panel'): + progressbar = gr.HTML(elem_id="progressbar") + with gr.Group(): img2img_preview = gr.Image(elem_id='img2img_preview', visible=False) img2img_gallery = gr.Gallery(label='Output', elem_id='img2img_gallery').style(grid=4) @@ -487,7 +489,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): interrupt = gr.Button('Interrupt') img2img_save_style = gr.Button('Save prompt as style') - progressbar = gr.HTML(elem_id="progressbar") with gr.Group(): html_info = gr.HTML() diff --git a/style.css b/style.css index 67ce8550..752d2cf4 100644 --- a/style.css +++ b/style.css @@ -167,6 +167,12 @@ input[type="range"]{ #txt2img_negative_prompt, #img2img_negative_prompt{ } +#progressbar{ + position: absolute; + z-index: 1000; + right: 0; +} + .progressDiv{ width: 100%; height: 30px; -- cgit v1.2.1 From 304222ef94d1c3c60fab466a96c448868f391bce Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 13:49:36 +0300 Subject: X/Y plot support for switching checkpoints.
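The new axis matches its value by substring against the checkpoint titles that list_models() builds in the form `name [hash]`, so either a filename fragment or the short hash selects a model. A minimal, self-contained sketch of that lookup, using hypothetical checkpoint titles (the real matching happens in apply_checkpoint below):

# standalone sketch of the substring lookup used by apply_checkpoint;
# the titles here are hypothetical examples of the "name [hash]" form
checkpoints = ["sd-v1-4.ckpt [7460a6fa]", "wd-v1-2.ckpt [3e1a125f]"]

def find_checkpoint(query: str) -> str:
    # either a filename fragment or the 8-character hash matches the title
    applicable = [title for title in checkpoints if query in title]
    assert len(applicable) > 0, f'Checkpoint {query} not found'
    return applicable[0]

print(find_checkpoint("7460a6fa"))  # sd-v1-4.ckpt [7460a6fa]
print(find_checkpoint("wd-v1-2"))   # wd-v1-2.ckpt [3e1a125f]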
--- modules/sd_models.py | 4 ++-- script.js | 2 ++ scripts/xy_grid.py | 15 +++++++++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 036af0e4..4bd70fc5 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -127,9 +127,9 @@ def load_model(): return sd_model -def reload_model_weights(sd_model): +def reload_model_weights(sd_model, info=None): from modules import lowvram, devices - checkpoint_info = select_checkpoint() + checkpoint_info = info or select_checkpoint() if sd_model.sd_model_checkpint == checkpoint_info.filename: return diff --git a/script.js b/script.js index 4a70e51d..e63e0695 100644 --- a/script.js +++ b/script.js @@ -66,6 +66,8 @@ titles = { "Style 2": "Style to apply; styles have components for both positive and negative prompts and apply to both", "Apply style": "Insert selected styles into prompt fields", "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style will use that as a placeholder for your prompt when you use the style in the future.", + + "Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.", } function gradioApp(){ diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index eccfda87..680dd702 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -10,7 +10,9 @@ import gradio as gr from modules import images from modules.processing import process_images, Processed from modules.shared import opts, cmd_opts, state +import modules.shared as shared import modules.sd_samplers +import modules.sd_models import re @@ -41,6 +43,15 @@ def apply_sampler(p, x, xs): p.sampler_index = sampler_index +def apply_checkpoint(p, x, xs): + applicable = [info for info in modules.sd_models.checkpoints_list.values() if x in info.title] + assert len(applicable) > 0, f'Checkpoint {x} not found' + + info = applicable[0] + + modules.sd_models.reload_model_weights(shared.sd_model, info) + + def format_value_add_label(p, opt, x): if type(x) == float: x = round(x, 8) @@ -74,6 +85,7 @@ axis_options = [ AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label), AxisOption("Prompt S/R", str, apply_prompt, format_value), AxisOption("Sampler", str, apply_sampler, format_value), + AxisOption("Checkpoint name", str, apply_checkpoint, format_value), AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones ] @@ -215,4 +227,7 @@ class Script(scripts.Script): if opts.grid_save: images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p) + # restore checkpoint in case it was changed by axes + modules.sd_models.reload_model_weights(shared.sd_model) + return processed -- cgit v1.2.1 From 140f89315380dbcc541f6e18e3d355a06ea3e2f0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 14:55:40 +0300 Subject: process all values for x/y plot right away to error out if any are bad before any processing begins --- scripts/xy_grid.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 680dd702..6a157722 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -90,11 +90,11 @@ axis_options = [ ] -def draw_xy_grid(p, xs, ys, x_label, y_label,
cell, draw_legend): +def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend): res = [] - ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys] - hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs] + ver_texts = [[images.GridAnnotation(y)] for y in y_labels] + hor_texts = [[images.GridAnnotation(x)] for x in x_labels] first_processed = None @@ -218,8 +218,8 @@ class Script(scripts.Script): p, xs=xs, ys=ys, - x_label=lambda x: x_opt.format_value(p, x_opt, x), - y_label=lambda y: y_opt.format_value(p, y_opt, y), + x_labels=[x_opt.format_value(p, x_opt, x) for x in xs], + y_labels=[y_opt.format_value(p, y_opt, y) for y in ys], cell=cell, draw_legend=draw_legend ) -- cgit v1.2.1 From ba295b32688629cf575d67f1750a7838b008858b Mon Sep 17 00:00:00 2001 From: Tony Beeman Date: Sat, 17 Sep 2022 01:34:33 -0700 Subject: * Fix process_images where the number of images is not a multiple of (batch_size * n_iter), which would cause us to throw an exception. * Add a textbox option to Prompts from file (ease of use and it makes it much easier to use on a mobile device) * Fix the fact that Prompts from file was sometimes passing an empty batch. --- modules/processing.py | 9 ++++++++- scripts/prompts_from_file.py | 36 +++++++++++++++++++++++++----------- 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 3a4ff224..6a99d383 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -188,7 +188,11 @@ def fix_seed(p): def process_images(p: StableDiffusionProcessing) -> Processed: """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch""" - assert p.prompt is not None + if type(p.prompt) == list: + assert(len(p.prompt) > 0) + else: + assert p.prompt is not None + devices.torch_gc() fix_seed(p) @@ -265,6 +269,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed: seeds = all_seeds[n * p.batch_size:(n + 1) * p.batch_size] subseeds = all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] + if (len(prompts) == 0): + break + #uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt]) #c = p.sd_model.get_learned_conditioning(prompts) uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps) diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index d9b01c81..513d9a1c 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -13,28 +13,42 @@ from modules.shared import opts, cmd_opts, state class Script(scripts.Script): def title(self): - return "Prompts from file" + return "Prompts from file or textbox" def ui(self, is_img2img): + # This checkbox would look nicer as two tabs, but there are two problems: + # 1) There is a bug in Gradio 3.3 that prevents visibility from working on Tabs + # 2) Even with Gradio 3.3.1, returning a control (like Tabs) that can't be used as input + # causes an AttributeError: 'Tabs' object has no attribute 'preprocess' assert, + # due to the way Script assumes all controls returned can be used as inputs. + # Therefore, there's no good way to use grouping components right now, + # so we will use a checkbox!
:) + checkbox_txt = gr.Checkbox(label="Show Textbox", value=False) file = gr.File(label="File with inputs", type='bytes') - - return [file] - - def run(self, p, data: bytes): - lines = [x.strip() for x in data.decode('utf8', errors='ignore').split("\n")] + prompt_txt = gr.TextArea(label="Prompts") + checkbox_txt.change(fn=lambda x: [gr.File.update(visible = not x), gr.TextArea.update(visible = x)], inputs=[checkbox_txt], outputs=[file, prompt_txt]) + return [checkbox_txt, file, prompt_txt] + + def run(self, p, checkbox_txt, data: bytes, prompt_txt: str): + if (checkbox_txt): + lines = [x.strip() for x in prompt_txt.splitlines()] + else: + lines = [x.strip() for x in data.decode('utf8', errors='ignore').split("\n")] lines = [x for x in lines if len(x) > 0] - batch_count = math.ceil(len(lines) / p.batch_size) - print(f"Will process {len(lines) * p.n_iter} images in {batch_count * p.n_iter} batches.") + img_count = len(lines) * p.n_iter + batch_count = math.ceil(img_count / p.batch_size) + loop_count = math.ceil(batch_count / p.n_iter) + print(f"Will process {img_count} images in {batch_count} batches.") p.do_not_save_grid = True state.job_count = batch_count images = [] - for batch_no in range(batch_count): - state.job = f"{batch_no + 1} out of {batch_count * p.n_iter}" - p.prompt = lines[batch_no*p.batch_size:(batch_no+1)*p.batch_size] * p.n_iter + for loop_no in range(loop_count): + state.job = f"{loop_no + 1} out of {loop_count}" + p.prompt = lines[loop_no*p.batch_size:(loop_no+1)*p.batch_size] * p.n_iter proc = process_images(p) images += proc.images -- cgit v1.2.1 From 65be5312dc2b73e659299ea052d5484e6ae6c0ea Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 17 Sep 2022 01:00:45 +0100 Subject: Add modal css classes --- style.css | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/style.css b/style.css index 752d2cf4..2de83a94 100644 --- a/style.css +++ b/style.css @@ -196,3 +196,40 @@ input[type="range"]{ border-radius: 8px; } +#lightboxModal{ + display: none; + position: fixed; + z-index: 900; + padding-top: 100px; + left: 0; + top: 0; + width: 100%; + height: 100%; + overflow: auto; + background-color: black; +} + +.modalClose { + color: white; + position: absolute; + top: 10px; + right: 25px; + font-size: 35px; + font-weight: bold; +} + +.modalClose:hover, +.modalClose:focus { + color: #999; + text-decoration: none; + cursor: pointer; +} + +#modalImage { + display: block; + margin-left: auto; + margin-right: auto; + margin-top: auto; + width: auto; +} + -- cgit v1.2.1 From 1a513370774ccb4cd9562f1b40048adc2ab7c896 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 17 Sep 2022 01:03:03 +0100 Subject: Add modal creation and functions --- script.js | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/script.js b/script.js index e63e0695..7c27da74 100644 --- a/script.js +++ b/script.js @@ -76,6 +76,35 @@ function gradioApp(){ global_progressbar = null +function closeModal() { + gradioApp().getElementById("lightboxModal").style.display = "none"; +} + +function showModal(elem) { + gradioApp().getElementById("modalImage").src = elem.src + gradioApp().getElementById("lightboxModal").style.display = "block"; +} + +function showGalleryImage(){ + setTimeout(function() { + fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain') + + if(fullImg_preview != null){ + fullImg_preview.forEach(function 
function_name(e) { + if(e && e.parentElement.tagName == 'DIV'){ + e.style.cursor='pointer' + + elemfunc = function(elem){ + elem.onclick = function(){showModal(elem)}; + } + elemfunc(e) + } + }); + } + + }, 100); +} + function addTitles(root){ root.querySelectorAll('span, button, select').forEach(function(span){ tooltip = titles[span.textContent]; @@ -117,8 +146,18 @@ function addTitles(root){ img2img_preview.style.width = img2img_gallery.clientWidth + "px" img2img_preview.style.height = img2img_gallery.clientHeight + "px" } + + fullImg_preview = gradioApp().querySelectorAll('img.w-full') + if(fullImg_preview != null){ + fullImg_preview.forEach(function function_name(e) { + if(e && e.parentElement.tagName == 'BUTTON'){ + e.onclick = showGalleryImage; + } + }); + } + window.setTimeout(requestProgress, 500) }); mutationObserver.observe( progressbar, { childList:true, subtree:true }) @@ -131,6 +170,27 @@ document.addEventListener("DOMContentLoaded", function() { addTitles(gradioApp()); }); mutationObserver.observe( gradioApp(), { childList:true, subtree:true }) + + const modalFragment = document.createDocumentFragment(); + const modal = document.createElement('div') + modal.onclick = closeModal; + + const modalClose = document.createElement('span') + modalClose.className = 'modalClose cursor'; + modalClose.innerHTML = '×' + modalClose.onclick = closeModal; + modal.id = "lightboxModal"; + modal.appendChild(modalClose) + + const modalImage = document.createElement('img') + modalImage.id = 'modalImage'; + modalImage.onclick = closeModal; + modal.appendChild(modalImage) + + gradioApp().getRootNode().appendChild(modal) + + document.body.appendChild(modalFragment); + }); function selected_gallery_index(){ -- cgit v1.2.1 From a66d857345c090674430c21fba1256c76d769635 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 17 Sep 2022 01:13:28 +0100 Subject: make background semi-transparent not black; --- style.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/style.css b/style.css index 2de83a94..2bdd1e0e 100644 --- a/style.css +++ b/style.css @@ -206,7 +206,7 @@ input[type="range"]{ width: 100%; height: 100%; overflow: auto; - background-color: black; + background-color: rgba(20, 20, 20, 0.95); } .modalClose { -- cgit v1.2.1 From 1ef79f926e6314b3ef9308b12ff7ad482afd790a Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 17 Sep 2022 03:26:47 +0100 Subject: generalise to work on all non-masked images on all tabs --- script.js | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/script.js b/script.js index 7c27da74..a016eb4e 100644 --- a/script.js +++ b/script.js @@ -105,6 +105,12 @@ function showGalleryImage(){ }, 100); } +function galleryImageHandler(e){ + if(e && e.parentElement.tagName == 'BUTTON'){ + e.onclick = showGalleryImage; + } +} + function addTitles(root){ root.querySelectorAll('span, button, select').forEach(function(span){ tooltip = titles[span.textContent]; @@ -147,22 +153,17 @@ function addTitles(root){ img2img_preview.style.height = img2img_gallery.clientHeight + "px" } - fullImg_preview = gradioApp().querySelectorAll('img.w-full') - - if(fullImg_preview != null){ - - fullImg_preview.forEach(function function_name(e) { - if(e && e.parentElement.tagName == 'BUTTON'){ - e.onclick = showGalleryImage; - } - }); - } - window.setTimeout(requestProgress, 500) }); mutationObserver.observe( progressbar, { childList:true, subtree:true }) } + + fullImg_preview = 
gradioApp().querySelectorAll('img.w-full') + if(fullImg_preview != null){ + fullImg_preview.forEach(galleryImageHandler); + } + } document.addEventListener("DOMContentLoaded", function() { -- cgit v1.2.1 From 2f18823e69ec1dd7622f652561e197a576dc3b80 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 15:39:20 +0300 Subject: fix for broken export for 4chan --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index a3064333..e287d0df 100644 --- a/modules/images.py +++ b/modules/images.py @@ -371,7 +371,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality) if opts.enable_pnginfo and info is not None: - piexif.insert(exif_bytes(), fullfn) + piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg") if opts.save_txt and info is not None: with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file: -- cgit v1.2.1 From 8d197b6a92fbcea8e3394159247c19cea080c975 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 16:28:19 +0300 Subject: added user.css support --- .gitignore | 1 + modules/ui.py | 5 +++++ style.css | 3 --- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 1dffb108..4f830e61 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ __pycache__ /webui-user.bat /webui-user.sh /interrogate +/user.css diff --git a/modules/ui.py b/modules/ui.py index 960f1e36..b97ffd07 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -801,6 +801,11 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file: css = file.read() + if os.path.exists(os.path.join(script_path, "style.css")): + with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file: + usercss = file.read() + css += usercss + if not cmd_opts.no_progressbar_hiding: css += css_hide_progressbar diff --git a/style.css b/style.css index 2bdd1e0e..36d073fa 100644 --- a/style.css +++ b/style.css @@ -21,9 +21,6 @@ min-height: 4.5em; } -#txt2img_gallery, #img2img_gallery{ - min-height: 768px; -} #txt2img_gallery img, #img2img_gallery img{ object-fit: scale-down; } -- cgit v1.2.1 From 56ff118845748d1302968039e13703b6ad8107c4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 17 Sep 2022 16:35:58 +0300 Subject: typo --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index b97ffd07..2f6eb307 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -801,7 +801,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo): with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file: css = file.read() - if os.path.exists(os.path.join(script_path, "style.css")): + if os.path.exists(os.path.join(script_path, "user.css")): with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file: usercss = file.read() css += usercss -- cgit v1.2.1 From f96d8a601c782885d33ce021ad2c3f20b801dd24 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 17 Sep 2022 15:29:06 +0100 Subject: Block event propagation when lightbox is triggered --- script.js | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/script.js b/script.js index a016eb4e..9e3d42b5 100644 --- a/script.js +++ b/script.js @@ -80,11 
+80,14 @@ function closeModal() { gradioApp().getElementById("lightboxModal").style.display = "none"; } -function showModal(elem) { - gradioApp().getElementById("modalImage").src = elem.src +function showModal(event) { + var source = event.target || event.srcElement; + gradioApp().getElementById("modalImage").src = source.src gradioApp().getElementById("lightboxModal").style.display = "block"; + event.stopPropagation() } + function showGalleryImage(){ setTimeout(function() { fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain') @@ -92,12 +95,13 @@ function showGalleryImage(){ if(fullImg_preview != null){ fullImg_preview.forEach(function function_name(e) { if(e && e.parentElement.tagName == 'DIV'){ + e.style.cursor='pointer' - elemfunc = function(elem){ - elem.onclick = function(){showModal(elem)}; - } - elemfunc(e) + e.addEventListener('click', function (evt) { + showModal(evt) + + },true); } }); } -- cgit v1.2.1 From e24c3b79f6aaf9dece679bbef1f3567936a90511 Mon Sep 17 00:00:00 2001 From: EyeDeck Date: Sat, 17 Sep 2022 11:58:46 -0400 Subject: Fix gallery not scrolling left --- style.css | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/style.css b/style.css index 36d073fa..98aa0b32 100644 --- a/style.css +++ b/style.css @@ -25,6 +25,18 @@ object-fit: scale-down; } +.justify-center.overflow-x-scroll { + justify-content: left; +} + +.justify-center.overflow-x-scroll button:first-of-type { + margin-left: auto; +} + +.justify-center.overflow-x-scroll button:last-of-type { + margin-right: auto; +} + #subseed_show{ min-width: 6em; max-width: 6em; -- cgit v1.2.1 From 0469972e913de107bc84ac2d9b64652a9d3d0f09 Mon Sep 17 00:00:00 2001 From: safentisAuth Date: Sat, 17 Sep 2022 19:27:08 +0300 Subject: Make gallery bigger on 2k+ displays --- style.css | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/style.css b/style.css index 36d073fa..956e629f 100644 --- a/style.css +++ b/style.css @@ -21,6 +21,12 @@ min-height: 4.5em; } +@media screen and (min-width: 2500px) { + #txt2img_gallery, #img2img_gallery { + min-height: 768px; + } +} + #txt2img_gallery img, #img2img_gallery img{ object-fit: scale-down; } -- cgit v1.2.1 From fb668c58ef9bfe7ab63af0a70c27c5ff8a70cf64 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 17 Sep 2022 17:56:53 +0100 Subject: add previous and next styles --- style.css | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/style.css b/style.css index 36d073fa..9a236185 100644 --- a/style.css +++ b/style.css @@ -230,3 +230,29 @@ input[type="range"]{ width: auto; } +.modalPrev, +.modalNext { + cursor: pointer; + position: absolute; + top: 50%; + width: auto; + padding: 16px; + margin-top: -50px; + color: white; + font-weight: bold; + font-size: 20px; + transition: 0.6s ease; + border-radius: 0 3px 3px 0; + user-select: none; + -webkit-user-select: none; +} + +.modalNext { + right: 0; + border-radius: 3px 0 0 3px; +} + +.modalPrev:hover, +.modalNext:hover { + background-color: rgba(0, 0, 0, 0.8); +} -- cgit v1.2.1 From 0e5527b4df6199d52aa6a424dcf95954cfd87d1f Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 17 Sep 2022 17:58:33 +0100 Subject: Add previous and next to modal with key shortcuts --- script.js | 78 +++++++++++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 59 insertions(+), 19 deletions(-) diff --git a/script.js b/script.js index 9e3d42b5..276158e9 100644 --- a/script.js +++ b/script.js @@ -83,35 +83,53 @@ 
function closeModal() { function showModal(event) { var source = event.target || event.srcElement; gradioApp().getElementById("modalImage").src = source.src - gradioApp().getElementById("lightboxModal").style.display = "block"; + var lb = gradioApp().getElementById("lightboxModal") + lb.style.display = "block"; + lb.focus() event.stopPropagation() } +function negmod(n, m) { + return ((n % m) + m) % m; +} + +function modalImageSwitch(offset){ + var galleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all") -function showGalleryImage(){ - setTimeout(function() { - fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain') - - if(fullImg_preview != null){ - fullImg_preview.forEach(function function_name(e) { - if(e && e.parentElement.tagName == 'DIV'){ + if(galleryButtons.length>1){ + var currentButton = gradioApp().querySelector(".gallery-item.transition-all.\\!ring-2") - e.style.cursor='pointer' + var result = -1 + galleryButtons.forEach(function(v, i){ if(v==currentButton) { result = i } }) - e.addEventListener('click', function (evt) { - showModal(evt) + if(result != -1){ + nextButton = galleryButtons[negmod((result+offset),galleryButtons.length)] + nextButton.click() + gradioApp().getElementById("modalImage").src = nextButton.children[0].src + setTimeout( function(){gradioApp().getElementById("lightboxModal").focus()},10) + } + } - },true); - } - }); - } +} - }, 100); +function modalNextImage(event){ + modalImageSwitch(1) + event.stopPropagation() } -function galleryImageHandler(e){ - if(e && e.parentElement.tagName == 'BUTTON'){ - e.onclick = showGalleryImage; +function modalPrevImage(event){ + modalImageSwitch(-1) + event.stopPropagation() +} + +function modalKeyHandler(event){ + switch (event.key) { + case "ArrowLeft": + modalPrevImage(event) + break; + case "ArrowRight": + modalNextImage(event) + break; } } @@ -185,13 +203,35 @@ document.addEventListener("DOMContentLoaded", function() { modalClose.innerHTML = '×' modalClose.onclick = closeModal; modal.id = "lightboxModal"; + modal.tabIndex=0 + modal.addEventListener('keydown', modalKeyHandler, true) modal.appendChild(modalClose) const modalImage = document.createElement('img') modalImage.id = 'modalImage'; modalImage.onclick = closeModal; + modalImage.tabIndex=0 + modalImage.addEventListener('keydown', modalKeyHandler, true) modal.appendChild(modalImage) + const modalPrev = document.createElement('a') + modalPrev.className = 'modalPrev'; + modalPrev.innerHTML = '❮' + modalPrev.tabIndex=0 + modalPrev.addEventListener('click',modalPrevImage,true); + modalPrev.addEventListener('keydown', modalKeyHandler, true) + modal.appendChild(modalPrev) + + const modalNext = document.createElement('a') + modalNext.className = 'modalNext'; + modalNext.innerHTML = '❯' + modalNext.tabIndex=0 + modalNext.addEventListener('click',modalNextImage,true); + modalNext.addEventListener('keydown', modalKeyHandler, true) + + modal.appendChild(modalNext) + + gradioApp().getRootNode().appendChild(modal) document.body.appendChild(modalFragment); -- cgit v1.2.1 From f9cae046cb0a676efeda2577761474e58c27abed Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 17 Sep 2022 18:03:52 +0100 Subject: typo --- script.js | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/script.js b/script.js index 276158e9..113d4335 100644 --- a/script.js +++ b/script.js @@ -133,6 +133,33 @@ function modalKeyHandler(event){ } } +function showGalleryImage(){ + setTimeout(function() { + 
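+ // (the 100 ms timeout below gives Gradio time to swap the full-size gallery image into the DOM before the click handler that opens the lightbox is attached)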
fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain') + + if(fullImg_preview != null){ + fullImg_preview.forEach(function function_name(e) { + if(e && e.parentElement.tagName == 'DIV'){ + + e.style.cursor='pointer' + + e.addEventListener('click', function (evt) { + showModal(evt) + + },true); + } + }); + } + + }, 100); +} + +function galleryImageHandler(e){ + if(e && e.parentElement.tagName == 'BUTTON'){ + e.onclick = showGalleryImage; + } +} + function addTitles(root){ root.querySelectorAll('span, button, select').forEach(function(span){ tooltip = titles[span.textContent]; -- cgit v1.2.1