about summary refs log tree commit diff
path: root/modules/processing.py
diff options
context:
space:
mode:
authorBuckzor <bucklogos@yahoo.co.uk>2022-10-13 22:23:22 +0100
committerAUTOMATIC1111 <16777216c@gmail.com>2022-10-14 16:47:16 +0300
commitb382de2d77c653c565840ce92d27aa668a1934d7 (patch)
tree03e7c3238041ade1401ed2a5d13b0a038c3b6793 /modules/processing.py
parent40d1c6e423b4dc52b3bdae43d9e2442960760ced (diff)
Fixed Scale ratio problem
Diffstat (limited to 'modules/processing.py')
-rw-r--r--modules/processing.py25
1 file changed, 11 insertions, 14 deletions
diff --git a/modules/processing.py b/modules/processing.py
index 0246f5dd..d9b0e0e7 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -522,15 +522,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
else:
state.job_count = state.job_count * 2
- #desired_pixel_count = self.firstphase_width * self.firstphase_height
- #actual_pixel_count = self.width * self.height
- #scale = math.sqrt(desired_pixel_count / actual_pixel_count)
-
- #self.firstphase_width = math.ceil(scale * self.width / 64) * 64
- #self.firstphase_height = math.ceil(scale * self.height / 64) * 64
- #self.firstphase_width_truncated = int(scale * self.width)
- #self.firstphase_height_truncated = int(scale * self.height)
-
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
@@ -544,17 +535,23 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
truncate_x = 0
truncate_y = 0
+ width_ratio = self.width/self.firstphase_width
+ height_ratio = self.height/self.firstphase_height
if self.crop_scale:
- if self.width/self.firstphase_width > self.height/self.firstphase_height:
+ if width_ratio > height_ratio:
#Crop to landscape
- truncate_y = (self.width - self.firstphase_width)//2 // opt_f
+ truncate_y = int((self.width - self.firstphase_width) / width_ratio / height_ratio / opt_f)
- elif self.width/self.firstphase_width < self.height/self.firstphase_height:
+ elif width_ratio < height_ratio:
#Crop to portrait
- truncate_x = (self.height - self.firstphase_height)//2 // opt_f
+ truncate_x = int((self.height - self.firstphase_height) / width_ratio / height_ratio / opt_f)
+
+ samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2]
+
+
- samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2]
+
if self.scale_latent:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")