diff options
author | Volpeon <git@volpeon.ink> | 2022-10-01 16:53:19 +0200 |
---|---|---|
committer | Volpeon <git@volpeon.ink> | 2022-10-01 16:53:19 +0200 |
commit | 6720c99f7082dc855059ad4afd6b3cb45b62bc1f (patch) | |
tree | d27f69880472df0cd6f63ea42bbf7a789ec5d0b7 /pipelines | |
parent | Made inference script interactive (diff) | |
download | textual-inversion-diff-6720c99f7082dc855059ad4afd6b3cb45b62bc1f.tar.gz textual-inversion-diff-6720c99f7082dc855059ad4afd6b3cb45b62bc1f.tar.bz2 textual-inversion-diff-6720c99f7082dc855059ad4afd6b3cb45b62bc1f.zip |
Fix seed, better progress bar, fix euler_a for batch size > 1
Diffstat (limited to 'pipelines')
-rw-r--r-- | pipelines/stable_diffusion/clip_guided_stable_diffusion.py | 4 |
1 file changed, 2 insertions, 2 deletions
diff --git a/pipelines/stable_diffusion/clip_guided_stable_diffusion.py b/pipelines/stable_diffusion/clip_guided_stable_diffusion.py index ddf7ce1..eff74b5 100644 --- a/pipelines/stable_diffusion/clip_guided_stable_diffusion.py +++ b/pipelines/stable_diffusion/clip_guided_stable_diffusion.py | |||
@@ -254,10 +254,10 @@ class CLIPGuidedStableDiffusion(DiffusionPipeline): | |||
254 | noise_pred = None | 254 | noise_pred = None |
255 | if isinstance(self.scheduler, EulerAScheduler): | 255 | if isinstance(self.scheduler, EulerAScheduler): |
256 | sigma = t.reshape(1) | 256 | sigma = t.reshape(1) |
257 | sigma_in = torch.cat([sigma] * 2) | 257 | sigma_in = torch.cat([sigma] * latent_model_input.shape[0]) |
258 | # noise_pred = model(latent_model_input,sigma_in,uncond_embeddings, text_embeddings,guidance_scale) | 258 | # noise_pred = model(latent_model_input,sigma_in,uncond_embeddings, text_embeddings,guidance_scale) |
259 | noise_pred = CFGDenoiserForward(self.unet, latent_model_input, sigma_in, | 259 | noise_pred = CFGDenoiserForward(self.unet, latent_model_input, sigma_in, |
260 | text_embeddings, guidance_scale, DSsigmas=self.scheduler.DSsigmas) | 260 | text_embeddings, guidance_scale, quantize=True, DSsigmas=self.scheduler.DSsigmas) |
261 | # noise_pred = self.unet(latent_model_input, sigma_in, encoder_hidden_states=text_embeddings).sample | 261 | # noise_pred = self.unet(latent_model_input, sigma_in, encoder_hidden_states=text_embeddings).sample |
262 | else: | 262 | else: |
263 | # predict the noise residual | 263 | # predict the noise residual |