From 90879448c1ae92f39bdcabdf89230891c62e1408 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Mon, 14 Nov 2022 19:48:27 +0100
Subject: Update

---
 pipelines/stable_diffusion/vlpn_stable_diffusion.py | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/pipelines/stable_diffusion/vlpn_stable_diffusion.py b/pipelines/stable_diffusion/vlpn_stable_diffusion.py
index d6b1cb1..85b0216 100644
--- a/pipelines/stable_diffusion/vlpn_stable_diffusion.py
+++ b/pipelines/stable_diffusion/vlpn_stable_diffusion.py
@@ -245,10 +245,7 @@ class VlpnStableDiffusion(DiffusionPipeline):
         init_latents = init_latent_dist.sample(generator=generator)
         init_latents = 0.18215 * init_latents
 
-        if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
-            additional_image_per_prompt = batch_size // init_latents.shape[0]
-            init_latents = torch.cat([init_latents] * additional_image_per_prompt * num_images_per_prompt, dim=0)
-        elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
+        if batch_size > init_latents.shape[0]:
             raise ValueError(
                 f"Cannot duplicate `init_image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
             )
@@ -367,8 +364,6 @@ class VlpnStableDiffusion(DiffusionPipeline):
         do_classifier_free_guidance = guidance_scale > 1.0
         latents_are_image = isinstance(latents_or_image, PIL.Image.Image)
 
-        print(f">>> {device}")
-
         # 3. Encode input prompt
         text_embeddings = self.encode_prompt(
             prompt,
-- 
cgit v1.2.3-70-g09d2