Diffstat (limited to 'infer.py')
-rw-r--r--  infer.py  10
1 file changed, 5 insertions, 5 deletions
diff --git a/infer.py b/infer.py
index 70851fd..5bd4abc 100644
--- a/infer.py
+++ b/infer.py
@@ -22,7 +22,7 @@ torch.backends.cuda.matmul.allow_tf32 = True
 default_args = {
     "model": None,
     "scheduler": "euler_a",
-    "precision": "fp16",
+    "precision": "fp32",
     "embeddings_dir": "embeddings",
     "output_dir": "output/inference",
     "config": None,
@@ -205,10 +205,10 @@ def load_embeddings(tokenizer, text_encoder, embeddings_dir):
 def create_pipeline(model, scheduler, embeddings_dir, dtype):
     print("Loading Stable Diffusion pipeline...")
 
-    tokenizer = CLIPTokenizer.from_pretrained(model + '/tokenizer', torch_dtype=dtype)
-    text_encoder = CLIPTextModel.from_pretrained(model + '/text_encoder', torch_dtype=dtype)
-    vae = AutoencoderKL.from_pretrained(model + '/vae', torch_dtype=dtype)
-    unet = UNet2DConditionModel.from_pretrained(model + '/unet', torch_dtype=dtype)
+    tokenizer = CLIPTokenizer.from_pretrained(model, subfolder='/tokenizer', torch_dtype=dtype)
+    text_encoder = CLIPTextModel.from_pretrained(model, subfolder='/text_encoder', torch_dtype=dtype)
+    vae = AutoencoderKL.from_pretrained(model, subfolder='/vae', torch_dtype=dtype)
+    unet = UNet2DConditionModel.from_pretrained(model, subfolder='/unet', torch_dtype=dtype)
 
     load_embeddings(tokenizer, text_encoder, embeddings_dir)
 
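For context, the new calls use the `subfolder` argument that the transformers and diffusers `from_pretrained` methods accept for multi-component model directories, instead of concatenating the path by hand. A minimal sketch of that loading pattern, assuming a hypothetical local model directory "./model" and the usual convention of passing the subfolder name without a leading slash:

# Minimal sketch; "./model" is a hypothetical diffusers-format model directory.
import torch
from transformers import CLIPTokenizer, CLIPTextModel
from diffusers import AutoencoderKL, UNet2DConditionModel

model = "./model"
dtype = torch.float32

# Each pipeline component lives in its own subfolder of the model directory
# (tokenizer/, text_encoder/, vae/, unet/) and is selected via `subfolder`.
tokenizer = CLIPTokenizer.from_pretrained(model, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(model, subfolder="text_encoder", torch_dtype=dtype)
vae = AutoencoderKL.from_pretrained(model, subfolder="vae", torch_dtype=dtype)
unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet", torch_dtype=dtype)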