summaryrefslogtreecommitdiffstats
path: root/train_lora.py
diff options
context:
space:
mode:
authorVolpeon <git@volpeon.ink>2023-02-17 15:56:54 +0100
committerVolpeon <git@volpeon.ink>2023-02-17 15:56:54 +0100
commitc927eeb3516b8ecae201441a68956f32247a6d7c (patch)
treeb24e4246a067415151039484d1f8da2658687ef3 /train_lora.py
parentRemove xformers, switch to Pytorch Nightly (diff)
downloadtextual-inversion-diff-c927eeb3516b8ecae201441a68956f32247a6d7c.tar.gz
textual-inversion-diff-c927eeb3516b8ecae201441a68956f32247a6d7c.tar.bz2
textual-inversion-diff-c927eeb3516b8ecae201441a68956f32247a6d7c.zip
Back to xformers
Diffstat (limited to 'train_lora.py')
-rw-r--r--train_lora.py4
1 files changed, 2 insertions, 2 deletions
diff --git a/train_lora.py b/train_lora.py
index 8a06ae8..330bcd6 100644
--- a/train_lora.py
+++ b/train_lora.py
@@ -421,8 +421,8 @@ def main():
         args.pretrained_model_name_or_path)

     vae.enable_slicing()
-    # vae.set_use_memory_efficient_attention_xformers(True)
-    # unet.enable_xformers_memory_efficient_attention()
+    vae.set_use_memory_efficient_attention_xformers(True)
+    unet.enable_xformers_memory_efficient_attention()

     unet.to(accelerator.device, dtype=weight_dtype)
     text_encoder.to(accelerator.device, dtype=weight_dtype)