author    Volpeon <git@volpeon.ink>  2023-02-17 15:56:54 +0100
committer Volpeon <git@volpeon.ink>  2023-02-17 15:56:54 +0100
commit    c927eeb3516b8ecae201441a68956f32247a6d7c (patch)
tree      b24e4246a067415151039484d1f8da2658687ef3
parent    Remove xformers, switch to Pytorch Nightly (diff)
Back to xformers
-rw-r--r-- | environment.yaml    | 9
-rw-r--r-- | infer.py            | 4
-rw-r--r-- | train_dreambooth.py | 4
-rw-r--r-- | train_lora.py       | 4
-rw-r--r-- | train_ti.py         | 5

5 files changed, 14 insertions, 12 deletions
diff --git a/environment.yaml b/environment.yaml
index 8010c09..325644f 100644
--- a/environment.yaml
+++ b/environment.yaml
@@ -1,17 +1,18 @@
 name: ldd
 channels:
-  - pytorch-nightly
+  - pytorch
   - nvidia
   - xformers/label/dev
   - defaults
 dependencies:
   - cudatoolkit=11.7
+  - libcufile=1.4.0.31
   - matplotlib=3.6.2
   - numpy=1.23.4
   - pip=22.3.1
   - python=3.10.8
-  - pytorch=2.0.0.dev20230216=*cuda*
-  - torchvision=0.15.0.dev20230216
+  - pytorch=1.13.1=*cuda*
+  - torchvision=0.14.1
   - pip:
     - -e .
     - -e git+https://github.com/huggingface/diffusers#egg=diffusers
@@ -22,3 +23,5 @@ dependencies:
     - setuptools==65.6.3
     - test-tube>=0.7.5
     - transformers==4.26.1
+    - triton==2.0.0a2
+    - xformers==0.0.17.dev451
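After recreating the environment, a quick import check confirms the pinned stack resolved as intended (a minimal sketch; the expected version strings are simply the pins above):

import torch
import xformers

# Expected from the pins above: 1.13.1 and 0.0.17.dev451.
print(torch.__version__)
print(xformers.__version__)
# Should be True on a CUDA machine; xformers attention requires CUDA.
print(torch.cuda.is_available())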
diff --git a/infer.py b/infer.py
--- a/infer.py
+++ b/infer.py
@@ -245,8 +245,8 @@ def create_pipeline(model, dtype):
         tokenizer=tokenizer,
         scheduler=scheduler,
     )
-    # pipeline.enable_xformers_memory_efficient_attention()
-    pipeline.unet = torch.compile(pipeline.unet)
+    pipeline.enable_xformers_memory_efficient_attention()
+    # pipeline.unet = torch.compile(pipeline.unet)
     pipeline.enable_vae_slicing()
     pipeline.to("cuda")
 
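At inference time the commit re-enables xformers attention and comments the torch.compile call back out. If one wanted to keep both paths (purely a sketch, not part of this commit, assuming the `pipeline` object from create_pipeline), diffusers raises when the xformers package is missing, so the choice could be guarded:

# Hypothetical fallback, not in this commit: prefer xformers attention,
# and only compile the UNet when xformers cannot be enabled.
try:
    pipeline.enable_xformers_memory_efficient_attention()
except ModuleNotFoundError:
    # diffusers raises if the xformers package is not importable.
    pipeline.unet = torch.compile(pipeline.unet)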
diff --git a/train_dreambooth.py b/train_dreambooth.py
index 85b756c..5a7911c 100644
--- a/train_dreambooth.py
+++ b/train_dreambooth.py
@@ -464,8 +464,8 @@ def main():
     tokenizer.set_dropout(args.vector_dropout)
 
     vae.enable_slicing()
-    # vae.set_use_memory_efficient_attention_xformers(True)
-    # unet.enable_xformers_memory_efficient_attention()
+    vae.set_use_memory_efficient_attention_xformers(True)
+    unet.enable_xformers_memory_efficient_attention()
 
     if args.gradient_checkpointing:
         unet.enable_gradient_checkpointing()
diff --git a/train_lora.py b/train_lora.py
index 8a06ae8..330bcd6 100644
--- a/train_lora.py
+++ b/train_lora.py
@@ -421,8 +421,8 @@ def main():
         args.pretrained_model_name_or_path)
 
     vae.enable_slicing()
-    # vae.set_use_memory_efficient_attention_xformers(True)
-    # unet.enable_xformers_memory_efficient_attention()
+    vae.set_use_memory_efficient_attention_xformers(True)
+    unet.enable_xformers_memory_efficient_attention()
 
     unet.to(accelerator.device, dtype=weight_dtype)
     text_encoder.to(accelerator.device, dtype=weight_dtype)
diff --git a/train_ti.py b/train_ti.py
index 7d10317..3aa1027 100644
--- a/train_ti.py
+++ b/train_ti.py
@@ -538,9 +538,8 @@ def main():
     tokenizer.set_dropout(args.vector_dropout)
 
     vae.enable_slicing()
-    # vae.set_use_memory_efficient_attention_xformers(True)
-    # unet.enable_xformers_memory_efficient_attention()
-
+    vae.set_use_memory_efficient_attention_xformers(True)
+    unet.enable_xformers_memory_efficient_attention()
     # unet = torch.compile(unet)
 
     if args.gradient_checkpointing:
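The same two calls are uncommented in all three training scripts. A small shared helper could remove the duplication (a sketch only, assuming the diffusers model API used above; no such helper exists in this repo):

# Hypothetical helper, not in this repo: one place to turn on
# memory-efficient attention for the models each script sets up.
def enable_memory_efficient_attention(vae, unet):
    vae.set_use_memory_efficient_attention_xformers(True)
    unet.enable_xformers_memory_efficient_attention()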