From 15080055bf4330a806c409d3ca69ec5b0eab99f2 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Thu, 22 Jun 2023 11:22:28 +0200
Subject: Update

---
 train_dreambooth.py             | 15 ++-------------
 training/strategy/dreambooth.py | 11 +++++++----
 2 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/train_dreambooth.py b/train_dreambooth.py
index 939a8f3..ab3ed16 100644
--- a/train_dreambooth.py
+++ b/train_dreambooth.py
@@ -217,11 +217,6 @@ def parse_args():
         default=None,
         help="The embeddings directory where Textual Inversion embeddings are stored.",
     )
-    parser.add_argument(
-        "--train_dir_embeddings",
-        action="store_true",
-        help="Train embeddings loaded from embeddings directory.",
-    )
     parser.add_argument(
         "--collection",
         type=str,
@@ -696,19 +691,13 @@ def main():
             tokenizer, embeddings, embeddings_dir
         )
 
-        placeholder_tokens = added_tokens
-        placeholder_token_ids = added_ids
-
         print(
             f"Added {len(added_tokens)} tokens from embeddings dir: {list(zip(added_tokens, added_ids))}"
         )
 
-        if args.train_dir_embeddings:
-            print("Training embeddings from embeddings dir")
-        else:
-            embeddings.persist()
+        embeddings.persist()
 
-    if len(args.placeholder_tokens) != 0 and not args.train_dir_embeddings:
+    if len(args.placeholder_tokens) != 0:
         placeholder_token_ids, initializer_token_ids = add_placeholder_tokens(
             tokenizer=tokenizer,
             embeddings=embeddings,
diff --git a/training/strategy/dreambooth.py b/training/strategy/dreambooth.py
index 43fe838..35cccbb 100644
--- a/training/strategy/dreambooth.py
+++ b/training/strategy/dreambooth.py
@@ -203,10 +203,13 @@ def dreambooth_prepare(
         text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler
     )
 
-    for layer in text_encoder.text_model.encoder.layers[
-        : (-1 * text_encoder_unfreeze_last_n_layers)
-    ]:
-        layer.requires_grad_(False)
+    if text_encoder_unfreeze_last_n_layers == 0:
+        text_encoder.text_model.encoder.requires_grad_(False)
+    elif text_encoder_unfreeze_last_n_layers > 0:
+        for layer in text_encoder.text_model.encoder.layers[
+            : (-1 * text_encoder_unfreeze_last_n_layers)
+        ]:
+            layer.requires_grad_(False)
 
     text_encoder.text_model.embeddings.requires_grad_(False)
 
-- 
cgit v1.2.3-54-g00ecf