From e09aaedd0e74f2fc6e2a53f233914803c65e127c Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Sat, 24 Dec 2022 10:25:58 +0100
Subject: Training update

---
 train_dreambooth.py | 20 ++++----------------
 1 file changed, 4 insertions(+), 16 deletions(-)

(limited to 'train_dreambooth.py')

diff --git a/train_dreambooth.py b/train_dreambooth.py
index c7899a0..51e881a 100644
--- a/train_dreambooth.py
+++ b/train_dreambooth.py
@@ -24,6 +24,7 @@ from common import load_text_embeddings
 from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion
 from data.csv import CSVDataModule
 from training.optimization import get_one_cycle_schedule
+from training.ti import patch_trainable_embeddings
 from training.util import AverageMeter, CheckpointerBase, freeze_params, save_args
 from models.clip.prompt import PromptProcessor
 
@@ -567,15 +568,8 @@ def main():
         print(f"Training entire text encoder.")
     else:
         print(f"Training added text embeddings")
-
-        freeze_params(itertools.chain(
-            text_encoder.text_model.encoder.parameters(),
-            text_encoder.text_model.final_layer_norm.parameters(),
-            text_encoder.text_model.embeddings.position_embedding.parameters(),
-        ))
-
-        index_fixed_tokens = torch.arange(len(tokenizer))
-        index_fixed_tokens = index_fixed_tokens[~torch.isin(index_fixed_tokens, torch.tensor(placeholder_token_id))]
+        text_encoder.requires_grad_(False)
+        patch_trainable_embeddings(text_encoder, placeholder_token_id)
 
     prompt_processor = PromptProcessor(tokenizer, text_encoder)
 
@@ -603,7 +597,7 @@ def main():
     if args.train_text_encoder:
         text_encoder_params_to_optimize = text_encoder.parameters()
     else:
-        text_encoder_params_to_optimize = text_encoder.get_input_embeddings().parameters()
+        text_encoder_params_to_optimize = text_encoder.text_model.embeddings.trainable_embedding.parameters()
 
     # Initialize the optimizer
     optimizer = optimizer_class(
@@ -914,12 +908,6 @@ def main():
                     ema_unet.step(unet)
                 optimizer.zero_grad(set_to_none=True)
 
-                if not args.train_text_encoder:
-                    # Let's make sure we don't update any embedding weights besides the newly added token
-                    with torch.no_grad():
-                        text_encoder.get_input_embeddings(
-                        ).weight[index_fixed_tokens] = original_token_embeds[index_fixed_tokens]
-
                 avg_loss.update(loss.detach_(), bsz)
                 avg_acc.update(acc.detach_(), bsz)
-- 
cgit v1.2.3-54-g00ecf