From 21d70916f66e74a87c631a06b70774954b085b48 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Fri, 7 Apr 2023 14:14:00 +0200
Subject: Fix

---
 training/strategy/dreambooth.py | 6 ++----
 training/strategy/lora.py       | 9 ++++-----
 training/strategy/ti.py         | 6 ++----
 3 files changed, 8 insertions(+), 13 deletions(-)

(limited to 'training/strategy')

diff --git a/training/strategy/dreambooth.py b/training/strategy/dreambooth.py
index 9808027..0286673 100644
--- a/training/strategy/dreambooth.py
+++ b/training/strategy/dreambooth.py
@@ -84,11 +84,9 @@ def dreambooth_strategy_callbacks(
         else:
             return nullcontext()
 
-    def on_accum_model():
-        return unet
-
     @contextmanager
     def on_train(epoch: int):
+        unet.train()
         tokenizer.train()
 
         if epoch < train_text_encoder_epochs:
@@ -101,6 +99,7 @@ def dreambooth_strategy_callbacks(
 
     @contextmanager
     def on_eval():
+        unet.eval()
         tokenizer.eval()
         text_encoder.eval()
 
@@ -174,7 +173,6 @@ def dreambooth_strategy_callbacks(
         torch.cuda.empty_cache()
 
     return TrainingCallbacks(
-        on_accum_model=on_accum_model,
         on_train=on_train,
         on_eval=on_eval,
         on_before_optimize=on_before_optimize,
diff --git a/training/strategy/lora.py b/training/strategy/lora.py
index 6730dc9..80ffa9c 100644
--- a/training/strategy/lora.py
+++ b/training/strategy/lora.py
@@ -64,17 +64,17 @@ def lora_strategy_callbacks(
         image_size=sample_image_size,
     )
 
-    def on_accum_model():
-        return unet
-
     @contextmanager
     def on_train(epoch: int):
-        tokenizer.train()
+        unet.train()
         text_encoder.train()
+        tokenizer.train()
         yield
 
     @contextmanager
     def on_eval():
+        unet.eval()
+        text_encoder.eval()
         tokenizer.eval()
         yield
 
@@ -152,7 +152,6 @@ def lora_strategy_callbacks(
         torch.cuda.empty_cache()
 
     return TrainingCallbacks(
-        on_accum_model=on_accum_model,
         on_train=on_train,
         on_eval=on_eval,
         on_before_optimize=on_before_optimize,
diff --git a/training/strategy/ti.py b/training/strategy/ti.py
index 55e9934..6a637c3 100644
--- a/training/strategy/ti.py
+++ b/training/strategy/ti.py
@@ -89,16 +89,15 @@ def textual_inversion_strategy_callbacks(
         else:
             return nullcontext()
 
-    def on_accum_model():
-        return text_encoder.text_model.embeddings.token_override_embedding.params
-
     @contextmanager
     def on_train(epoch: int):
+        text_encoder.text_model.embeddings.token_override_embedding.params.train()
         tokenizer.train()
         yield
 
     @contextmanager
     def on_eval():
+        text_encoder.text_model.embeddings.token_override_embedding.params.eval()
         tokenizer.eval()
 
         with ema_context():
@@ -166,7 +165,6 @@ def textual_inversion_strategy_callbacks(
         torch.cuda.empty_cache()
 
     return TrainingCallbacks(
-        on_accum_model=on_accum_model,
         on_train=on_train,
         on_eval=on_eval,
         on_before_optimize=on_before_optimize,
-- 
cgit v1.2.3-70-g09d2
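
For readers skimming the patch: it drops the `on_accum_model` hook, which handed a model back to the caller so the training loop could toggle train/eval mode itself, and instead has each strategy's `on_train`/`on_eval` context managers switch their own modules. A minimal sketch of the resulting pattern follows; the `TrainingCallbacks` dataclass, the driver loop, and the `nn.Linear` stand-ins for the UNet and text encoder are simplified assumptions for illustration, not the repo's real code.

from contextlib import contextmanager, nullcontext
from dataclasses import dataclass
from typing import Callable, ContextManager

import torch
import torch.nn as nn


@dataclass
class TrainingCallbacks:
    # Hypothetical stand-in for the repo's TrainingCallbacks; only the two
    # callbacks this patch touches are modeled here.
    on_train: Callable[[int], ContextManager] = lambda epoch: nullcontext()
    on_eval: Callable[[], ContextManager] = lambda: nullcontext()


# Stand-ins for the real trained modules (UNet, text encoder).
unet = nn.Linear(4, 4)
text_encoder = nn.Linear(4, 4)


@contextmanager
def on_train(epoch: int):
    # After this patch, entering on_train() is enough to put every trained
    # module into train mode; no separate on_accum_model() hook is needed.
    unet.train()
    text_encoder.train()
    yield


@contextmanager
def on_eval():
    # Symmetrically, on_eval() flips the same modules to eval mode.
    unet.eval()
    text_encoder.eval()
    yield


def run_epochs(callbacks: TrainingCallbacks, num_epochs: int) -> None:
    # Hypothetical driver loop showing how the callbacks are consumed.
    for epoch in range(num_epochs):
        with callbacks.on_train(epoch):
            pass  # forward/backward/optimizer steps would go here
        with callbacks.on_eval(), torch.no_grad():
            pass  # validation / sample generation would go here


run_epochs(TrainingCallbacks(on_train=on_train, on_eval=on_eval), num_epochs=1)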