path: root/training/strategy/lora.py
author    Volpeon <git@volpeon.ink>  2023-04-10 12:57:21 +0200
committer Volpeon <git@volpeon.ink>  2023-04-10 12:57:21 +0200
commit    e9dc712268e45d30451fc6fee8626a0a8af7ccdc (patch)
tree      3ec2ee3543fb89e4f9342f5fe5969144d45af819 /training/strategy/lora.py
parent    Update (diff)
Fix sample gen: models sometimes weren't in eval mode
Diffstat (limited to 'training/strategy/lora.py')
-rw-r--r--  training/strategy/lora.py  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/training/strategy/lora.py b/training/strategy/lora.py
index 73ec8f2..0f72a17 100644
--- a/training/strategy/lora.py
+++ b/training/strategy/lora.py
@@ -146,7 +146,7 @@ def lora_strategy_callbacks(
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
 
-    @torch.no_grad()
+    @on_eval()
     def on_sample(step):
         unet_ = accelerator.unwrap_model(unet, keep_fp32_wrapper=True)
         text_encoder_ = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True)
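
The change swaps the bare @torch.no_grad() decorator for the project's @on_eval() helper, so sample generation runs not only with gradients disabled but also with the models switched to eval mode, which is the bug the commit message describes. The real on_eval is defined elsewhere in this repo and may differ; a minimal sketch of an equivalent helper under that assumption (make_on_eval and the explicit model list are invented for illustration, not the repo's actual API) could look like this:

# Hypothetical sketch -- the repo's real on_eval() is defined elsewhere
# and may differ. make_on_eval is an invented name for illustration.
from contextlib import contextmanager

import torch


def make_on_eval(*models):
    # Build an on_eval() that works both as a context manager and, because
    # objects returned by @contextmanager functions are ContextDecorators,
    # directly as a decorator -- which is why the diff can write @on_eval().
    @contextmanager
    def on_eval():
        # Remember each model's current train/eval state.
        previous = [m.training for m in models]
        try:
            for m in models:
                m.eval()  # the actual fix: force eval mode during sampling
            with torch.no_grad():  # keep the old @torch.no_grad() behaviour
                yield
        finally:
            # Restore the original modes so training resumes unchanged.
            for m, was_training in zip(models, previous):
                m.train(was_training)

    return on_eval

Used this way, @on_eval() preserves the gradient-free behaviour of the old decorator while also guaranteeing that train-mode-only layers such as dropout are inactive while samples are generated, and it restores each model's previous mode once sampling finishes.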