author     Volpeon <git@volpeon.ink>  2023-02-13 17:19:18 +0100
committer  Volpeon <git@volpeon.ink>  2023-02-13 17:19:18 +0100
commit     94b676d91382267e7429bd68362019868affd9d1 (patch)
tree       513697739ab25217cbfcff630299d02b1f6e98c8 /training/strategy
parent     Integrate Self-Attention-Guided (SAG) Stable Diffusion in my custom pipeline (diff)
Update
Diffstat (limited to 'training/strategy')
-rw-r--r--  training/strategy/lora.py  2
-rw-r--r--  training/strategy/ti.py    2
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/training/strategy/lora.py b/training/strategy/lora.py
index bc10e58..4dd1100 100644
--- a/training/strategy/lora.py
+++ b/training/strategy/lora.py
@@ -91,7 +91,7 @@ def lora_strategy_callbacks(
         print(f"Saving checkpoint for step {step}...")

         unet_ = accelerator.unwrap_model(unet)
-        unet_.save_attn_procs(checkpoint_output_dir.joinpath(f"{step}_{postfix}"))
+        unet_.save_attn_procs(checkpoint_output_dir / f"{step}_{postfix}")
         del unet_

     @torch.no_grad()
diff --git a/training/strategy/ti.py b/training/strategy/ti.py
index da2b81c..0de3cb0 100644
--- a/training/strategy/ti.py
+++ b/training/strategy/ti.py
@@ -138,7 +138,7 @@ def textual_inversion_strategy_callbacks(
         for (token, ids) in zip(placeholder_tokens, placeholder_token_ids):
             text_encoder.text_model.embeddings.save_embed(
                 ids,
-                checkpoint_output_dir.joinpath(f"{slugify(token)}_{step}_{postfix}.bin")
+                checkpoint_output_dir / f"{slugify(token)}_{step}_{postfix}.bin"
             )

     @torch.no_grad()
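Both hunks make the same mechanical change: building checkpoint paths with pathlib's "/" operator instead of Path.joinpath(). The two forms produce identical Path objects; a minimal sketch of the equivalence (the directory name and the step/postfix values below are hypothetical, for illustration only):

    from pathlib import Path

    # Hypothetical values standing in for the variables used in the callbacks.
    checkpoint_output_dir = Path("checkpoints")
    step, postfix = 500, "end"

    # joinpath() and the "/" operator construct the same path.
    assert checkpoint_output_dir.joinpath(f"{step}_{postfix}") == checkpoint_output_dir / f"{step}_{postfix}"
    print(checkpoint_output_dir / f"{step}_{postfix}")  # checkpoints/500_end on POSIX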