From 94b676d91382267e7429bd68362019868affd9d1 Mon Sep 17 00:00:00 2001
From: Volpeon <git@volpeon.ink>
Date: Mon, 13 Feb 2023 17:19:18 +0100
Subject: Replace Path.joinpath with the / operator
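
Path.joinpath and the / operator produce identical Path objects; the
operator form is shorter and reads left to right. A minimal sketch of the
equivalence, using illustrative values that mirror output_dir and pool
from the diff below:

    from pathlib import Path

    output_dir = Path("output")
    pool = "train"
    step = 100

    # Both expressions build output/train/step_100.jpg and compare equal.
    old_path = output_dir.joinpath(pool, f"step_{step}.jpg")
    new_path = output_dir / pool / f"step_{step}.jpg"
    assert old_path == new_path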

---
 training/functional.py    | 2 +-
 training/strategy/lora.py | 2 +-
 training/strategy/ti.py   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/training/functional.py b/training/functional.py
index ccbb4ad..83e70e2 100644
--- a/training/functional.py
+++ b/training/functional.py
@@ -129,7 +129,7 @@ def save_samples(
 
     for pool, data, gen in datasets:
         all_samples = []
-        file_path = output_dir.joinpath(pool, f"step_{step}.jpg")
+        file_path = output_dir / pool / f"step_{step}.jpg"
         file_path.parent.mkdir(parents=True, exist_ok=True)
 
         batches = list(itertools.islice(itertools.cycle(data), batch_size * num_batches))
diff --git a/training/strategy/lora.py b/training/strategy/lora.py
index bc10e58..4dd1100 100644
--- a/training/strategy/lora.py
+++ b/training/strategy/lora.py
@@ -91,7 +91,7 @@ def lora_strategy_callbacks(
         print(f"Saving checkpoint for step {step}...")
 
         unet_ = accelerator.unwrap_model(unet)
-        unet_.save_attn_procs(checkpoint_output_dir.joinpath(f"{step}_{postfix}"))
+        unet_.save_attn_procs(checkpoint_output_dir / f"{step}_{postfix}")
         del unet_
 
     @torch.no_grad()
diff --git a/training/strategy/ti.py b/training/strategy/ti.py
index da2b81c..0de3cb0 100644
--- a/training/strategy/ti.py
+++ b/training/strategy/ti.py
@@ -138,7 +138,7 @@ def textual_inversion_strategy_callbacks(
             for (token, ids) in zip(placeholder_tokens, placeholder_token_ids):
                 text_encoder.text_model.embeddings.save_embed(
                     ids,
-                    checkpoint_output_dir.joinpath(f"{slugify(token)}_{step}_{postfix}.bin")
+                    checkpoint_output_dir / f"{slugify(token)}_{step}_{postfix}.bin"
                 )
 
     @torch.no_grad()
-- 
cgit v1.2.3-70-g09d2