From bd8ec551c960fa069482a4b4efd764f60755716b Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Mon, 27 Mar 2023 10:30:26 +0200
Subject: Fix TI

---
 train_ti.py             |  2 +-
 training/strategy/ti.py | 18 ++++++++++--------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/train_ti.py b/train_ti.py
index 6c35d41..ef39c38 100644
--- a/train_ti.py
+++ b/train_ti.py
@@ -788,7 +788,7 @@ def main():
             args.num_vectors,
             args.train_data_template
         ):
-            run(i, [placeholder_token], [initializer_token], [num_vectors], data_template)
+            run(i, [placeholder_token], [initializer_token], num_vectors, data_template)
 
     embeddings.persist()
 
diff --git a/training/strategy/ti.py b/training/strategy/ti.py
index 7ac5011..b9a5547 100644
--- a/training/strategy/ti.py
+++ b/training/strategy/ti.py
@@ -108,14 +108,11 @@ def textual_inversion_strategy_callbacks(
     @torch.no_grad()
     def on_before_optimize(lr: float, epoch: int):
         if use_emb_decay:
-            return torch.stack([
-                t
-                for t in text_encoder.text_model.embeddings.temp_token_embedding
-                if t.grad is not None
-            ])
+            w = text_encoder.text_model.embeddings.temp_token_embedding.weight
+            return torch.all(w.grad == 0, dim=1)
 
     @torch.no_grad()
-    def on_after_optimize(w, lr: float):
+    def on_after_optimize(zero_ids, lr: float):
         if ema_embeddings is not None:
             ema_embeddings.step(text_encoder.text_model.embeddings.temp_token_embedding.parameters())
 
@@ -123,8 +120,13 @@ def textual_inversion_strategy_callbacks(
             lambda_ = emb_decay * lr
 
             if lambda_ != 0:
-                norm = w[:, :].norm(dim=-1, keepdim=True)
-                w[:].add_((w[:] / norm.clamp_min(1e-12)) * lambda_ * (emb_decay_target - norm))
+                w = text_encoder.text_model.embeddings.temp_token_embedding.weight
+
+                mask = torch.ones(w.shape[0], dtype=torch.bool)
+                mask[zero_ids] = False
+
+                norm = w[mask, :].norm(dim=-1, keepdim=True)
+                w[mask].add_((w[mask] / norm.clamp_min(1e-12)) * lambda_ * (emb_decay_target - norm))
 
     def on_log():
         if ema_embeddings is not None:
-- 
cgit v1.2.3-54-g00ecf
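
For context, the patched emb-decay path works as follows: on_before_optimize returns a boolean mask over the temp_token_embedding rows whose gradient is entirely zero (tokens that received no update this step), and on_after_optimize uses that mask so that only the trained rows are pulled back toward the target norm. Below is a minimal standalone sketch of that logic with toy tensors; the hyperparameter names (emb_decay, emb_decay_target, lr) mirror the patch, the values are made up, and the write-back uses indexed assignment so the toy weight is visibly updated.

    import torch

    # Hyperparameter names mirror the patch; the values are arbitrary.
    emb_decay = 1.0
    emb_decay_target = 0.4
    lr = 0.1

    # Stand-in for temp_token_embedding.weight and its gradient after backward().
    w = torch.randn(5, 8, requires_grad=True)
    w.grad = torch.zeros_like(w)
    w.grad[2] = 1.0  # pretend only token row 2 received a gradient this step

    # on_before_optimize: rows with an all-zero gradient were not trained
    # this step and should be excluded from the decay.
    zero_ids = torch.all(w.grad == 0, dim=1)

    # on_after_optimize: pull only the trained rows toward the target norm.
    with torch.no_grad():
        lambda_ = emb_decay * lr
        if lambda_ != 0:
            mask = torch.ones(w.shape[0], dtype=torch.bool)
            mask[zero_ids] = False

            norm = w[mask, :].norm(dim=-1, keepdim=True)
            w[mask] += (w[mask] / norm.clamp_min(1e-12)) * lambda_ * (emb_decay_target - norm)

    print(w.detach().norm(dim=-1))  # only row 2's norm moves toward emb_decay_target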