From 16b92605a59d59c65789c89b54bb97da51908056 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Tue, 21 Feb 2023 09:09:50 +0100
Subject: Embedding normalization: Ignore tensors with grad = 0

---
 training/strategy/ti.py | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

(limited to 'training/strategy/ti.py')

diff --git a/training/strategy/ti.py b/training/strategy/ti.py
index 66d3129..09beec4 100644
--- a/training/strategy/ti.py
+++ b/training/strategy/ti.py
@@ -116,10 +116,17 @@ def textual_inversion_strategy_callbacks(
     @torch.no_grad()
     def on_before_optimize(lr: float, epoch: int):
         if use_emb_decay:
-            text_encoder.text_model.embeddings.normalize(
-                emb_decay_target,
-                min(1.0, emb_decay * lr)
-            )
+            lambda_ = emb_decay * lr
+
+            if lambda_ != 0:
+                w = text_encoder.text_model.embeddings.temp_token_embedding.weight
+
+                mask = torch.zeros(w.size(0), dtype=torch.bool)
+                mask[text_encoder.text_model.embeddings.temp_token_ids] = True
+                mask[torch.all(w.grad == 0, dim=1)] = False
+
+                norm = w[mask, :].norm(dim=-1, keepdim=True)
+                w[mask].add_((w[mask] / norm.clamp_min(1e-12)) * lambda_ * (emb_decay_target - norm))
 
     def on_after_optimize(lr: float):
         if ema_embeddings is not None:
-- 
cgit v1.2.3-54-g00ecf
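
For context, below is a minimal, standalone sketch (not the repository's own code) of what the new on_before_optimize body does: decay the norm of each trainable token embedding toward emb_decay_target, skipping rows whose gradient is exactly zero. The names temp_token_embedding, temp_token_ids, emb_decay, and emb_decay_target come from the patch; the helper name decay_embeddings, its default values, and the toy usage are assumptions for illustration. The sketch writes the update back with an indexed assignment, since indexing a tensor with a boolean mask returns a copy rather than a view.

# Illustrative sketch only; attribute names and hyperparameters mirror the patch,
# everything else is assumed for the sake of a runnable example.
import torch


@torch.no_grad()
def decay_embeddings(
    weight: torch.Tensor,     # (num_tokens, dim) embedding matrix with .grad populated
    token_ids: torch.Tensor,  # indices of the trainable ("temporary") tokens
    lr: float,
    emb_decay: float = 1e-2,          # assumed default
    emb_decay_target: float = 0.4,    # assumed default
) -> None:
    lambda_ = emb_decay * lr
    if lambda_ == 0:
        return

    # Select only the trainable token rows ...
    mask = torch.zeros(weight.size(0), dtype=torch.bool, device=weight.device)
    mask[token_ids] = True
    # ... and drop rows whose gradient is exactly zero: they were not updated
    # in this step, so their norm should not be decayed either.
    if weight.grad is not None:
        mask[torch.all(weight.grad == 0, dim=1)] = False
    if not mask.any():
        return

    # Nudge each selected row's norm toward emb_decay_target by a step of lambda_.
    norm = weight[mask].norm(dim=-1, keepdim=True)
    # Boolean-mask indexing returns a copy, so write the result back explicitly.
    weight[mask] += (weight[mask] / norm.clamp_min(1e-12)) * lambda_ * (emb_decay_target - norm)


# Toy usage: tokens 2 and 3 are trainable, but only token 2 received a gradient,
# so only its row is decayed.
emb = torch.nn.Embedding(4, 8)
emb.weight.grad = torch.zeros_like(emb.weight)
emb.weight.grad[2] = torch.randn(8)
decay_embeddings(emb.weight, torch.tensor([2, 3]), lr=1e-3)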