From 11e6f8f88483e6cfdccd66ad758ae1dfcfc0283b Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Sun, 2 Apr 2023 08:42:33 +0200
Subject: Lora: Only register params with grad to optimizer

---
 training/strategy/lora.py | 4 ----
 training/strategy/ti.py   | 1 -
 2 files changed, 5 deletions(-)

(limited to 'training')

diff --git a/training/strategy/lora.py b/training/strategy/lora.py
index 8905171..209785a 100644
--- a/training/strategy/lora.py
+++ b/training/strategy/lora.py
@@ -139,10 +139,6 @@ def lora_prepare(
     train_dataloader: DataLoader,
     val_dataloader: Optional[DataLoader],
     lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
-    lora_rank: int = 4,
-    lora_alpha: int = 32,
-    lora_dropout: float = 0,
-    lora_bias: str = "none",
     **kwargs
 ):
     return accelerator.prepare(text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler) + ({},)

diff --git a/training/strategy/ti.py b/training/strategy/ti.py
index 677f5a3..c7520ed 100644
--- a/training/strategy/ti.py
+++ b/training/strategy/ti.py
@@ -209,7 +209,6 @@ def textual_inversion_prepare(
     text_encoder.text_model.final_layer_norm.requires_grad_(False)
     text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
     text_encoder.text_model.embeddings.token_embedding.requires_grad_(False)
-    text_encoder.eval()
 
     return text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler, {}
-- 
cgit v1.2.3-70-g09d2
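
Note: the diff view above is limited to the 'training' path, so the optimizer change named in the subject line is not itself visible here. A minimal PyTorch sketch of that pattern follows; the make_optimizer helper and its lr parameter are hypothetical and not part of this commit. The idea is to hand the optimizer only the parameters that still require gradients:

import torch

def make_optimizer(model: torch.nn.Module, lr: float = 1e-4) -> torch.optim.AdamW:
    # Hypothetical helper: register only params with grad to the optimizer,
    # so frozen weights (requires_grad=False) are never tracked or updated.
    trainable_params = [p for p in model.parameters() if p.requires_grad]
    return torch.optim.AdamW(trainable_params, lr=lr)

Filtering this way also avoids allocating optimizer state (e.g. AdamW's moment buffers) for frozen weights, which matters when only a small adapter such as LoRA is trainable.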