From 2469501c3951a9ed86c820cddf7b32144a4a1c8d Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Thu, 19 Jan 2023 09:04:39 +0100
Subject: Move Accelerator preparation into strategy

---
 training/strategy/ti.py | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

(limited to 'training/strategy/ti.py')

diff --git a/training/strategy/ti.py b/training/strategy/ti.py
index e922954..6a76f98 100644
--- a/training/strategy/ti.py
+++ b/training/strategy/ti.py
@@ -5,6 +5,7 @@ from contextlib import contextmanager, nullcontext
 from pathlib import Path
 
 import torch
+import torch.nn as nn
 from torch.utils.data import DataLoader
 
 from accelerate import Accelerator
@@ -94,7 +95,7 @@ def textual_inversion_strategy_callbacks(
         return nullcontext()
 
     def on_model():
-        return text_encoder
+        return text_encoder.text_model.embeddings.temp_token_embedding
 
     def on_prepare():
         text_encoder.text_model.embeddings.temp_token_embedding.requires_grad_(True)
@@ -163,6 +164,25 @@ def textual_inversion_strategy_callbacks(
     )
 
 
+def textual_inversion_prepare(
+    accelerator: Accelerator,
+    text_encoder: CLIPTextModel,
+    unet: UNet2DConditionModel,
+    *args
+):
+    weight_dtype = torch.float32
+    if accelerator.state.mixed_precision == "fp16":
+        weight_dtype = torch.float16
+    elif accelerator.state.mixed_precision == "bf16":
+        weight_dtype = torch.bfloat16
+
+    prep = [text_encoder] + list(args)
+    text_encoder, optimizer, train_dataloader, val_dataloader, lr_scheduler = accelerator.prepare(*prep)
+    unet.to(accelerator.device, dtype=weight_dtype)
+    return text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler
+
+
 textual_inversion_strategy = TrainingStrategy(
     callbacks=textual_inversion_strategy_callbacks,
+    prepare=textual_inversion_prepare,
 )
--
cgit v1.2.3-54-g00ecf
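
Note: the snippet below is a minimal sketch of how a training loop might consume the strategy's new prepare hook instead of calling accelerator.prepare() directly. It is not part of this patch; the call site, and the accelerator/optimizer/dataloader/scheduler variables it assumes already exist, are hypothetical. Only textual_inversion_strategy, its prepare field, and the argument order (text_encoder and unet first, then the remaining objects passed through *args) follow the code added above.

    # Hypothetical call site (assumption, not from this patch): the trainer
    # delegates Accelerator preparation to the selected strategy.
    strategy = textual_inversion_strategy
    text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler = strategy.prepare(
        accelerator,          # accelerate.Accelerator instance
        text_encoder,         # wrapped by accelerator.prepare() inside the hook
        unet,                 # only moved/cast to the mixed-precision dtype
        optimizer, train_dataloader, val_dataloader, lr_scheduler,  # forwarded via *args
    )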