From 47b2fba05ba0f2f5335321d94e0634a7291980c5 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Wed, 21 Dec 2022 16:48:25 +0100
Subject: Some LoRA fixes (still broken)

---
 train_lora.py | 159 +++++++++++++---------------------------------
 1 file changed, 34 insertions(+), 125 deletions(-)

(limited to 'train_lora.py')

diff --git a/train_lora.py b/train_lora.py
index ffc1d10..34e1008 100644
--- a/train_lora.py
+++ b/train_lora.py
@@ -16,7 +16,6 @@ from accelerate.utils import LoggerType, set_seed
 from diffusers import AutoencoderKL, DDPMScheduler, DPMSolverMultistepScheduler, UNet2DConditionModel
 from diffusers.optimization import get_scheduler, get_cosine_with_hard_restarts_schedule_with_warmup
 from diffusers.training_utils import EMAModel
-from PIL import Image
 from tqdm.auto import tqdm
 from transformers import CLIPTextModel, CLIPTokenizer
 from slugify import slugify
@@ -24,8 +23,9 @@ from slugify import slugify
 from common import load_text_embeddings
 from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion
 from data.csv import CSVDataModule
-from training.lora import LoraAttention
+from training.lora import LoraAttnProcessor
 from training.optimization import get_one_cycle_schedule
+from training.util import AverageMeter, CheckpointerBase, freeze_params, save_args
 from models.clip.prompt import PromptProcessor
 
 logger = get_logger(__name__)
@@ -109,7 +109,7 @@ def parse_args():
     parser.add_argument(
         "--output_dir",
         type=str,
-        default="output/dreambooth",
+        default="output/lora",
         help="The output directory where the model predictions and checkpoints will be written.",
     )
     parser.add_argument(
@@ -176,7 +176,7 @@ def parse_args():
         help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
     )
     parser.add_argument(
-        "--learning_rate_unet",
+        "--learning_rate",
         type=float,
         default=2e-6,
         help="Initial learning rate (after the potential warmup period) to use.",
@@ -348,76 +348,45 @@ def parse_args():
     return args
 
 
-def save_args(basepath: Path, args, extra={}):
-    info = {"args": vars(args)}
-    info["args"].update(extra)
-    with open(basepath.joinpath("args.json"), "w") as f:
-        json.dump(info, f, indent=4)
-
-
-def freeze_params(params):
-    for param in params:
-        param.requires_grad = False
-
-
-def make_grid(images, rows, cols):
-    w, h = images[0].size
-    grid = Image.new('RGB', size=(cols*w, rows*h))
-    for i, image in enumerate(images):
-        grid.paste(image, box=(i % cols*w, i//cols*h))
-    return grid
-
-
-class AverageMeter:
-    def __init__(self, name=None):
-        self.name = name
-        self.reset()
-
-    def reset(self):
-        self.sum = self.count = self.avg = 0
-
-    def update(self, val, n=1):
-        self.sum += val * n
-        self.count += n
-        self.avg = self.sum / self.count
-
-
-class Checkpointer:
+class Checkpointer(CheckpointerBase):
     def __init__(
         self,
         datamodule,
         accelerator,
         vae,
         unet,
-        unet_lora,
         tokenizer,
         text_encoder,
+        unet_lora,
         scheduler,
-        output_dir: Path,
         instance_identifier,
         placeholder_token,
         placeholder_token_id,
+        output_dir: Path,
         sample_image_size,
         sample_batches,
         sample_batch_size,
         seed
     ):
-        self.datamodule = datamodule
+        super().__init__(
+            datamodule=datamodule,
+            output_dir=output_dir,
+            instance_identifier=instance_identifier,
+            placeholder_token=placeholder_token,
+            placeholder_token_id=placeholder_token_id,
+            sample_image_size=sample_image_size,
+            seed=seed or torch.random.seed(),
+            sample_batches=sample_batches,
+            sample_batch_size=sample_batch_size
+        )
+
         self.accelerator = accelerator
         self.vae = vae
         self.unet = unet
-        self.unet_lora = unet_lora
         self.tokenizer = tokenizer
         self.text_encoder = text_encoder
+        self.unet_lora = unet_lora
         self.scheduler = scheduler
-        self.output_dir = output_dir
-        self.instance_identifier = instance_identifier
-        self.placeholder_token = placeholder_token
-        self.placeholder_token_id = placeholder_token_id
-        self.sample_image_size = sample_image_size
-        self.seed = seed or torch.random.seed()
-        self.sample_batches = sample_batches
-        self.sample_batch_size = sample_batch_size
 
     @torch.no_grad()
     def save_model(self):
@@ -433,83 +402,18 @@ class Checkpointer:
 
     @torch.no_grad()
     def save_samples(self, step, num_inference_steps, guidance_scale=7.5, eta=0.0):
-        samples_path = Path(self.output_dir).joinpath("samples")
-
-        unet = self.accelerator.unwrap_model(self.unet)
-        text_encoder = self.accelerator.unwrap_model(self.text_encoder)
-
+        # Save a sample image
         pipeline = VlpnStableDiffusion(
-            text_encoder=text_encoder,
+            text_encoder=self.text_encoder,
             vae=self.vae,
-            unet=unet,
+            unet=self.unet,
             tokenizer=self.tokenizer,
             scheduler=self.scheduler,
         ).to(self.accelerator.device)
         pipeline.set_progress_bar_config(dynamic_ncols=True)
 
-        train_data = self.datamodule.train_dataloader()
-        val_data = self.datamodule.val_dataloader()
+        super().save_samples(pipeline, step, num_inference_steps, guidance_scale, eta)
 
-        generator = torch.Generator(device=pipeline.device).manual_seed(self.seed)
-        stable_latents = torch.randn(
-            (self.sample_batch_size, pipeline.unet.in_channels, self.sample_image_size // 8, self.sample_image_size // 8),
-            device=pipeline.device,
-            generator=generator,
-        )
-
-        with torch.autocast("cuda"), torch.inference_mode():
-            for pool, data, latents in [("stable", val_data, stable_latents), ("val", val_data, None), ("train", train_data, None)]:
-                all_samples = []
-                file_path = samples_path.joinpath(pool, f"step_{step}.jpg")
-                file_path.parent.mkdir(parents=True, exist_ok=True)
-
-                data_enum = enumerate(data)
-
-                batches = [
-                    batch
-                    for j, batch in data_enum
-                    if j * data.batch_size < self.sample_batch_size * self.sample_batches
-                ]
-                prompts = [
-                    prompt.format(identifier=self.instance_identifier)
-                    for batch in batches
-                    for prompt in batch["prompts"]
-                ]
-                nprompts = [
-                    prompt
-                    for batch in batches
-                    for prompt in batch["nprompts"]
-                ]
-
-                for i in range(self.sample_batches):
-                    prompt = prompts[i * self.sample_batch_size:(i + 1) * self.sample_batch_size]
-                    nprompt = nprompts[i * self.sample_batch_size:(i + 1) * self.sample_batch_size]
-
-                    samples = pipeline(
-                        prompt=prompt,
-                        negative_prompt=nprompt,
-                        height=self.sample_image_size,
-                        width=self.sample_image_size,
-                        image=latents[:len(prompt)] if latents is not None else None,
-                        generator=generator if latents is not None else None,
-                        guidance_scale=guidance_scale,
-                        eta=eta,
-                        num_inference_steps=num_inference_steps,
-                        output_type='pil'
-                    ).images
-
-                    all_samples += samples
-
-                    del samples
-
-                image_grid = make_grid(all_samples, self.sample_batches, self.sample_batch_size)
-                image_grid.save(file_path, quality=85)
-
-                del all_samples
-                del image_grid
-
-        del unet
-        del text_encoder
         del pipeline
         del generator
         del stable_latents
@@ -558,7 +462,11 @@ def main():
     checkpoint_scheduler = DPMSolverMultistepScheduler.from_pretrained(
         args.pretrained_model_name_or_path, subfolder='scheduler')
 
-    unet_lora = LoraAttention()
+    unet_lora = LoraAttnProcessor(
+        cross_attention_dim=unet.cross_attention_dim,
+        inner_dim=unet.in_channels,
+        r=4,
+    )
 
     vae.enable_slicing()
     vae.set_use_memory_efficient_attention_xformers(True)
@@ -618,8 +526,8 @@ def main():
     prompt_processor = PromptProcessor(tokenizer, text_encoder)
 
     if args.scale_lr:
-        args.learning_rate_unet = (
-            args.learning_rate_unet * args.gradient_accumulation_steps *
+        args.learning_rate = (
+            args.learning_rate * args.gradient_accumulation_steps *
             args.train_batch_size * accelerator.num_processes
         )
 
@@ -639,7 +547,7 @@ def main():
         [
             {
                 'params': unet_lora.parameters(),
-                'lr': args.learning_rate_unet,
+                'lr': args.learning_rate,
             },
         ],
         betas=(args.adam_beta1, args.adam_beta2),
@@ -801,7 +709,7 @@ def main():
     config = vars(args).copy()
     config["initializer_token"] = " ".join(config["initializer_token"])
     config["placeholder_token"] = " ".join(config["placeholder_token"])
-    accelerator.init_trackers("dreambooth", config=config)
+    accelerator.init_trackers("lora", config=config)
 
     # Train!
     total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
@@ -832,6 +740,7 @@ def main():
             tokenizer=tokenizer,
             text_encoder=text_encoder,
             scheduler=checkpoint_scheduler,
+            unet_lora=unet_lora,
             output_dir=basepath,
             instance_identifier=instance_identifier,
             placeholder_token=args.placeholder_token,
--
cgit v1.2.3-54-g00ecf
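
Notes:

As the subject line says, this is still broken: save_samples() keeps the
trailing "del generator" and "del stable_latents" as context lines, but after
the refactor both variables are created inside CheckpointerBase.save_samples()
and no longer exist in this scope, so the method raises an UnboundLocalError
once sampling finishes.

The helpers deleted above (save_args, freeze_params, AverageMeter) are now
imported from the new training.util module, whose contents this patch does not
show. Reconstructed from the removed code, the module presumably looks roughly
like the sketch below (not the actual file; CheckpointerBase is omitted):

    # training/util.py (sketch reconstructed from the code removed above)
    import json
    from pathlib import Path


    def save_args(basepath: Path, args, extra={}):
        # Dump the parsed CLI arguments next to the checkpoints for reproducibility.
        info = {"args": vars(args)}
        info["args"].update(extra)
        with open(basepath.joinpath("args.json"), "w") as f:
            json.dump(info, f, indent=4)


    def freeze_params(params):
        # Exclude the given parameters from gradient computation.
        for param in params:
            param.requires_grad = False


    class AverageMeter:
        # Keeps a running average of a scalar metric (e.g. the training loss).
        def __init__(self, name=None):
            self.name = name
            self.reset()

        def reset(self):
            self.sum = self.count = self.avg = 0

        def update(self, val, n=1):
            self.sum += val * n
            self.count += n
            self.avg = self.sum / self.count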
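
The patch also replaces LoraAttention with LoraAttnProcessor(..., r=4). The
processor's implementation lives in training/lora.py and is not part of this
diff. For orientation: the core of any LoRA module is a rank-r update to a
frozen weight, y = W x + (alpha / r) * B (A x), where A projects down to rank
r and B projects back up. The self-contained PyTorch sketch below illustrates
that update; it is a generic example, not the repo's class, and all names in
it are illustrative:

    import torch
    import torch.nn as nn


    class LoRALinear(nn.Module):
        # Low-rank adapter applied alongside a frozen linear projection.
        def __init__(self, in_features, out_features, r=4, alpha=4.0):
            super().__init__()
            self.scale = alpha / r
            # "down" projects to rank r, "up" projects back out; "up" starts
            # at zero so training begins from the unmodified base output.
            self.down = nn.Linear(in_features, r, bias=False)
            self.up = nn.Linear(r, out_features, bias=False)
            nn.init.normal_(self.down.weight, std=1 / r)
            nn.init.zeros_(self.up.weight)

        def forward(self, x):
            return self.up(self.down(x)) * self.scale


    # Usage: add the adapter output to a frozen projection, e.g. an attention
    # to_q/to_k/to_v matrix; r=4 matches the rank chosen in this patch, and
    # only the adapter's parameters are trained, mirroring the optimizer
    # param group in the diff (unet_lora.parameters()).
    base = nn.Linear(320, 320)
    base.requires_grad_(False)
    lora = LoRALinear(320, 320, r=4)
    x = torch.randn(1, 77, 320)
    y = base(x) + lora(x)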