From 83808fe00ac891ad2f625388d144c318b2cb5bfe Mon Sep 17 00:00:00 2001 From: Volpeon Date: Sat, 14 Jan 2023 21:53:07 +0100 Subject: WIP: Modularization ("free(): invalid pointer" my ass) --- infer.py | 19 +- .../stable_diffusion/vlpn_stable_diffusion.py | 1 - train.py | 672 +++++++++++++++++++++ train_dreambooth.py | 3 +- train_ti.py | 74 +-- trainer/base.py | 544 +++++++++++++++++ trainer/dreambooth.py | 0 trainer/ti.py | 164 +++++ training/common.py | 370 ------------ training/functional.py | 365 +++++++++++ training/lora.py | 107 ---- training/util.py | 214 ++++--- 12 files changed, 1894 insertions(+), 639 deletions(-) create mode 100644 train.py create mode 100644 trainer/base.py create mode 100644 trainer/dreambooth.py create mode 100644 trainer/ti.py delete mode 100644 training/common.py create mode 100644 training/functional.py delete mode 100644 training/lora.py diff --git a/infer.py b/infer.py index 36b5a2c..2b07b21 100644 --- a/infer.py +++ b/infer.py @@ -214,10 +214,21 @@ def load_embeddings(pipeline, embeddings_dir): def create_pipeline(model, dtype): print("Loading Stable Diffusion pipeline...") - pipeline = VlpnStableDiffusion.from_pretrained(model, torch_dtype=dtype) - - patch_managed_embeddings(pipeline.text_encoder) - + tokenizer = MultiCLIPTokenizer.from_pretrained(model, subfolder='tokenizer', torch_dtype=dtype) + text_encoder = CLIPTextModel.from_pretrained(model, subfolder='text_encoder', torch_dtype=dtype) + vae = AutoencoderKL.from_pretrained(model, subfolder='vae', torch_dtype=dtype) + unet = UNet2DConditionModel.from_pretrained(model, subfolder='unet', torch_dtype=dtype) + scheduler = DDIMScheduler.from_pretrained(model, subfolder='scheduler', torch_dtype=dtype) + + patch_managed_embeddings(text_encoder) + + pipeline = VlpnStableDiffusion( + text_encoder=text_encoder, + vae=vae, + unet=unet, + tokenizer=tokenizer, + scheduler=scheduler, + ) pipeline.enable_xformers_memory_efficient_attention() pipeline.enable_vae_slicing() pipeline.to("cuda") diff --git a/pipelines/stable_diffusion/vlpn_stable_diffusion.py b/pipelines/stable_diffusion/vlpn_stable_diffusion.py index a5cfc60..43141bd 100644 --- a/pipelines/stable_diffusion/vlpn_stable_diffusion.py +++ b/pipelines/stable_diffusion/vlpn_stable_diffusion.py @@ -52,7 +52,6 @@ class VlpnStableDiffusion(DiffusionPipeline): EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], - **kwargs, ): super().__init__() diff --git a/train.py b/train.py new file mode 100644 index 0000000..d8644c4 --- /dev/null +++ b/train.py @@ -0,0 +1,672 @@ +import argparse +import datetime +import logging +from pathlib import Path + +import torch +import torch.utils.checkpoint + +from accelerate import Accelerator +from accelerate.logging import get_logger +from accelerate.utils import LoggerType, set_seed +from slugify import slugify + +from data.csv import VlpnDataModule, VlpnDataItem +from util import load_config, load_embeddings_from_dir + +from trainer.ti import TextualInversionTrainingStrategy +from trainer.base import Trainer +from training.optimization import get_scheduler +from training.util import save_args, generate_class_images, add_placeholder_tokens, get_models + +logger = get_logger(__name__) + + +torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cudnn.benchmark = True + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Simple example of a training script." 
+ ) + parser.add_argument( + "--pretrained_model_name_or_path", + type=str, + default=None, + help="Path to pretrained model or model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--tokenizer_name", + type=str, + default=None, + help="Pretrained tokenizer name or path if not the same as model_name", + ) + parser.add_argument( + "--train_data_file", + type=str, + default=None, + help="A CSV file containing the training data." + ) + parser.add_argument( + "--train_data_template", + type=str, + default="template", + ) + parser.add_argument( + "--project", + type=str, + default=None, + help="The name of the current project.", + ) + parser.add_argument( + "--placeholder_tokens", + type=str, + nargs='*', + help="A token to use as a placeholder for the concept.", + ) + parser.add_argument( + "--initializer_tokens", + type=str, + nargs='*', + help="A token to use as initializer word." + ) + parser.add_argument( + "--num_vectors", + type=int, + nargs='*', + help="Number of vectors per embedding." + ) + parser.add_argument( + "--num_class_images", + type=int, + default=1, + help="How many class images to generate." + ) + parser.add_argument( + "--class_image_dir", + type=str, + default="cls", + help="The directory where class images will be saved.", + ) + parser.add_argument( + "--exclude_collections", + type=str, + nargs='*', + help="Exclude all items with a listed collection.", + ) + parser.add_argument( + "--output_dir", + type=str, + default="output/text-inversion", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--embeddings_dir", + type=str, + default=None, + help="The embeddings directory where Textual Inversion embeddings are stored.", + ) + parser.add_argument( + "--collection", + type=str, + nargs='*', + help="A collection to filter the dataset.", + ) + parser.add_argument( + "--seed", + type=int, + default=None, + help="A seed for reproducible training." + ) + parser.add_argument( + "--resolution", + type=int, + default=768, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--num_buckets", + type=int, + default=0, + help="Number of aspect ratio buckets in either direction.", + ) + parser.add_argument( + "--progressive_buckets", + action="store_true", + help="Include images in smaller buckets as well.", + ) + parser.add_argument( + "--bucket_step_size", + type=int, + default=64, + help="Step size between buckets.", + ) + parser.add_argument( + "--bucket_max_pixels", + type=int, + default=None, + help="Maximum pixels per bucket.", + ) + parser.add_argument( + "--tag_dropout", + type=float, + default=0, + help="Tag dropout probability.", + ) + parser.add_argument( + "--no_tag_shuffle", + action="store_true", + help="Shuffle tags.", + ) + parser.add_argument( + "--vector_dropout", + type=int, + default=0, + help="Vector dropout probability.", + ) + parser.add_argument( + "--vector_shuffle", + type=str, + default="auto", + help='Vector shuffling algorithm. 
Choose between ["all", "trailing", "leading", "between", "auto", "off"]', + ) + parser.add_argument( + "--num_train_epochs", + type=int, + default=100 + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--find_lr", + action="store_true", + help="Automatically find a learning rate (no training).", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-4, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="one_cycle", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup", "one_cycle"]' + ), + ) + parser.add_argument( + "--lr_warmup_epochs", + type=int, + default=10, + help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--lr_cycles", + type=int, + default=None, + help="Number of restart cycles in the lr scheduler." + ) + parser.add_argument( + "--lr_warmup_func", + type=str, + default="cos", + help='Choose between ["linear", "cos"]' + ) + parser.add_argument( + "--lr_warmup_exp", + type=int, + default=1, + help='If lr_warmup_func is "cos", exponent to modify the function' + ) + parser.add_argument( + "--lr_annealing_func", + type=str, + default="cos", + help='Choose between ["linear", "half_cos", "cos"]' + ) + parser.add_argument( + "--lr_annealing_exp", + type=int, + default=1, + help='If lr_annealing_func is "half_cos" or "cos", exponent to modify the function' + ) + parser.add_argument( + "--lr_min_lr", + type=float, + default=0.04, + help="Minimum learning rate in the lr scheduler." + ) + parser.add_argument( + "--use_ema", + action="store_true", + help="Whether to use EMA model." + ) + parser.add_argument( + "--ema_inv_gamma", + type=float, + default=1.0 + ) + parser.add_argument( + "--ema_power", + type=float, + default=1 + ) + parser.add_argument( + "--ema_max_decay", + type=float, + default=0.9999 + ) + parser.add_argument( + "--use_8bit_adam", + action="store_true", + help="Whether or not to use 8-bit Adam from bitsandbytes." + ) + parser.add_argument( + "--adam_beta1", + type=float, + default=0.9, + help="The beta1 parameter for the Adam optimizer." + ) + parser.add_argument( + "--adam_beta2", + type=float, + default=0.999, + help="The beta2 parameter for the Adam optimizer." + ) + parser.add_argument( + "--adam_weight_decay", + type=float, + default=0, + help="Weight decay to use." + ) + parser.add_argument( + "--adam_epsilon", + type=float, + default=1e-08, + help="Epsilon value for the Adam optimizer" + ) + parser.add_argument( + "--adam_amsgrad", + type=bool, + default=False, + help="Amsgrad value for the Adam optimizer" + ) + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose" + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." + "and an Nvidia Ampere GPU." 
+ ), + ) + parser.add_argument( + "--checkpoint_frequency", + type=int, + default=5, + help="How often to save a checkpoint and sample image (in epochs)", + ) + parser.add_argument( + "--sample_frequency", + type=int, + default=1, + help="How often to save a checkpoint and sample image (in epochs)", + ) + parser.add_argument( + "--sample_image_size", + type=int, + default=768, + help="Size of sample images", + ) + parser.add_argument( + "--sample_batches", + type=int, + default=1, + help="Number of sample batches to generate per checkpoint", + ) + parser.add_argument( + "--sample_batch_size", + type=int, + default=1, + help="Number of samples to generate per batch", + ) + parser.add_argument( + "--valid_set_size", + type=int, + default=None, + help="Number of images in the validation dataset." + ) + parser.add_argument( + "--valid_set_repeat", + type=int, + default=1, + help="Times the images in the validation dataset are repeated." + ) + parser.add_argument( + "--train_batch_size", + type=int, + default=1, + help="Batch size (per device) for the training dataloader." + ) + parser.add_argument( + "--sample_steps", + type=int, + default=20, + help="Number of steps for sample generation. Higher values will result in more detailed samples, but longer runtimes.", + ) + parser.add_argument( + "--prior_loss_weight", + type=float, + default=1.0, + help="The weight of prior preservation loss." + ) + parser.add_argument( + "--emb_decay_target", + default=0.4, + type=float, + help="Embedding decay target." + ) + parser.add_argument( + "--emb_decay_factor", + default=0, + type=float, + help="Embedding decay factor." + ) + parser.add_argument( + "--emb_decay_start", + default=1e-4, + type=float, + help="Embedding decay start offset." + ) + parser.add_argument( + "--noise_timesteps", + type=int, + default=1000, + ) + parser.add_argument( + "--resume_from", + type=str, + default=None, + help="Path to a directory to resume training from (ie, logs/token_name/2022-09-22T23-36-27)" + ) + parser.add_argument( + "--global_step", + type=int, + default=0, + ) + parser.add_argument( + "--config", + type=str, + default=None, + help="Path to a JSON configuration file containing arguments for invoking this script." 
+ ) + + args = parser.parse_args() + if args.config is not None: + args = load_config(args.config) + args = parser.parse_args(namespace=argparse.Namespace(**args)) + + if args.train_data_file is None: + raise ValueError("You must specify --train_data_file") + + if args.pretrained_model_name_or_path is None: + raise ValueError("You must specify --pretrained_model_name_or_path") + + if args.project is None: + raise ValueError("You must specify --project") + + if isinstance(args.placeholder_tokens, str): + args.placeholder_tokens = [args.placeholder_tokens] + + if len(args.placeholder_tokens) == 0: + args.placeholder_tokens = [f"<*{i}>" for i in range(args.initializer_tokens)] + + if isinstance(args.initializer_tokens, str): + args.initializer_tokens = [args.initializer_tokens] * len(args.placeholder_tokens) + + if len(args.initializer_tokens) == 0: + raise ValueError("You must specify --initializer_tokens") + + if len(args.placeholder_tokens) != len(args.initializer_tokens): + raise ValueError("--placeholder_tokens and --initializer_tokens must have the same number of items") + + if args.num_vectors is None: + args.num_vectors = 1 + + if isinstance(args.num_vectors, int): + args.num_vectors = [args.num_vectors] * len(args.initializer_tokens) + + if len(args.placeholder_tokens) != len(args.num_vectors): + raise ValueError("--placeholder_tokens and --num_vectors must have the same number of items") + + if isinstance(args.collection, str): + args.collection = [args.collection] + + if isinstance(args.exclude_collections, str): + args.exclude_collections = [args.exclude_collections] + + if args.output_dir is None: + raise ValueError("You must specify --output_dir") + + return args + + +def main(): + args = parse_args() + + global_step_offset = args.global_step + now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") + output_dir = Path(args.output_dir).joinpath(slugify(args.project), now) + output_dir.mkdir(parents=True, exist_ok=True) + + accelerator = Accelerator( + log_with=LoggerType.TENSORBOARD, + logging_dir=f"{output_dir}", + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision + ) + + logging.basicConfig(filename=output_dir.joinpath("log.txt"), level=logging.DEBUG) + + if args.seed is None: + args.seed = torch.random.seed() >> 32 + + set_seed(args.seed) + + save_args(output_dir, args) + + tokenizer, text_encoder, vae, unet, noise_scheduler, sample_scheduler, embeddings = get_models( + args.pretrained_model_name_or_path) + + tokenizer.set_use_vector_shuffle(args.vector_shuffle) + tokenizer.set_dropout(args.vector_dropout) + + vae.enable_slicing() + vae.set_use_memory_efficient_attention_xformers(True) + unet.set_use_memory_efficient_attention_xformers(True) + + if args.gradient_checkpointing: + unet.enable_gradient_checkpointing() + text_encoder.gradient_checkpointing_enable() + + if args.embeddings_dir is not None: + embeddings_dir = Path(args.embeddings_dir) + if not embeddings_dir.exists() or not embeddings_dir.is_dir(): + raise ValueError("--embeddings_dir must point to an existing directory") + + added_tokens, added_ids = load_embeddings_from_dir(tokenizer, embeddings, embeddings_dir) + print(f"Added {len(added_tokens)} tokens from embeddings dir: {list(zip(added_tokens, added_ids))}") + + placeholder_token_ids, initializer_token_ids = add_placeholder_tokens( + tokenizer=tokenizer, + embeddings=embeddings, + placeholder_tokens=args.placeholder_tokens, + initializer_tokens=args.initializer_tokens, + num_vectors=args.num_vectors + ) + 
+ if len(placeholder_token_ids) != 0: + initializer_token_id_lens = [len(id) for id in initializer_token_ids] + placeholder_token_stats = list(zip(args.placeholder_tokens, placeholder_token_ids, initializer_token_id_lens)) + print(f"Added {len(placeholder_token_ids)} new tokens: {placeholder_token_stats}") + + if args.scale_lr: + args.learning_rate = ( + args.learning_rate * args.gradient_accumulation_steps * + args.train_batch_size * accelerator.num_processes + ) + + if args.find_lr: + args.learning_rate = 1e-5 + + if args.use_8bit_adam: + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError("To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.") + + optimizer_class = bnb.optim.AdamW8bit + else: + optimizer_class = torch.optim.AdamW + + optimizer = optimizer_class( + text_encoder.text_model.embeddings.temp_token_embedding.parameters(), + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, + amsgrad=args.adam_amsgrad, + ) + + weight_dtype = torch.float32 + if args.mixed_precision == "fp16": + weight_dtype = torch.float16 + elif args.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + + def keyword_filter(item: VlpnDataItem): + cond1 = any( + keyword in part + for keyword in args.placeholder_tokens + for part in item.prompt + ) + cond3 = args.collection is None or args.collection in item.collection + cond4 = args.exclude_collections is None or not any( + collection in item.collection + for collection in args.exclude_collections + ) + return cond1 and cond3 and cond4 + + datamodule = VlpnDataModule( + data_file=args.train_data_file, + batch_size=args.train_batch_size, + tokenizer=tokenizer, + class_subdir=args.class_image_dir, + num_class_images=args.num_class_images, + size=args.resolution, + num_buckets=args.num_buckets, + progressive_buckets=args.progressive_buckets, + bucket_step_size=args.bucket_step_size, + bucket_max_pixels=args.bucket_max_pixels, + dropout=args.tag_dropout, + shuffle=not args.no_tag_shuffle, + template_key=args.train_data_template, + valid_set_size=args.valid_set_size, + valid_set_repeat=args.valid_set_repeat, + seed=args.seed, + filter=keyword_filter, + dtype=weight_dtype + ) + datamodule.setup() + + train_dataloader = datamodule.train_dataloader + val_dataloader = datamodule.val_dataloader + + if args.num_class_images != 0: + generate_class_images( + accelerator, + text_encoder, + vae, + unet, + tokenizer, + sample_scheduler, + datamodule.data_train, + args.sample_batch_size, + args.sample_image_size, + args.sample_steps + ) + + lr_scheduler = get_scheduler( + args.lr_scheduler, + optimizer=optimizer, + num_training_steps_per_epoch=len(train_dataloader), + gradient_accumulation_steps=args.gradient_accumulation_steps, + min_lr=args.lr_min_lr, + warmup_func=args.lr_warmup_func, + annealing_func=args.lr_annealing_func, + warmup_exp=args.lr_warmup_exp, + annealing_exp=args.lr_annealing_exp, + cycles=args.lr_cycles, + train_epochs=args.num_train_epochs, + warmup_epochs=args.lr_warmup_epochs, + ) + + trainer = Trainer( + accelerator=accelerator, + unet=unet, + text_encoder=text_encoder, + tokenizer=tokenizer, + vae=vae, + noise_scheduler=noise_scheduler, + sample_scheduler=sample_scheduler, + train_dataloader=train_dataloader, + val_dataloader=val_dataloader, + dtype=weight_dtype, + ) + + trainer( + strategy_class=TextualInversionTrainingStrategy, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + 
num_train_epochs=args.num_train_epochs, + sample_frequency=args.sample_frequency, + checkpoint_frequency=args.checkpoint_frequency, + global_step_offset=global_step_offset, + prior_loss_weight=args.prior_loss_weight, + output_dir=output_dir, + placeholder_tokens=args.placeholder_tokens, + placeholder_token_ids=placeholder_token_ids, + learning_rate=args.learning_rate, + sample_steps=args.sample_steps, + sample_image_size=args.sample_image_size, + sample_batch_size=args.sample_batch_size, + sample_batches=args.sample_batches, + seed=args.seed, + ) + + +if __name__ == "__main__": + main() diff --git a/train_dreambooth.py b/train_dreambooth.py index 53776ba..71bad7e 100644 --- a/train_dreambooth.py +++ b/train_dreambooth.py @@ -20,10 +20,9 @@ from slugify import slugify from util import load_config, load_embeddings_from_dir from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion from data.csv import VlpnDataModule, VlpnDataItem -from training.common import loss_step, train_loop, generate_class_images, add_placeholder_tokens, get_models from training.optimization import get_scheduler from training.lr import LRFinder -from training.util import CheckpointerBase, EMAModel, save_args +from training.util import CheckpointerBase, EMAModel, save_args, generate_class_images, add_placeholder_tokens, get_models from models.clip.tokenizer import MultiCLIPTokenizer logger = get_logger(__name__) diff --git a/train_ti.py b/train_ti.py index 8631892..deed84c 100644 --- a/train_ti.py +++ b/train_ti.py @@ -19,10 +19,11 @@ from slugify import slugify from util import load_config, load_embeddings_from_dir from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion from data.csv import VlpnDataModule, VlpnDataItem -from training.common import loss_step, train_loop, generate_class_images, add_placeholder_tokens, get_models +from trainer.base import Checkpointer +from training.functional import loss_step, train_loop, generate_class_images, add_placeholder_tokens, get_models from training.optimization import get_scheduler from training.lr import LRFinder -from training.util import CheckpointerBase, EMAModel, save_args +from training.util import EMAModel, save_args from models.clip.tokenizer import MultiCLIPTokenizer logger = get_logger(__name__) @@ -480,38 +481,20 @@ def parse_args(): return args -class Checkpointer(CheckpointerBase): +class TextualInversionCheckpointer(Checkpointer): def __init__( self, - weight_dtype: torch.dtype, - accelerator: Accelerator, - vae: AutoencoderKL, - unet: UNet2DConditionModel, - tokenizer: MultiCLIPTokenizer, - text_encoder: CLIPTextModel, ema_embeddings: EMAModel, - scheduler, - placeholder_tokens, - placeholder_token_ids, *args, - **kwargs + **kwargs, ): super().__init__(*args, **kwargs) - self.weight_dtype = weight_dtype - self.accelerator = accelerator - self.vae = vae - self.unet = unet - self.tokenizer = tokenizer - self.text_encoder = text_encoder self.ema_embeddings = ema_embeddings - self.scheduler = scheduler - self.placeholder_tokens = placeholder_tokens - self.placeholder_token_ids = placeholder_token_ids @torch.no_grad() def checkpoint(self, step, postfix): - print("Saving checkpoint for step %d..." 
% step) + print(f"Saving checkpoint for step {step}...") checkpoints_path = self.output_dir.joinpath("checkpoints") checkpoints_path.mkdir(parents=True, exist_ok=True) @@ -519,7 +502,8 @@ class Checkpointer(CheckpointerBase): text_encoder = self.accelerator.unwrap_model(self.text_encoder) ema_context = self.ema_embeddings.apply_temporary( - text_encoder.text_model.embeddings.temp_token_embedding.parameters()) if self.ema_embeddings is not None else nullcontext() + text_encoder.text_model.embeddings.temp_token_embedding.parameters() + ) if self.ema_embeddings is not None else nullcontext() with ema_context: for (token, ids) in zip(self.placeholder_tokens, self.placeholder_token_ids): @@ -528,42 +512,14 @@ class Checkpointer(CheckpointerBase): checkpoints_path.joinpath(f"{slugify(token)}_{step}_{postfix}.bin") ) - del text_encoder - - @torch.no_grad() + @torch.inference_mode() def save_samples(self, step): - unet = self.accelerator.unwrap_model(self.unet) - text_encoder = self.accelerator.unwrap_model(self.text_encoder) - ema_context = self.ema_embeddings.apply_temporary( - text_encoder.text_model.embeddings.temp_token_embedding.parameters()) if self.ema_embeddings is not None else nullcontext() + self.text_encoder.text_model.embeddings.temp_token_embedding.parameters() + ) if self.ema_embeddings is not None else nullcontext() with ema_context: - orig_unet_dtype = unet.dtype - orig_text_encoder_dtype = text_encoder.dtype - - unet.to(dtype=self.weight_dtype) - text_encoder.to(dtype=self.weight_dtype) - - pipeline = VlpnStableDiffusion( - text_encoder=text_encoder, - vae=self.vae, - unet=self.unet, - tokenizer=self.tokenizer, - scheduler=self.scheduler, - ).to(self.accelerator.device) - pipeline.set_progress_bar_config(dynamic_ncols=True) - - super().save_samples(pipeline, step) - - unet.to(dtype=orig_unet_dtype) - text_encoder.to(dtype=orig_text_encoder_dtype) - - del text_encoder - del pipeline - - if torch.cuda.is_available(): - torch.cuda.empty_cache() + super().save_samples(step) def main(): @@ -806,8 +762,8 @@ def main(): args.seed, ) - checkpointer = Checkpointer( - weight_dtype=weight_dtype, + checkpointer = TextualInversionCheckpointer( + dtype=weight_dtype, train_dataloader=train_dataloader, val_dataloader=val_dataloader, accelerator=accelerator, @@ -816,7 +772,7 @@ def main(): tokenizer=tokenizer, text_encoder=text_encoder, ema_embeddings=ema_embeddings, - scheduler=sample_scheduler, + sample_scheduler=sample_scheduler, placeholder_tokens=args.placeholder_tokens, placeholder_token_ids=placeholder_token_ids, output_dir=output_dir, diff --git a/trainer/base.py b/trainer/base.py new file mode 100644 index 0000000..e700dd6 --- /dev/null +++ b/trainer/base.py @@ -0,0 +1,544 @@ +from pathlib import Path +import math +from contextlib import contextmanager +from typing import Type, Optional +import itertools +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader + +from accelerate import Accelerator +from transformers import CLIPTextModel +from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel, DPMSolverMultistepScheduler + +from tqdm.auto import tqdm +from PIL import Image + +from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion +from models.clip.tokenizer import MultiCLIPTokenizer +from models.clip.util import get_extended_embeddings +from training.util import AverageMeter + + +def make_grid(images, rows, cols): + w, h = images[0].size + grid = 
Image.new('RGB', size=(cols*w, rows*h)) + for i, image in enumerate(images): + grid.paste(image, box=(i % cols*w, i//cols*h)) + return grid + + +class Checkpointer(): + def __init__( + self, + accelerator: Accelerator, + vae: AutoencoderKL, + unet: UNet2DConditionModel, + text_encoder: CLIPTextModel, + tokenizer: MultiCLIPTokenizer, + sample_scheduler, + dtype, + train_dataloader: DataLoader, + val_dataloader: DataLoader, + output_dir: Path, + sample_steps: int = 20, + sample_guidance_scale: float = 7.5, + sample_image_size: int = 768, + sample_batches: int = 1, + sample_batch_size: int = 1, + seed: Optional[int] = None, + *args, + **kwargs, + ): + self.accelerator = accelerator + self.vae = vae + self.unet = unet + self.text_encoder = text_encoder + self.tokenizer = tokenizer + self.sample_scheduler = sample_scheduler + self.dtype = dtype + self.train_dataloader = train_dataloader + self.val_dataloader = val_dataloader + self.output_dir = output_dir + self.sample_steps = sample_steps + self.sample_guidance_scale = sample_guidance_scale + self.sample_image_size = sample_image_size + self.sample_batches = sample_batches + self.sample_batch_size = sample_batch_size + self.seed = seed if seed is not None else torch.random.seed() + + @torch.no_grad() + def checkpoint(self, step: int, postfix: str): + pass + + @torch.inference_mode() + def save_samples(self, step: int): + print(f"Saving samples for step {step}...") + + samples_path = self.output_dir.joinpath("samples") + + grid_cols = min(self.sample_batch_size, 4) + grid_rows = (self.sample_batches * self.sample_batch_size) // grid_cols + + unet = self.accelerator.unwrap_model(self.unet) + text_encoder = self.accelerator.unwrap_model(self.text_encoder) + + orig_unet_dtype = unet.dtype + orig_text_encoder_dtype = text_encoder.dtype + + unet.to(dtype=self.dtype) + text_encoder.to(dtype=self.dtype) + + pipeline = VlpnStableDiffusion( + text_encoder=text_encoder, + vae=self.vae, + unet=self.unet, + tokenizer=self.tokenizer, + scheduler=self.sample_scheduler, + ).to(self.accelerator.device) + pipeline.set_progress_bar_config(dynamic_ncols=True) + + generator = torch.Generator(device=self.accelerator.device).manual_seed(self.seed) + + for pool, data, gen in [ + ("stable", self.val_dataloader, generator), + ("val", self.val_dataloader, None), + ("train", self.train_dataloader, None) + ]: + all_samples = [] + file_path = samples_path.joinpath(pool, f"step_{step}.jpg") + file_path.parent.mkdir(parents=True, exist_ok=True) + + batches = list(itertools.islice(itertools.cycle(data), self.sample_batch_size * self.sample_batches)) + prompt_ids = [ + prompt + for batch in batches + for prompt in batch["prompt_ids"] + ] + nprompt_ids = [ + prompt + for batch in batches + for prompt in batch["nprompt_ids"] + ] + + for i in range(self.sample_batches): + start = i * self.sample_batch_size + end = (i + 1) * self.sample_batch_size + prompt = prompt_ids[start:end] + nprompt = nprompt_ids[start:end] + + samples = pipeline( + prompt=prompt, + negative_prompt=nprompt, + height=self.sample_image_size, + width=self.sample_image_size, + generator=gen, + guidance_scale=self.sample_guidance_scale, + num_inference_steps=self.sample_steps, + output_type='pil' + ).images + + all_samples += samples + + image_grid = make_grid(all_samples, grid_rows, grid_cols) + image_grid.save(file_path, quality=85) + + unet.to(dtype=orig_unet_dtype) + text_encoder.to(dtype=orig_text_encoder_dtype) + + del unet + del text_encoder + del generator + del pipeline + + if torch.cuda.is_available(): 
+ torch.cuda.empty_cache() + + +class TrainingStrategy(): + def __init__( + self, + tokenizer: MultiCLIPTokenizer, + *args, + **kwargs, + ): + self.tokenizer = tokenizer + self.checkpointer = Checkpointer(tokenizer=tokenizer, *args, **kwargs) + + @property + def main_model(self) -> nn.Module: + ... + + @contextmanager + def on_train(self, epoch: int): + try: + self.tokenizer.train() + yield + finally: + pass + + @contextmanager + def on_eval(self): + try: + self.tokenizer.eval() + yield + finally: + pass + + def on_before_optimize(self, epoch: int): + ... + + def on_after_optimize(self, lr: float): + ... + + def on_log(): + return {} + + +def loss_step( + vae: AutoencoderKL, + unet: UNet2DConditionModel, + text_encoder: CLIPTextModel, + seed: int, + noise_scheduler, + prior_loss_weight: float, + step: int, + batch: dict, + eval: bool = False +): + # Convert images to latent space + latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach() + latents = latents * 0.18215 + + generator = torch.Generator(device=latents.device).manual_seed(seed + step) if eval else None + + # Sample noise that we'll add to the latents + noise = torch.randn( + latents.shape, + dtype=latents.dtype, + layout=latents.layout, + device=latents.device, + generator=generator + ) + bsz = latents.shape[0] + # Sample a random timestep for each image + timesteps = torch.randint( + 0, + noise_scheduler.config.num_train_timesteps, + (bsz,), + generator=generator, + device=latents.device, + ) + timesteps = timesteps.long() + + # Add noise to the latents according to the noise magnitude at each timestep + # (this is the forward diffusion process) + noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) + noisy_latents = noisy_latents.to(dtype=unet.dtype) + + # Get the text embedding for conditioning + encoder_hidden_states = get_extended_embeddings( + text_encoder, + batch["input_ids"], + batch["attention_mask"] + ) + encoder_hidden_states = encoder_hidden_states.to(dtype=unet.dtype) + + # Predict the noise residual + model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample + + # Get the target for loss depending on the prediction type + if noise_scheduler.config.prediction_type == "epsilon": + target = noise + elif noise_scheduler.config.prediction_type == "v_prediction": + target = noise_scheduler.get_velocity(latents, noise, timesteps) + else: + raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") + + if batch["with_prior"].all(): + # Chunk the noise and model_pred into two parts and compute the loss on each part separately. + model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) + target, target_prior = torch.chunk(target, 2, dim=0) + + # Compute instance loss + loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") + + # Compute prior loss + prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") + + # Add the prior loss to the instance loss. 
+ loss = loss + prior_loss_weight * prior_loss + else: + loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") + + acc = (model_pred == target).float().mean() + + return loss, acc, bsz + + +def train_loop( + strategy: TrainingStrategy, + accelerator: Accelerator, + vae: AutoencoderKL, + unet: UNet2DConditionModel, + text_encoder: CLIPTextModel, + train_dataloader: DataLoader, + val_dataloader: DataLoader, + seed: int, + optimizer: torch.optim.Optimizer, + lr_scheduler: torch.optim.lr_scheduler._LRScheduler, + noise_scheduler, + prior_loss_weight: float = 1.0, + sample_frequency: int = 10, + checkpoint_frequency: int = 50, + global_step_offset: int = 0, + num_epochs: int = 100, +): + num_training_steps_per_epoch = math.ceil( + len(train_dataloader) / accelerator.gradient_accumulation_steps + ) + num_val_steps_per_epoch = len(val_dataloader) + + num_training_steps = num_training_steps_per_epoch * num_epochs + num_val_steps = num_val_steps_per_epoch * num_epochs + + global_step = 0 + + avg_loss = AverageMeter() + avg_acc = AverageMeter() + + avg_loss_val = AverageMeter() + avg_acc_val = AverageMeter() + + max_acc_val = 0.0 + + local_progress_bar = tqdm( + range(num_training_steps_per_epoch + num_val_steps_per_epoch), + disable=not accelerator.is_local_main_process, + dynamic_ncols=True + ) + local_progress_bar.set_description(f"Epoch 1 / {num_epochs}") + + global_progress_bar = tqdm( + range(num_training_steps + num_val_steps), + disable=not accelerator.is_local_main_process, + dynamic_ncols=True + ) + global_progress_bar.set_description("Total progress") + + loss_step_ = partial( + loss_step, + vae, + unet, + text_encoder, + seed, + noise_scheduler, + prior_loss_weight + ) + + try: + for epoch in range(num_epochs): + if accelerator.is_main_process: + if epoch % sample_frequency == 0 and epoch != 0: + strategy.checkpointer.save_samples(global_step + global_step_offset) + + if epoch % checkpoint_frequency == 0 and epoch != 0: + strategy.checkpointer.checkpoint(global_step + global_step_offset, "training") + + local_progress_bar.set_description(f"Epoch {epoch + 1} / {num_epochs}") + local_progress_bar.reset() + + strategy.main_model.train() + + with strategy.on_train(epoch): + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(strategy.main_model): + loss, acc, bsz = loss_step_(step, batch) + + accelerator.backward(loss) + + strategy.on_before_optimize(epoch) + + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad(set_to_none=True) + + avg_loss.update(loss.detach_(), bsz) + avg_acc.update(acc.detach_(), bsz) + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + strategy.on_after_optimize(lr_scheduler.get_last_lr()[0]) + + local_progress_bar.update(1) + global_progress_bar.update(1) + + global_step += 1 + + logs = { + "train/loss": avg_loss.avg.item(), + "train/acc": avg_acc.avg.item(), + "train/cur_loss": loss.item(), + "train/cur_acc": acc.item(), + "lr": lr_scheduler.get_last_lr()[0], + } + logs.update(strategy.on_log()) + + accelerator.log(logs, step=global_step) + + local_progress_bar.set_postfix(**logs) + + if global_step >= num_training_steps: + break + + accelerator.wait_for_everyone() + + strategy.main_model.eval() + + cur_loss_val = AverageMeter() + cur_acc_val = AverageMeter() + + with torch.inference_mode(), strategy.on_eval(): + for step, batch in enumerate(val_dataloader): + loss, acc, bsz = loss_step_(step, batch, True) + + loss = loss.detach_() + acc = 
acc.detach_() + + cur_loss_val.update(loss, bsz) + cur_acc_val.update(acc, bsz) + + avg_loss_val.update(loss, bsz) + avg_acc_val.update(acc, bsz) + + local_progress_bar.update(1) + global_progress_bar.update(1) + + logs = { + "val/loss": avg_loss_val.avg.item(), + "val/acc": avg_acc_val.avg.item(), + "val/cur_loss": loss.item(), + "val/cur_acc": acc.item(), + } + local_progress_bar.set_postfix(**logs) + + logs["val/cur_loss"] = cur_loss_val.avg.item() + logs["val/cur_acc"] = cur_acc_val.avg.item() + + accelerator.log(logs, step=global_step) + + local_progress_bar.clear() + global_progress_bar.clear() + + if accelerator.is_main_process: + if avg_acc_val.avg.item() > max_acc_val: + accelerator.print( + f"Global step {global_step}: Validation accuracy reached new maximum: {max_acc_val:.2e} -> {avg_acc_val.avg.item():.2e}") + strategy.checkpointer.checkpoint(global_step + global_step_offset, "milestone") + max_acc_val = avg_acc_val.avg.item() + + # Create the pipeline using using the trained modules and save it. + if accelerator.is_main_process: + print("Finished!") + strategy.checkpointer.checkpoint(global_step + global_step_offset, "end") + strategy.checkpointer.save_samples(global_step + global_step_offset) + accelerator.end_training() + + except KeyboardInterrupt: + if accelerator.is_main_process: + print("Interrupted") + strategy.checkpointer.checkpoint(global_step + global_step_offset, "end") + accelerator.end_training() + + +class Trainer(): + def __init__( + self, + accelerator: Accelerator, + unet: UNet2DConditionModel, + text_encoder: CLIPTextModel, + tokenizer: MultiCLIPTokenizer, + vae: AutoencoderKL, + noise_scheduler: DDPMScheduler, + sample_scheduler: DPMSolverMultistepScheduler, + train_dataloader: DataLoader, + val_dataloader: DataLoader, + dtype: torch.dtype, + ): + self.accelerator = accelerator + self.unet = unet + self.text_encoder = text_encoder + self.tokenizer = tokenizer + self.vae = vae + self.noise_scheduler = noise_scheduler + self.sample_scheduler = sample_scheduler + self.train_dataloader = train_dataloader + self.val_dataloader = val_dataloader + self.dtype = dtype + + def __call__( + self, + strategy_class: Type[TrainingStrategy], + optimizer, + lr_scheduler, + num_train_epochs: int = 100, + sample_frequency: int = 20, + checkpoint_frequency: int = 50, + global_step_offset: int = 0, + prior_loss_weight: float = 0, + seed: Optional[int] = None, + **kwargs, + ): + unet, text_encoder, optimizer, train_dataloader, val_dataloader, lr_scheduler = self.accelerator.prepare( + self.unet, self.text_encoder, optimizer, self.train_dataloader, self.val_dataloader, lr_scheduler + ) + + self.vae.to(self.accelerator.device, dtype=self.dtype) + + for model in (unet, text_encoder, self.vae): + model.requires_grad_(False) + model.eval() + + if seed is None: + seed = torch.random.seed() + + strategy = strategy_class( + accelerator=self.accelerator, + vae=self.vae, + unet=unet, + text_encoder=text_encoder, + tokenizer=self.tokenizer, + sample_scheduler=self.sample_scheduler, + train_dataloader=train_dataloader, + val_dataloader=val_dataloader, + dtype=self.dtype, + seed=seed, + **kwargs + ) + + if self.accelerator.is_main_process: + self.accelerator.init_trackers("textual_inversion") + + train_loop( + strategy=strategy, + accelerator=self.accelerator, + vae=self.vae, + unet=unet, + text_encoder=text_encoder, + train_dataloader=train_dataloader, + val_dataloader=val_dataloader, + seed=seed, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + noise_scheduler=self.noise_scheduler, + 
prior_loss_weight=prior_loss_weight, + sample_frequency=sample_frequency, + checkpoint_frequency=checkpoint_frequency, + global_step_offset=global_step_offset, + num_epochs=num_train_epochs, + ) + + self.accelerator.free_memory() diff --git a/trainer/dreambooth.py b/trainer/dreambooth.py new file mode 100644 index 0000000..e69de29 diff --git a/trainer/ti.py b/trainer/ti.py new file mode 100644 index 0000000..15cf747 --- /dev/null +++ b/trainer/ti.py @@ -0,0 +1,164 @@ +from contextlib import contextmanager, nullcontext + +import torch + +from slugify import slugify + +from diffusers import UNet2DConditionModel +from transformers import CLIPTextModel + +from trainer.base import TrainingStrategy, Checkpointer +from training.util import EMAModel + + +class TextualInversionCheckpointer(Checkpointer): + def __init__( + self, + ema_embeddings: EMAModel, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + + self.ema_embeddings = ema_embeddings + + @torch.no_grad() + def checkpoint(self, step, postfix): + print(f"Saving checkpoint for step {step}...") + + checkpoints_path = self.output_dir.joinpath("checkpoints") + checkpoints_path.mkdir(parents=True, exist_ok=True) + + text_encoder = self.accelerator.unwrap_model(self.text_encoder) + + ema_context = self.ema_embeddings.apply_temporary( + text_encoder.text_model.embeddings.temp_token_embedding.parameters() + ) if self.ema_embeddings is not None else nullcontext() + + with ema_context: + for (token, ids) in zip(self.placeholder_tokens, self.placeholder_token_ids): + text_encoder.text_model.embeddings.save_embed( + ids, + checkpoints_path.joinpath(f"{slugify(token)}_{step}_{postfix}.bin") + ) + + @torch.inference_mode() + def save_samples(self, step): + ema_context = self.ema_embeddings.apply_temporary( + self.text_encoder.text_model.embeddings.temp_token_embedding.parameters() + ) if self.ema_embeddings is not None else nullcontext() + + with ema_context: + super().save_samples(step) + + +class TextualInversionTrainingStrategy(TrainingStrategy): + def __init__( + self, + unet: UNet2DConditionModel, + text_encoder: CLIPTextModel, + placeholder_tokens: list[str], + placeholder_token_ids: list[list[int]], + learning_rate: float, + gradient_checkpointing: bool = False, + use_emb_decay: bool = False, + emb_decay_target: float = 0.4, + emb_decay_factor: float = 1, + emb_decay_start: float = 1e-4, + use_ema: bool = False, + ema_inv_gamma: float = 1.0, + ema_power: int = 1, + ema_max_decay: float = 0.9999, + *args, + **kwargs, + ): + super().__init__( + unet=unet, + text_encoder=text_encoder, + *args, + **kwargs + ) + + self.text_encoder = text_encoder + self.unet = unet + + self.placeholder_tokens = placeholder_tokens + self.placeholder_token_ids = placeholder_token_ids + + self.gradient_checkpointing = gradient_checkpointing + + self.learning_rate = learning_rate + self.use_emb_decay = use_emb_decay + self.emb_decay_target = emb_decay_target + self.emb_decay_factor = emb_decay_factor + self.emb_decay_start = emb_decay_start + + self.text_encoder.text_model.embeddings.temp_token_embedding.requires_grad_(True) + + self.ema_embeddings = None + + if use_ema: + self.ema_embeddings = EMAModel( + self.text_encoder.text_model.embeddings.temp_token_embedding.parameters(), + inv_gamma=ema_inv_gamma, + power=ema_power, + max_value=ema_max_decay, + ) + + self.checkpointer = TextualInversionCheckpointer( + unet=unet, + text_encoder=text_encoder, + ema_embeddings=self.ema_embeddings, + *args, + **kwargs + ) + + @property + def main_model(self): + return 
self.text_encoder + + @contextmanager + def on_train(self, epoch: int): + try: + if self.gradient_checkpointing: + self.unet.train() + + with super().on_eval(): + yield + finally: + pass + + @contextmanager + def on_eval(self): + try: + if self.gradient_checkpointing: + self.unet.eval() + + ema_context = self.ema_embeddings.apply_temporary( + self.text_encoder.text_model.embeddings.temp_token_embedding.parameters() + ) if self.ema_embeddings is not None else nullcontext() + + with ema_context, super().on_eval(): + yield + finally: + pass + + @torch.no_grad() + def on_after_optimize(self, lr: float): + if self.use_emb_decay: + self.text_encoder.text_model.embeddings.normalize( + self.emb_decay_target, + min(1.0, max(0.0, self.emb_decay_factor * ((lr - self.emb_decay_start) / (self.learning_rate - self.emb_decay_start)))) + ) + + if self.ema_embeddings is not None: + self.ema_embeddings.step(self.text_encoder.text_model.embeddings.temp_token_embedding.parameters()) + + def on_log(self): + log = super().on_log() + added = {} + + if self.ema_embeddings is not None: + added = {"ema_decay": self.ema_embeddings.decay} + + return log.update(added) diff --git a/training/common.py b/training/common.py deleted file mode 100644 index 5d1e3f9..0000000 --- a/training/common.py +++ /dev/null @@ -1,370 +0,0 @@ -import math -from contextlib import _GeneratorContextManager, nullcontext -from typing import Callable, Any, Tuple, Union - -import torch -import torch.nn.functional as F -from torch.utils.data import DataLoader - -from accelerate import Accelerator -from transformers import CLIPTextModel -from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel, DPMSolverMultistepScheduler - -from tqdm.auto import tqdm - -from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion -from models.clip.embeddings import ManagedCLIPTextEmbeddings, patch_managed_embeddings -from models.clip.util import get_extended_embeddings -from models.clip.tokenizer import MultiCLIPTokenizer -from training.util import AverageMeter, CheckpointerBase - - -def noop(*args, **kwards): - pass - - -def noop_ctx(*args, **kwards): - return nullcontext() - - -def noop_on_log(): - return {} - - -def generate_class_images( - accelerator, - text_encoder, - vae, - unet, - tokenizer, - scheduler, - data_train, - sample_batch_size, - sample_image_size, - sample_steps -): - missing_data = [item for item in data_train if not item.class_image_path.exists()] - - if len(missing_data) == 0: - return - - batched_data = [ - missing_data[i:i+sample_batch_size] - for i in range(0, len(missing_data), sample_batch_size) - ] - - pipeline = VlpnStableDiffusion( - text_encoder=text_encoder, - vae=vae, - unet=unet, - tokenizer=tokenizer, - scheduler=scheduler, - ).to(accelerator.device) - pipeline.set_progress_bar_config(dynamic_ncols=True) - - with torch.inference_mode(): - for batch in batched_data: - image_name = [item.class_image_path for item in batch] - prompt = [item.cprompt for item in batch] - nprompt = [item.nprompt for item in batch] - - images = pipeline( - prompt=prompt, - negative_prompt=nprompt, - height=sample_image_size, - width=sample_image_size, - num_inference_steps=sample_steps - ).images - - for i, image in enumerate(images): - image.save(image_name[i]) - - del pipeline - - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - -def get_models(pretrained_model_name_or_path: str): - tokenizer = MultiCLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='tokenizer') - text_encoder = 
CLIPTextModel.from_pretrained(pretrained_model_name_or_path, subfolder='text_encoder') - vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder='vae') - unet = UNet2DConditionModel.from_pretrained(pretrained_model_name_or_path, subfolder='unet') - noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_name_or_path, subfolder='scheduler') - sample_scheduler = DPMSolverMultistepScheduler.from_pretrained( - pretrained_model_name_or_path, subfolder='scheduler') - - vae.enable_slicing() - vae.set_use_memory_efficient_attention_xformers(True) - unet.set_use_memory_efficient_attention_xformers(True) - - embeddings = patch_managed_embeddings(text_encoder) - - return tokenizer, text_encoder, vae, unet, noise_scheduler, sample_scheduler, embeddings - - -def add_placeholder_tokens( - tokenizer: MultiCLIPTokenizer, - embeddings: ManagedCLIPTextEmbeddings, - placeholder_tokens: list[str], - initializer_tokens: list[str], - num_vectors: Union[list[int], int] -): - initializer_token_ids = [ - tokenizer.encode(token, add_special_tokens=False) - for token in initializer_tokens - ] - placeholder_token_ids = tokenizer.add_multi_tokens(placeholder_tokens, num_vectors) - - embeddings.resize(len(tokenizer)) - - for (placeholder_token_id, initializer_token_id) in zip(placeholder_token_ids, initializer_token_ids): - embeddings.add_embed(placeholder_token_id, initializer_token_id) - - return placeholder_token_ids, initializer_token_ids - - -def loss_step( - vae: AutoencoderKL, - noise_scheduler: DDPMScheduler, - unet: UNet2DConditionModel, - text_encoder: CLIPTextModel, - prior_loss_weight: float, - seed: int, - step: int, - batch: dict[str, Any], - eval: bool = False -): - # Convert images to latent space - latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach() - latents = latents * 0.18215 - - generator = torch.Generator(device=latents.device).manual_seed(seed + step) if eval else None - - # Sample noise that we'll add to the latents - noise = torch.randn( - latents.shape, - dtype=latents.dtype, - layout=latents.layout, - device=latents.device, - generator=generator - ) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint( - 0, - noise_scheduler.config.num_train_timesteps, - (bsz,), - generator=generator, - device=latents.device, - ) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - noisy_latents = noisy_latents.to(dtype=unet.dtype) - - # Get the text embedding for conditioning - encoder_hidden_states = get_extended_embeddings( - text_encoder, - batch["input_ids"], - batch["attention_mask"] - ) - encoder_hidden_states = encoder_hidden_states.to(dtype=unet.dtype) - - # Predict the noise residual - model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - if batch["with_prior"].all(): - # Chunk the noise and model_pred into two parts and compute the loss on each part separately. 
- model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) - target, target_prior = torch.chunk(target, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - # Compute prior loss - prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. - loss = loss + prior_loss_weight * prior_loss - else: - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - acc = (model_pred == target).float().mean() - - return loss, acc, bsz - - -def train_loop( - accelerator: Accelerator, - optimizer: torch.optim.Optimizer, - lr_scheduler: torch.optim.lr_scheduler._LRScheduler, - model: torch.nn.Module, - checkpointer: CheckpointerBase, - train_dataloader: DataLoader, - val_dataloader: DataLoader, - loss_step: Union[Callable[[int, Any], Tuple[Any, Any, int]], Callable[[int, Any, bool], Tuple[Any, Any, int]]], - sample_frequency: int = 10, - checkpoint_frequency: int = 50, - global_step_offset: int = 0, - num_epochs: int = 100, - on_log: Callable[[], dict[str, Any]] = noop_on_log, - on_train: Callable[[int], _GeneratorContextManager] = noop_ctx, - on_before_optimize: Callable[[int], None] = noop, - on_after_optimize: Callable[[float], None] = noop, - on_eval: Callable[[], _GeneratorContextManager] = noop_ctx -): - num_training_steps_per_epoch = math.ceil(len(train_dataloader) / accelerator.gradient_accumulation_steps) - num_val_steps_per_epoch = len(val_dataloader) - - num_training_steps = num_training_steps_per_epoch * num_epochs - num_val_steps = num_val_steps_per_epoch * num_epochs - - global_step = 0 - - avg_loss = AverageMeter() - avg_acc = AverageMeter() - - avg_loss_val = AverageMeter() - avg_acc_val = AverageMeter() - - max_acc_val = 0.0 - - local_progress_bar = tqdm( - range(num_training_steps_per_epoch + num_val_steps_per_epoch), - disable=not accelerator.is_local_main_process, - dynamic_ncols=True - ) - local_progress_bar.set_description(f"Epoch 1 / {num_epochs}") - - global_progress_bar = tqdm( - range(num_training_steps + num_val_steps), - disable=not accelerator.is_local_main_process, - dynamic_ncols=True - ) - global_progress_bar.set_description("Total progress") - - try: - for epoch in range(num_epochs): - if accelerator.is_main_process: - if epoch % sample_frequency == 0: - checkpointer.save_samples(global_step + global_step_offset) - - if epoch % checkpoint_frequency == 0 and epoch != 0: - checkpointer.checkpoint(global_step + global_step_offset, "training") - - local_progress_bar.set_description(f"Epoch {epoch + 1} / {num_epochs}") - local_progress_bar.reset() - - model.train() - - with on_train(epoch): - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(model): - loss, acc, bsz = loss_step(step, batch) - - accelerator.backward(loss) - - on_before_optimize(epoch) - - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad(set_to_none=True) - - avg_loss.update(loss.detach_(), bsz) - avg_acc.update(acc.detach_(), bsz) - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - on_after_optimize(lr_scheduler.get_last_lr()[0]) - - local_progress_bar.update(1) - global_progress_bar.update(1) - - global_step += 1 - - logs = { - "train/loss": avg_loss.avg.item(), - "train/acc": avg_acc.avg.item(), - "train/cur_loss": loss.item(), - "train/cur_acc": acc.item(), - "lr": lr_scheduler.get_last_lr()[0], - } - logs.update(on_log()) - - 
accelerator.log(logs, step=global_step) - - local_progress_bar.set_postfix(**logs) - - if global_step >= num_training_steps: - break - - accelerator.wait_for_everyone() - - model.eval() - - cur_loss_val = AverageMeter() - cur_acc_val = AverageMeter() - - with torch.inference_mode(), on_eval(): - for step, batch in enumerate(val_dataloader): - loss, acc, bsz = loss_step(step, batch, True) - - loss = loss.detach_() - acc = acc.detach_() - - cur_loss_val.update(loss, bsz) - cur_acc_val.update(acc, bsz) - - avg_loss_val.update(loss, bsz) - avg_acc_val.update(acc, bsz) - - local_progress_bar.update(1) - global_progress_bar.update(1) - - logs = { - "val/loss": avg_loss_val.avg.item(), - "val/acc": avg_acc_val.avg.item(), - "val/cur_loss": loss.item(), - "val/cur_acc": acc.item(), - } - local_progress_bar.set_postfix(**logs) - - logs["val/cur_loss"] = cur_loss_val.avg.item() - logs["val/cur_acc"] = cur_acc_val.avg.item() - - accelerator.log(logs, step=global_step) - - local_progress_bar.clear() - global_progress_bar.clear() - - if accelerator.is_main_process: - if avg_acc_val.avg.item() > max_acc_val: - accelerator.print( - f"Global step {global_step}: Validation accuracy reached new maximum: {max_acc_val:.2e} -> {avg_acc_val.avg.item():.2e}") - checkpointer.checkpoint(global_step + global_step_offset, "milestone") - max_acc_val = avg_acc_val.avg.item() - - # Create the pipeline using using the trained modules and save it. - if accelerator.is_main_process: - print("Finished!") - checkpointer.checkpoint(global_step + global_step_offset, "end") - checkpointer.save_samples(global_step + global_step_offset) - accelerator.end_training() - - except KeyboardInterrupt: - if accelerator.is_main_process: - print("Interrupted") - checkpointer.checkpoint(global_step + global_step_offset, "end") - accelerator.end_training() - quit() diff --git a/training/functional.py b/training/functional.py new file mode 100644 index 0000000..2d81eca --- /dev/null +++ b/training/functional.py @@ -0,0 +1,365 @@ +import math +from contextlib import _GeneratorContextManager, nullcontext +from typing import Callable, Any, Tuple, Union + +import torch +import torch.nn.functional as F +from torch.utils.data import DataLoader + +from accelerate import Accelerator +from transformers import CLIPTextModel +from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel, DPMSolverMultistepScheduler + +from tqdm.auto import tqdm + +from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion +from models.clip.embeddings import ManagedCLIPTextEmbeddings, patch_managed_embeddings +from models.clip.util import get_extended_embeddings +from models.clip.tokenizer import MultiCLIPTokenizer +from training.util import AverageMeter +from trainer.base import Checkpointer + + +def const(result=None): + def fn(*args, **kwargs): + return result + return fn + + +def generate_class_images( + accelerator, + text_encoder, + vae, + unet, + tokenizer, + scheduler, + data_train, + sample_batch_size, + sample_image_size, + sample_steps +): + missing_data = [item for item in data_train if not item.class_image_path.exists()] + + if len(missing_data) == 0: + return + + batched_data = [ + missing_data[i:i+sample_batch_size] + for i in range(0, len(missing_data), sample_batch_size) + ] + + pipeline = VlpnStableDiffusion( + text_encoder=text_encoder, + vae=vae, + unet=unet, + tokenizer=tokenizer, + scheduler=scheduler, + ).to(accelerator.device) + pipeline.set_progress_bar_config(dynamic_ncols=True) + + with torch.inference_mode(): + 
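+        # Only items whose class_image_path does not exist yet were collected above;
+        # generate those class images batch by batch and save them to their recorded paths.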
for batch in batched_data: + image_name = [item.class_image_path for item in batch] + prompt = [item.cprompt for item in batch] + nprompt = [item.nprompt for item in batch] + + images = pipeline( + prompt=prompt, + negative_prompt=nprompt, + height=sample_image_size, + width=sample_image_size, + num_inference_steps=sample_steps + ).images + + for i, image in enumerate(images): + image.save(image_name[i]) + + del pipeline + + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + +def get_models(pretrained_model_name_or_path: str): + tokenizer = MultiCLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='tokenizer') + text_encoder = CLIPTextModel.from_pretrained(pretrained_model_name_or_path, subfolder='text_encoder') + vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder='vae') + unet = UNet2DConditionModel.from_pretrained(pretrained_model_name_or_path, subfolder='unet') + noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_name_or_path, subfolder='scheduler') + sample_scheduler = DPMSolverMultistepScheduler.from_pretrained( + pretrained_model_name_or_path, subfolder='scheduler') + + vae.enable_slicing() + vae.set_use_memory_efficient_attention_xformers(True) + unet.set_use_memory_efficient_attention_xformers(True) + + embeddings = patch_managed_embeddings(text_encoder) + + return tokenizer, text_encoder, vae, unet, noise_scheduler, sample_scheduler, embeddings + + +def add_placeholder_tokens( + tokenizer: MultiCLIPTokenizer, + embeddings: ManagedCLIPTextEmbeddings, + placeholder_tokens: list[str], + initializer_tokens: list[str], + num_vectors: Union[list[int], int] +): + initializer_token_ids = [ + tokenizer.encode(token, add_special_tokens=False) + for token in initializer_tokens + ] + placeholder_token_ids = tokenizer.add_multi_tokens(placeholder_tokens, num_vectors) + + embeddings.resize(len(tokenizer)) + + for (placeholder_token_id, initializer_token_id) in zip(placeholder_token_ids, initializer_token_ids): + embeddings.add_embed(placeholder_token_id, initializer_token_id) + + return placeholder_token_ids, initializer_token_ids + + +def loss_step( + vae: AutoencoderKL, + noise_scheduler: DDPMScheduler, + unet: UNet2DConditionModel, + text_encoder: CLIPTextModel, + prior_loss_weight: float, + seed: int, + step: int, + batch: dict[str, Any], + eval: bool = False +): + # Convert images to latent space + latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach() + latents = latents * 0.18215 + + generator = torch.Generator(device=latents.device).manual_seed(seed + step) if eval else None + + # Sample noise that we'll add to the latents + noise = torch.randn( + latents.shape, + dtype=latents.dtype, + layout=latents.layout, + device=latents.device, + generator=generator + ) + bsz = latents.shape[0] + # Sample a random timestep for each image + timesteps = torch.randint( + 0, + noise_scheduler.config.num_train_timesteps, + (bsz,), + generator=generator, + device=latents.device, + ) + timesteps = timesteps.long() + + # Add noise to the latents according to the noise magnitude at each timestep + # (this is the forward diffusion process) + noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) + noisy_latents = noisy_latents.to(dtype=unet.dtype) + + # Get the text embedding for conditioning + encoder_hidden_states = get_extended_embeddings( + text_encoder, + batch["input_ids"], + batch["attention_mask"] + ) + encoder_hidden_states = encoder_hidden_states.to(dtype=unet.dtype) + + # Predict the noise 
residual + model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample + + # Get the target for loss depending on the prediction type + if noise_scheduler.config.prediction_type == "epsilon": + target = noise + elif noise_scheduler.config.prediction_type == "v_prediction": + target = noise_scheduler.get_velocity(latents, noise, timesteps) + else: + raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") + + if batch["with_prior"].all(): + # Chunk the noise and model_pred into two parts and compute the loss on each part separately. + model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) + target, target_prior = torch.chunk(target, 2, dim=0) + + # Compute instance loss + loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") + + # Compute prior loss + prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") + + # Add the prior loss to the instance loss. + loss = loss + prior_loss_weight * prior_loss + else: + loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") + + acc = (model_pred == target).float().mean() + + return loss, acc, bsz + + +def train_loop( + accelerator: Accelerator, + optimizer: torch.optim.Optimizer, + lr_scheduler: torch.optim.lr_scheduler._LRScheduler, + model: torch.nn.Module, + checkpointer: Checkpointer, + train_dataloader: DataLoader, + val_dataloader: DataLoader, + loss_step: Union[Callable[[int, Any], Tuple[Any, Any, int]], Callable[[int, Any, bool], Tuple[Any, Any, int]]], + sample_frequency: int = 10, + checkpoint_frequency: int = 50, + global_step_offset: int = 0, + num_epochs: int = 100, + on_log: Callable[[], dict[str, Any]] = const({}), + on_train: Callable[[int], _GeneratorContextManager] = const(nullcontext()), + on_before_optimize: Callable[[int], None] = const(), + on_after_optimize: Callable[[float], None] = const(), + on_eval: Callable[[], _GeneratorContextManager] = const(nullcontext()) +): + num_training_steps_per_epoch = math.ceil(len(train_dataloader) / accelerator.gradient_accumulation_steps) + num_val_steps_per_epoch = len(val_dataloader) + + num_training_steps = num_training_steps_per_epoch * num_epochs + num_val_steps = num_val_steps_per_epoch * num_epochs + + global_step = 0 + + avg_loss = AverageMeter() + avg_acc = AverageMeter() + + avg_loss_val = AverageMeter() + avg_acc_val = AverageMeter() + + max_acc_val = 0.0 + + local_progress_bar = tqdm( + range(num_training_steps_per_epoch + num_val_steps_per_epoch), + disable=not accelerator.is_local_main_process, + dynamic_ncols=True + ) + local_progress_bar.set_description(f"Epoch 1 / {num_epochs}") + + global_progress_bar = tqdm( + range(num_training_steps + num_val_steps), + disable=not accelerator.is_local_main_process, + dynamic_ncols=True + ) + global_progress_bar.set_description("Total progress") + + try: + for epoch in range(num_epochs): + if accelerator.is_main_process: + if epoch % sample_frequency == 0: + checkpointer.save_samples(global_step + global_step_offset) + + if epoch % checkpoint_frequency == 0 and epoch != 0: + checkpointer.checkpoint(global_step + global_step_offset, "training") + + local_progress_bar.set_description(f"Epoch {epoch + 1} / {num_epochs}") + local_progress_bar.reset() + + model.train() + + with on_train(epoch): + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(model): + loss, acc, bsz = loss_step(step, batch) + + accelerator.backward(loss) + + on_before_optimize(epoch) + + optimizer.step() + lr_scheduler.step() + 
optimizer.zero_grad(set_to_none=True)
+
+ avg_loss.update(loss.detach_(), bsz)
+ avg_acc.update(acc.detach_(), bsz)
+
+ # Checks if the accelerator has performed an optimization step behind the scenes
+ if accelerator.sync_gradients:
+ on_after_optimize(lr_scheduler.get_last_lr()[0])
+
+ local_progress_bar.update(1)
+ global_progress_bar.update(1)
+
+ global_step += 1
+
+ logs = {
+ "train/loss": avg_loss.avg.item(),
+ "train/acc": avg_acc.avg.item(),
+ "train/cur_loss": loss.item(),
+ "train/cur_acc": acc.item(),
+ "lr": lr_scheduler.get_last_lr()[0],
+ }
+ logs.update(on_log())
+
+ accelerator.log(logs, step=global_step)
+
+ local_progress_bar.set_postfix(**logs)
+
+ if global_step >= num_training_steps:
+ break
+
+ accelerator.wait_for_everyone()
+
+ model.eval()
+
+ cur_loss_val = AverageMeter()
+ cur_acc_val = AverageMeter()
+
+ with torch.inference_mode(), on_eval():
+ for step, batch in enumerate(val_dataloader):
+ loss, acc, bsz = loss_step(step, batch, True)
+
+ loss = loss.detach_()
+ acc = acc.detach_()
+
+ cur_loss_val.update(loss, bsz)
+ cur_acc_val.update(acc, bsz)
+
+ avg_loss_val.update(loss, bsz)
+ avg_acc_val.update(acc, bsz)
+
+ local_progress_bar.update(1)
+ global_progress_bar.update(1)
+
+ logs = {
+ "val/loss": avg_loss_val.avg.item(),
+ "val/acc": avg_acc_val.avg.item(),
+ "val/cur_loss": loss.item(),
+ "val/cur_acc": acc.item(),
+ }
+ local_progress_bar.set_postfix(**logs)
+
+ logs["val/cur_loss"] = cur_loss_val.avg.item()
+ logs["val/cur_acc"] = cur_acc_val.avg.item()
+
+ accelerator.log(logs, step=global_step)
+
+ local_progress_bar.clear()
+ global_progress_bar.clear()
+
+ if accelerator.is_main_process:
+ if avg_acc_val.avg.item() > max_acc_val:
+ accelerator.print(
+ f"Global step {global_step}: Validation accuracy reached new maximum: {max_acc_val:.2e} -> {avg_acc_val.avg.item():.2e}")
+ checkpointer.checkpoint(global_step + global_step_offset, "milestone")
+ max_acc_val = avg_acc_val.avg.item()
+
+ # Create the pipeline using the trained modules and save it.
+ if accelerator.is_main_process: + print("Finished!") + checkpointer.checkpoint(global_step + global_step_offset, "end") + checkpointer.save_samples(global_step + global_step_offset) + accelerator.end_training() + + except KeyboardInterrupt: + if accelerator.is_main_process: + print("Interrupted") + checkpointer.checkpoint(global_step + global_step_offset, "end") + accelerator.end_training() + quit() diff --git a/training/lora.py b/training/lora.py deleted file mode 100644 index 3857d78..0000000 --- a/training/lora.py +++ /dev/null @@ -1,107 +0,0 @@ -import torch -import torch.nn as nn - -from diffusers import ModelMixin, ConfigMixin -from diffusers.configuration_utils import register_to_config -from diffusers.models.cross_attention import CrossAttention -from diffusers.utils.import_utils import is_xformers_available - - -if is_xformers_available(): - import xformers - import xformers.ops -else: - xformers = None - - -class LoRALinearLayer(nn.Module): - def __init__(self, in_features, out_features, rank=4): - super().__init__() - - if rank > min(in_features, out_features): - raise ValueError( - f"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}" - ) - - self.lora_down = nn.Linear(in_features, rank, bias=False) - self.lora_up = nn.Linear(rank, out_features, bias=False) - self.scale = 1.0 - - nn.init.normal_(self.lora_down.weight, std=1 / rank) - nn.init.zeros_(self.lora_up.weight) - - def forward(self, hidden_states): - down_hidden_states = self.lora_down(hidden_states) - up_hidden_states = self.lora_up(down_hidden_states) - - return up_hidden_states - - -class LoRACrossAttnProcessor(nn.Module): - def __init__(self, hidden_size, cross_attention_dim=None, rank=4): - super().__init__() - - self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size) - self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size) - self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size) - self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size) - - def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0): - batch_size, sequence_length, _ = hidden_states.shape - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length) - - query = attn.to_q(hidden_states) + scale * self.to_q_lora(hidden_states) - query = attn.head_to_batch_dim(query) - - encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states - - key = attn.to_k(encoder_hidden_states) + scale * self.to_k_lora(encoder_hidden_states) - value = attn.to_v(encoder_hidden_states) + scale * self.to_v_lora(encoder_hidden_states) - - key = attn.head_to_batch_dim(key) - value = attn.head_to_batch_dim(value) - - attention_probs = attn.get_attention_scores(query, key, attention_mask) - hidden_states = torch.bmm(attention_probs, value) - hidden_states = attn.batch_to_head_dim(hidden_states) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) + scale * self.to_out_lora(hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - return hidden_states - - -class LoRAXFormersCrossAttnProcessor(nn.Module): - def __init__(self, hidden_size, cross_attention_dim, rank=4): - super().__init__() - - self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size) - self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size) - self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size) - self.to_out_lora = 
LoRALinearLayer(hidden_size, hidden_size) - - def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0): - batch_size, sequence_length, _ = hidden_states.shape - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length) - - query = attn.to_q(hidden_states) + scale * self.to_q_lora(hidden_states) - query = attn.head_to_batch_dim(query).contiguous() - - encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states - - key = attn.to_k(encoder_hidden_states) + scale * self.to_k_lora(encoder_hidden_states) - value = attn.to_v(encoder_hidden_states) + scale * self.to_v_lora(encoder_hidden_states) - - key = attn.head_to_batch_dim(key).contiguous() - value = attn.head_to_batch_dim(value).contiguous() - - hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) + scale * self.to_out_lora(hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - return hidden_states diff --git a/training/util.py b/training/util.py index 781cf04..a292edd 100644 --- a/training/util.py +++ b/training/util.py @@ -1,12 +1,40 @@ from pathlib import Path import json import copy -import itertools -from typing import Iterable, Optional +from typing import Iterable, Union from contextlib import contextmanager import torch -from PIL import Image + +from transformers import CLIPTextModel +from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel, DPMSolverMultistepScheduler + +from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion +from models.clip.tokenizer import MultiCLIPTokenizer +from models.clip.embeddings import ManagedCLIPTextEmbeddings, patch_managed_embeddings + + +class TrainingStrategy(): + @property + def main_model(self) -> torch.nn.Module: + ... + + @contextmanager + def on_train(self, epoch: int): + yield + + @contextmanager + def on_eval(self): + yield + + def on_before_optimize(self, epoch: int): + ... + + def on_after_optimize(self, lr: float): + ... 
+ + def on_log(): + return {} def save_args(basepath: Path, args, extra={}): @@ -16,12 +44,93 @@ def save_args(basepath: Path, args, extra={}): json.dump(info, f, indent=4) -def make_grid(images, rows, cols): - w, h = images[0].size - grid = Image.new('RGB', size=(cols*w, rows*h)) - for i, image in enumerate(images): - grid.paste(image, box=(i % cols*w, i//cols*h)) - return grid +def generate_class_images( + accelerator, + text_encoder, + vae, + unet, + tokenizer, + scheduler, + data_train, + sample_batch_size, + sample_image_size, + sample_steps +): + missing_data = [item for item in data_train if not item.class_image_path.exists()] + + if len(missing_data) == 0: + return + + batched_data = [ + missing_data[i:i+sample_batch_size] + for i in range(0, len(missing_data), sample_batch_size) + ] + + pipeline = VlpnStableDiffusion( + text_encoder=text_encoder, + vae=vae, + unet=unet, + tokenizer=tokenizer, + scheduler=scheduler, + ).to(accelerator.device) + pipeline.set_progress_bar_config(dynamic_ncols=True) + + with torch.inference_mode(): + for batch in batched_data: + image_name = [item.class_image_path for item in batch] + prompt = [item.cprompt for item in batch] + nprompt = [item.nprompt for item in batch] + + images = pipeline( + prompt=prompt, + negative_prompt=nprompt, + height=sample_image_size, + width=sample_image_size, + num_inference_steps=sample_steps + ).images + + for i, image in enumerate(images): + image.save(image_name[i]) + + del pipeline + + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + +def get_models(pretrained_model_name_or_path: str): + tokenizer = MultiCLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='tokenizer') + text_encoder = CLIPTextModel.from_pretrained(pretrained_model_name_or_path, subfolder='text_encoder') + vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder='vae') + unet = UNet2DConditionModel.from_pretrained(pretrained_model_name_or_path, subfolder='unet') + noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_name_or_path, subfolder='scheduler') + sample_scheduler = DPMSolverMultistepScheduler.from_pretrained( + pretrained_model_name_or_path, subfolder='scheduler') + + embeddings = patch_managed_embeddings(text_encoder) + + return tokenizer, text_encoder, vae, unet, noise_scheduler, sample_scheduler, embeddings + + +def add_placeholder_tokens( + tokenizer: MultiCLIPTokenizer, + embeddings: ManagedCLIPTextEmbeddings, + placeholder_tokens: list[str], + initializer_tokens: list[str], + num_vectors: Union[list[int], int] +): + initializer_token_ids = [ + tokenizer.encode(token, add_special_tokens=False) + for token in initializer_tokens + ] + placeholder_token_ids = tokenizer.add_multi_tokens(placeholder_tokens, num_vectors) + + embeddings.resize(len(tokenizer)) + + for (placeholder_token_id, initializer_token_id) in zip(placeholder_token_ids, initializer_token_ids): + embeddings.add_embed(placeholder_token_id, initializer_token_id) + + return placeholder_token_ids, initializer_token_ids class AverageMeter: @@ -38,93 +147,6 @@ class AverageMeter: self.avg = self.sum / self.count -class CheckpointerBase: - def __init__( - self, - train_dataloader, - val_dataloader, - output_dir: Path, - sample_steps: int = 20, - sample_guidance_scale: float = 7.5, - sample_image_size: int = 768, - sample_batches: int = 1, - sample_batch_size: int = 1, - seed: Optional[int] = None - ): - self.train_dataloader = train_dataloader - self.val_dataloader = val_dataloader - self.output_dir = output_dir - 
self.sample_image_size = sample_image_size - self.sample_steps = sample_steps - self.sample_guidance_scale = sample_guidance_scale - self.sample_batches = sample_batches - self.sample_batch_size = sample_batch_size - self.seed = seed if seed is not None else torch.random.seed() - - @torch.no_grad() - def checkpoint(self, step: int, postfix: str): - pass - - @torch.inference_mode() - def save_samples(self, pipeline, step: int): - samples_path = Path(self.output_dir).joinpath("samples") - - generator = torch.Generator(device=pipeline.device).manual_seed(self.seed) - - grid_cols = min(self.sample_batch_size, 4) - grid_rows = (self.sample_batches * self.sample_batch_size) // grid_cols - - for pool, data, gen in [ - ("stable", self.val_dataloader, generator), - ("val", self.val_dataloader, None), - ("train", self.train_dataloader, None) - ]: - all_samples = [] - file_path = samples_path.joinpath(pool, f"step_{step}.jpg") - file_path.parent.mkdir(parents=True, exist_ok=True) - - batches = list(itertools.islice(itertools.cycle(data), self.sample_batch_size * self.sample_batches)) - prompt_ids = [ - prompt - for batch in batches - for prompt in batch["prompt_ids"] - ] - nprompt_ids = [ - prompt - for batch in batches - for prompt in batch["nprompt_ids"] - ] - - for i in range(self.sample_batches): - start = i * self.sample_batch_size - end = (i + 1) * self.sample_batch_size - prompt = prompt_ids[start:end] - nprompt = nprompt_ids[start:end] - - samples = pipeline( - prompt=prompt, - negative_prompt=nprompt, - height=self.sample_image_size, - width=self.sample_image_size, - generator=gen, - guidance_scale=self.sample_guidance_scale, - num_inference_steps=self.sample_steps, - output_type='pil' - ).images - - all_samples += samples - - del samples - - image_grid = make_grid(all_samples, grid_rows, grid_cols) - image_grid.save(file_path, quality=85) - - del all_samples - del image_grid - - del generator - - # Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14 class EMAModel: """ -- cgit v1.2.3-70-g09d2
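The new training/functional.py keeps model loading, placeholder-token setup, the per-step loss and the training loop as free functions. The sketch below shows one way they can be wired together for a textual-inversion-style run, assuming the signatures introduced in this patch; the function name run_training, the model id, the hyperparameter values and the choice of the text encoder as the trained model are illustrative assumptions, and the optimizer, LR scheduler, checkpointer and dataloaders are accepted as arguments rather than constructed here.

from functools import partial

from accelerate import Accelerator
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import DataLoader

from trainer.base import Checkpointer
from training.functional import add_placeholder_tokens, get_models, loss_step, train_loop


def run_training(
    accelerator: Accelerator,
    optimizer: Optimizer,
    lr_scheduler: _LRScheduler,
    checkpointer: Checkpointer,
    train_dataloader: DataLoader,
    val_dataloader: DataLoader,
):
    # Load all sub-models once; get_models() also patches the managed embeddings.
    tokenizer, text_encoder, vae, unet, noise_scheduler, sample_scheduler, embeddings = get_models(
        "stabilityai/stable-diffusion-2-1"  # assumed model id
    )

    # Register a new placeholder token and initialize it from an existing word.
    add_placeholder_tokens(
        tokenizer=tokenizer,
        embeddings=embeddings,
        placeholder_tokens=["<concept>"],
        initializer_tokens=["person"],
        num_vectors=1,
    )

    # Bind the static arguments; train_loop() then calls the result as
    # loss_step(step, batch) while training and loss_step(step, batch, True)
    # during validation.
    bound_loss_step = partial(
        loss_step,
        vae,
        noise_scheduler,
        unet,
        text_encoder,
        1.0,  # prior_loss_weight (assumed)
        42,   # seed (assumed)
    )

    train_loop(
        accelerator=accelerator,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        model=text_encoder,
        checkpointer=checkpointer,
        train_dataloader=train_dataloader,
        val_dataloader=val_dataloader,
        loss_step=bound_loss_step,
        num_epochs=100,
    )

Binding the model arguments with functools.partial is what lets train_loop() stay agnostic about which models are being trained; it only ever sees a callable taking (step, batch) or (step, batch, eval).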
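When batch["with_prior"] is set, loss_step() assumes each batch stacks the instance examples first and the class ("prior") examples second along dim 0, so torch.chunk(model_pred, 2, dim=0) recovers the two halves. Below is a self-contained illustration of that layout and of how the two MSE terms are combined; the tensors are random stand-ins rather than real VAE latents, and the weight value is assumed.

import torch
import torch.nn.functional as F

prior_loss_weight = 1.0  # assumed value

# Two instance examples followed by two class examples, mirroring the batch
# layout the data pipeline is expected to produce for prior preservation.
instance_pred = torch.randn(2, 4, 96, 96)
class_pred = torch.randn(2, 4, 96, 96)
model_pred = torch.cat([instance_pred, class_pred], dim=0)
target = torch.randn_like(model_pred)

# The same split that loss_step() performs.
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)

loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
loss = loss + prior_loss_weight * prior_loss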
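In the validation branch, loss_step() derives its noise generator from seed + step, so a given validation batch is perturbed with the same noise and timesteps every epoch and val/loss stays comparable across epochs. A minimal sketch of that property; the shape and seed are arbitrary.

import torch

seed, step = 42, 7
shape = (2, 4, 96, 96)


def sample_noise() -> torch.Tensor:
    # Same (seed, step) pair -> same generator state -> identical noise.
    generator = torch.Generator(device="cpu").manual_seed(seed + step)
    return torch.randn(shape, generator=generator, device="cpu")


assert torch.equal(sample_noise(), sample_noise())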
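training/util.py now sketches a TrainingStrategy base class whose hooks mirror the callbacks accepted by train_loop(). Below is a hypothetical strategy that trains only the text encoder's embeddings; the class name, the freezing scheme and the on_log payload are assumptions (and on_log is written here as an instance method), so treat it as a sketch of the intended contract rather than code from this patch.

from contextlib import contextmanager

import torch
from transformers import CLIPTextModel

from training.util import TrainingStrategy


class EmbeddingOnlyStrategy(TrainingStrategy):
    """Hypothetical strategy: keep the text encoder frozen except its embeddings."""

    def __init__(self, text_encoder: CLIPTextModel):
        self.text_encoder = text_encoder
        # Freeze everything up front; on_train() re-enables the embeddings.
        self.text_encoder.requires_grad_(False)

    @property
    def main_model(self) -> torch.nn.Module:
        return self.text_encoder

    @contextmanager
    def on_train(self, epoch: int):
        self.text_encoder.text_model.embeddings.requires_grad_(True)
        self.text_encoder.train()
        yield

    @contextmanager
    def on_eval(self):
        self.text_encoder.eval()
        yield

    def on_before_optimize(self, epoch: int):
        # Room for gradient clipping or similar; a no-op in this sketch.
        pass

    def on_after_optimize(self, lr: float):
        pass

    def on_log(self):
        return {"strategy": "embedding-only"}

The two context-manager hooks line up with train_loop()'s on_train/on_eval parameters, which expect callables returning a context manager, while the remaining methods map onto on_before_optimize, on_after_optimize and on_log.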