From dfcfd6bc1db6b9eb12c8321d18fc7a461710e7e0 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Fri, 30 Dec 2022 13:48:26 +0100
Subject: Training script improvements

---
 train_dreambooth.py | 41 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 38 insertions(+), 3 deletions(-)

(limited to 'train_dreambooth.py')

diff --git a/train_dreambooth.py b/train_dreambooth.py
index 202d52c..072150b 100644
--- a/train_dreambooth.py
+++ b/train_dreambooth.py
@@ -22,7 +22,7 @@ from slugify import slugify
 
 from common import load_text_embeddings, load_config
 from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion
-from data.csv import CSVDataModule
+from data.csv import CSVDataModule, CSVDataItem
 from training.optimization import get_one_cycle_schedule
 from training.ti import patch_trainable_embeddings
 from training.util import AverageMeter, CheckpointerBase, save_args
@@ -82,6 +82,18 @@ def parse_args():
         default=[],
         help="A token to use as initializer word."
     )
+    parser.add_argument(
+        "--exclude_keywords",
+        type=str,
+        nargs='*',
+        help="Skip dataset items containing a listed keyword.",
+    )
+    parser.add_argument(
+        "--exclude_modes",
+        type=str,
+        nargs='*',
+        help="Exclude all items with a listed mode.",
+    )
     parser.add_argument(
         "--train_text_encoder",
         action="store_true",
@@ -379,6 +391,12 @@ def parse_args():
     if len(args.placeholder_token) != len(args.initializer_token):
         raise ValueError("Number of items in --placeholder_token and --initializer_token must match")
 
+    if isinstance(args.exclude_keywords, str):
+        args.exclude_keywords = [args.exclude_keywords]
+
+    if isinstance(args.exclude_modes, str):
+        args.exclude_modes = [args.exclude_modes]
+
     if args.output_dir is None:
         raise ValueError("You must specify --output_dir")
 
@@ -636,6 +654,19 @@ def main():
     elif args.mixed_precision == "bf16":
         weight_dtype = torch.bfloat16
 
+    def keyword_filter(item: CSVDataItem):
+        cond2 = args.exclude_keywords is None or not any(
+            keyword in part
+            for keyword in args.exclude_keywords
+            for part in item.prompt
+        )
+        cond3 = args.mode is None or args.mode in item.mode
+        cond4 = args.exclude_modes is None or not any(
+            mode in item.mode
+            for mode in args.exclude_modes
+        )
+        return cond2 and cond3 and cond4
+
     def collate_fn(examples):
         prompts = [example["prompts"] for example in examples]
         cprompts = [example["cprompts"] for example in examples]
@@ -671,12 +702,12 @@ def main():
         num_class_images=args.num_class_images,
         size=args.resolution,
         repeats=args.repeats,
-        mode=args.mode,
         dropout=args.tag_dropout,
         center_crop=args.center_crop,
         template_key=args.train_data_template,
         valid_set_size=args.valid_set_size,
         num_workers=args.dataloader_num_workers,
+        filter=keyword_filter,
         collate_fn=collate_fn
     )
 
@@ -782,6 +813,10 @@ def main():
         config = vars(args).copy()
         config["initializer_token"] = " ".join(config["initializer_token"])
         config["placeholder_token"] = " ".join(config["placeholder_token"])
+        if config["exclude_modes"] is not None:
+            config["exclude_modes"] = " ".join(config["exclude_modes"])
+        if config["exclude_keywords"] is not None:
+            config["exclude_keywords"] = " ".join(config["exclude_keywords"])
         accelerator.init_trackers("dreambooth", config=config)
 
     # Train!
@@ -879,7 +914,7 @@ def main():
                     target, target_prior = torch.chunk(target, 2, dim=0)
 
                     # Compute instance loss
-                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
+                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
 
                     # Compute prior loss
                     prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
--
cgit v1.2.3-54-g00ecf