From baba91864a45939cef4f77f6ca96ade7ae5ef274 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Mon, 24 Oct 2022 23:46:18 +0200
Subject: Advanced datasets

---
 textual_inversion.py | 58 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 34 insertions(+), 24 deletions(-)

(limited to 'textual_inversion.py')

diff --git a/textual_inversion.py b/textual_inversion.py
index c42762f..bcdfd3a 100644
--- a/textual_inversion.py
+++ b/textual_inversion.py
@@ -70,13 +70,13 @@ def parse_args():
     parser.add_argument(
         "--placeholder_token",
         type=str,
-        default="<*>",
+        nargs='*',
         help="A token to use as a placeholder for the concept.",
     )
     parser.add_argument(
         "--initializer_token",
         type=str,
-        default=None,
+        nargs='*',
         help="A token to use as initializer word."
     )
     parser.add_argument(
@@ -299,12 +299,21 @@ def parse_args():
     if args.pretrained_model_name_or_path is None:
         raise ValueError("You must specify --pretrained_model_name_or_path")
 
-    if args.placeholder_token is None:
-        raise ValueError("You must specify --placeholder_token")
+    if isinstance(args.initializer_token, str):
+        args.initializer_token = [args.initializer_token]
 
-    if args.initializer_token is None:
+    if not args.initializer_token:
         raise ValueError("You must specify --initializer_token")
 
+    if isinstance(args.placeholder_token, str):
+        args.placeholder_token = [args.placeholder_token]
+
+    if not args.placeholder_token:
+        args.placeholder_token = [f"<*{i}>" for i in range(len(args.initializer_token))]
+
+    if len(args.placeholder_token) != len(args.initializer_token):
+        raise ValueError("--placeholder_token and --initializer_token must have the same number of items")
+
     if args.output_dir is None:
         raise ValueError("You must specify --output_dir")
 
@@ -373,12 +382,13 @@ class Checkpointer:
 
         unwrapped = self.accelerator.unwrap_model(self.text_encoder)
 
-        # Save a checkpoint
-        learned_embeds = unwrapped.get_input_embeddings().weight[self.placeholder_token_id]
-        learned_embeds_dict = {self.placeholder_token: learned_embeds.detach().cpu()}
+        for (placeholder_token, placeholder_token_id) in zip(self.placeholder_token, self.placeholder_token_id):
+            # Save a checkpoint
+            learned_embeds = unwrapped.get_input_embeddings().weight[placeholder_token_id]
+            learned_embeds_dict = {placeholder_token: learned_embeds.detach().cpu()}
 
-        filename = f"%s_%d_%s.bin" % (slugify(self.placeholder_token), step, postfix)
-        torch.save(learned_embeds_dict, checkpoints_path.joinpath(filename))
+            filename = "%s_%d_%s.bin" % (slugify(placeholder_token), step, postfix)
+            torch.save(learned_embeds_dict, checkpoints_path.joinpath(filename))
 
         del unwrapped
         del learned_embeds
@@ -422,7 +432,7 @@ class Checkpointer:
 
             for i in range(self.sample_batches):
                 batches = [batch for j, batch in data_enum if j * data.batch_size < self.sample_batch_size]
-                prompt = [prompt.format(self.instance_identifier)
+                prompt = [prompt.format(identifier=self.instance_identifier)
                           for batch in batches for prompt in batch["prompts"]][:self.sample_batch_size]
                 nprompt = [prompt for batch in batches for prompt in batch["nprompts"]][:self.sample_batch_size]
 
@@ -498,16 +508,13 @@ def main():
     tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
 
     # Convert the initializer_token, placeholder_token to ids
-    initializer_token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
-    print(f"Initializer token maps to {len(initializer_token_ids)} embeddings.")
-    initializer_token_ids = torch.tensor(initializer_token_ids[:1])
+    initializer_token_ids = torch.stack([
+        torch.tensor(tokenizer.encode(token, add_special_tokens=False)[:1])
+        for token in args.initializer_token
+    ])
 
-    # Add the placeholder token in tokenizer
     num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
-    if num_added_tokens == 0:
-        print(f"Re-using existing token {args.placeholder_token}.")
-    else:
-        print(f"Training new token {args.placeholder_token}.")
+    print(f"Added {num_added_tokens} new tokens.")
 
     placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
 
@@ -533,11 +540,11 @@ def main():
     original_token_embeds = token_embeds.detach().clone().to(accelerator.device)
 
     if args.resume_checkpoint is not None:
-        token_embeds[placeholder_token_id] = torch.load(args.resume_checkpoint)[
-            args.placeholder_token]
+        token_embeds[placeholder_token_id] = torch.stack([torch.load(args.resume_checkpoint)[token] for token in args.placeholder_token])
     else:
         initializer_token_embeddings = text_encoder.get_input_embeddings()(initializer_token_ids)
-        token_embeds[placeholder_token_id] = initializer_token_embeddings
+        for (token_id, embeddings) in zip(placeholder_token_id, initializer_token_embeddings):
+            token_embeds[token_id] = embeddings
 
     # Freeze vae and unet
     freeze_params(vae.parameters())
@@ -648,7 +655,7 @@ def main():
         with torch.inference_mode():
             for batch in batched_data:
                 image_name = [p.class_image_path for p in batch]
-                prompt = [p.prompt.format(args.class_identifier) for p in batch]
+                prompt = [p.prompt.format(identifier=args.class_identifier) for p in batch]
                 nprompt = [p.nprompt for p in batch]
 
                 images = pipeline(
@@ -716,7 +723,10 @@ def main():
     # We need to initialize the trackers we use, and also store our configuration.
     # The trackers initializes automatically on the main process.
     if accelerator.is_main_process:
-        accelerator.init_trackers("textual_inversion", config=vars(args))
+        config = vars(args).copy()
+        config["initializer_token"] = " ".join(config["initializer_token"])
+        config["placeholder_token"] = " ".join(config["placeholder_token"])
+        accelerator.init_trackers("textual_inversion", config=config)
 
     # Train!
     total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
--
cgit v1.2.3-54-g00ecf
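
With this change, the Checkpointer writes one "%s_%d_%s.bin" file per placeholder token, each holding a single {placeholder_token: embedding} dict. Below is a minimal sketch of how such files could be loaded back into a tokenizer/text-encoder pair for inference. It uses only torch/transformers calls the script itself relies on; the model path, file names, and the postfix value are hypothetical placeholders, not defined by this commit.

    # Sketch, assuming hypothetical paths: restore per-token learned embeddings.
    import torch
    from transformers import CLIPTextModel, CLIPTokenizer

    model_path = "runwayml/stable-diffusion-v1-5"        # hypothetical model path
    embedding_files = ["checkpoints/tok1_100_end.bin"]   # hypothetical file names

    tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer")
    text_encoder = CLIPTextModel.from_pretrained(model_path, subfolder="text_encoder")

    for path in embedding_files:
        learned_embeds_dict = torch.load(path, map_location="cpu")
        for token, embedding in learned_embeds_dict.items():
            # Register the placeholder token, grow the embedding matrix to
            # cover it, then copy the learned vector into its row.
            tokenizer.add_tokens(token)
            token_id = tokenizer.convert_tokens_to_ids(token)
            text_encoder.resize_token_embeddings(len(tokenizer))
            with torch.no_grad():
                text_encoder.get_input_embeddings().weight[token_id] = embedding

Because the file name is slugify(placeholder_token) rather than the raw token, the original token string is recovered from the dict key, not from the file name.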