From baba91864a45939cef4f77f6ca96ade7ae5ef274 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Mon, 24 Oct 2022 23:46:18 +0200
Subject: Advanced datasets

---
 dreambooth.py | 68 ++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 42 insertions(+), 26 deletions(-)

diff --git a/dreambooth.py b/dreambooth.py
index 5c26f12..2c24908 100644
--- a/dreambooth.py
+++ b/dreambooth.py
@@ -71,13 +71,13 @@ def parse_args():
     parser.add_argument(
         "--placeholder_token",
         type=str,
-        default="<*>",
+        nargs='*',
         help="A token to use as a placeholder for the concept.",
     )
     parser.add_argument(
         "--initializer_token",
         type=str,
-        default=None,
+        nargs='*',
         help="A token to use as initializer word."
     )
     parser.add_argument(
@@ -316,6 +316,18 @@ def parse_args():
     if args.instance_identifier is None:
         raise ValueError("You must specify --instance_identifier")
 
+    if isinstance(args.initializer_token, str):
+        args.initializer_token = [args.initializer_token]
+
+    if isinstance(args.placeholder_token, str):
+        args.placeholder_token = [args.placeholder_token]
+
+    if len(args.placeholder_token) == 0:
+        args.placeholder_token = [f"<*{i}>" for i in range(len(args.initializer_token))]
+
+    if len(args.placeholder_token) != len(args.initializer_token):
+        raise ValueError("Number of items in --placeholder_token and --initializer_token must match")
+
     if args.output_dir is None:
         raise ValueError("You must specify --output_dir")
 
@@ -379,9 +391,6 @@ class Checkpointer:
 
     @torch.no_grad()
     def save_embedding(self, step, postfix):
-        if self.placeholder_token_id is None:
-            return
-
         print("Saving checkpoint for step %d..." % step)
 
         checkpoints_path = self.output_dir.joinpath("checkpoints")
@@ -389,12 +398,13 @@ class Checkpointer:
 
         unwrapped = self.accelerator.unwrap_model(self.text_encoder)
 
-        # Save a checkpoint
-        learned_embeds = unwrapped.get_input_embeddings().weight[self.placeholder_token_id]
-        learned_embeds_dict = {self.placeholder_token: learned_embeds.detach().cpu()}
+        for (placeholder_token, placeholder_token_id) in zip(self.placeholder_token, self.placeholder_token_id):
+            # Save a checkpoint
+            learned_embeds = unwrapped.get_input_embeddings().weight[placeholder_token_id]
+            learned_embeds_dict = {placeholder_token: learned_embeds.detach().cpu()}
 
-        filename = f"%s_%d_%s.bin" % (slugify(self.placeholder_token), step, postfix)
-        torch.save(learned_embeds_dict, checkpoints_path.joinpath(filename))
+            filename = f"%s_%d_%s.bin" % (slugify(placeholder_token), step, postfix)
+            torch.save(learned_embeds_dict, checkpoints_path.joinpath(filename))
 
         del unwrapped
         del learned_embeds
@@ -467,7 +477,7 @@ class Checkpointer:
                 for i in range(self.sample_batches):
                     batches = [batch for j, batch in data_enum if j * data.batch_size < self.sample_batch_size]
                     prompt = [
-                        prompt.format(self.instance_identifier)
+                        prompt.format(identifier=self.instance_identifier)
                         for batch in batches
                         for prompt in batch["prompts"]
                     ][:self.sample_batch_size]
@@ -516,8 +526,8 @@ def main():
 
     instance_identifier = args.instance_identifier
 
-    if args.placeholder_token is not None:
-        instance_identifier = instance_identifier.format(args.placeholder_token)
+    if len(args.placeholder_token) != 0:
+        instance_identifier = instance_identifier.format(args.placeholder_token[0])
 
     now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
     basepath = Path(args.output_dir).joinpath(slugify(instance_identifier), now)
@@ -565,18 +575,16 @@ def main():
     # Freeze text_encoder and vae
     freeze_params(vae.parameters())
 
-    if args.initializer_token is not None:
+    if len(args.initializer_token) != 0:
         # Convert the initializer_token, placeholder_token to ids
-        initializer_token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
-        print(f"Initializer token {args.initializer_token} maps to {len(initializer_token_ids)} embeddings.")
-        initializer_token_ids = torch.tensor(initializer_token_ids[:1])
+        initializer_token_ids = torch.stack([
+            torch.tensor(tokenizer.encode(token, add_special_tokens=False)[:1])
+            for token in args.initializer_token
+        ])
 
-        # Add the placeholder token in tokenizer
         num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
-        if num_added_tokens == 0:
-            print(f"Re-using existing token {args.placeholder_token}.")
-        else:
-            print(f"Training new token {args.placeholder_token}.")
+        print(f"Added {num_added_tokens} new tokens.")
+
         placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
 
         # Resize the token embeddings as we are adding new special tokens to the tokenizer
@@ -586,7 +594,9 @@ def main():
         token_embeds = text_encoder.get_input_embeddings().weight.data
         original_token_embeds = token_embeds.detach().clone().to(accelerator.device)
         initializer_token_embeddings = text_encoder.get_input_embeddings()(initializer_token_ids)
-        token_embeds[placeholder_token_id] = initializer_token_embeddings
+
+        for (token_id, embeddings) in zip(placeholder_token_id, initializer_token_embeddings):
+            token_embeds[token_id] = embeddings
 
         freeze_params(itertools.chain(
             text_encoder.text_model.encoder.parameters(),
@@ -594,7 +604,7 @@ def main():
             text_encoder.text_model.embeddings.position_embedding.parameters(),
         ))
     else:
-        placeholder_token_id = None
+        placeholder_token_id = []
 
     prompt_processor = PromptProcessor(tokenizer, text_encoder)
 
@@ -721,7 +731,7 @@ def main():
         with torch.inference_mode():
             for batch in batched_data:
                 image_name = [item.class_image_path for item in batch]
-                prompt = [item.prompt.format(args.class_identifier) for item in batch]
+                prompt = [item.prompt.format(identifier=args.class_identifier) for item in batch]
                 nprompt = [item.nprompt for item in batch]
 
                 images = pipeline(
@@ -787,7 +797,10 @@ def main():
     # We need to initialize the trackers we use, and also store our configuration.
     # The trackers initializes automatically on the main process.
     if accelerator.is_main_process:
-        accelerator.init_trackers("dreambooth", config=vars(args))
+        config = vars(args).copy()
+        config["initializer_token"] = " ".join(config["initializer_token"])
+        config["placeholder_token"] = " ".join(config["placeholder_token"])
+        accelerator.init_trackers("dreambooth", config=config)
 
     # Train!
     total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
@@ -932,6 +945,9 @@ def main():
                     global_step += 1
 
                     if global_step % args.sample_frequency == 0:
+                        local_progress_bar.clear()
+                        global_progress_bar.clear()
+
                         checkpointer.save_embedding(global_step, "training")
                         sample_checkpoint = True
 
-- 
cgit v1.2.3-54-g00ecf
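
The core of this commit is the switch from a single placeholder token to parallel lists of placeholder and initializer tokens. The sketch below restates that initialization path as a standalone script. It is illustrative only, not part of the commit: the model name and the token lists are assumed values, while the transformers/torch calls are the same ones dreambooth.py already uses.

    # Standalone sketch (not from the commit): multi-token placeholder
    # initialization as performed by the patched dreambooth.py.
    # The model name and token lists below are illustrative assumptions.
    import torch
    from transformers import CLIPTextModel, CLIPTokenizer

    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

    placeholder_token = ["<token-a>", "<token-b>"]  # would come from --placeholder_token
    initializer_token = ["cat", "dog"]              # would come from --initializer_token

    # Each initializer word may tokenize to several sub-tokens; as in the
    # patch, keep only the first sub-token id per word.
    initializer_token_ids = torch.stack([
        torch.tensor(tokenizer.encode(token, add_special_tokens=False)[:1])
        for token in initializer_token
    ])

    num_added_tokens = tokenizer.add_tokens(placeholder_token)
    print(f"Added {num_added_tokens} new tokens.")
    placeholder_token_id = tokenizer.convert_tokens_to_ids(placeholder_token)

    # Grow the embedding matrix to cover the new tokens, then seed each
    # new row with the embedding of its initializer's first sub-token.
    text_encoder.resize_token_embeddings(len(tokenizer))
    with torch.no_grad():
        token_embeds = text_encoder.get_input_embeddings().weight.data
        initializer_token_embeddings = text_encoder.get_input_embeddings()(initializer_token_ids)
        for (token_id, embeddings) in zip(placeholder_token_id, initializer_token_embeddings):
            token_embeds[token_id] = embeddings

With nargs='*', both flags accept space-separated lists, e.g. --placeholder_token "<token-a>" "<token-b>" --initializer_token cat dog (hypothetical values); if --placeholder_token is passed without values, parse_args() now derives <*0>, <*1>, ... to match the initializer list, and raises if the two lists differ in length.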