From 9bfb4a078f63a7ce6e35e89093f17febd9ff4b51 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Fri, 24 Mar 2023 17:23:09 +0100
Subject: Update

---
 train_dreambooth.py    |  7 +++++++
 train_lora.py          |  7 +++++++
 train_ti.py            |  6 +++---
 training/functional.py | 11 +++++------
 4 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/train_dreambooth.py b/train_dreambooth.py
index a85ae4c..1b8a3d2 100644
--- a/train_dreambooth.py
+++ b/train_dreambooth.py
@@ -175,6 +175,12 @@ def parse_args():
             " resolution"
         ),
     )
+    parser.add_argument(
+        "--offset_noise_strength",
+        type=float,
+        default=0.15,
+        help="Perlin offset noise strength.",
+    )
     parser.add_argument(
         "--num_train_epochs",
         type=int,
@@ -621,6 +627,7 @@ def main():
         num_train_epochs=args.num_train_epochs,
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         sample_frequency=args.sample_frequency,
+        offset_noise_strength=args.offset_noise_strength,
         # --
         tokenizer=tokenizer,
         sample_scheduler=sample_scheduler,
diff --git a/train_lora.py b/train_lora.py
index fa24cee..b16a99b 100644
--- a/train_lora.py
+++ b/train_lora.py
@@ -164,6 +164,12 @@ def parse_args():
             " resolution"
         ),
     )
+    parser.add_argument(
+        "--offset_noise_strength",
+        type=float,
+        default=0.15,
+        help="Perlin offset noise strength.",
+    )
     parser.add_argument(
         "--num_train_epochs",
         type=int,
@@ -649,6 +655,7 @@ def main():
         num_train_epochs=args.num_train_epochs,
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         sample_frequency=args.sample_frequency,
+        offset_noise_strength=args.offset_noise_strength,
         # --
         tokenizer=tokenizer,
         sample_scheduler=sample_scheduler,
diff --git a/train_ti.py b/train_ti.py
index ef71f6f..bbc5524 100644
--- a/train_ti.py
+++ b/train_ti.py
@@ -188,9 +188,9 @@ def parse_args():
         help='Vector shuffling algorithm. Choose between ["all", "trailing", "leading", "between", "auto", "off"]',
     )
     parser.add_argument(
-        "--perlin_strength",
+        "--offset_noise_strength",
         type=float,
-        default=0.1,
+        default=0.15,
         help="Perlin offset noise strength.",
     )
     parser.add_argument(
@@ -661,7 +661,7 @@ def main():
         checkpoint_frequency=args.checkpoint_frequency,
         milestone_checkpoints=not args.no_milestone_checkpoints,
         global_step_offset=global_step_offset,
-        perlin_strength=args.perlin_strength,
+        offset_noise_strength=args.offset_noise_strength,
         # --
         tokenizer=tokenizer,
         sample_scheduler=sample_scheduler,
diff --git a/training/functional.py b/training/functional.py
index ee73ab2..87bb339 100644
--- a/training/functional.py
+++ b/training/functional.py
@@ -277,7 +277,7 @@ def loss_step(
     with_prior_preservation: bool,
     prior_loss_weight: float,
     seed: int,
-    perlin_strength: float,
+    offset_noise_strength: float,
     step: int,
     batch: dict[str, Any],
     eval: bool = False,
@@ -300,11 +300,10 @@ def loss_step(
         generator=generator
     )
 
-    if perlin_strength != 0:
-        noise += perlin_strength * perlin_noise(
+    if offset_noise_strength != 0:
+        noise += offset_noise_strength * perlin_noise(
             latents.shape,
             res=1,
-            octaves=4,
             dtype=latents.dtype,
             device=latents.device,
             generator=generator
@@ -610,7 +609,7 @@ def train(
     global_step_offset: int = 0,
     with_prior_preservation: bool = False,
     prior_loss_weight: float = 1.0,
-    perlin_strength: float = 0.1,
+    offset_noise_strength: float = 0.1,
     **kwargs,
 ):
     text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler, extra = strategy.prepare(
@@ -642,7 +641,7 @@
         with_prior_preservation,
         prior_loss_weight,
         seed,
-        perlin_strength,
+        offset_noise_strength,
     )
 
     if accelerator.is_main_process:
-- 
cgit v1.2.3-54-g00ecf
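
For context: the parameter being renamed here (perlin_strength -> offset_noise_strength) controls how much low-frequency "offset" noise is mixed into the per-pixel Gaussian noise during diffusion training. The sketch below shows that mixing rule in isolation, assuming a PyTorch environment. Note that low_frequency_noise and make_training_noise are hypothetical helpers invented for illustration: low_frequency_noise merely approximates what the repository's perlin_noise helper produces at res=1 (its actual implementation is not part of this patch).

import torch
import torch.nn.functional as F


def low_frequency_noise(shape, dtype=None, device=None, generator=None):
    # Hypothetical stand-in for the repo's perlin_noise(shape, res=1, ...):
    # draw a coarse Gaussian grid per channel and stretch it over the full
    # latent, giving broad, low-frequency offsets rather than white noise.
    b, c, h, w = shape
    coarse = torch.randn((b, c, 4, 4), dtype=dtype, device=device, generator=generator)
    return F.interpolate(coarse, size=(h, w), mode="bilinear", align_corners=False)


def make_training_noise(latents, offset_noise_strength=0.15, generator=None):
    # Base per-pixel Gaussian noise, as drawn in loss_step().
    noise = torch.randn(
        latents.shape, dtype=latents.dtype, device=latents.device, generator=generator
    )
    if offset_noise_strength != 0:
        # Same mixing rule as the patched loss_step(): add a scaled
        # low-frequency component on top of the white noise.
        noise += offset_noise_strength * low_frequency_noise(
            latents.shape,
            dtype=latents.dtype,
            device=latents.device,
            generator=generator,
        )
    return noise

Because the added component varies slowly across the image, it shifts whole regions of the latent together, which is what lets models trained this way reach darker darks and brighter brights; the strength value (default 0.15 in the trainers above) scales that shift.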