From 54d72ba4a8331d822a48bad9e381b47d39598125 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Wed, 28 Dec 2022 21:00:34 +0100
Subject: Updated 1-cycle scheduler

---
 training/optimization.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/training/optimization.py b/training/optimization.py
index 3809f3b..a0c8673 100644
--- a/training/optimization.py
+++ b/training/optimization.py
@@ -6,7 +6,7 @@ from diffusers.utils import logging
 logger = logging.get_logger(__name__)
 
 
-def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos", min_lr=0.01, mid_point=0.4, last_epoch=-1):
+def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos", min_lr=0.04, mid_point=0.3, last_epoch=-1):
     """
     Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
     a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
@@ -35,8 +35,12 @@ def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos", min_l
             progress = float(num_training_steps - current_step) / float(max(1, num_training_steps - thresh_down))
             return max(0.0, progress) * min_lr
-        else:
-            progress = float(current_step - thresh_up) / float(max(1, num_training_steps - thresh_up))
+
+        progress = float(current_step - thresh_up) / float(max(1, num_training_steps - thresh_up))
+
+        if annealing == "half_cos":
             return max(0.0, 1.0 + math.cos(math.pi * (0.5 + 0.5 * progress)))
+        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))
+
 
     return LambdaLR(optimizer, lr_lambda, last_epoch)
-- 
cgit v1.2.3-54-g00ecf
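
For reference, here is a minimal runnable sketch of get_one_cycle_schedule as it would read after this patch. Only the signature, the annealing tail, and the final return appear in the hunks above; the warmup ramp, the computation of thresh_up, and the "linear" plateau (thresh_down) are assumptions filled in for illustration, not the repository's exact code.

    import math

    from torch.optim.lr_scheduler import LambdaLR


    def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos", min_lr=0.04, mid_point=0.3, last_epoch=-1):
        # Assumption: warmup ends at mid_point of training (capped at 50%).
        thresh_up = int(num_training_steps * min(mid_point, 0.5))

        def lr_lambda(current_step):
            if current_step < thresh_up:
                # Assumed warmup: linear ramp from min_lr up to 1.0.
                return min_lr + float(current_step) / float(max(1, thresh_up)) * (1.0 - min_lr)

            if annealing == "linear":
                # Assumed "linear" branch: ramp back down to min_lr, ...
                thresh_down = thresh_up * 2
                if current_step < thresh_down:
                    return min_lr + float(thresh_down - current_step) / float(max(1, thresh_down - thresh_up)) * (1.0 - min_lr)
                # ... then decay to 0 (these two lines are context in the patch above).
                progress = float(num_training_steps - current_step) / float(max(1, num_training_steps - thresh_down))
                return max(0.0, progress) * min_lr

            # From the patch: progress runs 0 -> 1 over the annealing tail.
            progress = float(current_step - thresh_up) / float(max(1, num_training_steps - thresh_up))

            if annealing == "half_cos":
                # Quarter wave of the cosine, from cos(pi/2) to cos(pi): 1.0 -> 0.0.
                return max(0.0, 1.0 + math.cos(math.pi * (0.5 + 0.5 * progress)))

            # Default "cos": half wave of the cosine, 1.0 -> 0.0.
            return max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))

        return LambdaLR(optimizer, lr_lambda, last_epoch)

On the changed defaults: the peak now lands at 30% of training instead of 40%, leaving a longer annealing tail, and min_lr rises from 0.01 to 0.04. Note that in the hunks above min_lr only scales the tail of the "linear" branch (max(0.0, progress) * min_lr); both cosine branches still anneal all the way to 0.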