From 30b60a91fedb59dd3e64ca1db30b759233424098 Mon Sep 17 00:00:00 2001 From: Volpeon Date: Thu, 3 Nov 2022 20:19:19 +0100 Subject: Tune one-cycle LR schedule: raise mid_point 0.42 -> 0.43, rephase cosine annealing tail --- training/optimization.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/training/optimization.py b/training/optimization.py index 012beed..0fd7ec8 100644 --- a/training/optimization.py +++ b/training/optimization.py @@ -6,7 +6,7 @@ from diffusers.utils import logging logger = logging.get_logger(__name__) -def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos", min_lr=0.05, mid_point=0.42, last_epoch=-1): +def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos", min_lr=0.05, mid_point=0.43, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. @@ -33,10 +33,10 @@ def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos", min_l if current_step < thresh_down: return min_lr + float(thresh_down - current_step) / float(max(1, thresh_down - thresh_up)) * (1 - min_lr) - return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - thresh_down))) * min_lr + progress = float(num_training_steps - current_step) / float(max(1, num_training_steps - thresh_down)) + return max(0.0, progress) * min_lr else: progress = float(current_step - thresh_up) / float(max(1, num_training_steps - thresh_up)) - - return max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress))) + return max(0.0, 1.0 + math.cos(math.pi * (0.5 + 0.5 * progress))) return LambdaLR(optimizer, lr_lambda, last_epoch) -- cgit v1.2.3-70-g09d2