# training/optimization.py
import math
from typing import Literal

import torch
from torch.optim.lr_scheduler import LambdaLR


def get_one_cycle_schedule(
    optimizer: torch.optim.Optimizer,
    num_training_steps: int,
    warmup: Literal["cos", "linear"] = "cos",
    annealing: Literal["cos", "half_cos", "linear"] = "cos",
    warmup_exp: int = 1,
    annealing_exp: int = 1,
    min_lr: float = 0.04,
    mid_point: float = 0.3,
    last_epoch: int = -1
):
    """
    One-cycle LR schedule: the multiplier on the optimizer's base LR warms
    up from `min_lr` to 1.0 over the first `mid_point` fraction of training
    (capped at 0.5), then anneals back to 0. `warmup_exp` and `annealing_exp`
    raise the cosine shapes to a power (the linear shapes ignore them);
    linear annealing drops to `min_lr` first and only then decays to 0.
    Returns a `LambdaLR`.
    """
    def lr_lambda(current_step: int):
        # End of the warmup phase; the peak sits at the `mid_point`
        # fraction of training, capped at the halfway mark.
        thresh_up = int(num_training_steps * min(mid_point, 0.5))

        if current_step < thresh_up:
            # Warmup: ramp the multiplier from min_lr up to 1.0.
            progress = float(current_step) / float(max(1, thresh_up))

            if warmup == "linear":
                return min_lr + progress * (1 - min_lr)

            # Cosine ramp: 0.5 * (1 + cos(pi * (1 + p))) == 0.5 * (1 - cos(pi * p)),
            # rising from 0 to 1.
            lr = 0.5 * (1.0 + math.cos(math.pi * (1 + progress)))
            lr = lr ** warmup_exp
            return min_lr + lr * (1 - min_lr)

        if annealing == "linear":
            thresh_down = thresh_up * 2

            if current_step < thresh_down:
                progress = float(thresh_down - current_step) / float(max(1, thresh_down - thresh_up))
                return min_lr + progress * (1 - min_lr)

            progress = float(num_training_steps - current_step) / float(max(1, num_training_steps - thresh_down))
            return progress * min_lr

        # Cosine annealing over the remainder of training.
        progress = float(current_step - thresh_up) / float(max(1, num_training_steps - thresh_up))

        if annealing == "half_cos":
            # Second half of a cosine wave: decays from 1 to 0, steepest
            # at the start and flattening toward the end.
            lr = 1.0 + math.cos(math.pi * (0.5 + 0.5 * progress))
            lr = lr ** annealing_exp
            return lr

        # Full cosine: 0.5 * (1 + cos(pi * p)) decays from 1 to 0,
        # flat at both ends.
        lr = 0.5 * (1.0 + math.cos(math.pi * progress))
        lr = lr ** annealing_exp
        return lr

    return LambdaLR(optimizer, lr_lambda, last_epoch)
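

# Minimal usage sketch (not part of the original module): drives the schedule
# with a throwaway parameter so the LR shape can be inspected. The optimizer
# choice, base LR, and step count below are illustrative assumptions.
if __name__ == "__main__":
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.AdamW([param], lr=1e-3)
    scheduler = get_one_cycle_schedule(optimizer, num_training_steps=1000)

    for step in range(1000):
        optimizer.step()
        scheduler.step()
        if step % 100 == 0:
            # LambdaLR reports base_lr * lr_lambda(step) here.
            print(f"step {step:4d}: lr = {scheduler.get_last_lr()[0]:.6f}")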