author     Volpeon <git@volpeon.ink>    2023-01-01 22:08:21 +0100
committer  Volpeon <git@volpeon.ink>    2023-01-01 22:08:21 +0100
commit     68164329b97f5cd79a56372dc6cace4b038afce8 (patch)
tree       50d404f764a8e6f85fadcc0a45dd0b8da3b6e507 /training
parent     Cleanup (diff)
Update
Diffstat (limited to 'training')
-rw-r--r--  training/optimization.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/training/optimization.py b/training/optimization.py
index a79944f..725599b 100644
--- a/training/optimization.py
+++ b/training/optimization.py
@@ -30,7 +30,7 @@ def get_one_cycle_schedule(
                 return min_lr + progress * (1 - min_lr)

             lr = 0.5 * (1.0 + math.cos(math.pi * (1 + progress)))
-            lr = lr ** warmup_exp
+            lr = lr ** (warmup_exp - (warmup_exp - 1) * progress)
             return min_lr + lr * (1 - min_lr)

         if annealing == "linear":
@@ -47,11 +47,11 @@ def get_one_cycle_schedule(

         if annealing == "half_cos":
             lr = 1.0 + math.cos(math.pi * (0.5 + 0.5 * progress))
-            lr = lr ** (annealing_exp)
+            lr = lr ** (annealing_exp - (annealing_exp - 1) * progress)
             return lr

         lr = 0.5 * (1.0 + math.cos(math.pi * progress))
-        lr = lr ** annealing_exp
+        lr = lr ** (annealing_exp - (annealing_exp - 1) * progress)
         return lr

     return LambdaLR(optimizer, lr_lambda, last_epoch)
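The change replaces the constant exponent on the cosine warmup/annealing factor with one that is interpolated linearly from warmup_exp (or annealing_exp) at the start of the phase down to 1 at its end, so the exponent's shaping effect fades out as the phase completes. Below is a minimal standalone sketch of the annealing case; the helper name anneal_factor and the default exponent of 2.0 are illustrative, not taken from the repository.

```python
import math

def anneal_factor(progress: float, annealing_exp: float = 2.0) -> float:
    """Cosine annealing factor whose exponent is interpolated from
    annealing_exp at progress = 0 down to 1 at progress = 1."""
    lr = 0.5 * (1.0 + math.cos(math.pi * progress))
    # Before this commit the exponent was the constant annealing_exp;
    # after it, the exponent decays linearly toward 1 as annealing progresses.
    return lr ** (annealing_exp - (annealing_exp - 1) * progress)

if __name__ == "__main__":
    # Sample the factor across the annealing phase.
    for p in (0.0, 0.25, 0.5, 0.75, 1.0):
        print(f"progress={p:.2f}  factor={anneal_factor(p):.4f}")
```

Since the cosine factor lies in [0, 1], a constant exponent greater than 1 depresses it for the entire phase; with the interpolated exponent the curve is depressed early on but converges to the plain cosine shape as the phase ends.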