| author | Volpeon <git@volpeon.ink> | 2022-12-21 09:17:25 +0100 |
|---|---|---|
| committer | Volpeon <git@volpeon.ink> | 2022-12-21 09:17:25 +0100 |
| commit | 68540b27849564994d921968a36faa9b997e626d (patch) | |
| tree | 8fbe834ab4c52f057cd114bbb0e786158f215acc /training/optimization.py | |
| parent | Fix training (diff) | |
Moved common training code into separate module
Diffstat (limited to 'training/optimization.py')
| -rw-r--r-- | training/optimization.py | 2 |
|---|---|---|

1 file changed, 1 insertion(+), 1 deletion(-)
```diff
diff --git a/training/optimization.py b/training/optimization.py
index 0e603fa..c501ed9 100644
--- a/training/optimization.py
+++ b/training/optimization.py
@@ -6,7 +6,7 @@ from diffusers.utils import logging
 logger = logging.get_logger(__name__)
 
 
-def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos", min_lr=0.05, mid_point=0.4, last_epoch=-1):
+def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos", min_lr=0.001, mid_point=0.4, last_epoch=-1):
     """
     Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
     a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
```
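The hunk only changes the `min_lr` default from 0.05 to 0.001; the body of `get_one_cycle_schedule` is not shown in this diff. Below is a minimal sketch of a one-cycle schedule consistent with the signature and docstring above, assuming linear warmup up to `mid_point` followed by cosine or linear annealing down to `min_lr` (as a fraction of the initial learning rate). The shape of the warmup/annealing phases is an assumption; the actual implementation in `training/optimization.py` may differ.

```python
# Sketch only: the real body of get_one_cycle_schedule is not in this diff.
# Assumes linear warmup for the first mid_point fraction of steps, then
# cosine ("cos") or linear annealing down to min_lr * initial_lr.
import math
from torch.optim.lr_scheduler import LambdaLR

def get_one_cycle_schedule(optimizer, num_training_steps, annealing="cos",
                           min_lr=0.001, mid_point=0.4, last_epoch=-1):
    peak = int(num_training_steps * mid_point)  # step where warmup ends

    def lr_lambda(step):
        if step < peak:
            # Linear warmup from 0 to the initial lr set in the optimizer.
            return step / max(1, peak)
        progress = (step - peak) / max(1, num_training_steps - peak)
        if annealing == "cos":
            # Cosine annealing from 1.0 down to min_lr.
            return min_lr + (1 - min_lr) * 0.5 * (1 + math.cos(math.pi * progress))
        # Linear annealing from 1.0 down to min_lr.
        return min_lr + (1 - min_lr) * (1 - progress)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
```

Under this reading, the commit lowers the floor the schedule anneals to from 5% to 0.1% of the peak learning rate, letting the learning rate decay much closer to zero by the end of training.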