| author | Volpeon <git@volpeon.ink> | 2023-03-01 12:34:42 +0100 |
|---|---|---|
| committer | Volpeon <git@volpeon.ink> | 2023-03-01 12:34:42 +0100 |
| commit | a1b8327085ddeab589be074d7e9df4291aba1210 | |
| tree | 2f2016916d7a2f659268c3e375d55c59583c2b3b /training/optimization.py | |
| parent | Fixed TI normalization order | |
Update
Diffstat (limited to 'training/optimization.py')
 training/optimization.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/training/optimization.py b/training/optimization.py
index 6c9a35d..7d8d55a 100644
--- a/training/optimization.py
+++ b/training/optimization.py
@@ -113,7 +113,7 @@ def get_scheduler(
 ):
     num_training_steps_per_epoch = math.ceil(
         num_training_steps_per_epoch / gradient_accumulation_steps
-    ) * gradient_accumulation_steps
+    ) # * gradient_accumulation_steps
     num_training_steps = train_epochs * num_training_steps_per_epoch
     num_warmup_steps = warmup_epochs * num_training_steps_per_epoch
 
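For context, a minimal sketch of what this one-line change does to the scheduler's step arithmetic. Only the two expressions come from the diff above; the variable values (`num_batches_per_epoch = 100`, `gradient_accumulation_steps = 4`, `train_epochs = 10`) are hypothetical, chosen purely for illustration. One plausible reading: the old expression rounded the per-epoch count back up to a multiple of `gradient_accumulation_steps`, i.e. it kept counting individual batches, while the new expression stops after the ceil-division, so the count reflects optimizer steps per epoch.

```python
import math

# Hypothetical example values -- not taken from the repository.
num_batches_per_epoch = 100
gradient_accumulation_steps = 4
train_epochs = 10

# Before this commit: ceil-divide, then multiply back up.
# Result is a batch-level count, rounded up to a multiple of
# gradient_accumulation_steps.
old_steps_per_epoch = math.ceil(
    num_batches_per_epoch / gradient_accumulation_steps
) * gradient_accumulation_steps  # -> 100

# After this commit: ceil-divide only.
# Result is the number of optimizer steps per epoch.
new_steps_per_epoch = math.ceil(
    num_batches_per_epoch / gradient_accumulation_steps
)  # -> 25

print(old_steps_per_epoch * train_epochs)  # 1000 scheduler steps before
print(new_steps_per_epoch * train_epochs)  # 250 scheduler steps after
```

Under that reading, the distinction matters for any learning-rate scheduler that is stepped once per optimizer update rather than once per batch: with accumulation enabled, the old count would have been `gradient_accumulation_steps` times larger than the number of times the scheduler actually steps.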
