From 96638bbd54ca7f91d44c938fae7275d3ecaa6add Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Tue, 21 Feb 2023 14:08:49 +0100
Subject: Fixed TI normalization order

---
 training/functional.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/training/functional.py b/training/functional.py
index e7c4320..b830261 100644
--- a/training/functional.py
+++ b/training/functional.py
@@ -38,8 +38,8 @@ class TrainingCallbacks():
     on_accum_model: Callable[[], torch.nn.Module] = const(None)
     on_log: Callable[[], dict[str, Any]] = const({})
     on_train: Callable[[int], _GeneratorContextManager] = const(nullcontext())
-    on_before_optimize: Callable[[float, int], None] = const()
-    on_after_optimize: Callable[[float], None] = const()
+    on_before_optimize: Callable[[float, int], Any] = const()
+    on_after_optimize: Callable[[Any, float], None] = const()
     on_after_epoch: Callable[[float], None] = const()
     on_eval: Callable[[], _GeneratorContextManager] = const(nullcontext())
     on_sample: Callable[[int], None] = const()
@@ -455,13 +455,13 @@ def train_loop(
                 local_progress_bar.set_postfix(**logs)

                 if ((step + 1) % gradient_accumulation_steps == 0) or ((step + 1) == len(train_dataloader)):
-                    on_before_optimize(lr_scheduler.get_last_lr()[0], epoch)
+                    before_optimize_result = on_before_optimize(lr_scheduler.get_last_lr()[0], epoch)

                     optimizer.step()
                     lr_scheduler.step()
                     optimizer.zero_grad(set_to_none=True)

-                    on_after_optimize(lr_scheduler.get_last_lr()[0])
+                    on_after_optimize(before_optimize_result, lr_scheduler.get_last_lr()[0])

                     local_progress_bar.update(1)
                     global_progress_bar.update(1)
--
cgit v1.2.3-54-g00ecf
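
The change threads a value from on_before_optimize into on_after_optimize: the before-hook may now return anything (Any), and the train loop hands that result back to the after-hook once optimizer.step() has run. Below is a minimal sketch, not taken from the patch, of how a textual-inversion callback pair could use this to normalize embeddings in the corrected order: capture the pre-step norms before the optimizer mutates the weights, then rescale after the step. The names `embeddings` and the norm-restoring logic are illustrative assumptions; the repository's actual TI callbacks live outside this file.

    # Illustrative sketch only; `embeddings` and the norm-restoring
    # strategy are assumptions, not the repository's actual TI code.
    from typing import Any

    import torch

    # Stand-in for the trained textual-inversion token embeddings.
    embeddings = torch.nn.Embedding(16, 768)

    def on_before_optimize(lr: float, epoch: int) -> Any:
        # Runs before optimizer.step(); whatever is returned here is
        # passed back to on_after_optimize unchanged by the train loop.
        return embeddings.weight.detach().norm(dim=-1, keepdim=True)

    def on_after_optimize(before_optimize_result: Any, lr: float) -> None:
        # Runs after optimizer.step(); restore the pre-step norms captured
        # above, so normalization happens after the weights were updated.
        with torch.no_grad():
            new_norms = embeddings.weight.norm(dim=-1, keepdim=True)
            embeddings.weight.mul_(
                before_optimize_result / new_norms.clamp(min=1e-8)
            )

With this shape, the loop's call sequence matches the patched code: `before_optimize_result = on_before_optimize(lr, epoch)`, then the optimizer step, then `on_after_optimize(before_optimize_result, lr)`.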