From 16b92605a59d59c65789c89b54bb97da51908056 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Tue, 21 Feb 2023 09:09:50 +0100
Subject: Embedding normalization: Ignore tensors with grad = 0

---
 training/functional.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/training/functional.py b/training/functional.py
index 85dd884..739d055 100644
--- a/training/functional.py
+++ b/training/functional.py
@@ -362,6 +362,7 @@ def train_loop(
     loss_step: LossCallable,
     sample_frequency: int = 10,
     checkpoint_frequency: int = 50,
+    milestone_checkpoints: bool = True,
     global_step_offset: int = 0,
     num_epochs: int = 100,
     callbacks: TrainingCallbacks = TrainingCallbacks(),
@@ -514,7 +515,7 @@
             accelerator.log(logs, step=global_step)

             if accelerator.is_main_process:
-                if avg_acc_val.avg.item() > best_acc_val:
+                if avg_acc_val.avg.item() > best_acc_val and milestone_checkpoints:
                     local_progress_bar.clear()
                     global_progress_bar.clear()

@@ -527,7 +528,7 @@
                 accs.append(avg_acc_val.avg.item())
         else:
             if accelerator.is_main_process:
-                if avg_acc.avg.item() > best_acc:
+                if avg_acc.avg.item() > best_acc and milestone_checkpoints:
                     local_progress_bar.clear()
                     global_progress_bar.clear()

@@ -572,6 +573,7 @@ def train(
     num_train_epochs: int = 100,
     sample_frequency: int = 20,
     checkpoint_frequency: int = 50,
+    milestone_checkpoints: bool = True,
     global_step_offset: int = 0,
     with_prior_preservation: bool = False,
     prior_loss_weight: float = 1.0,
@@ -626,6 +628,7 @@ def train(
         loss_step=loss_step_,
         sample_frequency=sample_frequency,
         checkpoint_frequency=checkpoint_frequency,
+        milestone_checkpoints=milestone_checkpoints,
         global_step_offset=global_step_offset,
         num_epochs=num_train_epochs,
         callbacks=callbacks,
--
cgit v1.2.3-54-g00ecf
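
For context, the new milestone_checkpoints flag only gates the "new best accuracy" checkpoints; periodic checkpoints driven by checkpoint_frequency are untouched by this patch. The sketch below illustrates that gating logic. Only milestone_checkpoints, checkpoint_frequency, and the "> best_acc" comparison come from the diff; save_checkpoint, the loop scaffolding, and the unconditional best-accuracy update are hypothetical simplifications, not the actual code in training/functional.py.

# Minimal sketch of the gating introduced above (assumptions noted in the
# lead-in: save_checkpoint and the loop structure are stand-ins).

def save_checkpoint(epoch: int, reason: str) -> None:
    print(f"checkpoint at epoch {epoch} ({reason})")

def train_loop_sketch(
    epoch_accs: list[float],
    checkpoint_frequency: int = 50,
    milestone_checkpoints: bool = True,
) -> None:
    best_acc = 0.0
    for epoch, avg_acc in enumerate(epoch_accs):
        # Periodic checkpoints are unaffected by the new flag.
        if epoch != 0 and epoch % checkpoint_frequency == 0:
            save_checkpoint(epoch, "periodic")
        # Milestone ("new best accuracy") checkpoints can now be disabled,
        # mirroring the "and milestone_checkpoints" condition in the diff.
        if avg_acc > best_acc and milestone_checkpoints:
            save_checkpoint(epoch, "milestone")
        best_acc = max(best_acc, avg_acc)

# With milestone_checkpoints=False, only the periodic checkpoints remain:
train_loop_sketch([0.1, 0.4, 0.3, 0.6], checkpoint_frequency=2,
                  milestone_checkpoints=False)

Since both callers pass milestone_checkpoints=True by default, existing behavior is preserved unless a caller of train() opts out explicitly.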