diff options
author | Volpeon <git@volpeon.ink> | 2023-02-21 11:50:11 +0100 |
---|---|---|
committer | Volpeon <git@volpeon.ink> | 2023-02-21 11:50:11 +0100 |
commit | 9d6252e63bac241e5c6191eb47adb51b84a5d782 (patch) | |
tree | 6cb649510b48ca33419af3721e630f1c06bf1ae2 /training/strategy | |
parent | Embedding normalization: Ignore tensors with grad = 0 (diff) | |
download | textual-inversion-diff-9d6252e63bac241e5c6191eb47adb51b84a5d782.tar.gz textual-inversion-diff-9d6252e63bac241e5c6191eb47adb51b84a5d782.tar.bz2 textual-inversion-diff-9d6252e63bac241e5c6191eb47adb51b84a5d782.zip |
Don't rely on Accelerate for gradient accumulation
Diffstat (limited to 'training/strategy')
-rw-r--r-- | training/strategy/dreambooth.py | 6 |
1 file changed, 0 insertions, 6 deletions
diff --git a/training/strategy/dreambooth.py b/training/strategy/dreambooth.py index d697554..fcf5c0d 100644 --- a/training/strategy/dreambooth.py +++ b/training/strategy/dreambooth.py | |||
@@ -41,12 +41,6 @@ def dreambooth_strategy_callbacks( | |||
41 | sample_guidance_scale: float = 7.5, | 41 | sample_guidance_scale: float = 7.5, |
42 | sample_image_size: Optional[int] = None, | 42 | sample_image_size: Optional[int] = None, |
43 | ): | 43 | ): |
44 | if accelerator.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: | ||
45 | raise ValueError( | ||
46 | "Gradient accumulation is not supported when training the text encoder in distributed training. " | ||
47 | "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." | ||
48 | ) | ||
49 | |||
50 | sample_output_dir.mkdir(parents=True, exist_ok=True) | 44 | sample_output_dir.mkdir(parents=True, exist_ok=True) |
51 | checkpoint_output_dir.mkdir(parents=True, exist_ok=True) | 45 | checkpoint_output_dir.mkdir(parents=True, exist_ok=True) |
52 | 46 | ||