Diffstat (limited to 'training/strategy')

 -rw-r--r--  training/strategy/dreambooth.py | 7
 -rw-r--r--  training/strategy/lora.py       | 6
 -rw-r--r--  training/strategy/ti.py         | 3
 3 files changed, 5 insertions(+), 11 deletions(-)
diff --git a/training/strategy/dreambooth.py b/training/strategy/dreambooth.py
index fa51bc7..4ae28b7 100644
--- a/training/strategy/dreambooth.py
+++ b/training/strategy/dreambooth.py
@@ -142,9 +142,7 @@ def dreambooth_strategy_callbacks(
         )
         pipeline.save_pretrained(checkpoint_output_dir)
 
-        del unet_
-        del text_encoder_
-        del pipeline
+        del unet_, text_encoder_, pipeline
 
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
@@ -165,8 +163,7 @@ def dreambooth_strategy_callbacks(
         unet_.to(dtype=orig_unet_dtype)
         text_encoder_.to(dtype=orig_text_encoder_dtype)
 
-        del unet_
-        del text_encoder_
+        del unet_, text_encoder_
 
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
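For reference, a tuple-style `del` statement unbinds each listed name exactly as the separate statements did, so this hunk (and the matching ones below) is a pure consolidation with no behavioral change. A minimal sketch with throwaway placeholder objects, not the repository code:

    # Placeholders standing in for the models/pipeline held by the callback.
    unet_, text_encoder_, pipeline = object(), object(), object()

    # One statement unbinding all three names...
    del unet_, text_encoder_, pipeline

    # ...has the same effect as the previous form:
    #   del unet_
    #   del text_encoder_
    #   del pipeline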
diff --git a/training/strategy/lora.py b/training/strategy/lora.py
index 73ec8f2..1517ee8 100644
--- a/training/strategy/lora.py
+++ b/training/strategy/lora.py
@@ -140,8 +140,7 @@ def lora_strategy_callbacks(
         with open(checkpoint_output_dir / "lora_config.json", "w") as f:
             json.dump(lora_config, f)
 
-        del unet_
-        del text_encoder_
+        del unet_, text_encoder_
 
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
@@ -153,8 +152,7 @@ def lora_strategy_callbacks(
 
         save_samples_(step=step, unet=unet_, text_encoder=text_encoder_)
 
-        del unet_
-        del text_encoder_
+        del unet_, text_encoder_
 
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
diff --git a/training/strategy/ti.py b/training/strategy/ti.py
index 08af89d..ca7cc3d 100644
--- a/training/strategy/ti.py
+++ b/training/strategy/ti.py
@@ -158,8 +158,7 @@ def textual_inversion_strategy_callbacks(
         unet_.to(dtype=orig_unet_dtype)
         text_encoder_.to(dtype=orig_text_encoder_dtype)
 
-        del unet_
-        del text_encoder_
+        del unet_, text_encoder_
 
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
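All three strategies follow the same cleanup pattern after saving a checkpoint or samples: drop the local references to the unwrapped models, then hand PyTorch's cached CUDA blocks back to the driver. A minimal standalone sketch of that pattern; the small `nn.Linear` stand-ins and the explicit `gc.collect()` are illustrative additions, not part of the repository code:

    import gc

    import torch

    # Stand-ins for the unwrapped models the callbacks work with.
    unet_ = torch.nn.Linear(4, 4)
    text_encoder_ = torch.nn.Linear(4, 4)

    # Drop the last references so the objects become collectable...
    del unet_, text_encoder_
    gc.collect()  # optional: force collection before emptying the cache

    # ...then release cached, now-unused CUDA memory back to the driver.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()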