author     Volpeon <git@volpeon.ink>    2023-04-10 10:34:12 +0200
committer  Volpeon <git@volpeon.ink>    2023-04-10 10:34:12 +0200
commit     eb6a92abda5893c975437026cdaf0ce0bfefe2a4 (patch)
tree       a1525010b48362986e0cc2b7c3f7505a35dea71a /training/strategy
parent     Update (diff)
download   textual-inversion-diff-eb6a92abda5893c975437026cdaf0ce0bfefe2a4.tar.gz
           textual-inversion-diff-eb6a92abda5893c975437026cdaf0ce0bfefe2a4.tar.bz2
           textual-inversion-diff-eb6a92abda5893c975437026cdaf0ce0bfefe2a4.zip
Update
Diffstat (limited to 'training/strategy')
-rw-r--r--  training/strategy/dreambooth.py  2
-rw-r--r--  training/strategy/lora.py        2
-rw-r--r--  training/strategy/ti.py          2
3 files changed, 3 insertions, 3 deletions
diff --git a/training/strategy/dreambooth.py b/training/strategy/dreambooth.py
index 695174a..42624cd 100644
--- a/training/strategy/dreambooth.py
+++ b/training/strategy/dreambooth.py
@@ -198,7 +198,7 @@ def dreambooth_prepare(
 
     text_encoder.text_model.embeddings.requires_grad_(False)
 
-    return text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler, {}
+    return text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler
 
 
 dreambooth_strategy = TrainingStrategy(
diff --git a/training/strategy/lora.py b/training/strategy/lora.py
index ae85401..73ec8f2 100644
--- a/training/strategy/lora.py
+++ b/training/strategy/lora.py
@@ -184,7 +184,7 @@ def lora_prepare(
 
     text_encoder.text_model.embeddings.token_override_embedding.params.requires_grad_(True)
 
-    return text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler, {}
+    return text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler
 
 
 lora_strategy = TrainingStrategy(
diff --git a/training/strategy/ti.py b/training/strategy/ti.py
index 9cdc1bb..363c3f9 100644
--- a/training/strategy/ti.py
+++ b/training/strategy/ti.py
@@ -207,7 +207,7 @@ def textual_inversion_prepare(
     text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
     text_encoder.text_model.embeddings.token_embedding.requires_grad_(False)
 
-    return text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler, {}
+    return text_encoder, unet, optimizer, train_dataloader, val_dataloader, lr_scheduler
 
 
 textual_inversion_strategy = TrainingStrategy(
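
Note: each *_prepare function touched above now returns a six-element tuple, dropping the trailing empty dict. The sketch below is only an illustration of the caller-side adjustment this implies; the run_strategy() wrapper and its arguments are assumptions and do not appear in this commit.

# Illustrative sketch only, not code from this repository.
def run_strategy(prepare, *args, **kwargs):
    # Before this commit, prepare() returned a 7-tuple ending in an unused
    # empty dict, which callers had to discard, e.g.:
    #   text_encoder, unet, optimizer, train_dl, val_dl, lr_scheduler, _ = prepare(...)
    # After this commit, prepare() returns a plain 6-tuple:
    text_encoder, unet, optimizer, train_dl, val_dl, lr_scheduler = prepare(*args, **kwargs)
    return text_encoder, unet, optimizer, train_dl, val_dl, lr_scheduler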