author     Volpeon <git@volpeon.ink>  2023-01-14 21:53:07 +0100
committer  Volpeon <git@volpeon.ink>  2023-01-14 21:53:07 +0100
commit     83808fe00ac891ad2f625388d144c318b2cb5bfe (patch)
tree       b7ca19d27f90be6f02b14f4a39c62fc7250041a2 /training/common.py
parent     TI: Prepare UNet with Accelerate as well (diff)
download   textual-inversion-diff-83808fe00ac891ad2f625388d144c318b2cb5bfe.tar.gz
           textual-inversion-diff-83808fe00ac891ad2f625388d144c318b2cb5bfe.tar.bz2
           textual-inversion-diff-83808fe00ac891ad2f625388d144c318b2cb5bfe.zip
WIP: Modularization ("free(): invalid pointer" my ass)
Diffstat (limited to 'training/common.py')
-rw-r--r--  training/common.py  370
1 file changed, 0 insertions, 370 deletions
diff --git a/training/common.py b/training/common.py
deleted file mode 100644
index 5d1e3f9..0000000
--- a/training/common.py
+++ /dev/null
@@ -1,370 +0,0 @@
import math
from contextlib import _GeneratorContextManager, nullcontext
from typing import Callable, Any, Tuple, Union

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

from accelerate import Accelerator
from transformers import CLIPTextModel
from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel, DPMSolverMultistepScheduler

from tqdm.auto import tqdm

from pipelines.stable_diffusion.vlpn_stable_diffusion import VlpnStableDiffusion
from models.clip.embeddings import ManagedCLIPTextEmbeddings, patch_managed_embeddings
from models.clip.util import get_extended_embeddings
from models.clip.tokenizer import MultiCLIPTokenizer
from training.util import AverageMeter, CheckpointerBase

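# Default no-op callbacks, used as fallbacks for the hook parameters of train_loop below.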
def noop(*args, **kwargs):
    pass


def noop_ctx(*args, **kwargs):
    return nullcontext()


def noop_on_log():
    return {}

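# Generate any class images that the training data references but that are missing on disk,
# using the current pipeline components.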
def generate_class_images(
    accelerator,
    text_encoder,
    vae,
    unet,
    tokenizer,
    scheduler,
    data_train,
    sample_batch_size,
    sample_image_size,
    sample_steps
):
    missing_data = [item for item in data_train if not item.class_image_path.exists()]

    if len(missing_data) == 0:
        return

    batched_data = [
        missing_data[i:i+sample_batch_size]
        for i in range(0, len(missing_data), sample_batch_size)
    ]

    pipeline = VlpnStableDiffusion(
        text_encoder=text_encoder,
        vae=vae,
        unet=unet,
        tokenizer=tokenizer,
        scheduler=scheduler,
    ).to(accelerator.device)
    pipeline.set_progress_bar_config(dynamic_ncols=True)

    with torch.inference_mode():
        for batch in batched_data:
            image_name = [item.class_image_path for item in batch]
            prompt = [item.cprompt for item in batch]
            nprompt = [item.nprompt for item in batch]

            images = pipeline(
                prompt=prompt,
                negative_prompt=nprompt,
                height=sample_image_size,
                width=sample_image_size,
                num_inference_steps=sample_steps
            ).images

            for i, image in enumerate(images):
                image.save(image_name[i])

    del pipeline

    if torch.cuda.is_available():
        torch.cuda.empty_cache()

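# Load tokenizer, text encoder, VAE, UNet and schedulers from a pretrained checkpoint,
# enable memory-efficient attention, and patch the text embeddings.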
def get_models(pretrained_model_name_or_path: str):
    tokenizer = MultiCLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='tokenizer')
    text_encoder = CLIPTextModel.from_pretrained(pretrained_model_name_or_path, subfolder='text_encoder')
    vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder='vae')
    unet = UNet2DConditionModel.from_pretrained(pretrained_model_name_or_path, subfolder='unet')
    noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_name_or_path, subfolder='scheduler')
    sample_scheduler = DPMSolverMultistepScheduler.from_pretrained(
        pretrained_model_name_or_path, subfolder='scheduler')

    vae.enable_slicing()
    vae.set_use_memory_efficient_attention_xformers(True)
    unet.set_use_memory_efficient_attention_xformers(True)

    embeddings = patch_managed_embeddings(text_encoder)

    return tokenizer, text_encoder, vae, unet, noise_scheduler, sample_scheduler, embeddings

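# Register new placeholder tokens with the tokenizer, resize the embedding table,
# and initialize each placeholder embedding from its initializer token.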
def add_placeholder_tokens(
    tokenizer: MultiCLIPTokenizer,
    embeddings: ManagedCLIPTextEmbeddings,
    placeholder_tokens: list[str],
    initializer_tokens: list[str],
    num_vectors: Union[list[int], int]
):
    initializer_token_ids = [
        tokenizer.encode(token, add_special_tokens=False)
        for token in initializer_tokens
    ]
    placeholder_token_ids = tokenizer.add_multi_tokens(placeholder_tokens, num_vectors)

    embeddings.resize(len(tokenizer))

    for (placeholder_token_id, initializer_token_id) in zip(placeholder_token_ids, initializer_token_ids):
        embeddings.add_embed(placeholder_token_id, initializer_token_id)

    return placeholder_token_ids, initializer_token_ids

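# Single training/evaluation step: encode images to latents, add noise, predict the noise
# residual with the UNet, and compute the MSE loss against the epsilon or v-prediction target,
# with an optional prior-preservation term when the batch carries prior data.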
def loss_step(
    vae: AutoencoderKL,
    noise_scheduler: DDPMScheduler,
    unet: UNet2DConditionModel,
    text_encoder: CLIPTextModel,
    prior_loss_weight: float,
    seed: int,
    step: int,
    batch: dict[str, Any],
    eval: bool = False
):
    # Convert images to latent space
    latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach()
    latents = latents * 0.18215

    generator = torch.Generator(device=latents.device).manual_seed(seed + step) if eval else None

    # Sample noise that we'll add to the latents
    noise = torch.randn(
        latents.shape,
        dtype=latents.dtype,
        layout=latents.layout,
        device=latents.device,
        generator=generator
    )
    bsz = latents.shape[0]
    # Sample a random timestep for each image
    timesteps = torch.randint(
        0,
        noise_scheduler.config.num_train_timesteps,
        (bsz,),
        generator=generator,
        device=latents.device,
    )
    timesteps = timesteps.long()

    # Add noise to the latents according to the noise magnitude at each timestep
    # (this is the forward diffusion process)
    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
    noisy_latents = noisy_latents.to(dtype=unet.dtype)

    # Get the text embedding for conditioning
    encoder_hidden_states = get_extended_embeddings(
        text_encoder,
        batch["input_ids"],
        batch["attention_mask"]
    )
    encoder_hidden_states = encoder_hidden_states.to(dtype=unet.dtype)

    # Predict the noise residual
    model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample

    # Get the target for loss depending on the prediction type
    if noise_scheduler.config.prediction_type == "epsilon":
        target = noise
    elif noise_scheduler.config.prediction_type == "v_prediction":
        target = noise_scheduler.get_velocity(latents, noise, timesteps)
    else:
        raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

    if batch["with_prior"].all():
        # Chunk model_pred and target into two halves and compute the loss on each half separately.
        model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
        target, target_prior = torch.chunk(target, 2, dim=0)

        # Compute instance loss
        loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")

        # Compute prior loss
        prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")

        # Add the prior loss to the instance loss.
        loss = loss + prior_loss_weight * prior_loss
    else:
        loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")

    acc = (model_pred == target).float().mean()

    return loss, acc, bsz

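# Generic training loop: runs the epoch/step loop with gradient accumulation, logs running
# loss/accuracy, runs a validation pass after each epoch, and saves samples, periodic
# checkpoints and milestone checkpoints (on new best validation accuracy) via the checkpointer.
# Behaviour can be customized through the on_* hooks.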
def train_loop(
    accelerator: Accelerator,
    optimizer: torch.optim.Optimizer,
    lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
    model: torch.nn.Module,
    checkpointer: CheckpointerBase,
    train_dataloader: DataLoader,
    val_dataloader: DataLoader,
    loss_step: Union[Callable[[int, Any], Tuple[Any, Any, int]], Callable[[int, Any, bool], Tuple[Any, Any, int]]],
    sample_frequency: int = 10,
    checkpoint_frequency: int = 50,
    global_step_offset: int = 0,
    num_epochs: int = 100,
    on_log: Callable[[], dict[str, Any]] = noop_on_log,
    on_train: Callable[[int], _GeneratorContextManager] = noop_ctx,
    on_before_optimize: Callable[[int], None] = noop,
    on_after_optimize: Callable[[float], None] = noop,
    on_eval: Callable[[], _GeneratorContextManager] = noop_ctx
):
    num_training_steps_per_epoch = math.ceil(len(train_dataloader) / accelerator.gradient_accumulation_steps)
    num_val_steps_per_epoch = len(val_dataloader)

    num_training_steps = num_training_steps_per_epoch * num_epochs
    num_val_steps = num_val_steps_per_epoch * num_epochs

    global_step = 0

    avg_loss = AverageMeter()
    avg_acc = AverageMeter()

    avg_loss_val = AverageMeter()
    avg_acc_val = AverageMeter()

    max_acc_val = 0.0

    local_progress_bar = tqdm(
        range(num_training_steps_per_epoch + num_val_steps_per_epoch),
        disable=not accelerator.is_local_main_process,
        dynamic_ncols=True
    )
    local_progress_bar.set_description(f"Epoch 1 / {num_epochs}")

    global_progress_bar = tqdm(
        range(num_training_steps + num_val_steps),
        disable=not accelerator.is_local_main_process,
        dynamic_ncols=True
    )
    global_progress_bar.set_description("Total progress")

    try:
        for epoch in range(num_epochs):
            if accelerator.is_main_process:
                if epoch % sample_frequency == 0:
                    checkpointer.save_samples(global_step + global_step_offset)

                if epoch % checkpoint_frequency == 0 and epoch != 0:
                    checkpointer.checkpoint(global_step + global_step_offset, "training")

            local_progress_bar.set_description(f"Epoch {epoch + 1} / {num_epochs}")
            local_progress_bar.reset()

            model.train()

            with on_train(epoch):
                for step, batch in enumerate(train_dataloader):
                    with accelerator.accumulate(model):
                        loss, acc, bsz = loss_step(step, batch)

                        accelerator.backward(loss)

                        on_before_optimize(epoch)

                        optimizer.step()
                        lr_scheduler.step()
                        optimizer.zero_grad(set_to_none=True)

                        avg_loss.update(loss.detach_(), bsz)
                        avg_acc.update(acc.detach_(), bsz)

                    # Checks if the accelerator has performed an optimization step behind the scenes
                    if accelerator.sync_gradients:
                        on_after_optimize(lr_scheduler.get_last_lr()[0])

                        local_progress_bar.update(1)
                        global_progress_bar.update(1)

                        global_step += 1

                        logs = {
                            "train/loss": avg_loss.avg.item(),
                            "train/acc": avg_acc.avg.item(),
                            "train/cur_loss": loss.item(),
                            "train/cur_acc": acc.item(),
                            "lr": lr_scheduler.get_last_lr()[0],
                        }
                        logs.update(on_log())

                        accelerator.log(logs, step=global_step)

                        local_progress_bar.set_postfix(**logs)

                    if global_step >= num_training_steps:
                        break

            accelerator.wait_for_everyone()

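            # Validation pass: compute loss/accuracy over val_dataloader without gradients.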
            model.eval()

            cur_loss_val = AverageMeter()
            cur_acc_val = AverageMeter()

            with torch.inference_mode(), on_eval():
                for step, batch in enumerate(val_dataloader):
                    loss, acc, bsz = loss_step(step, batch, True)

                    loss = loss.detach_()
                    acc = acc.detach_()

                    cur_loss_val.update(loss, bsz)
                    cur_acc_val.update(acc, bsz)

                    avg_loss_val.update(loss, bsz)
                    avg_acc_val.update(acc, bsz)

                    local_progress_bar.update(1)
                    global_progress_bar.update(1)

                    logs = {
                        "val/loss": avg_loss_val.avg.item(),
                        "val/acc": avg_acc_val.avg.item(),
                        "val/cur_loss": loss.item(),
                        "val/cur_acc": acc.item(),
                    }
                    local_progress_bar.set_postfix(**logs)

            logs["val/cur_loss"] = cur_loss_val.avg.item()
            logs["val/cur_acc"] = cur_acc_val.avg.item()

            accelerator.log(logs, step=global_step)

            local_progress_bar.clear()
            global_progress_bar.clear()

            if accelerator.is_main_process:
                if avg_acc_val.avg.item() > max_acc_val:
                    accelerator.print(
                        f"Global step {global_step}: Validation accuracy reached new maximum: {max_acc_val:.2e} -> {avg_acc_val.avg.item():.2e}")
                    checkpointer.checkpoint(global_step + global_step_offset, "milestone")
                    max_acc_val = avg_acc_val.avg.item()

        # Create the pipeline using the trained modules and save it.
        if accelerator.is_main_process:
            print("Finished!")
            checkpointer.checkpoint(global_step + global_step_offset, "end")
            checkpointer.save_samples(global_step + global_step_offset)
            accelerator.end_training()

    except KeyboardInterrupt:
        if accelerator.is_main_process:
            print("Interrupted")
            checkpointer.checkpoint(global_step + global_step_offset, "end")
            accelerator.end_training()
        quit()