From 1aace3e44dae0489130039714f67d980628c92ec Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Tue, 16 May 2023 12:59:08 +0200
Subject: Avoid model recompilation due to varying prompt lengths

---
 training/functional.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'training/functional.py')

diff --git a/training/functional.py b/training/functional.py
index fd3f9f4..10560e5 100644
--- a/training/functional.py
+++ b/training/functional.py
@@ -710,8 +710,8 @@ def train(
             vae = torch.compile(vae, backend='hidet')
 
         if compile_unet:
-            unet = torch.compile(unet, backend='hidet')
-            # unet = torch.compile(unet, mode="reduce-overhead")
+            # unet = torch.compile(unet, backend='hidet')
+            unet = torch.compile(unet, mode="reduce-overhead")
 
     callbacks = strategy.callbacks(
         accelerator=accelerator,
-- 
cgit v1.2.3-54-g00ecf