from pathlib import Path

import torch
from transformers import CLIPTextModel, CLIPTokenizer


def load_text_embedding(embeddings, token_id, file):
    """Copy a single learned embedding from `file` into row `token_id` of the embedding matrix."""
    data = torch.load(file, map_location="cpu")
    assert len(data.keys()) == 1, "embedding data has multiple terms in it"

    emb = next(iter(data.values()))
    if len(emb.shape) == 1:
        # Ensure a consistent (1, hidden_size) shape before copying into the row.
        emb = emb.unsqueeze(0)

    embeddings[token_id] = emb


def load_text_embeddings(tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, embeddings_dir: Path):
    """Register every embedding file in `embeddings_dir` as a new token named after the file's stem."""
    if not embeddings_dir.exists() or not embeddings_dir.is_dir():
        return []

    files = [file for file in embeddings_dir.iterdir() if file.is_file()]
    tokens = [file.stem for file in files]

    # Add the new tokens to the vocabulary (add_tokens returns how many were actually added).
    tokenizer.add_tokens(tokens)
    token_ids = tokenizer.convert_tokens_to_ids(tokens)

    # Grow the embedding matrix to cover the new tokens, then write the learned vectors into it.
    text_encoder.resize_token_embeddings(len(tokenizer))
    token_embeds = text_encoder.get_input_embeddings().weight.data

    for token_id, file in zip(token_ids, files):
        load_text_embedding(token_embeds, token_id, file)

    return tokens
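

# Example usage (a minimal sketch, not part of the loader above): pull the CLIP
# tokenizer and text encoder out of a Stable Diffusion checkpoint and register
# every embedding found in a local "embeddings/" directory. The model id and the
# directory name are assumptions for illustration only.
if __name__ == "__main__":
    model_id = "runwayml/stable-diffusion-v1-5"  # assumed checkpoint
    tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
    text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")

    loaded = load_text_embeddings(tokenizer, text_encoder, Path("embeddings"))
    print(f"registered {len(loaded)} embedding tokens: {loaded}")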