From 6b58e9de249e872bd2d83e5916e6c633f52cfbb8 Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Sat, 31 Dec 2022 12:58:54 +0100
Subject: Added multi-vector embeddings

---
 common.py | 38 +++++++++++++-------------------------
 1 file changed, 13 insertions(+), 25 deletions(-)

(limited to 'common.py')

diff --git a/common.py b/common.py
index f369475..e8d3ac1 100644
--- a/common.py
+++ b/common.py
@@ -1,9 +1,10 @@
 from pathlib import Path
 import json
 
-import torch
+from models.clip.embeddings import ManagedCLIPTextEmbeddings
+from models.clip.tokenizer import MultiCLIPTokenizer
 
-from transformers import CLIPTextModel, CLIPTokenizer
+from safetensors import safe_open
 
 
 def load_config(filename):
@@ -18,33 +19,20 @@ def load_config(filename):
     return args
 
 
-def load_text_embedding(embeddings, token_id, file):
-    data = torch.load(file, map_location="cpu")
-
-    assert len(data.keys()) == 1, 'embedding data has multiple terms in it'
-
-    emb = next(iter(data.values()))
-    if len(emb.shape) == 1:
-        emb = emb.unsqueeze(0)
-
-    embeddings[token_id] = emb
-
-
-def load_text_embeddings(tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, embeddings_dir: Path):
+def load_embeddings_from_dir(tokenizer: MultiCLIPTokenizer, embeddings: ManagedCLIPTextEmbeddings, embeddings_dir: Path):
     if not embeddings_dir.exists() or not embeddings_dir.is_dir():
         return []
 
-    files = [file for file in embeddings_dir.iterdir() if file.is_file()]
-
-    tokens = [file.stem for file in files]
-    added = tokenizer.add_tokens(tokens)
-    token_ids = tokenizer.convert_tokens_to_ids(tokens)
-
-    text_encoder.resize_token_embeddings(len(tokenizer))
+    filenames = [filename for filename in embeddings_dir.iterdir() if filename.is_file()]
+    tokens = [filename.stem for filename in filenames]
 
-    token_embeds = text_encoder.get_input_embeddings().weight.data
+    for filename in embeddings_dir.iterdir():
+        if filename.is_file():
+            with safe_open(filename, framework="pt", device="cpu") as file:
+                embed = file.get_tensor("embed")
 
-    for (token_id, file) in zip(token_ids, files):
-        load_text_embedding(token_embeds, token_id, file)
+            added = tokenizer.add_multi_tokens(filename.stem, embed.shape[0])
+            embeddings.add_embed(added.placeholder_id)
+            embeddings.add_embed(added.multi_ids, embed)
 
     return tokens
--
cgit v1.2.3-54-g00ecf
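
Editor's note: a minimal usage sketch of the new loader, not part of the patch. How the tokenizer and the managed embedding layer are constructed is not shown in this diff, so the `patch_managed_embeddings` helper, the `from_pretrained` setup, the model id, and the `embeddings` directory are all assumptions about the surrounding codebase.

```python
from pathlib import Path

from transformers import CLIPTextModel

from common import load_embeddings_from_dir
from models.clip.embeddings import patch_managed_embeddings  # assumed helper name
from models.clip.tokenizer import MultiCLIPTokenizer

# Assumptions: MultiCLIPTokenizer extends CLIPTokenizer, so from_pretrained
# works; patch_managed_embeddings replaces the text encoder's token embedding
# layer with a ManagedCLIPTextEmbeddings and returns it.
model_id = "runwayml/stable-diffusion-v1-5"  # hypothetical base model
tokenizer = MultiCLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")
embeddings = patch_managed_embeddings(text_encoder)

# Per the patch, each .safetensors file holds one "embed" tensor whose first
# dimension is the vector count; the file stem becomes the placeholder token.
tokens = load_embeddings_from_dir(tokenizer, embeddings, Path("embeddings"))
print(f"Loaded multi-vector embeddings: {tokens}")
```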