path: root/common.py
blob: 7ffa77f8902eebe16cd7f8fe34d07035463eaa16
from pathlib import Path

import torch
from transformers import CLIPTextModel, CLIPTokenizer


def load_text_embedding(embeddings, token_id, file):
    """Load a single saved embedding file into row `token_id` of the embedding matrix."""
    data = torch.load(file, map_location="cpu")

    assert len(data) == 1, "embedding data has multiple terms in it"

    # The file maps one token name to its embedding; reshape a bare
    # vector to (1, dim) so it slots into the embedding matrix row.
    emb = next(iter(data.values()))
    if emb.ndim == 1:
        emb = emb.unsqueeze(0)

    embeddings[token_id] = emb
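
# For reference, a compatible embedding file can be produced with torch.save.
# The token name and the 768-dim vector below are hypothetical examples (768
# matches CLIP ViT-L/14; the actual width depends on the text encoder):
#
#   torch.save({"<my-token>": torch.randn(768)}, "my-token.pt")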


def load_text_embeddings(tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, embeddings_dir: Path):
    """Register every embedding file in `embeddings_dir` as a new token.

    Each file's stem is used as the token name. Returns the list of
    token names that were registered (empty if the directory is missing).
    """
    if not embeddings_dir.exists() or not embeddings_dir.is_dir():
        return []

    files = [file for file in embeddings_dir.iterdir() if file.is_file()]

    # Add one new token per file, named after the file's stem.
    tokens = [file.stem for file in files]
    tokenizer.add_tokens(tokens)
    token_ids = tokenizer.convert_tokens_to_ids(tokens)

    # Grow the encoder's embedding matrix to cover the newly added tokens.
    text_encoder.resize_token_embeddings(len(tokenizer))

    token_embeds = text_encoder.get_input_embeddings().weight.data

    # Copy each stored embedding into the row reserved for its token.
    for (token_id, file) in zip(token_ids, files):
        load_text_embedding(token_embeds, token_id, file)

    return tokens
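

# --- Usage sketch (illustrative; `model_id` and the "embeddings" directory
# are hypothetical, assuming a diffusers-style Stable Diffusion checkpoint) ---
#
#   from pathlib import Path
#   from transformers import CLIPTextModel, CLIPTokenizer
#
#   tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
#   text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")
#
#   new_tokens = load_text_embeddings(tokenizer, text_encoder, Path("embeddings"))
#   print(f"registered tokens: {new_tokens}")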