blob: 1e7f4b9685fa15cfa20f24eb4be572d82f63ada0 (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
|
from pathlib import Path
import json
from models.clip.embeddings import ManagedCLIPTextEmbeddings
from models.clip.tokenizer import MultiCLIPTokenizer
from safetensors import safe_open
def load_config(filename):
    """Load a JSON config file and return its merged "args" mapping.

    If the config contains a "base" key, the referenced config file
    (resolved relative to *filename*'s directory) is loaded recursively
    first, and this file's args are layered on top — so the child's
    values override the base's.

    Args:
        filename: Path (str or Path) to the JSON config file.

    Returns:
        dict: The merged "args" mapping, base-first.

    Raises:
        KeyError: If the config file has no "args" key.
    """
    # JSON is defined as UTF-8 (RFC 8259); pin the encoding so loading
    # does not depend on the platform's locale default encoding.
    with open(filename, 'rt', encoding='utf-8') as f:
        config = json.load(f)
    args = config["args"]
    if "base" in config:
        # Base args first, current file's args win (PEP 584 dict merge).
        args = load_config(Path(filename).parent.joinpath(config["base"])) | args
    return args
def load_embeddings_from_dir(tokenizer: MultiCLIPTokenizer, embeddings: ManagedCLIPTextEmbeddings, embeddings_dir: Path):
    """Load all embedding files from a directory into the tokenizer/embeddings.

    Every regular file in *embeddings_dir* is read as a safetensors file
    containing an "embed" tensor. The file's stem is registered as a
    multi-token (one sub-token per row of the tensor, i.e. embed.shape[0])
    and the tensor is added under the newly assigned ids.

    Args:
        tokenizer: Tokenizer to register the new multi-tokens with.
        embeddings: Embedding store receiving the loaded tensors.
        embeddings_dir: Directory containing one safetensors file per token.

    Returns:
        list[str]: Stems of the files actually loaded; empty if the
        directory does not exist or is not a directory.
    """
    if not embeddings_dir.exists() or not embeddings_dir.is_dir():
        return []

    # Single directory scan. The original scanned the directory twice
    # (once to build the token list, once to load), so the returned list
    # could disagree with what was actually loaded if the directory
    # changed between scans.
    tokens = []
    for filename in embeddings_dir.iterdir():
        if not filename.is_file():
            continue
        with safe_open(filename, framework="pt", device="cpu") as file:
            embed = file.get_tensor("embed")
            added = tokenizer.add_multi_tokens(filename.stem, embed.shape[0])
            embeddings.add_embed(added.ids, embed)
        # Record the token only after it was successfully registered.
        tokens.append(filename.stem)
    return tokens
|