From 56edf85c8b80d49c998bcf26392cce50d552137a Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Sat, 31 Dec 2022 23:09:41 +0100
Subject: Update

---
 models/clip/embeddings.py | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/models/clip/embeddings.py b/models/clip/embeddings.py
index 91a575d..cab1515 100644
--- a/models/clip/embeddings.py
+++ b/models/clip/embeddings.py
@@ -12,18 +12,22 @@ from transformers.models.clip import CLIPTextConfig
 from transformers.models.clip.modeling_clip import CLIPTextEmbeddings
 
 
-def expand_embedding(old_embedding: nn.Embedding, n: int) -> nn.Embedding:
+def resize_embedding(old_embedding: nn.Embedding, new_num_embeddings: int, initializer_factor: float = 1.0) -> nn.Embedding:
     old_num_embeddings, old_embedding_dim = old_embedding.weight.size()
 
+    if old_num_embeddings == new_num_embeddings:
+        return old_embedding
+
+    n = min(old_num_embeddings, new_num_embeddings)
+
     new_embedding = nn.Embedding(
-        old_num_embeddings + n,
+        new_num_embeddings,
         old_embedding_dim,
         device=old_embedding.weight.device,
         dtype=old_embedding.weight.dtype
     )
-    new_embedding.weight.data.zero_()
-    new_embedding.weight.data[:old_num_embeddings] = old_embedding.weight.data
-
+    new_embedding.weight.data.normal_(mean=0.0, std=initializer_factor * 0.02)
+    new_embedding.weight.data[:n, :] = old_embedding.weight.data[:n, :]
     return new_embedding
 
 
@@ -40,9 +44,13 @@ class ManagedCLIPTextEmbeddings(CLIPTextEmbeddings):
             device=self.token_embedding.weight.device,
             dtype=self.token_embedding.weight.dtype
         )
-        self.temp_token_embedding.weight.data.zero_()
+        self.temp_token_embedding.weight.data.normal_(mean=0.0, std=config.initializer_factor * 0.02)
         self.temp_token_ids = torch.tensor([], dtype=torch.long)
 
+    def resize(self, size: int):
+        self.temp_token_embedding = resize_embedding(self.temp_token_embedding, size, self.config.initializer_factor)
+        self.token_embedding = resize_embedding(self.token_embedding, size, self.config.initializer_factor)
+
     def add_embed(self, token_ids: Union[int, list[int]], initializer: Optional[Union[int, list[int], torch.FloatTensor]] = None):
         if isinstance(token_ids, int):
             token_ids = [token_ids]
@@ -55,20 +63,14 @@ class ManagedCLIPTextEmbeddings(CLIPTextEmbeddings):
             initializer = (initializer * len(token_ids))[:len(token_ids)]
 
         with torch.no_grad():
-            initializer = self.get_embed(initializer)
-
-        self.temp_token_embedding = expand_embedding(self.temp_token_embedding, len(token_ids))
-        self.token_embedding = expand_embedding(self.token_embedding, len(token_ids))
+            initializer = self.get_embed(initializer).to(dtype=self.temp_token_embedding.weight.dtype)
 
         token_ids = torch.tensor(token_ids, dtype=torch.long)
 
         self.temp_token_ids = torch.cat([self.temp_token_ids, token_ids])
 
         if initializer is not None:
-            self.temp_token_embedding.weight.data[token_ids] = initializer.to(
-                dtype=self.temp_token_embedding.weight.dtype)
-        else:
-            self.temp_token_embedding.weight.data[token_ids].zero_()
+            self.temp_token_embedding.weight.data[token_ids] = initializer
 
     def load_embed(self, input_ids: list[int], filename: Path):
         with safe_open(filename, framework="pt", device="cpu") as file:
-- 
cgit v1.2.3-54-g00ecf
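
For readers skimming the patch: a minimal, self-contained sketch (not part of the commit) of what the renamed resize_embedding helper does after this change. The table can now grow or shrink to an arbitrary new_num_embeddings, surviving rows are copied over, and freshly created rows start as CLIP-style noise (std = initializer_factor * 0.02) instead of zeros. The function body mirrors the diff; the toy sizes in the check at the end are illustrative only.

# Sketch only -- mirrors the helper as it reads after this commit.
import torch
import torch.nn as nn


def resize_embedding(old_embedding: nn.Embedding, new_num_embeddings: int, initializer_factor: float = 1.0) -> nn.Embedding:
    old_num_embeddings, old_embedding_dim = old_embedding.weight.size()

    if old_num_embeddings == new_num_embeddings:
        return old_embedding

    # Number of rows the old and new tables share; copied verbatim below.
    n = min(old_num_embeddings, new_num_embeddings)

    new_embedding = nn.Embedding(
        new_num_embeddings,
        old_embedding_dim,
        device=old_embedding.weight.device,
        dtype=old_embedding.weight.dtype
    )
    # Fresh rows get CLIP-style init noise instead of the old zero init.
    new_embedding.weight.data.normal_(mean=0.0, std=initializer_factor * 0.02)
    new_embedding.weight.data[:n, :] = old_embedding.weight.data[:n, :]
    return new_embedding


# Toy check: grow a 4-row table to 6 rows; the first 4 rows must survive.
emb = nn.Embedding(4, 8)
grown = resize_embedding(emb, 6)
assert grown.num_embeddings == 6
assert torch.equal(grown.weight.data[:4], emb.weight.data)

This is why add_embed no longer calls the old expand_embedding per batch of new tokens: the new resize method on ManagedCLIPTextEmbeddings brings both tables to the target size up front, and add_embed only writes the initializer rows.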