Diffstat (limited to 'models')
-rw-r--r--  models/attention/control.py       | 104
-rw-r--r--  models/attention/hook.py           |   5
-rw-r--r--  models/attention/structured.py     |  65
-rw-r--r--  models/clip/embeddings.py          |  29
-rw-r--r--  models/clip/tokenizer.py           |  23
-rw-r--r--  models/clip/util.py                |  17
-rw-r--r--  models/convnext/discriminator.py   |  11
-rw-r--r--  models/sparse.py                   |  12
8 files changed, 175 insertions(+), 91 deletions(-)
diff --git a/models/attention/control.py b/models/attention/control.py
index 248bd9f..ec378c4 100644
--- a/models/attention/control.py
+++ b/models/attention/control.py
@@ -23,7 +23,7 @@ class AttentionControl(abc.ABC):
                 attn = self.forward(attn, is_cross, place_in_unet)
             else:
                 h = attn.shape[0]
-                attn[h // 2:] = self.forward(attn[h // 2:], is_cross, place_in_unet)
+                attn[h // 2 :] = self.forward(attn[h // 2 :], is_cross, place_in_unet)
         self.cur_att_layer += 1
         if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
             self.cur_att_layer = 0
@@ -49,12 +49,18 @@ class EmptyControl(AttentionControl):
 class AttentionStore(AttentionControl):
     @staticmethod
     def get_empty_store():
-        return {"down_cross": [], "mid_cross": [], "up_cross": [],
-                "down_self": [], "mid_self": [], "up_self": []}
+        return {
+            "down_cross": [],
+            "mid_cross": [],
+            "up_cross": [],
+            "down_self": [],
+            "mid_self": [],
+            "up_self": [],
+        }

     def forward(self, attn, is_cross: bool, place_in_unet: str):
         key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
-        if attn.shape[1] <= 32 ** 2:  # avoid memory overhead
+        if attn.shape[1] <= 32**2:  # avoid memory overhead
             self.step_store[key].append(attn)
         return attn

@@ -68,8 +74,10 @@ class AttentionStore(AttentionControl):
         self.step_store = self.get_empty_store()

     def get_average_attention(self):
-        average_attention = {key: [item / self.cur_step for item in self.attention_store[key]]
-                             for key in self.attention_store}
+        average_attention = {
+            key: [item / self.cur_step for item in self.attention_store[key]]
+            for key in self.attention_store
+        }
         return average_attention

     def reset(self):
@@ -90,7 +98,7 @@ class AttentionControlEdit(AttentionStore, abc.ABC):
         return x_t

     def replace_self_attention(self, attn_base, att_replace):
-        if att_replace.shape[2] <= 16 ** 2:
+        if att_replace.shape[2] <= 16**2:
             return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape)
         else:
             return att_replace
@@ -101,41 +109,62 @@ class AttentionControlEdit(AttentionStore, abc.ABC):

     def forward(self, attn, is_cross: bool, place_in_unet: str):
         super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet)
-        if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]):
+        if is_cross or (
+            self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]
+        ):
             h = attn.shape[0] // (self.batch_size)
             attn = attn.reshape(self.batch_size, h, *attn.shape[1:])
             attn_base, attn_repalce = attn[0], attn[1:]
             if is_cross:
                 alpha_words = self.cross_replace_alpha[self.cur_step]
-                attn_repalce_new = self.replace_cross_attention(
-                    attn_base, attn_repalce) * alpha_words + (1 - alpha_words) * attn_repalce
+                attn_repalce_new = (
+                    self.replace_cross_attention(attn_base, attn_repalce) * alpha_words
+                    + (1 - alpha_words) * attn_repalce
+                )
                 attn[1:] = attn_repalce_new
             else:
                 attn[1:] = self.replace_self_attention(attn_base, attn_repalce)
             attn = attn.reshape(self.batch_size * h, *attn.shape[2:])
         return attn

-    def __init__(self, prompts, num_steps: int,
-                 cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
-                 self_replace_steps: Union[float, Tuple[float, float]],
-                 local_blend: Optional[LocalBlend]):
+    def __init__(
+        self,
+        prompts,
+        num_steps: int,
+        cross_replace_steps: Union[
+            float, Tuple[float, float], Dict[str, Tuple[float, float]]
+        ],
+        self_replace_steps: Union[float, Tuple[float, float]],
+        local_blend: Optional[LocalBlend],
+    ):
         super(AttentionControlEdit, self).__init__()
         self.batch_size = len(prompts)
         self.cross_replace_alpha = ptp_utils.get_time_words_attention_alpha(
-            prompts, num_steps, cross_replace_steps, tokenizer).to(device)
+            prompts, num_steps, cross_replace_steps, tokenizer
+        ).to(device)
         if type(self_replace_steps) is float:
             self_replace_steps = 0, self_replace_steps
-        self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1])
+        self.num_self_replace = int(num_steps * self_replace_steps[0]), int(
+            num_steps * self_replace_steps[1]
+        )
         self.local_blend = local_blend


 class AttentionReplace(AttentionControlEdit):
     def replace_cross_attention(self, attn_base, att_replace):
-        return torch.einsum('hpw,bwn->bhpn', attn_base, self.mapper)
+        return torch.einsum("hpw,bwn->bhpn", attn_base, self.mapper)

-    def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,
-                 local_blend: Optional[LocalBlend] = None):
-        super(AttentionReplace, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend)
+    def __init__(
+        self,
+        prompts,
+        num_steps: int,
+        cross_replace_steps: float,
+        self_replace_steps: float,
+        local_blend: Optional[LocalBlend] = None,
+    ):
+        super(AttentionReplace, self).__init__(
+            prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend
+        )
         self.mapper = seq_aligner.get_replacement_mapper(prompts, tokenizer).to(device)


@@ -145,9 +174,17 @@ class AttentionRefine(AttentionControlEdit):
         attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)
         return attn_replace

-    def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,
-                 local_blend: Optional[LocalBlend] = None):
-        super(AttentionRefine, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend)
+    def __init__(
+        self,
+        prompts,
+        num_steps: int,
+        cross_replace_steps: float,
+        self_replace_steps: float,
+        local_blend: Optional[LocalBlend] = None,
+    ):
+        super(AttentionRefine, self).__init__(
+            prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend
+        )
         self.mapper, alphas = seq_aligner.get_refinement_mapper(prompts, tokenizer)
         self.mapper, alphas = self.mapper.to(device), alphas.to(device)
         self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])
@@ -156,13 +193,24 @@ class AttentionRefine(AttentionControlEdit):
 class AttentionReweight(AttentionControlEdit):
     def replace_cross_attention(self, attn_base, att_replace):
         if self.prev_controller is not None:
-            attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace)
+            attn_base = self.prev_controller.replace_cross_attention(
+                attn_base, att_replace
+            )
         attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :]
         return attn_replace

-    def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float, equalizer,
-                 local_blend: Optional[LocalBlend] = None, controller: Optional[AttentionControlEdit] = None):
-        super(AttentionReweight, self).__init__(prompts, num_steps,
-                                                cross_replace_steps, self_replace_steps, local_blend)
+    def __init__(
+        self,
+        prompts,
+        num_steps: int,
+        cross_replace_steps: float,
+        self_replace_steps: float,
+        equalizer,
+        local_blend: Optional[LocalBlend] = None,
+        controller: Optional[AttentionControlEdit] = None,
+    ):
+        super(AttentionReweight, self).__init__(
+            prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend
+        )
         self.equalizer = equalizer.to(device)
         self.prev_controller = controller
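For reference, a minimal usage sketch of the controllers reformatted above (the prompts, step count, and fractions are illustrative; the module also expects its module-level `tokenizer` and `device` to be configured elsewhere in the repository):

# Hedged sketch — not part of this commit.
from models.attention.control import AttentionReplace

prompts = ["a cat sitting on a chair", "a dog sitting on a chair"]
controller = AttentionReplace(
    prompts,
    num_steps=50,             # assumed sampler step count
    cross_replace_steps=0.8,  # swap in base-prompt cross-attention for 80% of steps
    self_replace_steps=0.4,   # swap in base-prompt self-attention for 40% of steps
)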
diff --git a/models/attention/hook.py b/models/attention/hook.py
index 903de02..6b5fb68 100644
--- a/models/attention/hook.py
+++ b/models/attention/hook.py
@@ -3,6 +3,7 @@ import torch

 try:
     import xformers.ops
+
     xformers._is_functorch_available = True
     MEM_EFFICIENT_ATTN = True
 except ImportError:
@@ -42,10 +43,10 @@ def register_attention_control(model, controller):
         return forward

     def register_recr(net_, count, place_in_unet):
-        if net_.__class__.__name__ == 'CrossAttention':
+        if net_.__class__.__name__ == "CrossAttention":
             net_.forward = ca_forward(net_, place_in_unet)
             return count + 1
-        elif hasattr(net_, 'children'):
+        elif hasattr(net_, "children"):
             for net__ in net_.children():
                 count = register_recr(net__, count, place_in_unet)
         return count
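How the hook is typically wired in (an assumed workflow, not shown in this commit): every `CrossAttention` module found by `register_recr` gets its `forward` replaced, so the controller sees and can rewrite each attention map during sampling.

from models.attention.control import AttentionStore
from models.attention.hook import register_attention_control

controller = AttentionStore()
register_attention_control(unet, controller)  # `unet`: the diffusion UNet (assumed to exist)
# ... run the denoising loop ...
maps = controller.get_average_attention()     # per-layer maps, averaged over the stored steps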
diff --git a/models/attention/structured.py b/models/attention/structured.py
index 24d889f..5bbbc06 100644
--- a/models/attention/structured.py
+++ b/models/attention/structured.py
@@ -16,7 +16,9 @@ class StructuredAttentionControl(AttentionControl):
             if self.struct_attn:
                 out = self.struct_qkv(q, context, mask)
             else:
-                context = torch.cat([context[0], context[1]['k'][0]], dim=0)  # use key tensor for context
+                context = torch.cat(
+                    [context[0], context[1]["k"][0]], dim=0
+                )  # use key tensor for context
                 out = self.normal_qkv(q, context, mask)
         else:
             context = default(context, x)
@@ -29,11 +31,13 @@ class StructuredAttentionControl(AttentionControl):
         context: list of [uc, list of conditional context]
         """
         uc_context = context[0]
-        context_k, context_v = context[1]['k'], context[1]['v']
+        context_k, context_v = context[1]["k"], context[1]["v"]

         if isinstance(context_k, list) and isinstance(context_v, list):
             out = self.multi_qkv(q, uc_context, context_k, context_v, mask)
-        elif isinstance(context_k, torch.Tensor) and isinstance(context_v, torch.Tensor):
+        elif isinstance(context_k, torch.Tensor) and isinstance(
+            context_v, torch.Tensor
+        ):
             out = self.heterogeous_qkv(q, uc_context, context_k, context_v, mask)
         else:
             raise NotImplementedError
@@ -50,36 +54,45 @@ class StructuredAttentionControl(AttentionControl):
         k_c = [self.to_k(c_k) for c_k in context_k]
         v_c = [self.to_v(c_v) for c_v in context_v]

-        q = rearrange(q, 'b n (h d) -> (b h) n d', h=h)
+        q = rearrange(q, "b n (h d) -> (b h) n d", h=h)

-        k_uc = rearrange(k_uc, 'b n (h d) -> (b h) n d', h=h)
-        v_uc = rearrange(v_uc, 'b n (h d) -> (b h) n d', h=h)
+        k_uc = rearrange(k_uc, "b n (h d) -> (b h) n d", h=h)
+        v_uc = rearrange(v_uc, "b n (h d) -> (b h) n d", h=h)

-        k_c = [rearrange(k, 'b n (h d) -> (b h) n d', h=h) for k in k_c]  # NOTE: modification point
-        v_c = [rearrange(v, 'b n (h d) -> (b h) n d', h=h) for v in v_c]
+        k_c = [
+            rearrange(k, "b n (h d) -> (b h) n d", h=h) for k in k_c
+        ]  # NOTE: modification point
+        v_c = [rearrange(v, "b n (h d) -> (b h) n d", h=h) for v in v_c]

         # get composition
-        sim_uc = einsum('b i d, b j d -> b i j', q[:true_bs], k_uc) * self.scale
-        sim_c = [einsum('b i d, b j d -> b i j', q[true_bs:], k) * self.scale for k in k_c]
+        sim_uc = einsum("b i d, b j d -> b i j", q[:true_bs], k_uc) * self.scale
+        sim_c = [
+            einsum("b i d, b j d -> b i j", q[true_bs:], k) * self.scale for k in k_c
+        ]

         attn_uc = sim_uc.softmax(dim=-1)
         attn_c = [sim.softmax(dim=-1) for sim in sim_c]

         # get uc output
-        out_uc = einsum('b i j, b j d -> b i d', attn_uc, v_uc)
+        out_uc = einsum("b i j, b j d -> b i d", attn_uc, v_uc)

         # get c output
         if len(v_c) == 1:
             out_c_collect = []
             for attn in attn_c:
                 for v in v_c:
-                    out_c_collect.append(einsum('b i j, b j d -> b i d', attn, v))
+                    out_c_collect.append(einsum("b i j, b j d -> b i d", attn, v))
             out_c = sum(out_c_collect) / len(out_c_collect)
         else:
-            out_c = sum([einsum('b i j, b j d -> b i d', attn, v) for attn, v in zip(attn_c, v_c)]) / len(v_c)
+            out_c = sum(
+                [
+                    einsum("b i j, b j d -> b i d", attn, v)
+                    for attn, v in zip(attn_c, v_c)
+                ]
+            ) / len(v_c)

         out = torch.cat([out_uc, out_c], dim=0)
-        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+        out = rearrange(out, "(b h) n d -> b n (h d)", h=h)

         return out

@@ -88,21 +101,21 @@ class StructuredAttentionControl(AttentionControl):
         k = self.to_k(context)
         v = self.to_v(context)

-        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v))

-        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+        sim = einsum("b i d, b j d -> b i j", q, k) * self.scale

         if exists(mask):
-            mask = rearrange(mask, 'b ... -> b (...)')
+            mask = rearrange(mask, "b ... -> b (...)")
             max_neg_value = -torch.finfo(sim.dtype).max
-            mask = repeat(mask, 'b j -> (b h) () j', h=h)
+            mask = repeat(mask, "b j -> (b h) () j", h=h)
             sim.masked_fill_(~mask, max_neg_value)

         # attention, what we cannot get enough of
         attn = sim.softmax(dim=-1)

-        out = einsum('b i j, b j d -> b i d', attn, v)
-        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+        out = einsum("b i j, b j d -> b i d", attn, v)
+        out = rearrange(out, "(b h) n d -> b n (h d)", h=h)

         return out

@@ -111,21 +124,21 @@ class StructuredAttentionControl(AttentionControl):
         k = self.to_k(torch.cat([uc_context, context_k], dim=0))
         v = self.to_v(torch.cat([uc_context, context_v], dim=0))

-        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v))

-        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+        sim = einsum("b i d, b j d -> b i j", q, k) * self.scale

         if exists(mask):
-            mask = rearrange(mask, 'b ... -> b (...)')
+            mask = rearrange(mask, "b ... -> b (...)")
             max_neg_value = -torch.finfo(sim.dtype).max
-            mask = repeat(mask, 'b j -> (b h) () j', h=h)
+            mask = repeat(mask, "b j -> (b h) () j", h=h)
             sim.masked_fill_(~mask, max_neg_value)

         # attention, what we cannot get enough of
         attn = sim.softmax(dim=-1)

-        out = einsum('b i j, b j d -> b i d', attn, v)
-        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+        out = einsum("b i j, b j d -> b i d", attn, v)
+        out = rearrange(out, "(b h) n d -> b n (h d)", h=h)
         return out

     def get_kv(self, context):
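The structured path above expects `context` in a particular layout; the sketch below is inferred from `struct_qkv` and `multi_qkv` (all tensor names are placeholders):

context = [
    uncond_embeddings,                        # index 0: unconditional prompt embedding
    {
        "k": [concept_a_emb, concept_b_emb],  # lists take the multi_qkv path:
        "v": [concept_a_emb, concept_b_emb],  # one attention per concept, then averaged
    },
]
# Plain tensors for "k"/"v" would instead route through heterogeous_qkv.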
diff --git a/models/clip/embeddings.py b/models/clip/embeddings.py
index 7c7f2ac..8c3c6d4 100644
--- a/models/clip/embeddings.py
+++ b/models/clip/embeddings.py
@@ -14,7 +14,13 @@ from models.sparse import SparseEmbedding


 class ManagedCLIPTextEmbeddings(CLIPTextEmbeddings):
-    def __init__(self, config: CLIPTextConfig, embeddings: CLIPTextEmbeddings, alpha: int = 8, dropout: float = 0.0):
+    def __init__(
+        self,
+        config: CLIPTextConfig,
+        embeddings: CLIPTextEmbeddings,
+        alpha: int = 8,
+        dropout: float = 0.0,
+    ):
         super().__init__(config)

         self.position_embedding = embeddings.position_embedding
@@ -28,7 +34,9 @@ class ManagedCLIPTextEmbeddings(CLIPTextEmbeddings):
         self.token_embedding.weight = embeddings.token_embedding.weight

     def resize(self, size: int):
-        self.token_embedding = self.token_embedding.new_resized(size, self.initializer_factor)
+        self.token_embedding = self.token_embedding.new_resized(
+            size, self.initializer_factor
+        )

     def add_embed(
         self,
@@ -46,7 +54,7 @@ class ManagedCLIPTextEmbeddings(CLIPTextEmbeddings):
             initializer = [initializer]

         if isinstance(initializer, list):
-            initializer = (initializer * len(token_ids))[:len(token_ids)]
+            initializer = (initializer * len(token_ids))[: len(token_ids)]

         with torch.no_grad():
             initializer = self.get_embed(initializer)
@@ -76,24 +84,21 @@ class ManagedCLIPTextEmbeddings(CLIPTextEmbeddings):

     def get_embed(self, input_ids: Union[list[int], torch.LongTensor]):
         if isinstance(input_ids, list):
-            input_ids = torch.tensor(input_ids, device=self.token_embedding.weight.device, dtype=torch.long)
+            input_ids = torch.tensor(
+                input_ids, device=self.token_embedding.weight.device, dtype=torch.long
+            )

         return self.token_embedding(input_ids)


 def patch_managed_embeddings(
-    text_encoder: CLIPTextModel,
-    alpha: int = 8,
-    dropout: float = 0.0
+    text_encoder: CLIPTextModel, alpha: int = 8, dropout: float = 0.0
 ) -> ManagedCLIPTextEmbeddings:
     if isinstance(text_encoder.text_model.embeddings, ManagedCLIPTextEmbeddings):
         return text_encoder.text_model.embeddings

     text_embeddings = ManagedCLIPTextEmbeddings(
-        text_encoder.config,
-        text_encoder.text_model.embeddings,
-        alpha,
-        dropout
+        text_encoder.config, text_encoder.text_model.embeddings, alpha, dropout
     )
     text_encoder.text_model.embeddings = text_embeddings
     return text_embeddings
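A minimal sketch of patching a text encoder with the managed embeddings above (the checkpoint name is an assumption; `add_embed`'s full signature is not visible in these hunks):

from transformers import CLIPTextModel
from models.clip.embeddings import patch_managed_embeddings

text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
embeddings = patch_managed_embeddings(text_encoder, alpha=8, dropout=0.0)
# embeddings.resize(new_vocab_size) grows the sparse token table;
# embeddings.get_embed(token_ids) returns the embedding vectors for those ids.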
diff --git a/models/clip/tokenizer.py b/models/clip/tokenizer.py
index 789b525..a866641 100644
--- a/models/clip/tokenizer.py
+++ b/models/clip/tokenizer.py
@@ -91,18 +91,21 @@ class MultiCLIPTokenizer(CLIPTokenizer):
             self.vector_shuffle = shuffle_none

     def add_multi_tokens(
-        self,
-        new_tokens: Union[str, list[str]],
-        num_vectors: Union[int, list[int]] = 1
+        self, new_tokens: Union[str, list[str]], num_vectors: Union[int, list[int]] = 1
     ) -> Union[list[int], list[list[int]]]:
         if isinstance(new_tokens, list):
             if isinstance(num_vectors, int):
                 num_vectors = [num_vectors] * len(new_tokens)

             if len(num_vectors) != len(new_tokens):
-                raise ValueError("Expected new_tokens and num_vectors to have the same len")
+                raise ValueError(
+                    "Expected new_tokens and num_vectors to have the same len"
+                )

-            return [self.add_multi_tokens(new_token, vecs) for new_token, vecs in zip(new_tokens, num_vectors)]
+            return [
+                self.add_multi_tokens(new_token, vecs)
+                for new_token, vecs in zip(new_tokens, num_vectors)
+            ]

         if isinstance(num_vectors, list):
             raise ValueError("Expected num_vectors to be int for single token")
@@ -129,13 +132,11 @@ class MultiCLIPTokenizer(CLIPTokenizer):
             return [id]

     def expand_ids(self, ids: list[int]):
-        return [
-            new_id
-            for id in ids
-            for new_id in self.expand_id(id)
-        ]
+        return [new_id for id in ids for new_id in self.expand_id(id)]

-    def expand_batched_ids(self, input_ids: Union[list[int], list[list[int]], tuple[list[int]]]):
+    def expand_batched_ids(
+        self, input_ids: Union[list[int], list[list[int]], tuple[list[int]]]
+    ):
         if isinstance(input_ids, (list, tuple)) and isinstance(input_ids[0], list):
             return [self.expand_ids(batch) for batch in input_ids]
         else:
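Usage sketch for the multi-vector placeholder tokens handled above (token strings, vector counts, and the base checkpoint are assumptions; construction via `from_pretrained` is assumed to behave as it does for the parent `CLIPTokenizer`):

from models.clip.tokenizer import MultiCLIPTokenizer

tokenizer = MultiCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
ids = tokenizer.add_multi_tokens(["<concept>", "<style>"], num_vectors=[4, 2])
# ids -> one list of token ids per placeholder, e.g. [[a0, a1, a2, a3], [b0, b1]];
# expand_ids() / expand_batched_ids() later substitute these ids into tokenized prompts.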
diff --git a/models/clip/util.py b/models/clip/util.py
index f94fbc7..7196bb6 100644
--- a/models/clip/util.py
+++ b/models/clip/util.py
@@ -5,27 +5,32 @@ import torch
 from transformers import CLIPTokenizer, CLIPTextModel


-def unify_input_ids(tokenizer: CLIPTokenizer, input_ids: list[list[int]], max_length: Optional[int] = None):
+def unify_input_ids(
+    tokenizer: CLIPTokenizer,
+    input_ids: list[list[int]],
+    max_length: Optional[int] = None,
+):
     if max_length is None:
         return tokenizer.pad(
             {"input_ids": input_ids},
             padding=True,
             pad_to_multiple_of=tokenizer.model_max_length,
-            return_tensors="pt"
+            return_tensors="pt",
         )
     else:
         return tokenizer.pad(
             {"input_ids": input_ids},
             padding="max_length",
             max_length=max_length,
-            return_tensors="pt"
+            return_tensors="pt",
         )

+
 def get_extended_embeddings(
     text_encoder: CLIPTextModel,
     input_ids: torch.LongTensor,
     position_ids: Optional[torch.LongTensor] = None,
-    attention_mask=None
+    attention_mask=None,
 ):
     model_max_length = text_encoder.config.max_position_embeddings
     prompts = input_ids.shape[0]
@@ -36,6 +41,8 @@ def get_extended_embeddings(
     if attention_mask is not None:
         attention_mask = attention_mask.view((-1, model_max_length))

-    text_embeddings = text_encoder(input_ids, position_ids=position_ids, attention_mask=attention_mask)[0]
+    text_embeddings = text_encoder(
+        input_ids, position_ids=position_ids, attention_mask=attention_mask
+    )[0]
     text_embeddings = text_embeddings.view((prompts, -1, text_embeddings.shape[2]))
     return text_embeddings
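A sketch of the long-prompt flow these helpers support (assumes `tokenizer`, `text_encoder`, and `prompts` already exist): token ids are padded to a multiple of the 77-token CLIP window, encoded window by window, then stitched back together per prompt, which is what the two `view` calls in `get_extended_embeddings` do.

from models.clip.util import unify_input_ids, get_extended_embeddings

ids = [tokenizer(p).input_ids for p in prompts]   # ragged per-prompt id lists
batch = unify_input_ids(tokenizer, ids)           # padded to n * tokenizer.model_max_length
embeddings = get_extended_embeddings(
    text_encoder, batch.input_ids, attention_mask=batch.attention_mask
)                                                 # shape: [len(prompts), n * 77, hidden]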
diff --git a/models/convnext/discriminator.py b/models/convnext/discriminator.py
index 571b915..5798bcf 100644
--- a/models/convnext/discriminator.py
+++ b/models/convnext/discriminator.py
@@ -5,7 +5,7 @@ from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
 from torch.nn import functional as F


-class ConvNeXtDiscriminator():
+class ConvNeXtDiscriminator:
     def __init__(self, model: ConvNeXt, input_size: int) -> None:
         self.net = model

@@ -22,8 +22,13 @@ class ConvNeXtDiscriminator():
         img_mean = self.img_mean.to(device=img.device, dtype=img.dtype)
         img_std = self.img_std.to(device=img.device, dtype=img.dtype)

-        img = ((img + 1.) / 2.).sub(img_mean).div(img_std)
+        img = ((img + 1.0) / 2.0).sub(img_mean).div(img_std)

-        img = F.interpolate(img, size=(self.input_size, self.input_size), mode='bicubic', align_corners=True)
+        img = F.interpolate(
+            img,
+            size=(self.input_size, self.input_size),
+            mode="bicubic",
+            align_corners=True,
+        )
         pred = self.net(img)
         return pred
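Construction sketch for the discriminator above (the backbone choice and input size are assumptions; the scoring method is only partially visible in this hunk, so its name below is hypothetical):

from timm import create_model
from models.convnext.discriminator import ConvNeXtDiscriminator

backbone = create_model("convnext_tiny", pretrained=True)  # assumed ConvNeXt variant
disc = ConvNeXtDiscriminator(backbone, input_size=224)
# scores = disc.get_score(images)  # hypothetical method name; the body shown above
#                                  # expects images in [-1, 1], rescales to [0, 1],
#                                  # applies ImageNet normalization, and resizes.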
diff --git a/models/sparse.py b/models/sparse.py
index bd45696..e5897c9 100644
--- a/models/sparse.py
+++ b/models/sparse.py
@@ -15,21 +15,25 @@ class SparseEmbedding(nn.Embedding):
     ):
         nn.Embedding.__init__(self, num_embeddings, embedding_dim, **kwargs)

-        self.register_buffer('trainable_ids', self.weight.new_zeros(num_embeddings, dtype=torch.long) - 1)
+        self.register_buffer(
+            "trainable_ids", self.weight.new_zeros(num_embeddings, dtype=torch.long) - 1
+        )

         self.trainable = nn.ParameterList()
         self.scaling = alpha
         self.dropout_p = dropout
         self.weight.requires_grad = False

-        if dropout > 0.:
+        if dropout > 0.0:
             self.dropout = nn.Dropout(p=dropout)
         else:
             self.dropout = nn.Identity()

         self.reset_parameters()

-    def new_resized(self, new_num_embeddings: int, initializer_factor: Optional[float] = None):
+    def new_resized(
+        self, new_num_embeddings: int, initializer_factor: Optional[float] = None
+    ):
         n = min(self.num_embeddings, new_num_embeddings)

         new_emb = SparseEmbedding(
@@ -38,7 +42,7 @@ class SparseEmbedding(nn.Embedding):
             self.scaling,
             self.dropout_p,
             device=self.weight.device,
-            dtype=self.weight.dtype
+            dtype=self.weight.dtype,
         )
         if initializer_factor is not None:
             new_emb.weight.data.normal_(mean=0.0, std=initializer_factor * 0.02)
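A sketch of what `SparseEmbedding` provides (sizes are illustrative): a frozen `nn.Embedding` whose selected rows get trainable deltas scaled by `alpha`, with optional dropout, plus `new_resized` to grow the table when tokens are added.

from models.sparse import SparseEmbedding

# Argument order (num_embeddings, embedding_dim, alpha, dropout) is taken from the
# positional call inside new_resized above.
emb = SparseEmbedding(49408, 768, 8, 0.1)                # CLIP-sized table (assumed)
bigger = emb.new_resized(49412, initializer_factor=1.0)  # room for four new token ids
# When initializer_factor is given, the new table is re-initialized with
# std = initializer_factor * 0.02; carrying over the old rows happens past the hunk shown.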
