Diffstat (limited to 'models/attention/control.py')
-rw-r--r--  models/attention/control.py | 216
1 file changed, 0 insertions(+), 216 deletions(-)
diff --git a/models/attention/control.py b/models/attention/control.py
deleted file mode 100644
index ec378c4..0000000
--- a/models/attention/control.py
+++ /dev/null
@@ -1,216 +0,0 @@
import abc
from typing import Dict, Optional, Tuple, Union

import torch
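# NOTE: as in the original prompt-to-prompt code, this module relies on names
# defined by the surrounding script/notebook rather than imported here:
# LOW_RESOURCE (bool), tokenizer, device, the ptp_utils and seq_aligner helper
# modules, and the LocalBlend class.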
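# Base protocol: a controller is invoked once per attention layer with that
# layer's attention probabilities and may rewrite them; counters track the
# current layer and diffusion step.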
class AttentionControl(abc.ABC):
    def step_callback(self, x_t):
        return x_t

    def between_steps(self):
        return

    @property
    def num_uncond_att_layers(self):
        # In low-resource mode the unconditional branch runs as a separate
        # forward pass, so its attention layers are counted and skipped.
        return self.num_att_layers if LOW_RESOURCE else 0

    @abc.abstractmethod
    def forward(self, attn, is_cross: bool, place_in_unet: str):
        raise NotImplementedError

    def __call__(self, attn, is_cross: bool, place_in_unet: str):
        if self.cur_att_layer >= self.num_uncond_att_layers:
            if LOW_RESOURCE:
                attn = self.forward(attn, is_cross, place_in_unet)
            else:
                # With classifier-free guidance batched together, the first
                # half is the unconditional pass; only edit the second half.
                h = attn.shape[0]
                attn[h // 2 :] = self.forward(attn[h // 2 :], is_cross, place_in_unet)
        self.cur_att_layer += 1
        if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
            # All attention layers of this diffusion step have been seen.
            self.cur_att_layer = 0
            self.cur_step += 1
            self.between_steps()
        return attn

    def reset(self):
        self.cur_step = 0
        self.cur_att_layer = 0

    def __init__(self):
        self.cur_step = 0
        self.num_att_layers = -1  # set externally when the hooks are registered
        self.cur_att_layer = 0


class EmptyControl(AttentionControl):
    # No-op controller: leaves every attention map untouched.
    def forward(self, attn, is_cross: bool, place_in_unet: str):
        return attn


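# Records attention maps per UNet location ("down"/"mid"/"up", cross/self) at
# every step and accumulates them so run-averaged maps can be queried.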
class AttentionStore(AttentionControl):
    @staticmethod
    def get_empty_store():
        return {
            "down_cross": [],
            "mid_cross": [],
            "up_cross": [],
            "down_self": [],
            "mid_self": [],
            "up_self": [],
        }

    def forward(self, attn, is_cross: bool, place_in_unet: str):
        key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
        if attn.shape[1] <= 32**2:  # skip the largest maps to limit memory use
            self.step_store[key].append(attn)
        return attn

    def between_steps(self):
        if len(self.attention_store) == 0:
            self.attention_store = self.step_store
        else:
            # Accumulate this step's maps into the running totals.
            for key in self.attention_store:
                for i in range(len(self.attention_store[key])):
                    self.attention_store[key][i] += self.step_store[key][i]
        self.step_store = self.get_empty_store()

    def get_average_attention(self):
        average_attention = {
            key: [item / self.cur_step for item in self.attention_store[key]]
            for key in self.attention_store
        }
        return average_attention

    def reset(self):
        super().reset()
        self.step_store = self.get_empty_store()
        self.attention_store = {}

    def __init__(self):
        super().__init__()
        self.step_store = self.get_empty_store()
        self.attention_store = {}


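# Base class for prompt-to-prompt edits. The batch stacks the source prompt at
# index 0 and the edited prompts after it; forward() rewrites the edited rows'
# attention from the source row, per the replace/refine/reweight strategy.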
class AttentionControlEdit(AttentionStore, abc.ABC):
    def step_callback(self, x_t):
        if self.local_blend is not None:
            x_t = self.local_blend(x_t, self.attention_store)
        return x_t

    def replace_self_attention(self, attn_base, att_replace):
        # Only substitute self-attention for coarse maps (up to 16x16);
        # finer maps keep their own self-attention.
        if att_replace.shape[2] <= 16**2:
            return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape)
        else:
            return att_replace

    @abc.abstractmethod
    def replace_cross_attention(self, attn_base, att_replace):
        raise NotImplementedError

    def forward(self, attn, is_cross: bool, place_in_unet: str):
        super().forward(attn, is_cross, place_in_unet)
        if is_cross or (
            self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]
        ):
            h = attn.shape[0] // self.batch_size
            attn = attn.reshape(self.batch_size, h, *attn.shape[1:])
            attn_base, attn_replace = attn[0], attn[1:]
            if is_cross:
                # Blend edited and original cross-attention per token, using
                # the time-dependent word weights.
                alpha_words = self.cross_replace_alpha[self.cur_step]
                attn_replace_new = (
                    self.replace_cross_attention(attn_base, attn_replace) * alpha_words
                    + (1 - alpha_words) * attn_replace
                )
                attn[1:] = attn_replace_new
            else:
                attn[1:] = self.replace_self_attention(attn_base, attn_replace)
            attn = attn.reshape(self.batch_size * h, *attn.shape[2:])
        return attn

    def __init__(
        self,
        prompts,
        num_steps: int,
        cross_replace_steps: Union[
            float, Tuple[float, float], Dict[str, Tuple[float, float]]
        ],
        self_replace_steps: Union[float, Tuple[float, float]],
        local_blend: Optional[LocalBlend],
    ):
        super().__init__()
        self.batch_size = len(prompts)
        self.cross_replace_alpha = ptp_utils.get_time_words_attention_alpha(
            prompts, num_steps, cross_replace_steps, tokenizer
        ).to(device)
        if type(self_replace_steps) is float:
            self_replace_steps = 0, self_replace_steps
        self.num_self_replace = int(num_steps * self_replace_steps[0]), int(
            num_steps * self_replace_steps[1]
        )
        self.local_blend = local_blend


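# Word-swap edit: projects the source prompt's cross-attention onto the edited
# prompt's tokens through a precomputed token-alignment matrix.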
class AttentionReplace(AttentionControlEdit):
    def replace_cross_attention(self, attn_base, att_replace):
        # (heads, pixels, src_words) x (batch, src_words, dst_words)
        return torch.einsum("hpw,bwn->bhpn", attn_base, self.mapper)

    def __init__(
        self,
        prompts,
        num_steps: int,
        cross_replace_steps: float,
        self_replace_steps: float,
        local_blend: Optional[LocalBlend] = None,
    ):
        super().__init__(
            prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend
        )
        self.mapper = seq_aligner.get_replacement_mapper(prompts, tokenizer).to(device)


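# Refinement edit (adding detail to a prompt): re-indexes the source prompt's
# cross-attention to the refined token positions and blends by per-token alphas.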
class AttentionRefine(AttentionControlEdit):
    def replace_cross_attention(self, attn_base, att_replace):
        attn_base_replace = attn_base[:, :, self.mapper].permute(2, 0, 1, 3)
        attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)
        return attn_replace

    def __init__(
        self,
        prompts,
        num_steps: int,
        cross_replace_steps: float,
        self_replace_steps: float,
        local_blend: Optional[LocalBlend] = None,
    ):
        super().__init__(
            prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend
        )
        self.mapper, alphas = seq_aligner.get_refinement_mapper(prompts, tokenizer)
        self.mapper, alphas = self.mapper.to(device), alphas.to(device)
        self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])


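# Reweighting edit: scales the cross-attention of chosen tokens by a per-token
# equalizer, optionally on top of a preceding replace/refine controller.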
class AttentionReweight(AttentionControlEdit):
    def replace_cross_attention(self, attn_base, att_replace):
        if self.prev_controller is not None:
            attn_base = self.prev_controller.replace_cross_attention(
                attn_base, att_replace
            )
        attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :]
        return attn_replace

    def __init__(
        self,
        prompts,
        num_steps: int,
        cross_replace_steps: float,
        self_replace_steps: float,
        equalizer,
        local_blend: Optional[LocalBlend] = None,
        controller: Optional[AttentionControlEdit] = None,
    ):
        super().__init__(
            prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend
        )
        self.equalizer = equalizer.to(device)
        self.prev_controller = controller
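
A minimal usage sketch (not part of the deleted file), assuming the companion
ptp_utils.register_attention_control(model, controller) helper from the
prompt-to-prompt codebase and a sampling loop that applies
controller.step_callback(latents) after each denoising step; the prompts, step
counts, token index, and equalizer weights below are illustrative only:

prompts = ["a cake with decorations", "a cake with decorations"]

# One weight per text-encoder token (77 for CLIP); the index to boost is
# hypothetical and would come from tokenizing the prompt.
equalizer = torch.ones(1, 77)
equalizer[:, 5] = 2.0  # amplify the word at token position 5 (illustrative)

controller = AttentionReweight(
    prompts,
    num_steps=50,
    cross_replace_steps=0.8,
    self_replace_steps=0.4,
    equalizer=equalizer,
)

# Hook the controller into every attention layer of the UNet, then sample:
# each attention call routes through controller.__call__, which edits the
# maps in place before they weight the values.
ptp_utils.register_attention_control(model, controller)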
