From 8364ce697ddf6117fdd4f7222832d546d63880de Mon Sep 17 00:00:00 2001
From: Volpeon
Date: Wed, 21 Jun 2023 13:28:49 +0200
Subject: Update

---
 models/attention/control.py | 106 ++++++++++++++++++++++++++++++++------------
 1 file changed, 77 insertions(+), 29 deletions(-)

(limited to 'models/attention/control.py')

diff --git a/models/attention/control.py b/models/attention/control.py
index 248bd9f..ec378c4 100644
--- a/models/attention/control.py
+++ b/models/attention/control.py
@@ -23,7 +23,7 @@ class AttentionControl(abc.ABC):
                 attn = self.forward(attn, is_cross, place_in_unet)
             else:
                 h = attn.shape[0]
-                attn[h // 2:] = self.forward(attn[h // 2:], is_cross, place_in_unet)
+                attn[h // 2 :] = self.forward(attn[h // 2 :], is_cross, place_in_unet)
         self.cur_att_layer += 1
         if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
             self.cur_att_layer = 0
@@ -49,12 +49,18 @@ class EmptyControl(AttentionControl):
 class AttentionStore(AttentionControl):
     @staticmethod
     def get_empty_store():
-        return {"down_cross": [], "mid_cross": [], "up_cross": [],
-                "down_self": [], "mid_self": [], "up_self": []}
+        return {
+            "down_cross": [],
+            "mid_cross": [],
+            "up_cross": [],
+            "down_self": [],
+            "mid_self": [],
+            "up_self": [],
+        }
 
     def forward(self, attn, is_cross: bool, place_in_unet: str):
         key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
-        if attn.shape[1] <= 32 ** 2:  # avoid memory overhead
+        if attn.shape[1] <= 32**2:  # avoid memory overhead
             self.step_store[key].append(attn)
         return attn
 
@@ -68,8 +74,10 @@ class AttentionStore(AttentionControl):
         self.step_store = self.get_empty_store()
 
     def get_average_attention(self):
-        average_attention = {key: [item / self.cur_step for item in self.attention_store[key]]
-                             for key in self.attention_store}
+        average_attention = {
+            key: [item / self.cur_step for item in self.attention_store[key]]
+            for key in self.attention_store
+        }
         return average_attention
 
     def reset(self):
@@ -90,7 +98,7 @@ class AttentionControlEdit(AttentionStore, abc.ABC):
         return x_t
 
     def replace_self_attention(self, attn_base, att_replace):
-        if att_replace.shape[2] <= 16 ** 2:
+        if att_replace.shape[2] <= 16**2:
             return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape)
         else:
             return att_replace
@@ -101,41 +109,62 @@ class AttentionControlEdit(AttentionStore, abc.ABC):
 
     def forward(self, attn, is_cross: bool, place_in_unet: str):
         super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet)
-        if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]):
+        if is_cross or (
+            self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]
+        ):
             h = attn.shape[0] // (self.batch_size)
             attn = attn.reshape(self.batch_size, h, *attn.shape[1:])
             attn_base, attn_repalce = attn[0], attn[1:]
             if is_cross:
                 alpha_words = self.cross_replace_alpha[self.cur_step]
-                attn_repalce_new = self.replace_cross_attention(
-                    attn_base, attn_repalce) * alpha_words + (1 - alpha_words) * attn_repalce
+                attn_repalce_new = (
+                    self.replace_cross_attention(attn_base, attn_repalce) * alpha_words
+                    + (1 - alpha_words) * attn_repalce
+                )
                 attn[1:] = attn_repalce_new
             else:
                 attn[1:] = self.replace_self_attention(attn_base, attn_repalce)
             attn = attn.reshape(self.batch_size * h, *attn.shape[2:])
         return attn
 
-    def __init__(self, prompts, num_steps: int,
-                 cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
-                 self_replace_steps: Union[float, Tuple[float, float]],
-                 local_blend: Optional[LocalBlend]):
+    def __init__(
+        self,
+        prompts,
+        num_steps: int,
+        cross_replace_steps: Union[
+            float, Tuple[float, float], Dict[str, Tuple[float, float]]
+        ],
+        self_replace_steps: Union[float, Tuple[float, float]],
+        local_blend: Optional[LocalBlend],
+    ):
         super(AttentionControlEdit, self).__init__()
         self.batch_size = len(prompts)
         self.cross_replace_alpha = ptp_utils.get_time_words_attention_alpha(
-            prompts, num_steps, cross_replace_steps, tokenizer).to(device)
+            prompts, num_steps, cross_replace_steps, tokenizer
+        ).to(device)
         if type(self_replace_steps) is float:
             self_replace_steps = 0, self_replace_steps
-        self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1])
+        self.num_self_replace = int(num_steps * self_replace_steps[0]), int(
+            num_steps * self_replace_steps[1]
+        )
         self.local_blend = local_blend
 
 
 class AttentionReplace(AttentionControlEdit):
     def replace_cross_attention(self, attn_base, att_replace):
-        return torch.einsum('hpw,bwn->bhpn', attn_base, self.mapper)
-
-    def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,
-                 local_blend: Optional[LocalBlend] = None):
-        super(AttentionReplace, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend)
+        return torch.einsum("hpw,bwn->bhpn", attn_base, self.mapper)
+
+    def __init__(
+        self,
+        prompts,
+        num_steps: int,
+        cross_replace_steps: float,
+        self_replace_steps: float,
+        local_blend: Optional[LocalBlend] = None,
+    ):
+        super(AttentionReplace, self).__init__(
+            prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend
+        )
         self.mapper = seq_aligner.get_replacement_mapper(prompts, tokenizer).to(device)
 
 
@@ -145,9 +174,17 @@ class AttentionRefine(AttentionControlEdit):
         attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)
         return attn_replace
 
-    def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,
-                 local_blend: Optional[LocalBlend] = None):
-        super(AttentionRefine, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend)
+    def __init__(
+        self,
+        prompts,
+        num_steps: int,
+        cross_replace_steps: float,
+        self_replace_steps: float,
+        local_blend: Optional[LocalBlend] = None,
+    ):
+        super(AttentionRefine, self).__init__(
+            prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend
+        )
         self.mapper, alphas = seq_aligner.get_refinement_mapper(prompts, tokenizer)
         self.mapper, alphas = self.mapper.to(device), alphas.to(device)
         self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])
@@ -156,13 +193,24 @@ class AttentionRefine(AttentionControlEdit):
 class AttentionReweight(AttentionControlEdit):
     def replace_cross_attention(self, attn_base, att_replace):
         if self.prev_controller is not None:
-            attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace)
+            attn_base = self.prev_controller.replace_cross_attention(
+                attn_base, att_replace
+            )
         attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :]
         return attn_replace
 
-    def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float, equalizer,
-                 local_blend: Optional[LocalBlend] = None, controller: Optional[AttentionControlEdit] = None):
-        super(AttentionReweight, self).__init__(prompts, num_steps,
-                                                cross_replace_steps, self_replace_steps, local_blend)
+    def __init__(
+        self,
+        prompts,
+        num_steps: int,
+        cross_replace_steps: float,
+        self_replace_steps: float,
+        equalizer,
+        local_blend: Optional[LocalBlend] = None,
+        controller: Optional[AttentionControlEdit] = None,
+    ):
+        super(AttentionReweight, self).__init__(
+            prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend
+        )
         self.equalizer = equalizer.to(device)
         self.prev_controller = controller
-- 
cgit v1.2.3-54-g00ecf