Diffstat (limited to 'models/attention/control.py')
-rw-r--r--  models/attention/control.py  168
1 file changed, 168 insertions, 0 deletions
diff --git a/models/attention/control.py b/models/attention/control.py
new file mode 100644
index 0000000..248bd9f
--- /dev/null
+++ b/models/attention/control.py
@@ -0,0 +1,168 @@
import abc
from typing import Dict, Optional, Tuple, Union

import torch

# NOTE: several names used below are expected to be provided by the surrounding
# project and are not defined in this file: LOW_RESOURCE, device, tokenizer,
# ptp_utils, seq_aligner and LocalBlend.


class AttentionControl(abc.ABC):
    """Base hook that receives every attention map produced by the U-Net."""

    def step_callback(self, x_t):
        return x_t

    def between_steps(self):
        return

    @property
    def num_uncond_att_layers(self):
        # in low-resource mode the unconditional pass runs separately and is skipped here
        return self.num_att_layers if LOW_RESOURCE else 0

    @abc.abstractmethod
    def forward(self, attn, is_cross: bool, place_in_unet: str):
        raise NotImplementedError

    def __call__(self, attn, is_cross: bool, place_in_unet: str):
        if self.cur_att_layer >= self.num_uncond_att_layers:
            if LOW_RESOURCE:
                attn = self.forward(attn, is_cross, place_in_unet)
            else:
                # the batch is [unconditional | conditional]; only the conditional half is edited
                h = attn.shape[0]
                attn[h // 2:] = self.forward(attn[h // 2:], is_cross, place_in_unet)
        self.cur_att_layer += 1
        if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
            # every attention layer has been seen once: a denoising step is complete
            self.cur_att_layer = 0
            self.cur_step += 1
            self.between_steps()
        return attn

    def reset(self):
        self.cur_step = 0
        self.cur_att_layer = 0

    def __init__(self):
        self.cur_step = 0
        self.num_att_layers = -1
        self.cur_att_layer = 0


class EmptyControl(AttentionControl):
    def forward(self, attn, is_cross: bool, place_in_unet: str):
        return attn


class AttentionStore(AttentionControl):
    """Accumulates attention maps per U-Net location, summed across denoising steps."""

    @staticmethod
    def get_empty_store():
        return {"down_cross": [], "mid_cross": [], "up_cross": [],
                "down_self": [], "mid_self": [], "up_self": []}

    def forward(self, attn, is_cross: bool, place_in_unet: str):
        key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
        if attn.shape[1] <= 32 ** 2:  # avoid memory overhead
            self.step_store[key].append(attn)
        return attn

    def between_steps(self):
        if len(self.attention_store) == 0:
            self.attention_store = self.step_store
        else:
            # running sum over steps; get_average_attention divides by cur_step later
            for key in self.attention_store:
                for i in range(len(self.attention_store[key])):
                    self.attention_store[key][i] += self.step_store[key][i]
        self.step_store = self.get_empty_store()

    def get_average_attention(self):
        average_attention = {key: [item / self.cur_step for item in self.attention_store[key]]
                             for key in self.attention_store}
        return average_attention

    def reset(self):
        super(AttentionStore, self).reset()
        self.step_store = self.get_empty_store()
        self.attention_store = {}

    def __init__(self):
        super(AttentionStore, self).__init__()
        self.step_store = self.get_empty_store()
        self.attention_store = {}


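# Illustrative sketch (not part of this diff): one way the stored maps could be turned
# into per-token heatmaps. The helper name, resolution and choice of cross-attention keys
# are assumptions; each stored map is (heads, pixels, tokens) with pixels == res * res.
def _example_aggregate_cross_attention(store: AttentionStore, res: int = 16):
    maps = []
    for key in ("down_cross", "mid_cross", "up_cross"):
        for item in store.get_average_attention()[key]:
            if item.shape[1] == res ** 2:  # keep only maps at the requested resolution
                maps.append(item.reshape(-1, res, res, item.shape[-1]))
    # average over heads and layers -> (res, res, tokens)
    return torch.cat(maps, dim=0).mean(dim=0)

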
class AttentionControlEdit(AttentionStore, abc.ABC):
    def step_callback(self, x_t):
        if self.local_blend is not None:
            x_t = self.local_blend(x_t, self.attention_store)
        return x_t

    def replace_self_attention(self, attn_base, att_replace):
        # inject the base prompt's self-attention only at coarse resolutions
        if att_replace.shape[2] <= 16 ** 2:
            return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape)
        else:
            return att_replace

    @abc.abstractmethod
    def replace_cross_attention(self, attn_base, att_replace):
        raise NotImplementedError

    def forward(self, attn, is_cross: bool, place_in_unet: str):
        super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet)
        if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]):
            h = attn.shape[0] // self.batch_size
            attn = attn.reshape(self.batch_size, h, *attn.shape[1:])
            attn_base, attn_replace = attn[0], attn[1:]
            if is_cross:
                # blend edited and original cross-attention per token and per step
                alpha_words = self.cross_replace_alpha[self.cur_step]
                attn_replace_new = self.replace_cross_attention(
                    attn_base, attn_replace) * alpha_words + (1 - alpha_words) * attn_replace
                attn[1:] = attn_replace_new
            else:
                attn[1:] = self.replace_self_attention(attn_base, attn_replace)
            attn = attn.reshape(self.batch_size * h, *attn.shape[2:])
        return attn

    def __init__(self, prompts, num_steps: int,
                 cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
                 self_replace_steps: Union[float, Tuple[float, float]],
                 local_blend: Optional[LocalBlend]):
        super(AttentionControlEdit, self).__init__()
        self.batch_size = len(prompts)
        self.cross_replace_alpha = ptp_utils.get_time_words_attention_alpha(
            prompts, num_steps, cross_replace_steps, tokenizer).to(device)
        if type(self_replace_steps) is float:
            self_replace_steps = 0, self_replace_steps
        # window of denoising steps (start, end) during which self-attention is replaced
        self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1])
        self.local_blend = local_blend


class AttentionReplace(AttentionControlEdit):
    def replace_cross_attention(self, attn_base, att_replace):
        # re-route the base prompt's attention columns onto the edited prompt's token positions
        return torch.einsum('hpw,bwn->bhpn', attn_base, self.mapper)

    def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,
                 local_blend: Optional[LocalBlend] = None):
        super(AttentionReplace, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend)
        self.mapper = seq_aligner.get_replacement_mapper(prompts, tokenizer).to(device)


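# Illustrative sketch (not part of this diff): the einsum above is a batched matrix
# product over the token axis, with the mapper expected to be an (edited prompts,
# tokens, tokens) tensor. The function name and shapes below (8 heads, 256 pixels,
# 77 tokens, identity mapper) are arbitrary assumptions used to show the bookkeeping.
def _example_replacement_einsum():
    attn_base = torch.rand(8, 256, 77)       # (heads, pixels, tokens_base)
    mapper = torch.eye(77).unsqueeze(0)       # identity mapping for a single edited prompt
    out = torch.einsum('hpw,bwn->bhpn', attn_base, mapper)
    assert out.shape == (1, 8, 256, 77)
    assert torch.allclose(out[0], attn_base)  # an identity mapper leaves attention unchanged

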
class AttentionRefine(AttentionControlEdit):
    def replace_cross_attention(self, attn_base, att_replace):
        attn_base_replace = attn_base[:, :, self.mapper].permute(2, 0, 1, 3)
        # alphas gate, per token, between the re-used base attention and the new attention
        attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)
        return attn_replace

    def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float,
                 local_blend: Optional[LocalBlend] = None):
        super(AttentionRefine, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend)
        self.mapper, alphas = seq_aligner.get_refinement_mapper(prompts, tokenizer)
        self.mapper, alphas = self.mapper.to(device), alphas.to(device)
        self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])


class AttentionReweight(AttentionControlEdit):
    def replace_cross_attention(self, attn_base, att_replace):
        if self.prev_controller is not None:
            # allow chaining: first apply the previous edit, then rescale per token
            attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace)
        attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :]
        return attn_replace

    def __init__(self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float, equalizer,
                 local_blend: Optional[LocalBlend] = None, controller: Optional[AttentionControlEdit] = None):
        super(AttentionReweight, self).__init__(prompts, num_steps,
                                                cross_replace_steps, self_replace_steps, local_blend)
        self.equalizer = equalizer.to(device)
        self.prev_controller = controller
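

# Illustrative sketch (not part of this diff): how a controller is typically driven from a
# sampling loop in the prompt-to-prompt pattern. Registration (here assumed to be done by
# ptp_utils.register_attention_control) hooks every cross/self-attention layer so that it
# calls controller(attn, is_cross, place_in_unet) and sets controller.num_att_layers.
# The loop and the model.denoise_step method below are hypothetical placeholders, not this
# repo's API.
def _example_sampling_loop(model, controller: AttentionControl, latents, timesteps):
    controller.reset()
    for t in timesteps:
        latents = model.denoise_step(latents, t)      # hypothetical step; hooks feed the controller
        latents = controller.step_callback(latents)   # e.g. LocalBlend masking between steps
    return latents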