Diffstat (limited to 'models/attention/structured.py')
-rw-r--r--  models/attention/structured.py | 145
1 file changed, 0 insertions(+), 145 deletions(-)
diff --git a/models/attention/structured.py b/models/attention/structured.py
deleted file mode 100644
index 5bbbc06..0000000
--- a/models/attention/structured.py
+++ /dev/null
@@ -1,145 +0,0 @@
import torch
from torch import einsum

from einops import rearrange, repeat

from .control import AttentionControl


# `exists` and `default` are the usual lightweight helpers this einops-style
# attention code relies on; if the repository already provides them in a util
# module, import them from there instead of redefining them here.
def exists(val):
    return val is not None


def default(val, d):
    return val if exists(val) else d


class StructuredAttentionControl(AttentionControl):
    # NOTE: this AttentionControl-style hook is shadowed by the
    # CrossAttention-style forward defined directly below, so only the second
    # definition is actually bound to the class.
    def forward(self, attn, is_cross: bool, place_in_unet: str):
        return attn

    def forward(self, x, context=None, mask=None):
        h = self.heads

        q = self.to_q(x)

        if isinstance(context, list):
            if self.struct_attn:
                out = self.struct_qkv(q, context, mask)
            else:
                context = torch.cat(
                    [context[0], context[1]["k"][0]], dim=0
                )  # use key tensor for context
                out = self.normal_qkv(q, context, mask)
        else:
            context = default(context, x)
            out = self.normal_qkv(q, context, mask)

        return self.to_out(out)

    def struct_qkv(self, q, context, mask):
        """
        context: [uc_context, {"k": conditional keys, "v": conditional values}],
        where the keys/values are either lists of tensors or single tensors.
        """
        uc_context = context[0]
        context_k, context_v = context[1]["k"], context[1]["v"]

        if isinstance(context_k, list) and isinstance(context_v, list):
            out = self.multi_qkv(q, uc_context, context_k, context_v, mask)
        elif isinstance(context_k, torch.Tensor) and isinstance(
            context_v, torch.Tensor
        ):
            out = self.heterogeneous_qkv(q, uc_context, context_k, context_v, mask)
        else:
            raise NotImplementedError

        return out

    def multi_qkv(self, q, uc_context, context_k, context_v, mask):
        h = self.heads

        assert uc_context.size(0) == context_k[0].size(0) == context_v[0].size(0)
        # rows of the flattened (batch * heads) dimension that belong to the
        # unconditional half of the batch
        true_bs = uc_context.size(0) * h

        k_uc, v_uc = self.get_kv(uc_context)
        k_c = [self.to_k(c_k) for c_k in context_k]
        v_c = [self.to_v(c_v) for c_v in context_v]

        q = rearrange(q, "b n (h d) -> (b h) n d", h=h)

        k_uc = rearrange(k_uc, "b n (h d) -> (b h) n d", h=h)
        v_uc = rearrange(v_uc, "b n (h d) -> (b h) n d", h=h)

        k_c = [
            rearrange(k, "b n (h d) -> (b h) n d", h=h) for k in k_c
        ]  # NOTE: modification point
        v_c = [rearrange(v, "b n (h d) -> (b h) n d", h=h) for v in v_c]

        # attention logits: unconditional rows of q against the uc keys,
        # conditional rows against each conditional key set
        sim_uc = einsum("b i d, b j d -> b i j", q[:true_bs], k_uc) * self.scale
        sim_c = [
            einsum("b i d, b j d -> b i j", q[true_bs:], k) * self.scale for k in k_c
        ]

        attn_uc = sim_uc.softmax(dim=-1)
        attn_c = [sim.softmax(dim=-1) for sim in sim_c]

        # unconditional output
        out_uc = einsum("b i j, b j d -> b i d", attn_uc, v_uc)

        # conditional output: average the attention outputs over all
        # conditional contexts (both branches reduce to the same thing when
        # there is a single context)
        if len(v_c) == 1:
            out_c_collect = []
            for attn in attn_c:
                for v in v_c:
                    out_c_collect.append(einsum("b i j, b j d -> b i d", attn, v))
            out_c = sum(out_c_collect) / len(out_c_collect)
        else:
            out_c = sum(
                [
                    einsum("b i j, b j d -> b i d", attn, v)
                    for attn, v in zip(attn_c, v_c)
                ]
            ) / len(v_c)

        out = torch.cat([out_uc, out_c], dim=0)
        out = rearrange(out, "(b h) n d -> b n (h d)", h=h)

        return out

    def normal_qkv(self, q, context, mask):
        h = self.heads
        k = self.to_k(context)
        v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v))

        sim = einsum("b i d, b j d -> b i j", q, k) * self.scale

        if exists(mask):
            mask = rearrange(mask, "b ... -> b (...)")
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, "b j -> (b h) () j", h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)

        out = einsum("b i j, b j d -> b i d", attn, v)
        out = rearrange(out, "(b h) n d -> b n (h d)", h=h)

        return out

    def heterogeneous_qkv(self, q, uc_context, context_k, context_v, mask):
        h = self.heads
        k = self.to_k(torch.cat([uc_context, context_k], dim=0))
        v = self.to_v(torch.cat([uc_context, context_v], dim=0))

        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v))

        sim = einsum("b i d, b j d -> b i j", q, k) * self.scale

        if exists(mask):
            mask = rearrange(mask, "b ... -> b (...)")
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, "b j -> (b h) () j", h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)

        out = einsum("b i j, b j d -> b i d", attn, v)
        out = rearrange(out, "(b h) n d -> b n (h d)", h=h)
        return out

    def get_kv(self, context):
        return self.to_k(context), self.to_v(context)
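
For reference, a minimal sketch (not part of the deleted file) of how a caller might assemble the structured context this module consumed. Only the [uc, {"k": ..., "v": ...}] layout and the uncond/cond batch split in multi_qkv are taken from the code above; the tensor sizes and the CrossAttention-style module the methods are assumed to be bound to (with to_q/to_k/to_v/to_out, heads, scale and struct_attn attributes) are illustrative assumptions.

# Sketch only: shapes and module wiring are assumed, not taken from this repo.
import torch

b, n_img, n_txt, d_txt, d_img = 1, 4096, 77, 768, 320

# Queries for the unconditional and conditional passes, stacked on the batch
# axis: multi_qkv slices the flattened (batch * heads) rows into q[:b*heads]
# (uncond) and q[b*heads:] (cond), so x must carry both halves.
x = torch.randn(2 * b, n_img, d_img)

uc = torch.randn(b, n_txt, d_txt)   # unconditional (empty-prompt) embedding
c1 = torch.randn(b, n_txt, d_txt)   # first sub-prompt / concept embedding
c2 = torch.randn(b, n_txt, d_txt)   # second sub-prompt / concept embedding

# Lists of per-concept keys/values route to multi_qkv; plain tensors route to
# heterogeneous_qkv; a non-list context falls back to normal_qkv.
context = [uc, {"k": [c1, c2], "v": [c1, c2]}]

# Hypothetical call, assuming the forward above is bound to such a module:
# out = attn(x, context=context)   # -> (2 * b, n_img, d_img)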