nyanko7 committed on
Commit
8d7886f
1 Parent(s): 86562e4

Create prompt_parser.py

Files changed (1)
  1. modules/prompt_parser.py +391 -0
modules/prompt_parser.py ADDED
@@ -0,0 +1,391 @@
+
+import re
+import math
+import numpy as np
+import torch
+
+# Code from https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/8e2aeee4a127b295bfc880800e4a312e0f049b85, modified.
+
+class PromptChunk:
+    """
+    This object contains token ids, weights (multipliers, e.g. 1.4 for "(word:1.4)") and textual inversion
+    embedding info for a chunk of prompt. If a prompt is short, it is represented by one PromptChunk;
+    otherwise, multiple are necessary. Each PromptChunk contains exactly 77 tokens: one start token,
+    one end token, and up to 75 tokens from the prompt.
+    """
+
+    def __init__(self):
+        self.tokens = []
+        self.multipliers = []
+        self.fixes = []
+
+
+class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
+    """A PyTorch module that wraps a FrozenCLIPEmbedder module, enhancing it to support unlimited
+    prompt length and per-token weights in the prompt.
+    """
+
+    def __init__(self, text_encoder, enable_emphasis=True):
+        super().__init__()
+
+        self.device = lambda: text_encoder.device
+        self.enable_emphasis = enable_emphasis
+        # text_encoder is the original FrozenCLIPEmbedder module; it can also be FrozenOpenCLIPEmbedder
+        # or xlmr.BertSeriesModelWithTransformation, depending on the model.
+
+        self.chunk_length = 75
+
+    def empty_chunk(self):
+        """creates an empty PromptChunk and returns it"""
+
+        chunk = PromptChunk()
+        chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
+        chunk.multipliers = [1.0] * (self.chunk_length + 2)
+        return chunk
+
+    def get_target_prompt_token_count(self, token_count):
+        """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented"""
+
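+        # e.g. with chunk_length == 75, token counts 1..75 round up to 75, 76..150 to 150, and so on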
+        return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
+
+    def tokenize_line(self, line):
+        """
+        this transforms a single prompt into a list of PromptChunk objects - as many as needed to
+        represent the prompt.
+        Returns the list and the total number of tokens in the prompt.
+        """
+
+        if self.enable_emphasis:
+            parsed = parse_prompt_attention(line)
+        else:
+            parsed = [[line, 1.0]]
+
+        tokenized = self.tokenize([text for text, _ in parsed])
+
+        chunks = []
+        chunk = PromptChunk()
+        token_count = 0
+        last_comma = -1
+
+        def next_chunk(is_last=False):
+            """puts current chunk into the list of results and produces the next one - empty;
+            if is_last is true, <end-of-text> tokens at the end won't add to token_count"""
+            nonlocal token_count
+            nonlocal last_comma
+            nonlocal chunk
+
+            if is_last:
+                token_count += len(chunk.tokens)
+            else:
+                token_count += self.chunk_length
+
+            to_add = self.chunk_length - len(chunk.tokens)
+            if to_add > 0:
+                chunk.tokens += [self.id_end] * to_add
+                chunk.multipliers += [1.0] * to_add
+
+            chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
+            chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
+
+            last_comma = -1
+            chunks.append(chunk)
+            chunk = PromptChunk()
+
+        comma_padding_backtrack = 20  # default value in https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/6cff4401824299a983c8e13424018efc347b4a2b/modules/shared.py#L410
+        for tokens, (text, weight) in zip(tokenized, parsed):
+            if text == "BREAK" and weight == -1:
+                next_chunk()
+                continue
+
+            position = 0
+            while position < len(tokens):
+                token = tokens[position]
+
+                if token == self.comma_token:
+                    last_comma = len(chunk.tokens)
+
+                # this is when we are at the end of the allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
+                # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
+                elif (
+                    comma_padding_backtrack != 0
+                    and len(chunk.tokens) == self.chunk_length
+                    and last_comma != -1
+                    and len(chunk.tokens) - last_comma <= comma_padding_backtrack
+                ):
+                    break_location = last_comma + 1
+
+                    reloc_tokens = chunk.tokens[break_location:]
+                    reloc_mults = chunk.multipliers[break_location:]
+
+                    chunk.tokens = chunk.tokens[:break_location]
+                    chunk.multipliers = chunk.multipliers[:break_location]
+
+                    next_chunk()
+                    chunk.tokens = reloc_tokens
+                    chunk.multipliers = reloc_mults
+
+                if len(chunk.tokens) == self.chunk_length:
+                    next_chunk()
+
+                chunk.tokens.append(token)
+                chunk.multipliers.append(weight)
+                position += 1
+
+        if len(chunk.tokens) > 0 or len(chunks) == 0:
+            next_chunk(is_last=True)
+
+        return chunks, token_count
+
+    def process_texts(self, texts):
+        """
+        Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum
+        length, in tokens, of all texts.
+        """
+
+        token_count = 0
+
+        cache = {}
+        batch_chunks = []
+        for line in texts:
+            if line in cache:
+                chunks = cache[line]
+            else:
+                chunks, current_token_count = self.tokenize_line(line)
+                token_count = max(current_token_count, token_count)
+
+                cache[line] = chunks
+
+            batch_chunks.append(chunks)
+
+        return batch_chunks, token_count
+
+    def forward(self, texts):
+        """
+        Accepts an array of texts; passes texts through the transformers network to create a tensor with numerical representation of those texts.
+        Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will
+        be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, and for SD2 it's 1024.
+        An example shape returned by this function can be: (2, 77, 768).
+        Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element
+        is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
+        """
+
+        batch_chunks, token_count = self.process_texts(texts)
+        chunk_count = max([len(x) for x in batch_chunks])
+
+        zs = []
+        ts = []
+        for i in range(chunk_count):
+            batch_chunk = [
+                chunks[i] if i < len(chunks) else self.empty_chunk()
+                for chunks in batch_chunks
+            ]
+
+            tokens = [x.tokens for x in batch_chunk]
+            multipliers = [x.multipliers for x in batch_chunk]
+            # self.embeddings.fixes = [x.fixes for x in batch_chunk]
+
+            # for fixes in self.embeddings.fixes:
+            #     for position, embedding in fixes:
+            #         used_embeddings[embedding.name] = embedding
+
+            z = self.process_tokens(tokens, multipliers)
+            zs.append(z)
+            ts.append(tokens)
+
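+        # np.hstack joins the per-chunk id lists into shape (B, 77 * chunk_count);
+        # torch.hstack concatenates the embeddings along dim 1 into (B, 77 * chunk_count, C)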
+        return np.hstack(ts), torch.hstack(zs)
+
+    def process_tokens(self, remade_batch_tokens, batch_multipliers):
+        """
+        sends one prompt chunk to be encoded by the transformers neural network.
+        remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually
+        there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens.
+        Multipliers are used to give more or less weight to the outputs of the transformers network. Each multiplier
+        corresponds to one token.
+        """
+        tokens = torch.asarray(remade_batch_tokens).to(self.device())
+
+        # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
+        if self.id_end != self.id_pad:
+            for batch_pos in range(len(remade_batch_tokens)):
+                index = remade_batch_tokens[batch_pos].index(self.id_end)
+                tokens[batch_pos, index + 1 : tokens.shape[1]] = self.id_pad
+
+        z = self.encode_with_transformers(tokens)
+
+        # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
+        batch_multipliers = torch.asarray(batch_multipliers).to(self.device())
+        original_mean = z.mean()
+        z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
+        new_mean = z.mean()
+        z = z * (original_mean / new_mean)
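+        # e.g. a token with multiplier 1.4 has its output vectors scaled by 1.4 before the
+        # batch-wide mean is rescaled back to the original mean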
+
+        return z
+
+
+class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
+    def __init__(self, tokenizer, text_encoder):
+        super().__init__(text_encoder)
+        self.tokenizer = tokenizer
+        self.text_encoder = text_encoder
+
+        vocab = self.tokenizer.get_vocab()
+
+        self.comma_token = vocab.get(",</w>", None)
+
+        self.token_mults = {}
+        tokens_with_parens = [
+            (k, v)
+            for k, v in vocab.items()
+            if "(" in k or ")" in k or "[" in k or "]" in k
+        ]
+        for text, ident in tokens_with_parens:
+            mult = 1.0
+            for c in text:
+                if c == "[":
+                    mult /= 1.1
+                if c == "]":
+                    mult *= 1.1
+                if c == "(":
+                    mult *= 1.1
+                if c == ")":
+                    mult /= 1.1
+
+            if mult != 1.0:
+                self.token_mults[ident] = mult
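+        # e.g. a vocab entry such as "((" would map to a multiplier of 1.1 * 1.1 = 1.21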
+
+        self.id_start = self.tokenizer.bos_token_id
+        self.id_end = self.tokenizer.eos_token_id
+        self.id_pad = self.id_end
+
+    def tokenize(self, texts):
+        tokenized = self.tokenizer(
+            texts, truncation=False, add_special_tokens=False
+        )["input_ids"]
+
+        return tokenized
+
+    def encode_with_transformers(self, tokens):
+        CLIP_stop_at_last_layers = 1
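+        # hardcoded here; this corresponds to webui's "Clip skip" setting - a value of 2 would
+        # take the penultimate hidden state and re-apply the final layer norm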
+        tokens = tokens.to(self.text_encoder.device)
+        outputs = self.text_encoder(tokens, output_hidden_states=True)
+
+        if CLIP_stop_at_last_layers > 1:
+            z = outputs.hidden_states[-CLIP_stop_at_last_layers]
+            z = self.text_encoder.text_model.final_layer_norm(z)
+        else:
+            z = outputs.last_hidden_state
+
+        return z
+
+
+re_attention = re.compile(
+    r"""
+\\\(|
+\\\)|
+\\\[|
+\\]|
+\\\\|
+\\|
+\(|
+\[|
+:([+-]?[.\d]+)\)|
+\)|
+]|
+[^\\()\[\]:]+|
+:
+""",
+    re.X,
+)
+
+re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
+
+
+def parse_prompt_attention(text):
+    r"""
+    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
+    Accepted tokens are:
+      (abc) - increases attention to abc by a multiplier of 1.1
+      (abc:3.12) - increases attention to abc by a multiplier of 3.12
+      [abc] - decreases attention to abc by a multiplier of 1.1
+      \( - literal character '('
+      \[ - literal character '['
+      \) - literal character ')'
+      \] - literal character ']'
+      \\ - literal character '\'
+      anything else - just text
+
+    >>> parse_prompt_attention('normal text')
+    [['normal text', 1.0]]
+    >>> parse_prompt_attention('an (important) word')
+    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
+    >>> parse_prompt_attention('(unbalanced')
+    [['unbalanced', 1.1]]
+    >>> parse_prompt_attention('\(literal\]')
+    [['(literal]', 1.0]]
+    >>> parse_prompt_attention('(unnecessary)(parens)')
+    [['unnecessaryparens', 1.1]]
+    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
+    [['a ', 1.0],
+     ['house', 1.5730000000000004],
+     [' ', 1.1],
+     ['on', 1.0],
+     [' a ', 1.1],
+     ['hill', 0.55],
+     [', sun, ', 1.1],
+     ['sky', 1.4641000000000006],
+     ['.', 1.1]]
+    """
+
+    res = []
+    round_brackets = []
+    square_brackets = []
+
+    round_bracket_multiplier = 1.1
+    square_bracket_multiplier = 1 / 1.1
+
+    def multiply_range(start_position, multiplier):
+        for p in range(start_position, len(res)):
+            res[p][1] *= multiplier
349
+
350
+ for m in re_attention.finditer(text):
351
+ text = m.group(0)
352
+ weight = m.group(1)
353
+
354
+ if text.startswith("\\"):
355
+ res.append([text[1:], 1.0])
356
+ elif text == "(":
357
+ round_brackets.append(len(res))
358
+ elif text == "[":
359
+ square_brackets.append(len(res))
360
+ elif weight is not None and len(round_brackets) > 0:
361
+ multiply_range(round_brackets.pop(), float(weight))
362
+ elif text == ")" and len(round_brackets) > 0:
363
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
364
+ elif text == "]" and len(square_brackets) > 0:
365
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
366
+ else:
367
+ parts = re.split(re_break, text)
368
+ for i, part in enumerate(parts):
369
+ if i > 0:
370
+ res.append(["BREAK", -1])
371
+ res.append([part, 1.0])
+
+    for pos in round_brackets:
+        multiply_range(pos, round_bracket_multiplier)
+
+    for pos in square_brackets:
+        multiply_range(pos, square_bracket_multiplier)
+
+    if len(res) == 0:
+        res = [["", 1.0]]
+
+    # merge runs of identical weights
+    i = 0
+    while i + 1 < len(res):
+        if res[i][1] == res[i + 1][1]:
+            res[i][0] += res[i + 1][0]
+            res.pop(i + 1)
+        else:
+            i += 1
+
+    return res
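
A minimal usage sketch (assuming the Hugging Face transformers CLIP classes that SD1 pipelines typically pair with this module; the checkpoint name is illustrative):

    from transformers import CLIPTokenizer, CLIPTextModel

    from modules.prompt_parser import FrozenCLIPEmbedderWithCustomWords

    # SD1's standard text stack; any tokenizer/encoder pair exposing bos/eos token ids
    # works the same way
    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

    embedder = FrozenCLIPEmbedderWithCustomWords(tokenizer, text_encoder)
    tokens, z = embedder(["a (cat:1.4) sitting on a chair"])
    # tokens: id array of shape (1, 77 * n_chunks); z: embeddings of shape (1, 77 * n_chunks, 768)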