codeShare committed on
Commit d1ca5a4
1 Parent(s): 542f65d

Upload fusion_t2i_CLIP_interrogator.ipynb

Google Colab Notebooks/fusion_t2i_CLIP_interrogator.ipynb CHANGED
@@ -25,6 +25,17 @@
25
  "id": "cRV2YWomjMBU"
26
  }
27
  },
28
  {
29
  "cell_type": "code",
30
  "source": [
@@ -91,21 +102,6 @@
91
  "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
92
  "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
93
  "\n",
94
- "f_add = torch.nn.quantized.FloatFunctional()\n",
95
- "\n",
96
- "\n",
97
- "\n",
98
- "\n"
99
- ],
100
- "metadata": {
101
- "id": "TC5lMJrS1HCC"
102
- },
103
- "execution_count": null,
104
- "outputs": []
105
- },
106
- {
107
- "cell_type": "code",
108
- "source": [
109
  "index = 0\n",
110
  "%cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
111
  "vocab_encodings = torch.load('vocab_encodings.pt', weights_only=False)\n",
@@ -119,10 +115,11 @@
119
  "for key in torch.load('reference_text_and_image_encodings.pt', weights_only=False):\n",
120
  " index = index + 1;\n",
121
  "#------#\n",
122
- "NUM_REFERENCE_ITEMS = index"
123
  ],
124
  "metadata": {
125
- "id": "Z8x3Y7IsnGlT"
126
  },
127
  "execution_count": null,
128
  "outputs": []
@@ -132,7 +129,7 @@
132
  "source": [
133
  "# @title \t⚄ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
134
  "# @markdown Choose a pre-encoded reference\n",
135
- "index = 457 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
136
  "PROMPT_INDEX = index\n",
137
  "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
138
  "url = target_urls[f'{PROMPT_INDEX}']\n",
@@ -140,48 +137,73 @@
140
  " image = Image.open(requests.get(url, stream=True).raw)\n",
141
  "#------#\n",
142
  "# @markdown ⚖️ 🖼️ encoding <-----?-----> 📝 encoding </div> <br>\n",
143
- "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
144
- "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
145
- "prompt_strength = math.pow(10 ,log_strength-1)\n",
146
- "reference = torch.zeros(768)\n",
147
  "\n",
148
  "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
149
  "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
150
- "reference = torch.add(reference, C * references[index][0].dequantize())\n",
151
- "reference = torch.add(reference, (1-C) * references[index][1].dequantize())\n",
152
- "references = ''\n",
153
  "# @markdown -----------\n",
154
- "# @markdown 📝➕ Enhance similarity to prompt(s)\n",
155
  "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
156
- "log_strength = 1.06 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
157
- "pos_strength = math.pow(10 ,log_strength-1)\n",
158
- "for _POS in POS.split(','):\n",
159
  " inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
160
- " text_features_POS = model.get_text_features(**inputs)\n",
161
  " text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
162
  " reference = torch.add(reference, pos_strength * text_features_POS)\n",
163
  "# @markdown -----------\n",
164
  "\n",
165
  "# @markdown 🚫 Penalize similarity to prompt(s)\n",
166
  "NEG = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
167
- "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
168
- "neg_strength = math.pow(10 ,log_strength-1)\n",
169
- "for _NEG in NEG.split(','):\n",
170
  " inputs = tokenizer(text = _NEG.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
171
- " text_features_NEG = model.get_text_features(**inputs)\n",
172
  " text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
173
- " reference = torch.add(reference, (-1) * neg_strength * text_features_NEG)\n",
174
  "# @markdown -----------\n",
175
  "# @markdown ⏩ Skip item(s) containing the word(s)\n",
176
- "SKIP = 'futa ' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
177
- "def isBlacklisted(txt):\n",
178
- " if txt.strip().isnumeric(): return True\n",
179
- " if blacklist.strip() == '': return False\n",
180
  " for item in list(blacklist.split(',')):\n",
181
  " if item.strip() == '' : continue\n",
182
  " if txt.find(item.strip())> -1 : return True\n",
183
  " #------#\n",
184
- " return False\n",
185
  "# @markdown -----------\n",
186
  "# @markdown 🔍 How similar should the results be?\n",
187
  "list_size = 1000 # @param {type:'number'}\n",
@@ -191,46 +213,62 @@
191
  "N = 7 # @param {type:\"slider\", min:0, max:20, step:1}\n",
192
  "# @markdown -----------\n",
193
  "# @markdown ⚙️ Run the script?\n",
194
- "run_script = True # @param {type:\"boolean\"}\n",
195
- "enable = run_script\n",
196
  "if (enable):\n",
197
  " reference = reference/reference.norm(p=2, dim=-1, keepdim=True)\n",
198
  " %cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
199
  " sims = torch.matmul(vocab_encodings.dequantize(),reference.t())\n",
200
  " sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
201
  "\n",
202
- " average = torch.zeros(768)\n",
203
- " for key in range(NUM_VOCAB_ITEMS):\n",
204
- " if (key>=start_at_index and key < start_at_index + list_size):\n",
205
- " average = torch.add(average, vocab_encodings[key].dequantize())\n",
206
- " if (key>=start_at_index + list_size) : break\n",
207
- " average = average * (1/max(1, list_size))\n",
208
- " average = average/average.norm(p=2, dim=-1, keepdim=True)\n",
209
- " print(average.norm(p=2, dim=-1, keepdim=True))\n",
210
- " average = average.clone().detach();\n",
211
- " variance = torch.zeros(1)\n",
212
- " for key in range(NUM_VOCAB_ITEMS):\n",
213
- " if (key>=start_at_index and key < start_at_index + list_size):\n",
214
- " #dot product\n",
215
- "\n",
216
- " difference_to_average = 100 * (torch.ones(1) - torch.dot(average[0]\n",
217
- " , vocab_encodings[key].dequantize()[0])/average.norm(p=2, dim=-1, keepdim=True))\n",
218
- "\n",
219
- " variance = torch.add(variance, difference_to_average * difference_to_average)\n",
220
- " if (key>=start_at_index + list_size) : break\n",
221
  " #--------#\n",
222
- " variance = variance * (1/max(1, list_size))\n",
223
- " variance= variance.clone().detach();\n",
224
- " print(f'The variance for the selected range is {math.sqrt(variance.item())} units from average')\n",
225
  "#---#\n",
226
- " output = '{'\n",
227
- " for _index in range(list_size):\n",
228
- " output = output + prompts[f'{indices[min(_index+start_at_index,NUM_VOCAB_ITEMS-1)].item()}'] + '|'\n",
229
- " #---------#\n",
230
- " output = (output + '}').replace('|}' , '} ')\n",
231
- " for iter in range(N):\n",
232
- " print(output)\n",
233
  "#-------#\n",
234
  "image or print('No image found')"
235
  ],
236
  "metadata": {
@@ -558,7 +596,8 @@
558
  "!zip -r {zip_dest} {root_output_folder}"
559
  ],
560
  "metadata": {
561
- "id": "zivBNrw9uSVD"
562
  },
563
  "execution_count": null,
564
  "outputs": []
@@ -566,44 +605,91 @@
566
  {
567
  "cell_type": "code",
568
  "source": [
569
- "\n",
570
  "# @title \t⚄ New code (work in progress)\n",
571
  "\n",
572
- "LIST_SIZE = 1000\n",
573
- "SCALE = 0.0043\n",
574
- "DIM = 768\n",
575
- "\n",
576
- "from safetensors.torch import load_file\n",
577
- "\n",
578
- "def get_most_similiar_items_to(ref , url , num_items):\n",
579
- " vocab = load_file(url)\n",
580
  "\n",
581
- " def similarity(item):\n",
582
- " key = item[0]\n",
583
- " value = item[1]\n",
584
- " tmp = torch.sub(value[1:DIM+1] , torch.ones(DIM) , alpha = value[0].item()).to(dtype=torch.uint8)\n",
585
- " return torch.dot(tmp,ref).item()\n",
586
- " #--------#\n",
587
  "\n",
588
- " return dict(sorted(vocab.items(), key=similarity))\n",
589
- "#----------#\n",
590
  "\n",
591
- "ref = torch.rand(DIM)\n",
592
- "ref = (1/SCALE) * ref/ref.norm(p=2, dim=-1, keepdim=True)\n",
593
- "ref = torch.round(ref).to(dtype=torch.uint8)\n",
594
  "\n",
595
- "url = '/content/fusion-t2i-generator-data/lyrics_vocab_q0043_41905.safetensors'\n",
596
- "test = get_most_similiar_items_to(ref , url , LIST_SIZE)\n",
597
  "\n",
598
  "index = 0\n",
599
- "for key in test:\n",
600
- " print(key)\n",
601
  " index = index + 1\n",
602
- " if index>=10:break\n",
603
- "#-----#"
604
  ],
605
  "metadata": {
606
- "id": "PGyLzCmYqCPg"
607
  },
608
  "execution_count": null,
609
  "outputs": []
 
25
  "id": "cRV2YWomjMBU"
26
  }
27
  },
28
+ {
29
+ "cell_type": "markdown",
30
+ "source": [
31
+ "THIS IS AN OLD VERSION OF THE CLIP INTERROGATOR.\n",
32
+ "\n",
33
+ "YOU WILL FIND THE UP TO DATE VERSION HERE:https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data/tree/main/Google%20Colab%20Jupyter%20Notebooks"
34
+ ],
35
+ "metadata": {
36
+ "id": "9slWHq0JIX6D"
37
+ }
38
+ },
39
  {
40
  "cell_type": "code",
41
  "source": [
 
102
  "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
103
  "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
104
  "\n",
105
  "index = 0\n",
106
  "%cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
107
  "vocab_encodings = torch.load('vocab_encodings.pt', weights_only=False)\n",
 
115
  "for key in torch.load('reference_text_and_image_encodings.pt', weights_only=False):\n",
116
  " index = index + 1;\n",
117
  "#------#\n",
118
+ "NUM_REFERENCE_ITEMS = index\n",
119
+ "\n"
120
  ],
121
  "metadata": {
122
+ "id": "TC5lMJrS1HCC"
123
  },
124
  "execution_count": null,
125
  "outputs": []
 
129
  "source": [
130
  "# @title \t⚄ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
131
  "# @markdown Choose a pre-encoded reference\n",
132
+ "index = 213 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
133
  "PROMPT_INDEX = index\n",
134
  "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
135
  "url = target_urls[f'{PROMPT_INDEX}']\n",
 
137
  " image = Image.open(requests.get(url, stream=True).raw)\n",
138
  "#------#\n",
139
  "# @markdown ⚖️ 🖼️ encoding <-----?-----> 📝 encoding </div> <br>\n",
140
+ "C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
141
+ "log_strength_1 = 2.17 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
142
+ "prompt_strength = torch.tensor(math.pow(10 ,log_strength_1-1)).to(dtype = torch.float32)\n",
143
+ "reference = torch.zeros(768).to(dtype = torch.float32)\n",
144
  "\n",
145
  "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
146
  "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
147
+ "reference = torch.add(reference, prompt_strength * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
148
+ "reference = torch.add(reference, prompt_strength * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
149
+ "references = '' # Clear up memory\n",
150
  "# @markdown -----------\n",
151
+ "# @markdown 📝➕ 1st Enhance similarity to prompt(s)\n",
152
+ "POS_2 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
153
+ "log_strength_2 = 1.03 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
154
+ "pos_strength = torch.tensor(math.pow(10 ,log_strength_2-1)).to(dtype = torch.float32)\n",
155
+ "for _POS in POS_2.replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
156
+ " inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
157
+ " text_features_POS = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
158
+ " text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
159
+ " reference = torch.add(reference, pos_strength * text_features_POS)\n",
160
+ "# @markdown -----------\n",
161
+ "\n",
162
+ "# @markdown -----------\n",
163
+ "# @markdown 📝➕ 2nd Enhance similarity to prompt(s)\n",
164
  "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
165
+ "log_strength_3 = 1.06 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
166
+ "pos_strength = torch.tensor(math.pow(10 ,log_strength_3-1)).to(dtype = torch.float32)\n",
167
+ "for _POS in POS.replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
168
  " inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
169
+ " text_features_POS = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
170
  " text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
171
  " reference = torch.add(reference, pos_strength * text_features_POS)\n",
172
  "# @markdown -----------\n",
173
  "\n",
174
  "# @markdown 🚫 Penalize similarity to prompt(s)\n",
175
  "NEG = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
176
+ "log_strength_4 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
177
+ "neg_strength = torch.tensor(math.pow(10 ,log_strength_4-1)).to(dtype = torch.float32)\n",
178
+ "for _NEG in NEG.replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
179
  " inputs = tokenizer(text = _NEG.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
180
+ " text_features_NEG = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
181
  " text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
182
+ " reference = torch.sub(reference, neg_strength * text_features_NEG)\n",
183
  "# @markdown -----------\n",
184
  "# @markdown ⏩ Skip item(s) containing the word(s)\n",
185
+ "SKIP = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
186
+ "\n",
187
+ "min_wordcount = 0 # @param {type:\"slider\", min:0, max:20, step:1}\n",
188
+ "\n",
189
+ "def isBlacklisted(_txt, _blacklist):\n",
190
+ " blacklist = _blacklist.lower().replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').strip()\n",
191
+ " txt = _txt.lower().strip()\n",
192
+ " if len(txt)<min_wordcount: return True\n",
193
+ " if txt.isnumeric(): return True\n",
194
+ " if blacklist == '': return False\n",
195
  " for item in list(blacklist.split(',')):\n",
196
  " if item.strip() == '' : continue\n",
197
  " if txt.find(item.strip())> -1 : return True\n",
198
  " #------#\n",
199
+ " found = False\n",
200
+ " alphabet = 'abcdefghijklmnopqrstuvxyz'\n",
201
+ " for letter in alphabet:\n",
202
+ " found = txt.find(letter)>-1\n",
203
+ " if found:break\n",
204
+ " #------#\n",
205
+ " return not found\n",
206
+ "\n",
207
  "# @markdown -----------\n",
208
  "# @markdown 🔍 How similar should the results be?\n",
209
  "list_size = 1000 # @param {type:'number'}\n",
 
213
  "N = 7 # @param {type:\"slider\", min:0, max:20, step:1}\n",
214
  "# @markdown -----------\n",
215
  "# @markdown ⚙️ Run the script?\n",
216
+ "update_list = True # @param {type:\"boolean\"}\n",
217
+ "\n",
218
+ "calculate_variance = False # @param {type:\"boolean\"}\n",
219
+ "\n",
220
+ "ne = update_list\n",
221
+ "\n",
222
+ "try: first\n",
223
+ "except:\n",
224
+ " enable = True\n",
225
+ " first = True\n",
226
+ "\n",
227
  "if (enable):\n",
228
  " reference = reference/reference.norm(p=2, dim=-1, keepdim=True)\n",
229
  " %cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
230
  " sims = torch.matmul(vocab_encodings.dequantize(),reference.t())\n",
231
  " sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
232
  "\n",
233
+ " if calculate_variance:\n",
234
+ " average = torch.zeros(768)\n",
235
+ " for key in range(NUM_VOCAB_ITEMS):\n",
236
+ " if (key>=start_at_index and key < start_at_index + list_size):\n",
237
+ " average = torch.add(average, vocab_encodings[key].dequantize())\n",
238
+ " if (key>=start_at_index + list_size) : break\n",
239
+ " average = average * (1/max(1, list_size))\n",
240
+ " average = average/average.norm(p=2, dim=-1, keepdim=True)\n",
241
+ " average = average.clone().detach();\n",
242
+ " variance = torch.zeros(1)\n",
243
+ " for key in range(NUM_VOCAB_ITEMS):\n",
244
+ " if (key>=start_at_index and key < start_at_index + list_size):\n",
245
+ " #dot product\n",
246
+ " difference_to_average = 100 * (torch.ones(1) - torch.dot(average[0]\n",
247
+ " , vocab_encodings[key].dequantize()[0])/average.norm(p=2, dim=-1, keepdim=True))\n",
248
+ " variance = torch.add(variance, difference_to_average * difference_to_average)\n",
249
+ " if (key>=start_at_index + list_size) : break\n",
250
+ " #--------#\n",
251
+ " variance = variance * (1/max(1, list_size))\n",
252
+ " variance= variance.clone().detach();\n",
253
+ " print(f'The variance for the selected range is {math.sqrt(variance.item())} units from average')\n",
254
  " #--------#\n",
255
  "#---#\n",
256
+ "output = '{'\n",
257
+ "for _index in range(list_size):\n",
258
+ " tmp = prompts[f'{indices[min(_index+start_at_index,NUM_VOCAB_ITEMS-1)].item()}']\n",
259
+ " if isBlacklisted(tmp , SKIP): continue\n",
260
+ " tmp = fix_bad_symbols(tmp)\n",
261
+ " if output.find(tmp)>-1:continue\n",
262
+ " output = output + tmp + '|'\n",
263
+ "#---------#\n",
264
+ "output = (output + '}').replace('|}' , '} ')\n",
265
+ "print('')\n",
266
+ "print('')\n",
267
+ "for iter in range(N):\n",
268
+ " print(output)\n",
269
  "#-------#\n",
270
+ "print('')\n",
271
+ "print('')\n",
272
  "image or print('No image found')"
273
  ],
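The tail of this cell assembles the surviving prompts into a single '{item1|item2|...}' string, presumably the wildcard syntax the fusion generator consumes, after dropping blacklisted, duplicate and symbol-only entries, and then prints it N times. A compact sketch of that filtering and assembly (the simplified filter and toy prompt list are stand-ins, not the notebook's isBlacklisted or fix_bad_symbols):

def is_blacklisted(txt: str, blacklist: str) -> bool:
    # Simplified stand-in: drop purely numeric items and items containing any skip word.
    txt = txt.lower().strip()
    if txt.isnumeric():
        return True
    return any(term.strip() and term.strip() in txt
               for term in blacklist.lower().split(','))

ranked = ['sunset over the sea', 'sunset over the sea', 'neon city at night', '12345']
SKIP = 'neon'

output = '{'
for item in ranked:
    if is_blacklisted(item, SKIP):
        continue
    if output.find(item) > -1:            # skip duplicates already in the list
        continue
    output = output + item + '|'
output = (output + '}').replace('|}', '} ')
print(output)                             # -> "{sunset over the sea} "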
274
  "metadata": {
 
596
  "!zip -r {zip_dest} {root_output_folder}"
597
  ],
598
  "metadata": {
599
+ "id": "zivBNrw9uSVD",
600
+ "cellView": "form"
601
  },
602
  "execution_count": null,
603
  "outputs": []
 
605
  {
606
  "cell_type": "code",
607
  "source": [
608
  "# @title \t⚄ New code (work in progress)\n",
609
  "\n",
610
+ "def get_num_vocab_items(_url):\n",
611
+ " num_vocab_items = 0\n",
612
+ " for item in _url.split('_'):\n",
613
+ " if item.find('safetensors')>-1: num_vocab_items = int(item.replace('.safetensors', ''))\n",
614
+ " #------#\n",
615
+ " return num_vocab_items-1\n",
616
  "\n",
617
  "\n",
618
+ "def get_similiar(_ref , urls, _LIST_SIZE):\n",
619
+ " dot_dtype = torch.float16\n",
620
+ " _SCALE = torch.tensor(0.0043).to(dot_dtype)\n",
621
+ " _DIM = 768\n",
622
+ " _vocab = {}\n",
623
+ " #----#\n",
624
+ " inputs = tokenizer(text = _ref.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
625
+ " ref = model.get_text_features(**inputs)[0]\n",
626
+ " ref = (ref/ref.norm(p=2, dim=-1, keepdim=True)).to(dtype = dot_dtype)\n",
627
+ " #-----#\n",
628
+ " num_vocab_items = 0\n",
629
+ " for url in urls:\n",
630
+ " num_vocab_items = num_vocab_items + get_num_vocab_items(url)\n",
631
+ " #------#\n",
632
+ " vocab = torch.zeros(num_vocab_items , _DIM).to(torch.uint8)\n",
633
+ " prompts = {}\n",
634
+ " index = 0\n",
635
+ " for url in urls:\n",
636
+ " __vocab = load_file(url)\n",
637
+ " for key in load_file(url):\n",
638
+ " vocab[index] = __vocab[key][1:_DIM+1] - __vocab[key][0]*torch.ones(_DIM).t()\n",
639
+ " prompts[f'{index}'] = key\n",
640
+ " index = index + 1\n",
641
+ " #-------#\n",
642
+ " __vocab = {}\n",
643
+ " #-------#\n",
644
+ " sims = torch.matmul((vocab*_SCALE).to(dot_dtype) , ref.t())\n",
645
+ " sorted , indices = torch.sort(sims, dim = 0 , descending = True)\n",
646
+ " return indices , prompts , sims\n",
647
+ " _prompts = {}\n",
648
+ " for index in range(num_vocab_items):\n",
649
+ " key = prompts[f'{indices[index]}']\n",
650
+ " _prompts[f'{key}'] = sims[key].item()\n",
651
+ " index = index + 1\n",
652
+ " if index>_LIST_SIZE:break\n",
653
+ " #-------#\n",
654
+ " return _prompts\n",
655
+ "#-------#\n",
656
+ "\n"
657
+ ],
658
+ "metadata": {
659
+ "cellView": "form",
660
+ "id": "uDzsk02CbMFc"
661
+ },
662
+ "execution_count": null,
663
+ "outputs": []
664
+ },
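Two notes on the helpers added above: get_num_vocab_items parses the row count straight out of the file name (the trailing number before .safetensors, minus one), and get_similiar CLIP-encodes a text prompt, rebuilds the uint8 vocab matrix, and returns (indices, prompts, sims) at its first return statement, which leaves the _prompts block after that return unreachable. A quick, self-contained check of the file-name parsing, using a path that appears elsewhere in this diff:

def get_num_vocab_items(_url):
    # Same logic as the helper above: the chunk ending in '.safetensors' carries the count.
    num_vocab_items = 0
    for item in _url.split('_'):
        if item.find('safetensors') > -1:
            num_vocab_items = int(item.replace('.safetensors', ''))
    return num_vocab_items - 1

url = '/content/fusion-t2i-generator-data/lyrics_vocab_q0043_41905.safetensors'
print(get_num_vocab_items(url))   # 41904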
665
+ {
666
+ "cell_type": "code",
667
+ "source": [
668
+ "vocab = {}\n",
669
+ "# @title \t⚄ New code (work in progress)\n",
670
+ "ref = 'impressionist painting by luis royo' # @param {type:'string' , placeholder:'type a single prompt to match'}\n",
671
+ "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
672
+ "urls = [ '/content/fusion-t2i-generator-data/civitai_vocab_q0043_203663.safetensors' ,]\n",
673
  "\n",
674
+ " #'/content/fusion-t2i-generator-data/clip_vocab_q0043_541291.safetensors' , '/content/fusion-t2i-generator-data/lyrics_vocab_q0043_41905.safetensors' , '/content/fusion-t2i-generator-data/names_vocab_q0043_162977.safetensors' , '/content/fusion-t2i-generator-data/r34_vocab_q0043_96166.safetensors' ]\n",
675
  "\n",
676
+ "indices , prompts , sims = get_similiar(ref , urls , LIST_SIZE)\n",
677
  "\n",
678
  "index = 0\n",
679
+ "_prompts = {}\n",
680
+ "for index in range(203662):\n",
681
+ " try:\n",
682
+ " key = prompts[f'{indices[index].item()}']\n",
683
+ " print(key)\n",
684
+ " except: print('Not found!')\n",
685
+ " #_prompts[f'{key}'] = sims[key].item()\n",
686
  " index = index + 1\n",
687
+ " if index>LIST_SIZE:break\n",
688
+ "\n"
689
  ],
690
  "metadata": {
691
+ "cellView": "form",
692
+ "id": "Azz1kCza6LB3"
693
  },
694
  "execution_count": null,
695
  "outputs": []