codeShare committed on
Commit
345c051
·
verified ·
1 Parent(s): b293fe3

Upload sd_token_similarity_calculator.ipynb

Files changed (1)
  1. sd_token_similarity_calculator.ipynb +203 -152
sd_token_similarity_calculator.ipynb CHANGED
@@ -155,118 +155,29 @@
155
  ],
156
  "metadata": {
157
  "id": "Ch9puvwKH1s3",
158
- "collapsed": true
159
- },
160
- "execution_count": null,
161
- "outputs": []
162
- },
163
- {
164
- "cell_type": "code",
165
- "source": [
166
- "# @title 📝 Prompt similarity: Order pre-made text_encodings\n",
167
- "prompt = \" a fast car on the road \" # @param {\"type\":\"string\",\"placeholder\":\"Write a prompt\"}\n",
168
- "from transformers import AutoTokenizer\n",
169
- "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
170
- "from transformers import CLIPProcessor, CLIPModel\n",
171
- "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
172
- "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
173
- "\n",
174
- "# Get text features for user input\n",
175
- "inputs = tokenizer(text = prompt, padding=True, return_tensors=\"pt\")\n",
176
- "text_features_A = model.get_text_features(**inputs)\n",
177
- "text_features_A = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
178
- "name_A = prompt\n",
179
- "#------#\n",
180
- "\n",
181
- "# Load the .db file for prefix encodings\n",
182
- "import shelve\n",
183
- "_iters = -1\n",
184
- "RANGE = NUM_PREFIX\n",
185
- "NUM_PREFIX_LISTS = 1\n",
186
- "dots = results_sim = torch.zeros(RANGE*NUM_PREFIX_LISTS)\n",
187
- "for _PREFIX_ENC_VOCAB in PREFIX_ENC_VOCAB:\n",
188
- " _iters = _iters + 1\n",
189
- " d = shelve.open(_PREFIX_ENC_VOCAB)\n",
190
- " for _index in range(RANGE):\n",
191
- " index = _iters*RANGE + _index\n",
192
- " text_features = d[f'{_index}']\n",
193
- " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
194
- " sim = torch.nn.functional.cosine_similarity(text_features, text_features_A)\n",
195
- " dots[index] = sim\n",
196
- " #----#\n",
197
- " d.close() #close the file\n",
198
- "#------#\n",
199
- "prefix_sorted, prefix_indices = torch.sort(dots,dim=0 , descending=True)\n",
200
- "#------#\n",
201
- "\n",
202
- "# Load the .db file for prefix encodings\n",
203
- "import shelve\n",
204
- "_iters = -1\n",
205
- "RANGE = NUM_SUFFIX\n",
206
- "dots = results_sim = torch.zeros(RANGE*NUM_SUFFIX_LISTS)\n",
207
- "for _SUFFIX_ENC_VOCAB in SUFFIX_ENC_VOCAB:\n",
208
- " _iters = _iters + 1\n",
209
- " d = shelve.open(_SUFFIX_ENC_VOCAB)\n",
210
- " for _index in range(RANGE):\n",
211
- " index = _iters*RANGE + _index\n",
212
- " text_features = d[f'{_index}']\n",
213
- " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
214
- " sim = torch.nn.functional.cosine_similarity(text_features, text_features_A)\n",
215
- " dots[index] = sim\n",
216
- " #----#\n",
217
- " d.close() #close the file\n",
218
- "#------#\n",
219
- "suffix_sorted, suffix_indices = torch.sort(dots,dim=0 , descending=True)\n",
220
- "#------#\n",
221
- "\n",
222
- "#Print the results\n",
223
- "# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
224
- "RANGE = 30\n",
225
- "_suffixes = '{'\n",
226
- "_sims = '{'\n",
227
- "for index in range(RANGE):\n",
228
- " id = int(suffix_indices[index])\n",
229
- " ahead = \"from \"\n",
230
- " behind = \"\"\n",
231
- " if(id>NUM_SUFFIX*1):\n",
232
- " ahead = \"a \"\n",
233
- " if(id>NUM_SUFFIX*2):\n",
234
- " ahead = \"by \"\n",
235
- " if(id>NUM_SUFFIX*3):\n",
236
- " ahead = \"\"\n",
237
- " behind = \"like\"\n",
238
- " id = _modulus(id,NUM_SUFFIX)\n",
239
- " #------#\n",
240
- " sim = suffix_sorted[index].item()\n",
241
- " name = ahead + get_suffix(id) + behind\n",
242
- " if(get_suffix(id) == ' '): name = ahead + f'{id}' + behind\n",
243
- " _suffixes = _suffixes + name + '|'\n",
244
- " _sims = _sims + f'{round(sim*100,2)} %' + '|'\n",
245
- "#------#\n",
246
- "_suffixes = (_suffixes + '}').replace('|}', '}')\n",
247
- "_sims = (_sims + '}').replace('|}', '}')\n",
248
- "\n",
249
- "print('most similiar suffix items to prompt : ' + _suffixes)\n",
250
- "print('similarity % for suffix items : ' + _sims)\n",
251
- "print('')\n",
252
- "\n",
253
- "#-------#\n",
254
- "\n",
255
- "_prefixes = '{'\n",
256
- "for index in range(RANGE):\n",
257
- " id = f'{prefix_indices[index]}'\n",
258
- " #sim = prefix_sorted[index]\n",
259
- " name = get_prefix(id)\n",
260
- " _prefixes = _prefixes + name + '|'\n",
261
- "#------#\n",
262
- "_prefixes = (_prefixes + '}').replace('|}', '}')\n",
263
- "print('most similiar prefix suffix to image : ' + _prefixes)\n"
264
- ],
265
- "metadata": {
266
- "id": "xc-PbIYF428y"
267
  },
268
- "execution_count": null,
269
- "outputs": []
270
  },
271
  {
272
  "cell_type": "code",
@@ -421,6 +332,146 @@
421
  "execution_count": null,
422
  "outputs": []
423
  },
424
  {
425
  "cell_type": "markdown",
426
  "source": [
@@ -474,13 +525,13 @@
474
  ],
475
  "metadata": {
476
  "id": "ke6mZ1RZDOeB",
477
- "outputId": "f98f9ea5-32d1-4cf7-b523-1c6b6e6792a2",
478
  "colab": {
479
  "base_uri": "https://localhost:8080/",
480
  "height": 1000
481
  }
482
  },
483
- "execution_count": 2,
484
  "outputs": [
485
  {
486
  "output_type": "display_data",
@@ -497,6 +548,15 @@
497
  {
498
  "cell_type": "code",
499
  "source": [
500
  "\n",
501
  "from transformers import AutoTokenizer\n",
502
  "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
@@ -554,12 +614,14 @@
554
  "suffix_sorted, suffix_indices = torch.sort(dots,dim=0 , descending=True)\n",
555
  "#------#\n",
556
  "\n",
557
  "#Print the results\n",
558
  "# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
559
- "RANGE = 30\n",
560
  "_suffixes = '{'\n",
561
  "_sims = '{'\n",
562
- "for index in range(RANGE):\n",
563
  " id = int(suffix_indices[index])\n",
564
  " ahead = \"from \"\n",
565
  " behind = \"\"\n",
@@ -576,62 +638,51 @@
576
  " name = ahead + get_suffix(id) + behind\n",
577
  " if(get_suffix(id) == ' '): name = ahead + f'{id}' + behind\n",
578
  " _suffixes = _suffixes + name + '|'\n",
579
- " _sims = _sims + f'{round(sim*100,2)} %' + '|'\n",
580
  "#------#\n",
581
  "_suffixes = (_suffixes + '}').replace('|}', '}')\n",
582
  "_sims = (_sims + '}').replace('|}', '}')\n",
583
  "\n",
584
- "print('most similiar suffix items to prompt : ' + _suffixes)\n",
585
- "print('similarity % for suffix items : ' + _sims)\n",
586
- "print('')\n",
587
  "\n",
588
  "#-------#\n",
589
  "\n",
590
  "_prefixes = '{'\n",
591
- "for index in range(RANGE):\n",
592
  " id = f'{prefix_indices[index]}'\n",
593
  " #sim = prefix_sorted[index]\n",
594
  " name = get_prefix(id)\n",
595
  " _prefixes = _prefixes + name + '|'\n",
596
  "#------#\n",
597
  "_prefixes = (_prefixes + '}').replace('|}', '}')\n",
598
- "print('most similiar prefix suffix to image : ' + _prefixes)\n"
599
- ],
600
- "metadata": {
601
- "id": "rebogpoyOG8k"
602
- },
603
- "execution_count": null,
604
- "outputs": []
605
- },
606
- {
607
- "cell_type": "code",
608
- "source": [
609
- "# @title 🖼️ Show the 10 most similiar suffix and prefix text-encodings to the image encoding\n",
610
  "\n",
611
- "_suffixes = '{'\n",
612
- "for index in range(20):\n",
613
- " id = f'{suffix_indices[index]}'\n",
614
- " sim = suffix_sorted[index]\n",
615
- " name = get_suffix(id)\n",
616
- " _suffixes = _suffixes + name + '|'\n",
617
- "#------#\n",
618
- "_suffixes = (_suffixes + '}').replace('|}', '}')\n",
619
- "print('most similiar suffix tokens to image : ' + _suffixes)\n",
620
  "\n",
621
- "#-------#\n",
622
  "\n",
623
- "_prefixes = '{'\n",
624
- "for index in range(20):\n",
625
- " id = f'{prefix_indices[index]}'\n",
626
- " sim = prefix_sorted[index]\n",
627
- " name = get_prefix(id)\n",
628
- " _prefixes = _prefixes + name + '|'\n",
629
- "#------#\n",
630
- "_prefixes = (_prefixes + '}').replace('|}', '}')\n",
631
- "print('most similiar prefix tokens to image : ' + _prefixes)\n"
632
  ],
633
  "metadata": {
634
- "id": "eZqMUhP0qYaK"
635
  },
636
  "execution_count": null,
637
  "outputs": []
 
155
  ],
156
  "metadata": {
157
  "id": "Ch9puvwKH1s3",
158
+ "collapsed": true,
159
+ "outputId": "129b355e-9a4f-49d1-b641-3b675558f9b2",
160
+ "colab": {
161
+ "base_uri": "https://localhost:8080/"
162
+ }
163
  },
164
+ "execution_count": 1,
165
+ "outputs": [
166
+ {
167
+ "output_type": "stream",
168
+ "name": "stdout",
169
+ "text": [
170
+ "Cloning into 'sd_tokens'...\n",
171
+ "remote: Enumerating objects: 99, done.\u001b[K\n",
172
+ "remote: Counting objects: 100% (96/96), done.\u001b[K\n",
173
+ "remote: Compressing objects: 100% (96/96), done.\u001b[K\n",
174
+ "remote: Total 99 (delta 34), reused 0 (delta 0), pack-reused 3 (from 1)\u001b[K\n",
175
+ "Unpacking objects: 100% (99/99), 1.35 MiB | 3.12 MiB/s, done.\n",
176
+ "Filtering content: 100% (22/22), 2.47 GiB | 39.37 MiB/s, done.\n",
177
+ "/content/sd_tokens\n"
178
+ ]
179
+ }
180
+ ]
181
  },
182
  {
183
  "cell_type": "code",
332
  "execution_count": null,
333
  "outputs": []
334
  },
335
+ {
336
+ "cell_type": "code",
337
+ "source": [
338
+ "# @title 📝 Get Prompt text_encoding similarity to the pre-calc. text_encodings\n",
339
+ "prompt = \" a fast car on the road \" # @param {\"type\":\"string\",\"placeholder\":\"Write a prompt\"}\n",
340
+ "list_size = 100 # @param {type:'number'}\n",
341
+ "start_at_index = 0 # @param {type:'number'}\n",
342
+ "print_Similarity = True # @param {type:\"boolean\"}\n",
343
+ "print_Suffix = True # @param {type:\"boolean\"}\n",
344
+ "print_Prefix = True # @param {type:\"boolean\"}\n",
345
+ "print_Descriptions = True # @param {type:\"boolean\"}\n",
346
+ "compact_Output = False # @param {type:\"boolean\"}\n",
347
+ "\n",
348
+ "from transformers import AutoTokenizer\n",
349
+ "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
350
+ "from transformers import CLIPProcessor, CLIPModel\n",
351
+ "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
352
+ "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
353
+ "\n",
354
+ "# Get text features for user input\n",
355
+ "inputs = tokenizer(text = prompt, padding=True, return_tensors=\"pt\")\n",
356
+ "text_features_A = model.get_text_features(**inputs)\n",
357
+ "text_features_A = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
358
+ "name_A = prompt\n",
359
+ "#------#\n",
360
+ "\n",
361
+ "# Load the .db file for prefix encodings\n",
362
+ "import shelve\n",
363
+ "_iters = -1\n",
364
+ "RANGE = NUM_PREFIX\n",
365
+ "NUM_PREFIX_LISTS = 1\n",
366
+ "dots = results_sim = torch.zeros(RANGE*NUM_PREFIX_LISTS)\n",
367
+ "for _PREFIX_ENC_VOCAB in PREFIX_ENC_VOCAB:\n",
368
+ " _iters = _iters + 1\n",
369
+ " d = shelve.open(_PREFIX_ENC_VOCAB)\n",
370
+ " for _index in range(RANGE):\n",
371
+ " index = _iters*RANGE + _index\n",
372
+ " text_features = d[f'{_index}']\n",
373
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
374
+ " sim = torch.nn.functional.cosine_similarity(text_features, text_features_A)\n",
375
+ " dots[index] = sim\n",
376
+ " #----#\n",
377
+ " d.close() #close the file\n",
378
+ "#------#\n",
379
+ "prefix_sorted, prefix_indices = torch.sort(dots,dim=0 , descending=True)\n",
380
+ "#------#\n",
381
+ "\n",
382
+ "# Load the .db file for prefix encodings\n",
383
+ "import shelve\n",
384
+ "_iters = -1\n",
385
+ "RANGE = NUM_SUFFIX\n",
386
+ "dots = results_sim = torch.zeros(RANGE*NUM_SUFFIX_LISTS)\n",
387
+ "for _SUFFIX_ENC_VOCAB in SUFFIX_ENC_VOCAB:\n",
388
+ " _iters = _iters + 1\n",
389
+ " d = shelve.open(_SUFFIX_ENC_VOCAB)\n",
390
+ " for _index in range(RANGE):\n",
391
+ " index = _iters*RANGE + _index\n",
392
+ " text_features = d[f'{_index}']\n",
393
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
394
+ " sim = torch.nn.functional.cosine_similarity(text_features, text_features_A)\n",
395
+ " dots[index] = sim\n",
396
+ " #----#\n",
397
+ " d.close() #close the file\n",
398
+ "#------#\n",
399
+ "suffix_sorted, suffix_indices = torch.sort(dots,dim=0 , descending=True)\n",
400
+ "#------#\n",
401
+ "\n",
402
+ "#Print the results\n",
403
+ "# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
404
+ "RANGE = list_size\n",
405
+ "_suffixes = '{'\n",
406
+ "_sims = '{'\n",
407
+ "for index in range(start_at_index + RANGE):\n",
408
+ " if index < start_at_index : continue\n",
409
+ " id = int(suffix_indices[index])\n",
410
+ " ahead = \"from \"\n",
411
+ " behind = \"\"\n",
412
+ " if(id>NUM_SUFFIX*1):\n",
413
+ " ahead = \"a \"\n",
414
+ " if(id>NUM_SUFFIX*2):\n",
415
+ " ahead = \"by \"\n",
416
+ " if(id>NUM_SUFFIX*3):\n",
417
+ " ahead = \"\"\n",
418
+ " behind = \"like\"\n",
419
+ " id = _modulus(id,NUM_SUFFIX)\n",
420
+ " #------#\n",
421
+ " sim = suffix_sorted[index].item()\n",
422
+ " name = ahead + get_suffix(id) + behind\n",
423
+ " if(get_suffix(id) == ' '): name = ahead + f'{id}' + behind\n",
424
+ " _suffixes = _suffixes + name + '|'\n",
425
+ " _sims = _sims + f'{round(sim,2)} %' + '|'\n",
426
+ "#------#\n",
427
+ "_suffixes = (_suffixes + '}').replace('|}', '}')\n",
428
+ "_sims = (_sims + '}').replace('|}', '}')\n",
429
+ "#------#\n",
430
+ "\n",
431
+ "\n",
432
+ "suffixes = _suffixes\n",
433
+ "sims = _sims\n",
434
+ "if(not print_Suffix): suffixes = ''\n",
435
+ "if(not print_Similarity): sims = ''\n",
436
+ "\n",
437
+ "if(not compact_Output):\n",
438
+ " if(print_Descriptions):\n",
439
+ " print(f'The {start_at_index}-{start_at_index + RANGE} most similiar suffix items to prompt : ' + suffixes)\n",
440
+ " print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for suffix items : ' + sims)\n",
441
+ " print('')\n",
442
+ " else:\n",
443
+ " print(suffixes)\n",
444
+ "#-------#\n",
445
+ "\n",
446
+ "_prefixes = '{'\n",
447
+ "for index in range(start_at_index + RANGE):\n",
448
+ " if index < start_at_index : continue\n",
449
+ " id = f'{prefix_indices[index]}'\n",
450
+ " #sim = prefix_sorted[index]\n",
451
+ " name = get_prefix(id)\n",
452
+ " _prefixes = _prefixes + name + '|'\n",
453
+ "#------#\n",
454
+ "_prefixes = (_prefixes + '}').replace('|}', '}')\n",
455
+ "\n",
456
+ "\n",
457
+ "prefixes = _prefixes\n",
458
+ "if(not print_Prefix): prefixes = ''\n",
459
+ "\n",
460
+ "if(print_Descriptions):\n",
461
+ " print(f'The {start_at_index}-{start_at_index + RANGE} most similiar prefixes to prompt : ' + prefixes)\n",
462
+ "else:\n",
463
+ " if(compact_Output):\n",
464
+ " print((prefixes + _suffixes).replace('}{', '|'))\n",
465
+ " else:\n",
466
+ " print(prefixes)\n",
467
+ "\n"
468
+ ],
469
+ "metadata": {
470
+ "id": "xc-PbIYF428y"
471
+ },
472
+ "execution_count": null,
473
+ "outputs": []
474
+ },
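The main behavioral change in this new version of the cell is pagination of the output: list_size and start_at_index replace the fixed RANGE = 30, and each loop skips forward to the window's start. A small illustration of the window the added loop visits; the sims tensor here is a stand-in, not notebook data.

import torch

# Stand-in for the sorted similarity scores the cell produces via torch.sort(...).
sims = torch.rand(500)
sorted_sims, sorted_ids = torch.sort(sims, descending=True)

list_size = 100      # @param in the notebook
start_at_index = 0   # @param in the notebook

# The added loop
#   for index in range(start_at_index + RANGE):
#       if index < start_at_index : continue
# visits exactly the indices start_at_index .. start_at_index + list_size - 1:
for index in range(start_at_index, start_at_index + list_size):
    sim = sorted_sims[index].item()
    id = int(sorted_ids[index])
    # ... the cell then formats `id` into a prefix/suffix name and appends `sim` ...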
475
  {
476
  "cell_type": "markdown",
477
  "source": [
525
  ],
526
  "metadata": {
527
  "id": "ke6mZ1RZDOeB",
528
+ "outputId": "9f9b5556-6fa7-4aed-e1bc-1704ab0af381",
529
  "colab": {
530
  "base_uri": "https://localhost:8080/",
531
  "height": 1000
532
  }
533
  },
534
+ "execution_count": 4,
535
  "outputs": [
536
  {
537
  "output_type": "display_data",
548
  {
549
  "cell_type": "code",
550
  "source": [
551
+ "# @title 🖼️ Get image_encoding similarity to the pre-calc. text_encodings\n",
552
+ "\n",
553
+ "list_size = 100 # @param {type:'number'}\n",
554
+ "start_at_index = 0 # @param {type:'number'}\n",
555
+ "print_Similarity = True # @param {type:\"boolean\"}\n",
556
+ "print_Suffix = True # @param {type:\"boolean\"}\n",
557
+ "print_Prefix = True # @param {type:\"boolean\"}\n",
558
+ "print_Descriptions = True # @param {type:\"boolean\"}\n",
559
+ "compact_Output = False # @param {type:\"boolean\"}\n",
560
  "\n",
561
  "from transformers import AutoTokenizer\n",
562
  "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
614
  "suffix_sorted, suffix_indices = torch.sort(dots,dim=0 , descending=True)\n",
615
  "#------#\n",
616
  "\n",
617
+ "\n",
618
  "#Print the results\n",
619
  "# title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
620
+ "RANGE = list_size\n",
621
  "_suffixes = '{'\n",
622
  "_sims = '{'\n",
623
+ "for index in range(start_at_index + RANGE):\n",
624
+ " if index < start_at_index : continue\n",
625
  " id = int(suffix_indices[index])\n",
626
  " ahead = \"from \"\n",
627
  " behind = \"\"\n",
638
  " name = ahead + get_suffix(id) + behind\n",
639
  " if(get_suffix(id) == ' '): name = ahead + f'{id}' + behind\n",
640
  " _suffixes = _suffixes + name + '|'\n",
641
+ " _sims = _sims + f'{round(sim,2)} %' + '|'\n",
642
  "#------#\n",
643
  "_suffixes = (_suffixes + '}').replace('|}', '}')\n",
644
  "_sims = (_sims + '}').replace('|}', '}')\n",
645
+ "#------#\n",
646
  "\n",
647
+ "suffixes = _suffixes\n",
648
+ "sims = _sims\n",
649
+ "\n",
650
+ "if(not print_Suffix): suffixes = ''\n",
651
+ "if(not print_Similarity): sims = ''\n",
652
  "\n",
653
+ "if(not compact_Output):\n",
654
+ " if(print_Descriptions):\n",
655
+ " print(f'The {start_at_index}-{start_at_index + RANGE} most similiar suffix items to prompt : ' + suffixes)\n",
656
+ " print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for suffix items : ' + sims)\n",
657
+ " print('')\n",
658
+ " else:\n",
659
+ " print(suffixes)\n",
660
  "#-------#\n",
661
  "\n",
662
  "_prefixes = '{'\n",
663
+ "for index in range(start_at_index + RANGE):\n",
664
+ " if index < start_at_index : continue\n",
665
  " id = f'{prefix_indices[index]}'\n",
666
  " #sim = prefix_sorted[index]\n",
667
  " name = get_prefix(id)\n",
668
  " _prefixes = _prefixes + name + '|'\n",
669
  "#------#\n",
670
  "_prefixes = (_prefixes + '}').replace('|}', '}')\n",
671
  "\n",
672
  "\n",
673
+ "prefixes = _prefixes\n",
674
+ "if(not print_Prefix): prefixes = ''\n",
675
  "\n",
676
+ "if(print_Descriptions):\n",
677
+ " print(f'The {start_at_index}-{start_at_index + RANGE} most similiar prefixes to prompt : ' + prefixes)\n",
678
+ "else:\n",
679
+ " if(compact_Output):\n",
680
+ " print((prefixes + _suffixes).replace('}{', '|'))\n",
681
+ " else:\n",
682
+ " print(prefixes)\n"
683
  ],
684
  "metadata": {
685
+ "id": "rebogpoyOG8k"
686
  },
687
  "execution_count": null,
688
  "outputs": []