codeShare committed on
Commit
6124561
1 Parent(s): 606fac8

Upload sd_token_similarity_calculator.ipynb

Browse files
Files changed (1) hide show
  1. sd_token_similarity_calculator.ipynb +62 -15
sd_token_similarity_calculator.ipynb CHANGED
@@ -124,7 +124,7 @@
124
  "base_uri": "https://localhost:8080/"
125
  }
126
  },
127
- "execution_count": 1,
128
  "outputs": [
129
  {
130
  "output_type": "stream",
@@ -151,7 +151,7 @@
151
  "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
152
  "\n",
153
  "# @markdown Write name of token to match against\n",
154
- "token_name = \" blanket \" # @param {type:'string',\"placeholder\":\"leave empty for random value token\"}\n",
155
  "\n",
156
  "prompt = token_name\n",
157
  "# @markdown (optional) Mix the token with something else\n",
@@ -387,11 +387,11 @@
387
  "start_search_at_index = 0 # @param {type:\"slider\", min:0, max: 49407, step:100}\n",
388
  "# @markdown The lower the start_index, the more similiar the sampled tokens will be to the target token assigned in the '⚡ Get similiar tokens' cell\". If the cell was not run, then it will use tokens ordered by similarity to the \"girl\\</w>\" token\n",
389
  "start_search_at_ID = start_search_at_index\n",
390
- "search_range = 1000 # @param {type:\"slider\", min:10, max: 1000, step:10}\n",
391
  "\n",
392
- "samples_per_iter = 10 # @param {type:\"slider\", min:10, max: 100, step:10}\n",
393
  "\n",
394
- "iterations = 5 # @param {type:\"slider\", min:1, max: 20, step:0}\n",
395
  "restrictions = 'None' # @param [\"None\", \"Suffix only\", \"Prefix only\"]\n",
396
  "#markdown Limit char size of included token <----- Disabled\n",
397
  "min_char_size = 0 #param {type:\"slider\", min:0, max: 20, step:1}\n",
@@ -406,7 +406,7 @@
406
  "RANGE = min(search_range , max(1,NUM_TOKENS - start_search_at_ID))\n",
407
  "#-----#\n",
408
  "import math, random\n",
409
- "NUM_PERMUTATIONS = 4\n",
410
  "ITERS = iterations\n",
411
  "#-----#\n",
412
  "#LOOP START\n",
@@ -443,7 +443,7 @@
443
  " _start = START + iter*RANGE\n",
444
  "\n",
445
  " for index in range(samples_per_iter):\n",
446
- " id_C = min(_start + index, NUM_TOKENS) + random.randint(0,RANGE)\n",
447
  " name_C = db_vocab[f'{id_C}']\n",
448
  " is_Prefix = 0\n",
449
  " #Skip if non-AZ characters are found\n",
@@ -522,6 +522,7 @@
522
  " print_Similarity = True # @param {type:\"boolean\"}\n",
523
  " print_Name = True # @param {type:\"boolean\"}\n",
524
  " print_Divider = True # @param {type:\"boolean\"}\n",
 
525
  " #----#\n",
526
  " if (print_Divider):\n",
527
  " print('//---//')\n",
@@ -571,10 +572,12 @@
571
  " trails = (trails + \"&&&&\").replace(\"|&&&&\", \"}\").replace(\"</w>\", \" \").replace(\"{&&&&\", \"\")\n",
572
  " aheads = (aheads + \"&&&&\").replace(\"|&&&&\", \"}\").replace(\"</w>\", \" \").replace(\"{&&&&\", \"\")\n",
573
  " #-----#\n",
574
- " print(f\"place these items ahead of prompt : {aheads}\")\n",
575
- " print(\"\")\n",
576
- " print(f\"place these items behind the prompt : {trails}\")\n",
577
- " print(\"\")\n",
 
 
578
  "\n",
579
  " tmp = must_start_with + ' ' + max_name_ahead + name_B + ' ' + must_end_with\n",
580
  " tmp = tmp.strip().replace('</w>', ' ')\n",
@@ -591,8 +594,10 @@
591
  " name_inner = ''\n",
592
  " if index == 0 : name_inner = name_B\n",
593
  " if index == 1: name_inner = max_name_ahead\n",
594
- " if index == 2: name_inner = name_B + max_name_trail\n",
595
- " if index == 3: name_inner = max_name_ahead + name_B + max_name_trail\n",
 
 
596
  " if name_inner == '': name_inner = max_name_ahead + name_B + max_name_trail\n",
597
  "\n",
598
  " name = must_start_with + name_inner + must_end_with\n",
@@ -616,9 +621,23 @@
616
  " results_sim[iter*NUM_PERMUTATIONS + index] = sim\n",
617
  " results_name_B[iter*NUM_PERMUTATIONS + index] = name_inner.replace('</w>',' ')\n",
618
  " #------#\n",
619
- " name_B = results_name_B[iter*NUM_PERMUTATIONS + random.randint(0,3)]\n",
620
- "#--------#\n",
 
 
 
 
 
 
 
 
621
  "\n",
 
 
 
 
 
 
622
  "print('')\n",
623
  "sorted, indices = torch.sort(results_sim,dim=0 , descending=True)\n",
624
  "\n",
@@ -637,6 +656,34 @@
637
  "execution_count": null,
638
  "outputs": []
639
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
640
  {
641
  "cell_type": "code",
642
  "source": [
 
124
  "base_uri": "https://localhost:8080/"
125
  }
126
  },
127
+ "execution_count": null,
128
  "outputs": [
129
  {
130
  "output_type": "stream",
 
151
  "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
152
  "\n",
153
  "# @markdown Write name of token to match against\n",
154
+ "token_name = \" banana\" # @param {type:'string',\"placeholder\":\"leave empty for random value token\"}\n",
155
  "\n",
156
  "prompt = token_name\n",
157
  "# @markdown (optional) Mix the token with something else\n",
 
387
  "start_search_at_index = 0 # @param {type:\"slider\", min:0, max: 49407, step:100}\n",
388
  "# @markdown The lower the start_index, the more similiar the sampled tokens will be to the target token assigned in the '⚡ Get similiar tokens' cell\". If the cell was not run, then it will use tokens ordered by similarity to the \"girl\\</w>\" token\n",
389
  "start_search_at_ID = start_search_at_index\n",
390
+ "search_range = 1220 # @param {type:\"slider\", min:10, max: 2000, step:10}\n",
391
  "\n",
392
+ "samples_per_iter = 20 # @param {type:\"slider\", min:10, max: 100, step:10}\n",
393
  "\n",
394
+ "iterations = 20 # @param {type:\"slider\", min:1, max: 20, step:0}\n",
395
  "restrictions = 'None' # @param [\"None\", \"Suffix only\", \"Prefix only\"]\n",
396
  "#markdown Limit char size of included token <----- Disabled\n",
397
  "min_char_size = 0 #param {type:\"slider\", min:0, max: 20, step:1}\n",
 
406
  "RANGE = min(search_range , max(1,NUM_TOKENS - start_search_at_ID))\n",
407
  "#-----#\n",
408
  "import math, random\n",
409
+ "NUM_PERMUTATIONS = 6\n",
410
  "ITERS = iterations\n",
411
  "#-----#\n",
412
  "#LOOP START\n",
 
443
  " _start = START + iter*RANGE\n",
444
  "\n",
445
  " for index in range(samples_per_iter):\n",
446
+ " id_C = min(_start + index*RANGE, NUM_TOKENS) + random.randint(0,RANGE)\n",
447
  " name_C = db_vocab[f'{id_C}']\n",
448
  " is_Prefix = 0\n",
449
  " #Skip if non-AZ characters are found\n",
 
522
  " print_Similarity = True # @param {type:\"boolean\"}\n",
523
  " print_Name = True # @param {type:\"boolean\"}\n",
524
  " print_Divider = True # @param {type:\"boolean\"}\n",
525
+ " print_Suggestions = False # @param {type:\"boolean\"}\n",
526
  " #----#\n",
527
  " if (print_Divider):\n",
528
  " print('//---//')\n",
 
572
  " trails = (trails + \"&&&&\").replace(\"|&&&&\", \"}\").replace(\"</w>\", \" \").replace(\"{&&&&\", \"\")\n",
573
  " aheads = (aheads + \"&&&&\").replace(\"|&&&&\", \"}\").replace(\"</w>\", \" \").replace(\"{&&&&\", \"\")\n",
574
  " #-----#\n",
575
+ "\n",
576
+ " if(print_Suggestions):\n",
577
+ " print(f\"place these items ahead of prompt : {aheads}\")\n",
578
+ " print(\"\")\n",
579
+ " print(f\"place these items behind the prompt : {trails}\")\n",
580
+ " print(\"\")\n",
581
  "\n",
582
  " tmp = must_start_with + ' ' + max_name_ahead + name_B + ' ' + must_end_with\n",
583
  " tmp = tmp.strip().replace('</w>', ' ')\n",
 
594
  " name_inner = ''\n",
595
  " if index == 0 : name_inner = name_B\n",
596
  " if index == 1: name_inner = max_name_ahead\n",
597
+ " if index == 2: name_inner = max_name_trail\n",
598
+ " if index == 3: name_inner = name_B + max_name_trail\n",
599
+ " if index == 4: name_inner = max_name_ahead + name_B\n",
600
+ " if index == 5: name_inner = max_name_ahead + name_B + max_name_trail\n",
601
  " if name_inner == '': name_inner = max_name_ahead + name_B + max_name_trail\n",
602
  "\n",
603
  " name = must_start_with + name_inner + must_end_with\n",
 
621
  " results_sim[iter*NUM_PERMUTATIONS + index] = sim\n",
622
  " results_name_B[iter*NUM_PERMUTATIONS + index] = name_inner.replace('</w>',' ')\n",
623
  " #------#\n",
624
+ " #name_B = results_name_B[iter*NUM_PERMUTATIONS + random.randint(0,3)]\n",
625
+ " tmp = iter*NUM_PERMUTATIONS\n",
626
+ " _name_B=''\n",
627
+ " if results_sim[tmp+1]>results_sim[tmp+2]: _name_B = results_name_B[tmp + 3]\n",
628
+ " if results_sim[tmp+2]>results_sim[tmp+1]: _name_B = results_name_B[tmp + 4]\n",
629
+ "\n",
630
+ " if _name_B != name_B:\n",
631
+ " name_B=_name_B\n",
632
+ " else:\n",
633
+ " name_B = results_name_B[tmp + 5]\n",
634
  "\n",
635
+ "#--------#\n",
636
+ "print('')\n",
637
+ "if(use == '🖼️image_encoding from image'):\n",
638
+ " from google.colab.patches import cv2_imshow\n",
639
+ " cv2_imshow(image_A)\n",
640
+ "#-----#\n",
641
  "print('')\n",
642
  "sorted, indices = torch.sort(results_sim,dim=0 , descending=True)\n",
643
  "\n",
 
656
  "execution_count": null,
657
  "outputs": []
658
  },
659
+ {
660
+ "cell_type": "code",
661
+ "source": [],
662
+ "metadata": {
663
+ "id": "5XN2pM5NAfS5",
664
+ "outputId": "df4eefe6-12e7-416e-dc2d-b6df22a14d69",
665
+ "colab": {
666
+ "base_uri": "https://localhost:8080/",
667
+ "height": 321
668
+ }
669
+ },
670
+ "execution_count": 25,
671
+ "outputs": [
672
+ {
673
+ "output_type": "error",
674
+ "ename": "AttributeError",
675
+ "evalue": "clip",
676
+ "traceback": [
677
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
678
+ "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
679
+ "\u001b[0;32m<ipython-input-25-2eb0ffbc049b>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mif\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0muse\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'🖼️image_encoding from image'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mgoogle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolab\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpatches\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcv2_imshow\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mcv2_imshow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage_A\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
680
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/google/colab/patches/__init__.py\u001b[0m in \u001b[0;36mcv2_imshow\u001b[0;34m(a)\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mN\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mM\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m4\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0man\u001b[0m \u001b[0mNxM\u001b[0m \u001b[0mBGRA\u001b[0m \u001b[0mcolor\u001b[0m \u001b[0mimage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \"\"\"\n\u001b[0;32m---> 18\u001b[0;31m \u001b[0ma\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m255\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mastype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'uint8'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 19\u001b[0m \u001b[0;31m# cv2 stores colors as BGR; convert to RGB\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
681
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/PIL/Image.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 527\u001b[0m \u001b[0mdeprecate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Image categories\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"is_animated\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mplural\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 528\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_category\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 529\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mAttributeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 530\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 531\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
682
+ "\u001b[0;31mAttributeError\u001b[0m: clip"
683
+ ]
684
+ }
685
+ ]
686
+ },
687
  {
688
  "cell_type": "code",
689
  "source": [