codeShare committed (verified) · commit 827c2f9 · 1 parent: aa5cf88

Upload fusion_t2i_CLIP_interrogator.ipynb

Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb CHANGED
@@ -130,7 +130,7 @@
  "height": 889
  }
  },
- "execution_count": 22,
+ "execution_count": null,
  "outputs": [
  {
  "output_type": "stream",
@@ -225,7 +225,7 @@
  "source": [
  "# @title ⚄ Set range\n",
  "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
- "START_AT = 2000 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
+ "START_AT = 0 # @param {type:'number' , placeholder:'set how large the list should be'}\n",
  "# @markdown -----\n",
  "# @markdown Select vocab\n",
  "general = True # @param {type:\"boolean\"}\n",
@@ -323,9 +323,19 @@
  " for key,value in data:\n",
  " prompts[key] = value\n",
  " num_items = int(prompts['num_items'])\n",
+ "\n",
  " #------#\n",
  " try:vocab_loaded\n",
- " except:vocab_loaded = ''\n",
+ " except:\n",
+ " vocab_loaded = 'general'\n",
+ " _text_encodings = load_file(f'{root_filename}.safetensors')['weights'].to(torch.uint8)\n",
+ " text_encodings = torch.zeros(num_items , dim)\n",
+ " tmp = torch.ones(dim).to(dot_dtype)\n",
+ " for index in range(num_items):\n",
+ " text_encodings[index] = torch.sub(_text_encodings[index][1:dim+1].to(dot_dtype) , tmp , alpha= _text_encodings[index][0].to(dot_dtype))\n",
+ " vocab_loaded = vocab_to_load\n",
+ " #-----#\n",
+ "\n",
  " if vocab_loaded != vocab_to_load and not multi:\n",
  " %cd {encodings_folder}\n",
  " _text_encodings = load_file(f'{root_filename}.safetensors')['weights'].to(torch.uint8)\n",
@@ -337,12 +347,12 @@
  " #------#\n",
  " sims = torch.matmul(text_encodings*scale, ref.t())\n",
  " sorted , indices = torch.sort(sims , dim=0 , descending = True)\n",
- "\n",
  " #-----#\n",
  " for index in range(LIST_SIZE + START_AT):\n",
  " if index<START_AT: continue\n",
  " key = indices[index].item()\n",
- " prompt = prompts[f'{key}']\n",
+ " try:prompt = prompts[f'{key}']\n",
+ " except:continue\n",
  " if(isBlacklisted(prompt)):continue\n",
  " #-------#\n",
  " similiar_sims[_index] = torch.tensor(round(sims[key].item(), 5))\n",
@@ -383,11 +393,11 @@
  " for index in range(LIST_SIZE):\n",
  " key = indices[index].item()\n",
  " sim = similiar_sims[key].item()\n",
- " prompt = prompt + similiar_prompts[f'{key}'] + '|'\n",
+ " prompt = prompt + fix_bad_symbols(similiar_prompts[f'{key}']) + '|'\n",
  " #-----#\n",
  " prompt = (prompt + '}').replace('|}', '} ')\n",
  " #------#\n",
- " print(f'\\n\\n{prompt}\\n\\n')\n",
+ " print(f'\\n\\n {prompt} \\n\\n')\n",
  "#-----#\n"
  ],
  "metadata": {
@@ -397,7 +407,7 @@
  "base_uri": "https://localhost:8080/"
  }
  },
- "execution_count": 31,
+ "execution_count": null,
  "outputs": [
  {
  "output_type": "stream",
 
 
 
 