ppbrown committed
Commit 5695663
1 Parent(s): 0067ebd

Upload 2 files

openclip/calculate-distances-open.py CHANGED
@@ -11,30 +11,38 @@ Plan:
 
 
 import sys
-import torch
-import open_clip
 
-from safetensors import safe_open
+if len(sys.argv) <3:
+    print("Need embedding file and a dictionary")
+    print("embedding filename must start with (mtype@stringname). ")
+    exit(1)
+
+embed_file=sys.argv[1]
+dictionary=sys.argv[2]
 
-#from transformers import CLIPProcessor,CLIPModel
+dot_index = embed_file.find(".")
+mstring=embed_file[:dot_index]
 
-device=torch.device("cuda")
+at_index = mstring.find("@")
 
-mtype='ViT-B-32'
-mname='laion2b_s34b_b79k'
+mtype=mstring[:at_index]
+mname=mstring[at_index+1:]
 
 print("Loading",mtype,mname)
 
+import torch
+import open_clip
+from safetensors import safe_open
+
+
 cmodel, _, preprocess = open_clip.create_model_and_transforms(mtype,
     pretrained=mname)
 tokenizer = open_clip.get_tokenizer(mtype)
 
-## model = model.to(device)
 
 
-#embed_file="embeddings.safetensors"
-embed_file=sys.argv[1]
-dictionary=sys.argv[2]
+device=torch.device("cuda")
+## cmodel.to(device)
 
 
 print(f"read in words from {dictionary} now",file=sys.stderr)
openclip/generate-embeddings-open.py CHANGED
@@ -14,7 +14,6 @@ import torch
 import open_clip
 from safetensors.torch import save_file
 
-outfile="out.safetensors"
 
 
 """
@@ -22,14 +21,12 @@ outfile="out.safetensors"
 You MUST use the same settings when you READ from the output file as well!!
 """
 
-#mtype='ViT-B-32'
-#mname='laion2b_s34b_b79k'
-#mtype='ViT-g-14'
-#mname='laion2b_s12b_b42k'
-#mtype='ViT-H-14'
-#mname='laion2b_s32b_b79k'
+# See "list_models.txt" for full combination sets
+
+#mtype='ViT-L-14-336'
 mtype='ViT-L-14'
-mname='laion2b_s32b_b82k'
+mname='openai'
+
 #### Warning, this requires more than 4GB vram
 #mtype='ViT-H-14-quickgelu'
 #mname='dfn5b'
@@ -38,6 +35,9 @@ mname='laion2b_s32b_b82k'
 # hf-hub:hf-internal-testing/tiny-open-clip-model'
 # for mname
 
+outfile=f"{mtype}-{mname}.safetensors"
+print("Will save to:")
+print("  ",outfile)
 print("Loading",mtype,mname)
 
 cmodel, _, preprocess = open_clip.create_model_and_transforms(
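
Since the generator now auto-names its output after the model pair, reading the file back might look like the minimal sketch below. It uses only the documented safetensors safe_open API; the tensor keys depend on what the script passes to save_file(), so none are assumed here. Note that calculate-distances-open.py expects an "@" between mtype and mname in the filename, while this outfile uses "-", so a rename may be needed in between.

from safetensors import safe_open

# Name produced by the new outfile logic above (mtype='ViT-L-14', mname='openai')
outfile = "ViT-L-14-openai.safetensors"

with safe_open(outfile, framework="pt", device="cpu") as f:
    for key in f.keys():                 # whatever keys the generator saved
        tensor = f.get_tensor(key)
        print(key, tuple(tensor.shape))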