#!/usr/bin/env python

"""
    (T5 counterpart of "generate-dict-embeddingsXL.py".
"""

import sys

import torch
from safetensors.torch import save_file
from transformers import T5Tokenizer, T5EncoderModel

outputfile = "embeddingsT5.temp.safetensors"

processor = None
tmodel = None

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def initT5model():
    global processor, tmodel
    T = "mcmonkey/google_t5-v1_1-xxl_encoderonly"
    processor = T5Tokenizer.from_pretrained(T)
    tmodel = T5EncoderModel.from_pretrained(T).to(device)
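    # Sketch (assumption, not in the original): to roughly halve GPU memory,
    # the model could be loaded in reduced precision, e.g.
    #   tmodel = T5EncoderModel.from_pretrained(T, torch_dtype=torch.bfloat16).to(device)
    # provided downstream consumers of the embeddings can handle bf16.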


def embed_from_text(text):
    global processor, tmodel
    # print("Word:" + text)
    tokens = processor(text, return_tensors="pt")
    tokens = tokens.to(device)

    # input_ids has shape (1, seq_len); a single-token word tokenizes to
    # [word token, EOS], so anything longer than 2 means a multi-token word.
    if tokens.input_ids.shape[1] > 2:
        print("ERROR: expected single token per word")
        print(text)
        sys.exit(1)
        # We can only accept single-token words, because we want our output
        # to be a single embedding per word, and we don't have an official
        # way to merge multiple T5 embeddings into one, like CLIP does.
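        # (Sketch, not in the original: one unofficial workaround would be to
        # mean-pool the per-token embeddings, dropping the EOS token, e.g.
        #   outputs.last_hidden_state[0][:-1].mean(dim=0)
        # but this script deliberately avoids that.)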

    with torch.no_grad():
        outputs = tmodel(tokens.input_ids)

    # outputs.last_hidden_state has shape (1, 2, 4096):
    # (batch, [word token, EOS], hidden size). Keep only the word token's
    # embedding, which has shape (4096,).
    embedding = outputs.last_hidden_state[0][0]

    return embedding

initT5model()

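# The "dictionary" file is assumed to be plain text with one word per line,
# for example (illustrative contents, not from the original):
#   cat
#   dog
#   running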
print("Reading in 'dictionary'")
with open("dictionary","r") as f:
    tokendict = f.readlines()
    tokendict = [token.strip() for token in tokendict]  # Remove trailing newlines


count = 0
all_embeddings = []

for word in tokendict:
    emb = embed_from_text(word)
    emb = emb.unsqueeze(0)  # add a leading dim so torch.cat stacks rows into (N, 4096)
    all_embeddings.append(emb)
    count += 1
    if (count % 100) == 0:
        print(count)


embs = torch.cat(all_embeddings, dim=0)
print("Shape of result =", embs.shape)

if len(embs.shape) != 2:
    print("Sanity check failed: result has the wrong shape; it won't work")
    sys.exit(1)

print(f"Saving the calculations to {outputfile}...")
save_file({"embeddings": embs}, outputfile)
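
# Sketch (assumption, not part of the original script): the saved file can be
# read back later with safetensors, e.g.
#   from safetensors.torch import load_file
#   embs = load_file("embeddingsT5.temp.safetensors")["embeddings"]  # (N, 4096)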