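# Streamlit app: suggest arXiv subject tags for a paper from its title and
# summary, using embeddings from a pretrained bert2bert model and a small
# feed-forward classification head.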
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
import torch.nn as nn

class Net(nn.Module):
    """Feed-forward head mapping a 768-dim text embedding to 8 subject-area scores."""

    def __init__(self):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Linear(768, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 8),
        )

    def forward(self, x):
        return self.layer(x)

def get_hidden_states(encoded, model):
    """Pool an embedding for the encoded text: sum the last four decoder
    hidden-state layers per token, then average over tokens."""
    with torch.no_grad():
        output = model(decoder_input_ids=encoded['input_ids'], output_hidden_states=True, **encoded)

    # Sum the hidden states of the last four decoder layers for every token.
    layers = [-4, -3, -2, -1]
    states = output['decoder_hidden_states']
    token_states = torch.stack([states[i] for i in layers]).sum(0).squeeze()

    # Mean-pool over tokens to get a single 768-dim vector.
    return token_states.mean(dim=0)


def get_word_vector(sent, tokenizer, model):
    """Tokenize the text and return its pooled embedding."""
    encoded = tokenizer.encode_plus(sent, return_tensors="pt", truncation=True)
    return get_hidden_states(encoded, model)



labels_articles = {
    1: "Computer Science",
    2: "Economics",
    3: "Electrical Engineering And Systems Science",
    4: "Mathematics",
    5: "Physics",
    6: "Quantitative Biology",
    7: "Quantitative Finance",
    8: "Statistics",
}

@st.cache(allow_output_mutation=True)
def load_models():
    # Classification head; trained weights are loaded from the bundled file.
    model = Net()
    model.load_state_dict(torch.load('dummy_model.txt', map_location=torch.device('cpu')))
    model.eval()

    # Pretrained arXiv title-generation model, used here only as an embedder.
    tokenizer = AutoTokenizer.from_pretrained("Callidior/bert2bert-base-arxiv-titlegen")
    model_emb = AutoModelForSeq2SeqLM.from_pretrained("Callidior/bert2bert-base-arxiv-titlegen")
    return model, model_emb, tokenizer

model, model_emb, tokenizer = load_models()

title = st.text_area("Write the title of your article")
summary = st.text_area("Write the summary of your article, or leave it empty (press Ctrl+Enter to submit)")

text = title + '. ' + summary

# Embed the combined text and turn the classifier's raw outputs into probabilities.
embed = get_word_vector(text, tokenizer, model_emb)
probs = torch.nn.functional.softmax(model(embed), dim=0)

best_tags = torch.argsort(probs, descending=True)

st.write('best tags:')

# Show tags in order of decreasing probability until 95% of the
# probability mass is covered.
total_prob = 0.0
for tag in best_tags:
    if total_prob > 0.95:
        break
    total_prob += probs[tag.item()].item()
    st.write(labels_articles[tag.item() + 1])
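
# To try it locally (assuming this file is saved as app.py and the trained
# weights 'dummy_model.txt' sit in the working directory):
#   streamlit run app.py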