from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
import torch
from torch.nn import functional as F
import numpy as np
import json
# Load the label-to-id mapping and build the reverse (id-to-label) lookup.
with open('data/categories_refined.json', 'r') as f:
    label2id = json.load(f)
id2label = {idx: label for label, idx in label2id.items()}
model_name= "intent_classification_model/checkpoint-1216"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to("cuda")
def logit2prob(logit):
    '''
    Convert raw logits to independent per-class probabilities with the
    sigmoid function: prob = 1 / (1 + exp(-logit)).
    '''
    prob = 1 / (1 + np.exp(-logit))
    return np.round(prob, 3)
def get_top_intent(keyword: str):
    '''
    Return (label, probability) pairs for every intent, sorted by
    probability in descending order.
    '''
    inputs = tokenizer(keyword, return_tensors="pt").to("cuda")
    with torch.no_grad():
        logits = model(**inputs).logits
    # Sigmoid per class; use F.softmax(logits.cpu(), dim=-1).numpy()[0]
    # instead if the scores should sum to 1 across intents.
    individual_probabilities_scores = logit2prob(logits.cpu().numpy()[0])
    score_list = []
    for i in range(model.config.num_labels):
        label = model.config.id2label[i]
        score = individual_probabilities_scores[i]
        # Optionally filter here, e.g. keep only intents with score >= 0.5.
        score_list.append((label, score))
    # Highest-scoring intent first.
    score_list.sort(key=lambda x: x[1], reverse=True)
    return score_list
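

# Illustrative usage sketch (assumes the checkpoint and label file above are
# present and a CUDA device is available); the example keyword is hypothetical.
if __name__ == "__main__":
    scores = get_top_intent("buy running shoes online")
    for label, prob in scores:
        print(f"{label}: {prob:.3f}")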