import streamlit as st
from utils import get_roberta, get_gpt, get_distilbert
import torch
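# get_roberta(), get_distilbert() and get_gpt() are loader helpers defined in
# utils.py; each is expected to return a (tokenizer, model) pair for a 3-way
# NLI classification head. utils.py is not shown here; a minimal sketch of what
# one such loader might look like (checkpoint name is a placeholder, not the
# actual utils.py implementation):
#
#   from transformers import AutoTokenizer, AutoModelForSequenceClassification
#
#   @st.cache_resource
#   def get_roberta():
#       tokenizer = AutoTokenizer.from_pretrained("<roberta-nli-checkpoint>")
#       model = AutoModelForSequenceClassification.from_pretrained(
#           "<roberta-nli-checkpoint>")
#       model.eval()
#       return tokenizer, model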




st.title('Sentence Entailment')
col1, col2 = st.columns([1,1])

with col1:
    sentence1 = st.text_input('Premise')

with col2:
    sentence2 = st.text_input('Hypothesis')

btn = st.button("Submit")

# Class id -> NLI label mapping shared by all three models
label_dict = {
    0: 'entailment',
    1: 'neutral',
    2: 'contradiction'
}

if btn:
    # RoBERTa: encode the premise/hypothesis pair and pick the highest-scoring label
    roberta_tokenizer, roberta_model = get_roberta()
    roberta_input = roberta_tokenizer(
        sentence1,
        sentence2,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=512
    )
    with torch.no_grad():
        roberta_logits = roberta_model(**roberta_input)['logits']
    st.write('RoBERTa', label_dict[roberta_logits.argmax().item()])

    # DistilBERT: same premise/hypothesis encoding, separate fine-tuned model
    distilbert_tokenizer, distilbert_model = get_distilbert()
    distilbert_input = distilbert_tokenizer(
        sentence1,
        sentence2,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=512
    )
    with torch.no_grad():
        distilbert_logits = distilbert_model(**distilbert_input)['logits']
    st.write('DistilBERT', label_dict[distilbert_logits.argmax().item()])

    # GPT: the pair is fed as a single ' [SEP] '-joined string, presumably matching
    # how the model was fine-tuned (a pad token is assumed to be set up in get_gpt)
    gpt_tokenizer, gpt_model = get_gpt()
    gpt_input = gpt_tokenizer(
        sentence1 + ' [SEP] ' + sentence2,
        truncation=True,
        padding='max_length',
        max_length=512,
        return_tensors='pt'
    )
    with torch.no_grad():
        gpt_logits = gpt_model(**gpt_input)['logits']
    st.write('GPT', label_dict[gpt_logits.argmax().item()])
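
# To try the app locally (assuming this file is saved as app.py and the models
# loaded by utils.py are available): streamlit run app.py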