from fastT5 import get_onnx_model
import gradio as gr
from transformers import AutoTokenizer

# Load the Macaw tokenizer and the quantized ONNX export of the model.
# get_onnx_model expects the exported ONNX files to already exist under "quantized_model".
tokenizer = AutoTokenizer.from_pretrained('allenai/macaw-large')
model = get_onnx_model('allenai/macaw-large', "quantized_model")

def infer(context, question, options=None):
    # Macaw uses a slot-based prompt: request $answer$, supply $context$ and $question$.
    input_string = "$answer$ ; $context$ = " + context + " ; $question$ = " + question
    input_ids = tokenizer.encode(input_string, return_tensors="pt")
    output = model.generate(input_ids, max_length=200)
    responses = tokenizer.batch_decode(output, skip_special_tokens=True)
    # The model answers in the form "$answer$ = <text>"; strip the slot prefix.
    return responses[0].split(";")[0].split("=")[1].strip()

def greet(context, question):
    return infer(context, question)

# Gradio UI: two text inputs (context, question) and a single text output.
examples = [['', 'What is the color of a cloudy sky?']]
iface = gr.Interface(fn=greet, inputs=["text", "text"], outputs="text", examples=examples)
iface.launch()
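
# Note (assumption): the "quantized_model" directory is expected to already contain the
# ONNX export of allenai/macaw-large. A minimal one-off sketch to produce it, assuming
# fastT5's export_and_get_onnx_model helper (the custom_output_path parameter name is
# taken from fastT5's documentation and may differ across versions):
#
#     from fastT5 import export_and_get_onnx_model
#     export_and_get_onnx_model('allenai/macaw-large', custom_output_path="quantized_model")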