File size: 1,270 Bytes
de06228
 
 
 
 
 
 
 
 
 
 
12ed8ed
de06228
 
 
 
 
 
 
 
 
 
 
 
 
86f1f69
 
 
 
e5a2fe5
 
 
86f1f69
 
 
909a65f
86f1f69
 
 
 
c279e3a
de06228
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
import torch
import gradio as grad
import ast

# Module-level setup: load a Turkish extractive-QA model fine-tuned on SQuAD
# from the Hugging Face Hub. This downloads weights on first run and is
# executed once at import time; `_pipeline` is reused by answer_question().
_pretrainedModelName = "savasy/bert-base-turkish-squad"
_tokenizer = AutoTokenizer.from_pretrained(_pretrainedModelName)
_model = AutoModelForQuestionAnswering.from_pretrained(_pretrainedModelName)
# "question-answering" pipelines accept `question`/`context` inputs and
# return a dict with keys like "answer", "score", "start", "end".
_pipeline = pipeline("question-answering", model = _model, tokenizer = _tokenizer)


def answer_question(question, context):
    """Answer *question* from *context* using the Turkish-SQuAD QA pipeline.

    Args:
        question: The question text (the model expects Turkish).
        context: The passage in which to search for the answer.

    Returns:
        The extracted answer string (the pipeline's "answer" field), or
        None if the pipeline response has no such key.
    """
    # Pass the inputs as keyword arguments instead of concatenating them
    # into a Python-literal string and round-tripping through
    # ast.literal_eval: the old approach raised SyntaxError (or misparsed)
    # whenever the text contained a quote/apostrophe — common in Turkish
    # prose — and was needlessly fragile.
    response = _pipeline(question=question, context=context)
    return response.get("answer")


grad.Interface(answer_question, inputs=["text", "text"], outputs=["text"]).launch()





# NOTE(review): the triple-quoted block below is a dead earlier prototype
# (same app driven directly by a model name, returning the raw pipeline
# dict). It is a no-op string expression at runtime and kept only for
# reference — consider deleting it or moving it to version control history.
'''
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as grad
import ast

#_model = "deepset/roberta-base-squad2"
_model = "savasy/bert-base-turkish-squad"

_pipeline = pipeline("question-answering", model = _model, tokenizer = _model)

def answer_question(question, context):
    text = "{" + "'question': '"+question+"', 'context':'"+context+"'}"
    di = ast.literal_eval(text)
    response = _pipeline(di)
    return response

grad.Interface(answer_question, inputs=["text", "text"], outputs="text").launch()
'''