import gradio as gr
from transformers import AutoTokenizer

# Custom RoBERTa classification head defined in nfqa_model.py
from nfqa_model import RobertaNFQAClassification

index_to_label = {0: 'NOT-A-QUESTION',
                  1: 'FACTOID',
                  2: 'DEBATE',
                  3: 'EVIDENCE-BASED',
                  4: 'INSTRUCTION',
                  5: 'REASON',
                  6: 'EXPERIENCE',
                  7: 'COMPARISON'}

# Load the fine-tuned NFQA model and a compatible RoBERTa tokenizer
model = RobertaNFQAClassification.from_pretrained("Lurunchik/nf-cats")
nfqa_tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")

def get_nfqa_prediction(text):
    # Tokenize the question, run the model, and return the highest-scoring label
    output = model(**nfqa_tokenizer(text, return_tensors="pt"))
    index = output.logits.argmax()
    return index_to_label[int(index)]

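# Illustrative check (the sample question below is an assumption, not from the
# original source): calling the function directly should return one of the
# eight category labels defined above, e.g.
#   print(get_nfqa_prediction("How do I reset a forgotten password?"))
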
# Expose the classifier as a simple text-in / text-out Gradio demo
iface = gr.Interface(fn=get_nfqa_prediction, inputs="text", outputs="text")
iface.launch()
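# Optional: iface.launch(share=True) additionally creates a temporary public
# URL via Gradio's share tunnel, which is handy when running on a remote host.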