erdometo committed on
Commit 00f95c8
1 Parent(s): 52fc7b7

Create app.py

Files changed (1)
  1. app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
+ # Dependencies: transformers datasets evaluate seqeval gradio typing-extensions
+ # (a `!pip install` line is only valid in a notebook; on a Space, list these in
+ # requirements.txt instead. "pipeline" is part of transformers, not a separate package.)
+ import datasets
+ from transformers import pipeline
+ from transformers.pipelines.pt_utils import KeyDataset
+ from tqdm.auto import tqdm
+
+ pipe = pipeline("token-classification", model="erdometo/xlm-roberta-base-finetuned-TQuad2")
+ # Smoke-test the pipeline on a small text dataset. The original "superb"/"asr"
+ # split yields audio file paths, which a token-classification pipeline cannot
+ # consume, so the "imdb" text column is used here as a stand-in.
+ dataset = datasets.load_dataset("imdb", split="test[:10]")
+
+ for out in tqdm(pipe(KeyDataset(dataset, "text"))):
+     print(out)
+
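+ # KeyDataset streams a single dataset column into the pipeline, so the loop
+ # above yields one list of token predictions per example.
+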
+ import gradio as gr
+ from transformers import AutoModelForQuestionAnswering, AutoTokenizer, AutoModelForTokenClassification
+
+ # Load the custom models and tokenizers
+ qa_model_name = "erdometo/xlm-roberta-base-finetuned-TQuad2"
+ token_classification_model_name = "FacebookAI/xlm-roberta-large-finetuned-conll03-german"
+
+ qa_model = AutoModelForQuestionAnswering.from_pretrained(qa_model_name)
+ qa_tokenizer = AutoTokenizer.from_pretrained(qa_model_name)
+
+ token_classification_model = AutoModelForTokenClassification.from_pretrained(token_classification_model_name)
+ token_classification_tokenizer = AutoTokenizer.from_pretrained(token_classification_model_name)
+
+ # Build both pipelines once so they are not re-created on every request
+ qa_pipeline = pipeline("question-answering", model=qa_model, tokenizer=qa_tokenizer)
+ token_classification_pipeline = pipeline("token-classification", model=token_classification_model, tokenizer=token_classification_tokenizer)
+
+ # Run inference with whichever pipeline was selected in the dropdown
+ def predict(pipeline_type, question, context):
+     if pipeline_type == "question-answering":
+         result = qa_pipeline(question=question, context=context)
+         # HighlightedText accepts a list of (text, label) tuples
+         return [(result["answer"], result["score"])]
+     elif pipeline_type == "token-classification":
+         result = token_classification_pipeline(context)
+         # ...or a dict mapping the raw text to the pipeline's entity spans
+         return {"text": context, "entities": result}
+
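+ # For reference, each token-classification entry is a dict of the form
+ # {"entity": ..., "score": ..., "index": ..., "word": ..., "start": ..., "end": ...};
+ # gr.HighlightedText reads the "entity", "start" and "end" keys to highlight spans.
+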
+ # Create a Gradio Interface with a pipeline dropdown and two text inputs
+ iface = gr.Interface(
+     fn=predict,
+     inputs=[
+         gr.Dropdown(choices=["question-answering", "token-classification"], label="Choose Pipeline"),
+         gr.Textbox(label="Question (used only for question-answering)"),
+         gr.Textbox(label="Context"),
+     ],
+     outputs=gr.HighlightedText(label="Result"),
+ )
+
+ # Launch the interface
+ iface.launch()
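
Note: since a `!pip install` line only works in a notebook, a Space running this app.py would normally declare the same dependencies in a requirements.txt next to it. A minimal sketch, assuming unpinned versions suffice:

    transformers
    datasets
    evaluate
    seqeval
    gradio
    typing-extensions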