hyuan5040 committed on
Commit
1847228
1 Parent(s): 64bb5ac
Files changed (1)
  1. app.py +10 -11
app.py CHANGED
@@ -1,16 +1,15 @@
- from transformers import pipeline
- question_answerer = pipeline("question-answering", model='distilbert-base-cased-distilled-squad')

- context = r"""
- Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a
- question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune
- a model on a SQuAD task, you may leverage the examples/pytorch/question-answering/run_squad.py script.
- """

- result = question_answerer(question="What is a good example of a question answering dataset?", context=context)
- print(
-     f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}"
- )
+ from transformers import DistilBertTokenizer, DistilBertModel
+ import torch
+ tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased-distilled-squad')
+ model = DistilBertModel.from_pretrained('distilbert-base-cased-distilled-squad')

+ question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"

+ inputs = tokenizer(question, text, return_tensors="pt")
+ with torch.no_grad():
+     outputs = model(**inputs)
+
+ print(outputs)

  import gradio as gr
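Note (not part of the commit itself): the added code loads the bare DistilBertModel, so outputs above holds only encoder hidden states rather than an answer span. A minimal sketch of how the same SQuAD-tuned checkpoint could still answer the question, assuming extractive QA is still the goal; it uses DistilBertForQuestionAnswering from transformers, and the variable names here are illustrative:

import torch
from transformers import DistilBertTokenizer, DistilBertForQuestionAnswering

# Same checkpoint as in the commit, but with its question-answering head attached.
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased-distilled-squad')
qa_model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-cased-distilled-squad')

question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
inputs = tokenizer(question, text, return_tensors="pt")

with torch.no_grad():
    qa_outputs = qa_model(**inputs)

# Take the most likely start/end token positions and decode them back to text.
start = int(qa_outputs.start_logits.argmax())
end = int(qa_outputs.end_logits.argmax())
answer = tokenizer.decode(inputs["input_ids"][0, start:end + 1])
print(answer)  # expected for this example: "a nice puppet"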