yingshaoxo committed
Commit 200b5f9 • 1 Parent(s): 5320b62
Update README.md

Add a more detailed Python example.
README.md CHANGED
@@ -53,11 +53,34 @@ Here is how to import this model in Python:
 <summary> Click to expand </summary>
 
 ```python
+import torch
 from transformers import AutoTokenizer, AutoModelForQuestionAnswering
 
 tokenizer = AutoTokenizer.from_pretrained("Intel/dynamic_tinybert")
-
 model = AutoModelForQuestionAnswering.from_pretrained("Intel/dynamic_tinybert")
+
+context = "remember the number 123456, I'll ask you later."
+question = "What is the number I told you?"
+
+# Tokenize the context and question
+tokens = tokenizer.encode_plus(question, context, return_tensors="pt", truncation=True)
+
+# Get the input IDs and attention mask
+input_ids = tokens["input_ids"]
+attention_mask = tokens["attention_mask"]
+
+# Perform question answering
+outputs = model(input_ids, attention_mask=attention_mask)
+start_scores = outputs.start_logits
+end_scores = outputs.end_logits
+
+# Find the start and end positions of the answer
+answer_start = torch.argmax(start_scores)
+answer_end = torch.argmax(end_scores) + 1
+answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[0][answer_start:answer_end]))
+
+# Print the answer
+print("Answer:", answer)
 ```
 </details>
 
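For reference, the same question-answering flow can be written more compactly with the `transformers` pipeline API. The sketch below is not part of the commit above; it assumes `Intel/dynamic_tinybert` works with the standard `question-answering` pipeline and reuses the context and question from the diff.

```python
from transformers import pipeline

# Sketch only (not from the commit): run the same example through the
# high-level question-answering pipeline, assuming Intel/dynamic_tinybert
# is compatible with it.
qa = pipeline(
    "question-answering",
    model="Intel/dynamic_tinybert",
    tokenizer="Intel/dynamic_tinybert",
)

result = qa(
    question="What is the number I told you?",
    context="remember the number 123456, I'll ask you later.",
)

# The pipeline returns a dict with "answer", "score", "start", and "end".
print("Answer:", result["answer"])
```

The pipeline handles tokenization, the forward pass, and span decoding internally, so it is a shorter way to get the same result as the manual `argmax` over the start and end logits in the committed example.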