pedramyazdipoor
committed on
Commit
•
ae46da0
1
Parent(s):
870225c
Update README.md
Browse files
README.md
CHANGED
@@ -110,7 +110,7 @@ encoding = tokenizer(text,question,add_special_tokens = True,
|
|
110 |
out = model(encoding['input_ids'].to(device),encoding['attention_mask'].to(device), encoding['token_type_ids'].to(device))
|
111 |
#we had to change some pieces of code to make it compatible with one answer generation at a time
|
112 |
#If you have unanswerable questions, use out['start_logits'][0][0:] and out['end_logits'][0][0:] because <s> (the 1st token) is for this situation and must be compared with other tokens.
|
113 |
-
#you can initialize max_index in generate_indexes() to put force on tokens being chosen to be within the context(
|
114 |
answer_start_index, answer_end_index = generate_indexes(out['start_logits'][0][1:], out['end_logits'][0][1:], 5, 0)
|
115 |
print(tokenizer.tokenize(text + question))
|
116 |
print(tokenizer.tokenize(text + question)[answer_start_index : (answer_end_index + 1)])
|
|
|
110 |
out = model(encoding['input_ids'].to(device),encoding['attention_mask'].to(device), encoding['token_type_ids'].to(device))
|
111 |
#we had to change some pieces of code to make it compatible with one answer generation at a time
|
112 |
#If you have unanswerable questions, use out['start_logits'][0][0:] and out['end_logits'][0][0:] because <s> (the 1st token) is for this situation and must be compared with other tokens.
|
113 |
+
#you can initialize max_index in generate_indexes() to put force on tokens being chosen to be within the context(end index must be less than separator token).
|
114 |
answer_start_index, answer_end_index = generate_indexes(out['start_logits'][0][1:], out['end_logits'][0][1:], 5, 0)
|
115 |
print(tokenizer.tokenize(text + question))
|
116 |
print(tokenizer.tokenize(text + question)[answer_start_index : (answer_end_index + 1)])
|