Datasets:
Tasks:
Text Generation
Sub-tasks:
language-modeling
Languages:
English
Multilinguality:
monolingual
Size Categories:
1K<n<10K
Source Datasets:
tweet_qa
ArXiv:
License:
Update process.py
Browse files - process.py +2 -2
process.py
CHANGED
@@ -7,7 +7,7 @@ from typing import Dict
|
|
7 |
from datasets import load_dataset
|
8 |
|
9 |
|
10 |
-
SEP_TOKEN = "
|
11 |
|
12 |
|
13 |
def create_data(hf_data):
|
@@ -20,7 +20,7 @@ def create_data(hf_data):
|
|
20 |
'questions': [_g.replace(SEP_TOKEN, " ") for _g in g['Question']],
|
21 |
'answers': [_g[0].replace(SEP_TOKEN, " ") for _g in g['Answer']],
|
22 |
}
|
23 |
-
example["questions_answers"] = SEP_TOKEN.join([f"
|
24 |
output.append(example)
|
25 |
return output
|
26 |
|
|
|
7 |
from datasets import load_dataset
|
8 |
|
9 |
|
10 |
+
SEP_TOKEN = " | "
|
11 |
|
12 |
|
13 |
def create_data(hf_data):
|
|
|
20 |
'questions': [_g.replace(SEP_TOKEN, " ") for _g in g['Question']],
|
21 |
'answers': [_g[0].replace(SEP_TOKEN, " ") for _g in g['Answer']],
|
22 |
}
|
23 |
+
example["questions_answers"] = SEP_TOKEN.join([f"question: {q}, answer: {a}" for q, a in zip(example["questions"], example["answers"])])
|
24 |
output.append(example)
|
25 |
return output
|
26 |
|