Datasets:
lmqg
/

Languages:
English
Multilinguality:
monolingual
Size Categories:
1K<n<10K
Source Datasets:
tweet_qa
ArXiv:
Tags:
question-generation
License:
asahi417 committed on
Commit
fd9d15c
1 Parent(s): 4989308

Update process.py

Browse files
Files changed (1) hide show
  1. process.py +2 -2
process.py CHANGED
@@ -7,7 +7,7 @@ from typing import Dict
7
  from datasets import load_dataset
8
 
9
 
10
- SEP_TOKEN = "\n"
11
 
12
 
13
  def create_data(hf_data):
@@ -20,7 +20,7 @@ def create_data(hf_data):
20
  'questions': [_g.replace(SEP_TOKEN, " ") for _g in g['Question']],
21
  'answers': [_g[0].replace(SEP_TOKEN, " ") for _g in g['Answer']],
22
  }
23
- example["questions_answers"] = SEP_TOKEN.join([f"Q: {q}, A: {a}" for q, a in zip(example["questions"], example["answers"])])
24
  output.append(example)
25
  return output
26
 
 
7
  from datasets import load_dataset
8
 
9
 
10
+ SEP_TOKEN = " | "
11
 
12
 
13
  def create_data(hf_data):
 
20
  'questions': [_g.replace(SEP_TOKEN, " ") for _g in g['Question']],
21
  'answers': [_g[0].replace(SEP_TOKEN, " ") for _g in g['Answer']],
22
  }
23
+ example["questions_answers"] = SEP_TOKEN.join([f"question: {q}, answer: {a}" for q, a in zip(example["questions"], example["answers"])])
24
  output.append(example)
25
  return output
26