Commit 55a3716 by asahi417 (1 parent: 2ebb1e6)

Update process.py

Files changed (1):
  process.py  (+22 -19)
process.py CHANGED
@@ -6,35 +6,38 @@ from tqdm import tqdm
 from typing import Dict
 from datasets import load_dataset
 
-SKIP_FEATURE = ["sentence_answer", "paragraph_answer", "paragraph_sentence"]
+SEP_TOKEN = "\n"
 
 
-def process_single_data(data: Dict):
-    """ Convert single raw json data into QG format """
-    example = {'question': data["Question"], 'paragraph': data["Tweet"], "paragraph_id": data['qid'], 'answer': data['Answer'][0]}
-    for i in SKIP_FEATURE:
-        example[i] = None
-    return example
+
+def create_data(hf_data):
+    df = hf_data.to_pandas()
+    output = []
+    for tweet, g in df.groupby("Tweet"):
+        example = {
+            'paragraph': tweet.replace(SEP_TOKEN, " "),
+            "paragraph_id": '-'.join(g['qid']),
+            'questions': [_g.replace(SEP_TOKEN, " ") for _g in g['Question']],
+            'answers': [_g[0].replace(SEP_TOKEN, " ") for _g in g['Answer']],
+        }
+        example["questions_answers"] = SEP_TOKEN.join([f"Q: {q}, A: {a}" for q, a in zip(example["questions"], example["answers"])])
+        output.append(example)
+    return output
 
 
 if __name__ == '__main__':
     tweet_qa = load_dataset("tweet_qa")
-    data_dev = tweet_qa['validation']
+    data_valid = create_data(tweet_qa['validation'])
+    data_train = create_data(tweet_qa['train'])
     seed(1)
-    test_context_len = 900
-
-    # create test set from training
-    data_train = tweet_qa['train']
-    context = sorted(list(set(data_train['Tweet'])))
-    shuffle(context)
-    data_test = [data_train[i] for i in range(len(data_train)) if data_train[i]['Tweet'] in context[:test_context_len]]
-    data_train = [data_train[i] for i in range(len(data_train)) if data_train[i]['Tweet'] in context[test_context_len:]]
-    print(f'train ({len(data_train)}, test ({len(data_test)}), dev ({len(data_dev)})')
-    data_all = {'train': data_train, 'validation': data_dev, 'test': data_test}
+    test_len = len(data_valid)
+    shuffle(data_train)
+    data_test = data_train[:test_len]
+    data_train = data_train[test_len:]
+    data_all = {'train': data_train, 'validation': data_valid, 'test': data_test}
     output = './data/processed'
     os.makedirs(output, exist_ok=True)
     for k, _data in data_all.items():
         with open('{}/{}.jsonl'.format(output, k), 'w') as f:
             for single_data in tqdm(_data):
-                single_data = process_single_data(single_data)
                 f.write(json.dumps(single_data) + '\n')
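For context, a minimal sketch of the record shape the new create_data produces and of how its questions_answers field is assembled: all question/answer pairs that share the same Tweet are grouped into a single entry. The tweet text, qids, questions, and answers below are invented for illustration and are not taken from TweetQA.

import json

# Hypothetical record in the shape the updated create_data() emits;
# every value here is a made-up placeholder.
record = {
    "paragraph": "example tweet used as the shared context",
    "paragraph_id": "qid-1-qid-2",
    "questions": ["what is the tweet about?", "who is mentioned?"],
    "answers": ["an example", "nobody"],
}

# questions_answers joins each Q/A pair with the separator token ("\n"),
# mirroring the SEP_TOKEN.join(...) line in the new function.
record["questions_answers"] = "\n".join(
    f"Q: {q}, A: {a}" for q, a in zip(record["questions"], record["answers"])
)

# process.py writes one such JSON object per line (JSONL); round-trip one line.
line = json.dumps(record)
print(json.loads(line)["questions_answers"])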