Julian von der Goltz committed
Commit 7127151
Parent: 2ed2174

Regenerate with 90/10 split

data/train.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb8ac2472b2df57f694e4cf87c307134ecd0325d4ca73528f75f5061510f3200
-size 296246266
+oid sha256:aec27d46c728643de93ecbcf0a5f89ea7d0a1289306ee641664a89cc80f59ffa
+size 291253866
data/validation.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3401f7214750a3e00f7fbeba2f9e72d3eef6226cb3a1fa0ce78b9ffae543695a
-size 5450651
+oid sha256:d17ee0f23cadb817b96340ddb338a4ea9063c33fed7b98c1e9f9774dae4ea4ca
+size 10489039
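
The pointer files above only record the new objects' SHA-256 hash and byte size. After the LFS objects have been pulled locally (e.g. with git lfs pull), the downloaded parquet files can be checked against those values. A minimal sketch, assuming the files are materialized at the repository paths shown in the diff:

import hashlib
import os

# Expected values copied from the updated LFS pointer files above.
EXPECTED = {
    'data/train.parquet': ('aec27d46c728643de93ecbcf0a5f89ea7d0a1289306ee641664a89cc80f59ffa', 291253866),
    'data/validation.parquet': ('d17ee0f23cadb817b96340ddb338a4ea9063c33fed7b98c1e9f9774dae4ea4ca', 10489039),
}

for path, (oid, size) in EXPECTED.items():
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        # Hash in 1 MiB chunks to avoid loading the whole parquet into memory.
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    assert os.path.getsize(path) == size, f'{path}: size mismatch'
    assert digest.hexdigest() == oid, f'{path}: sha256 mismatch'
    print(f'{path}: OK')
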
src/create_nl_dataset.py CHANGED
@@ -81,13 +81,13 @@ if __name__ == '__main__':
     train_df = meta_df.iloc[:num_train]
     val_df = meta_df.iloc[num_train:]
 
-    # with open('tmp/train.jsonl', 'w') as train_file:
-    #     with open('tmp/val.jsonl', 'w') as val_file:
-    #         for item, split in tqdm(yield_file_contents('../origin/xml_pd.zip', train_df, val_df)):
-    #             if split == 'train':
-    #                 train_file.write('{}\n'.format(json.dumps(item)))
-    #             if split == 'validation':
-    #                 val_file.write('{}\n'.format(json.dumps(item)))
+    with open('tmp/train.jsonl', 'w') as train_file:
+        with open('tmp/val.jsonl', 'w') as val_file:
+            for item, split in tqdm(yield_file_contents('../origin/xml_pd.zip', train_df, val_df)):
+                if split == 'train':
+                    train_file.write('{}\n'.format(json.dumps(item)))
+                if split == 'validation':
+                    val_file.write('{}\n'.format(json.dumps(item)))
 
     datasets.Dataset.from_json('tmp/train.jsonl', split='train').to_parquet('../data/train.parquet')
     datasets.Dataset.from_json('tmp/val.jsonl', split='validation').to_parquet('../data/validation.parquet')
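
The hunk above only shows where num_train is consumed; the 90/10 split named in the commit message presumably comes from how num_train is derived earlier in the script, outside this diff. A hypothetical sketch of that split step (the variable names follow the diff, but the shuffle and the 0.9 fraction are assumptions, not taken from the actual script):

import pandas as pd

# Hypothetical: stand-in for the metadata frame the real script builds.
meta_df = pd.DataFrame({'file_id': range(1000)})

# Shuffle, then take the first 90% of rows as train and the rest as validation.
meta_df = meta_df.sample(frac=1.0, random_state=42).reset_index(drop=True)
num_train = int(len(meta_df) * 0.9)

train_df = meta_df.iloc[:num_train]   # first ~90% of rows
val_df = meta_df.iloc[num_train:]     # remaining ~10%

print(len(train_df), len(val_df))     # 900 100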