Datasets:
GEM

Languages:
Italian
Multilinguality:
unknown
Size Categories:
unknown
Language Creators:
unknown
Annotations Creators:
crowd-sourced
Source Datasets:
original
Tags:
License:
Sebastien Montella committed on
Commit
dd3cf43
1 Parent(s): 1a985a4

fix challenge transformation error

dataset_infos.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5106e12ea63eca19d85916c4f1db1f77695f281168355d147325338a6aa02d74
+oid sha256:715bfe00a48addd4d3f2b98bc45c018b1e9a49019de8fe9506bef514f506596a
 size 8821
train_val_test_split.py CHANGED
@@ -233,18 +233,21 @@ def challenge_seen_unseen_transformation_split(pairs_by_transformation, training_ratio):
     """
 
     # TODO transformations are hard-coded for now --> add argument in parser to specify them.
-    seen_transformations_ids = [1, 2, 3, 11, 13, 23, 31, 32, 33, 34, 37]
+    seen_transformations_ids = [1, 2, 3, 11, 13, 21, 23, 31, 33, 34, 37]
     unseen_transformations_ids = [12, 22, 32, 35, 36]
 
     train = []
     val = []
     seen_transformations_test = []
     unseen_transformations_test = []
-
+    total_samples = 0
+    for transf_id in unseen_transformations_ids:
+        total_samples += len(pairs_by_transformation[transf_id])
+
     for transf_id in seen_transformations_ids:
         curr_len = len(pairs_by_transformation[transf_id])
         train_limit = int(curr_len * training_ratio)
-        val_limit = train_limit + int(curr_len * (1-training_ratio)/2.0)
+        val_limit = int(curr_len * (training_ratio + (1-training_ratio)/2.0))
 
         train += pairs_by_transformation[transf_id][:train_limit]
         val += pairs_by_transformation[transf_id][train_limit:val_limit]
@@ -254,7 +257,7 @@ def challenge_seen_unseen_transformation_split(pairs_by_transformation, training_ratio):
     for transf_id in unseen_transformations_ids:
         unseen_transformations_test += pairs_by_transformation[transf_id]
 
-
+
     return train, val, seen_transformations_test, unseen_transformations_test
 
 
@@ -315,6 +318,10 @@ def split(args):
 
     transformations_types = get_types(xml)
     pairs, pairs_by_transformation, pairs_by_source_dataset = get_pairs(xml, transformations_types)
+
+    total_samples = 0
+    for transf_id in pairs_by_transformation.keys():
+        total_samples += len(pairs_by_transformation[transf_id])
 
     if args.split_criteria == 'random':
         train, val, test = random_split(pairs, args.training_ratio)
@@ -325,7 +332,6 @@ def split(args):
 
     elif args.split_criteria == 'transformations':
         seen_transformations_train, seen_transformations_val, seen_transformations_test, unseen_transformations_test = challenge_seen_unseen_transformation_split(pairs_by_transformation, args.training_ratio)
-
         os.makedirs(f'{args.out_dir}/{version}/transformations_split/', exist_ok=True)
         save_jsonl(seen_transformations_train, f'{args.out_dir}/{version}/transformations_split/train.jsonl')
         save_jsonl(seen_transformations_val, f'{args.out_dir}/{version}/transformations_split/val.jsonl')
@@ -351,7 +357,7 @@ if __name__ == '__main__':
     parser.add_argument('--data_path', type=str, required=True, help='path to (single) data file')
    parser.add_argument('--out_dir', type=str, required=True, help='output dir to store files')
 
-    parser.add_argument('--training_ratio', type=float, required=True, help='training ratio (e.g. 0.8). Remaining will be divided for val and test EQUALLY.')
+    parser.add_argument('--training_ratio', type=float, required=True, help='training ratio (e.g. 0.7). Remaining will be divided for val and test EQUALLY.')
 
     parser.add_argument('--split_criteria', type=str, required=True, choices=['random', 'transformations', 'source_dataset'], help='split criteria')
 
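For readers following the fix: the corrected challenge split keeps the first training_ratio share of each seen transformation for train and divides the remainder evenly between val and the seen-transformations test set, while every pair from an unseen transformation goes to its own test file. The lines below are a minimal, illustrative sketch of that slicing, not code from this repository; the 100-element list and the 0.7 ratio are made-up stand-ins for pairs_by_transformation[transf_id] and args.training_ratio.

    # Illustrative sketch only: stand-in data for one seen transformation id.
    training_ratio = 0.7
    pairs = list(range(100))  # stand-in for pairs_by_transformation[transf_id]

    curr_len = len(pairs)
    train_limit = int(curr_len * training_ratio)                               # 70
    val_limit = int(curr_len * (training_ratio + (1 - training_ratio) / 2.0))  # 85

    train = pairs[:train_limit]          # first 70% of the pairs -> train.jsonl
    val = pairs[train_limit:val_limit]   # next 15%               -> val.jsonl
    seen_test = pairs[val_limit:]        # remaining 15%          -> seen_transformations_test.jsonl

With the updated help text, a typical invocation (paths hypothetical) would be: python train_val_test_split.py --data_path data.xml --out_dir splits --training_ratio 0.7 --split_criteria transformations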
v1/transformations_split/seen_transformations_test.jsonl CHANGED
The diff for this file is too large to render. See raw diff
v1/transformations_split/train.jsonl CHANGED
The diff for this file is too large to render. See raw diff
v1/transformations_split/unseen_transformations_test.jsonl CHANGED
The diff for this file is too large to render. See raw diff
v1/transformations_split/val.jsonl CHANGED
The diff for this file is too large to render. See raw diff
v2/transformations_split/seen_transformations_test.jsonl CHANGED
The diff for this file is too large to render. See raw diff
v2/transformations_split/train.jsonl CHANGED
The diff for this file is too large to render. See raw diff
v2/transformations_split/unseen_transformations_test.jsonl CHANGED
The diff for this file is too large to render. See raw diff
v2/transformations_split/val.jsonl CHANGED
The diff for this file is too large to render. See raw diff