Commit 23c05e6 by tianyang
1 Parent(s): 514d49e

fix - issue

Files changed (2):
  1. README.md (+10 -10)
  2. repobench-r.py (+16 -16)
README.md CHANGED
@@ -38,26 +38,26 @@ code prediction.
 
 The dataset has 4 subsets:
 
- - `python-cff`: python dataset with `cff` setting.
- - `python-cfr`: python dataset with `cfr` setting.
- - `java-cff`: java dataset with `cff` setting.
- - `java-cfr`: java dataset with `cfr` setting.
+ - `python_cff`: python dataset with `cff` setting.
+ - `python_cfr`: python dataset with `cfr` setting.
+ - `java_cff`: java dataset with `cff` setting.
+ - `java_cfr`: java dataset with `cfr` setting.
 
 Each subset has 4 splits:
 
- - `train-easy`: training set with easy difficulty, where the number of code snippets in the context $$k$$ satisfies $$ 5 \leq k < 10 $$.
- - `train-hard`: training set with hard difficulty, where the number of code snippets in the context $$k$$ satisfies $$ k \geq 10 $$.
- - `test-easy`: testing set with easy difficulty.
- - `test-hard`: testing set with hard difficulty.
+ - `train_easy`: training set with easy difficulty, where the number of code snippets in the context $$k$$ satisfies $$ 5 \leq k < 10 $$.
+ - `train_hard`: training set with hard difficulty, where the number of code snippets in the context $$k$$ satisfies $$ k \geq 10 $$.
+ - `test_easy`: testing set with easy difficulty.
+ - `test_hard`: testing set with hard difficulty.
 
 ## Loading Data
 
- For example, if you want to load the `test` `cross-file-first` `python` dataset with `easy` difficulty, you can use the following code:
+ For example, if you want to load the `test` `cross_file_first` `python` dataset with `easy` difficulty, you can use the following code:
 
 ```python
 from datasets import load_dataset
 
- dataset = load_dataset("tianyang/repobench-r", "python-cff", "test-easy")
+ dataset = load_dataset("tianyang/repobench-r", "python_cff", "test_easy")
 ```
 
 ## Dataset Structure
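
With the renamed configuration and split identifiers above, loading the data looks like the following minimal sketch. It assumes the `datasets` library is installed and uses the `split=` keyword for clarity; newer `datasets` releases may also require `trust_remote_code=True` for script-based datasets like this one.

```python
from datasets import load_dataset

# Underscore-style names introduced by this commit; the old hyphenated
# identifiers ("python-cff", "test-easy") no longer match the configs and splits.
dataset = load_dataset("tianyang/repobench-r", "python_cff", split="test_easy")
print(dataset)  # shows the number of rows and the column names of the split
```
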
repobench-r.py CHANGED
@@ -43,10 +43,10 @@ _HOMEPAGE = "https://github.com/Leolty/repobench"
 _LICENSE = "Apache License 2.0"
 
 _URLs = {
-     "java-cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cff.gz",
-     "java-cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cfr.gz",
-     "python-cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cff.gz",
-     "python-cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cfr.gz"
+     "java_cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cff.gz",
+     "java_cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cfr.gz",
+     "python_cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cff.gz",
+     "python_cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cfr.gz"
 }
 
 class RepoBenchR(datasets.GeneratorBasedBuilder):
@@ -56,7 +56,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
-             name="python-cff",
+             name="python_cff",
             description=textwrap.dedent(
                 """
                 cff: cross_file_first -> mask the the line that a cross-file module is first used
@@ -64,7 +64,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
             )
         ),
         datasets.BuilderConfig(
-             name="python-cfr",
+             name="python_cfr",
             description=textwrap.dedent(
                 """
                 cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)
@@ -72,7 +72,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
             )
         ),
         datasets.BuilderConfig(
-             name="java-cff",
+             name="java_cff",
             description=textwrap.dedent(
                 """
                 cff: cross_file_first -> mask the the line that a cross-file module is first used
@@ -80,7 +80,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
             )
         ),
         datasets.BuilderConfig(
-             name="java-cfr",
+             name="java_cfr",
             description=textwrap.dedent(
                 """
                 cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)
@@ -117,20 +117,20 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
 
         return [
             datasets.SplitGenerator(
-                 name=datasets.Split("train-easy"),
-                 gen_kwargs={"data_dir": data_dir, "split": "train-easy"},
+                 name=datasets.Split("train_easy"),
+                 gen_kwargs={"data_dir": data_dir, "split": "train_easy"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split("train_hard"),
-                 gen_kwargs={"data_dir": data_dir, "split": "train-hard"},
+                 gen_kwargs={"data_dir": data_dir, "split": "train_hard"},
             ),
             datasets.SplitGenerator(
-                 name=datasets.Split("test-easy"),
-                 gen_kwargs={"data_dir": data_dir, "split": "test-easy"},
+                 name=datasets.Split("test_easy"),
+                 gen_kwargs={"data_dir": data_dir, "split": "test_easy"},
             ),
             datasets.SplitGenerator(
-                 name=datasets.Split("test-hard"),
-                 gen_kwargs={"data_dir": data_dir, "split": "test-hard"},
+                 name=datasets.Split("test_hard"),
+                 gen_kwargs={"data_dir": data_dir, "split": "test_hard"},
             )
         ]
 
@@ -139,7 +139,7 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
         with gzip.open(data_dir, "rb") as f:
             data = pickle.load(f)
 
-         subset, level = split.split("-")
+         subset, level = split.split("_")
 
         for i, example in enumerate(data[subset][level]):
             yield i, {
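
For reference, here is a minimal standalone sketch of what the generator code in the last hunk does with a renamed split, assuming the pickled archives keep the nested `data[subset][level]` layout shown above; `iter_examples`, `data_path`, and the example split value are illustrative names, not part of the actual loader.

```python
import gzip
import pickle

def iter_examples(data_path: str, split: str):
    """Yield (index, example) pairs for one split of a pickled .gz archive."""
    with gzip.open(data_path, "rb") as f:
        data = pickle.load(f)
    # "train_easy" -> subset="train", level="easy"; under the old hyphenated
    # names this step had to call split.split("-"), which the commit removes.
    subset, level = split.split("_")
    for i, example in enumerate(data[subset][level]):
        yield i, example
```

The underscore convention keeps `datasets.Split(...)`, the `gen_kwargs` split strings, and this parsing step consistent, which the old code mixed up (e.g. `datasets.Split("train_hard")` paired with `"split": "train-hard"`).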