albertvillanova HF staff committed on
Commit
07ab797
1 Parent(s): 2d5f64e

Convert dataset to Parquet (#5)

Browse files

- Convert dataset to Parquet (43d16a89f1798a9c9d84b5d23174b9e1984a3009)
- Add small data files (9c500d97d229a8475a1360bdb30532f16e99c9c1)
- Delete loading script (9a4575a0d7d8372d9374b8b0934cc1d3df3055a7)
- Delete loading script auxiliary file (9261f83b1c22a53e14300e01a2f91dfe5106830e)
- Delete loading script auxiliary file (62d56c3f7a50505fa750c299e8010b7f1bd423c5)

README.md CHANGED
@@ -30,16 +30,16 @@ dataset_info:
30
  dtype: string
31
  splits:
32
  - name: train
33
- num_bytes: 32614834
34
  num_examples: 52364
35
  - name: validation
36
- num_bytes: 4086741
37
  num_examples: 6546
38
  - name: test
39
- num_bytes: 4063673
40
  num_examples: 6545
41
- download_size: 39979724
42
- dataset_size: 40765248
43
  - config_name: small
44
  features:
45
  - name: id
@@ -50,16 +50,33 @@ dataset_info:
50
  dtype: string
51
  splits:
52
  - name: train
53
- num_bytes: 13006719
54
  num_examples: 46680
55
  - name: validation
56
- num_bytes: 1629250
57
  num_examples: 5835
58
  - name: test
59
- num_bytes: 1619708
60
  num_examples: 5835
61
- download_size: 15555421
62
- dataset_size: 16255677
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  ---
64
 
65
  # Dataset Card for "code_x_glue_cc_code_refinement"
30
  dtype: string
31
  splits:
32
  - name: train
33
+ num_bytes: 32614786
34
  num_examples: 52364
35
  - name: validation
36
+ num_bytes: 4086733
37
  num_examples: 6546
38
  - name: test
39
+ num_bytes: 4063665
40
  num_examples: 6545
41
+ download_size: 14929559
42
+ dataset_size: 40765184
43
  - config_name: small
44
  features:
45
  - name: id
50
  dtype: string
51
  splits:
52
  - name: train
53
+ num_bytes: 13006679
54
  num_examples: 46680
55
  - name: validation
56
+ num_bytes: 1629242
57
  num_examples: 5835
58
  - name: test
59
+ num_bytes: 1619700
60
  num_examples: 5835
61
+ download_size: 5894462
62
+ dataset_size: 16255621
63
+ configs:
64
+ - config_name: medium
65
+ data_files:
66
+ - split: train
67
+ path: medium/train-*
68
+ - split: validation
69
+ path: medium/validation-*
70
+ - split: test
71
+ path: medium/test-*
72
+ - config_name: small
73
+ data_files:
74
+ - split: train
75
+ path: small/train-*
76
+ - split: validation
77
+ path: small/validation-*
78
+ - split: test
79
+ path: small/test-*
80
  ---
81
 
82
  # Dataset Card for "code_x_glue_cc_code_refinement"
code_x_glue_cc_code_refinement.py DELETED
@@ -1,93 +0,0 @@
1
- from typing import List
2
-
3
- import datasets
4
-
5
- from .common import TrainValidTestChild
6
- from .generated_definitions import DEFINITIONS
7
-
8
-
9
- _DESCRIPTION = """We use the dataset released by this paper(https://arxiv.org/pdf/1812.08693.pdf). The source side is a Java function with bugs and the target side is the refined one. All the function and variable names are normalized. Their dataset contains two subsets ( i.e.small and medium) based on the function length."""
10
- _CITATION = """@article{10.1145/3340544,
11
- author = {Tufano, Michele and Watson, Cody and Bavota, Gabriele and Penta, Massimiliano Di and White, Martin and Poshyvanyk, Denys},
12
- title = {An Empirical Study on Learning Bug-Fixing Patches in the Wild via Neural Machine Translation},
13
- year = {2019},
14
- issue_date = {October 2019},
15
- publisher = {Association for Computing Machinery},
16
- address = {New York, NY, USA},
17
- volume = {28},
18
- number = {4},
19
- issn = {1049-331X},
20
- url = {https://doi-org.proxy.wm.edu/10.1145/3340544},
21
- doi = {10.1145/3340544},
22
- abstract = {Millions of open source projects with numerous bug fixes are available in code repositories. This proliferation of software development histories can be leveraged to learn how to fix common programming bugs. To explore such a potential, we perform an empirical study to assess the feasibility of using Neural Machine Translation techniques for learning bug-fixing patches for real defects. First, we mine millions of bug-fixes from the change histories of projects hosted on GitHub in order to extract meaningful examples of such bug-fixes. Next, we abstract the buggy and corresponding fixed code, and use them to train an Encoder-Decoder model able to translate buggy code into its fixed version. In our empirical investigation, we found that such a model is able to fix thousands of unique buggy methods in the wild. Overall, this model is capable of predicting fixed patches generated by developers in 9--50% of the cases, depending on the number of candidate patches we allow it to generate. Also, the model is able to emulate a variety of different Abstract Syntax Tree operations and generate candidate patches in a split second.},
23
- journal = {ACM Trans. Softw. Eng. Methodol.},
24
- month = sep,
25
- articleno = {19},
26
- numpages = {29},
27
- keywords = {bug-fixes, Neural machine translation}
28
- }"""
29
-
30
-
31
- class CodeXGlueCcCodeRefinementImpl(TrainValidTestChild):
32
- _DESCRIPTION = _DESCRIPTION
33
- _CITATION = _CITATION
34
-
35
- _FEATURES = {
36
- "id": datasets.Value("int32"), # Index of the sample
37
- "buggy": datasets.Value("string"), # The buggy version of the code
38
- "fixed": datasets.Value("string"), # The correct version of the code
39
- }
40
-
41
- _SUPERVISED_KEYS = ["fixed"]
42
-
43
- def generate_urls(self, split_name):
44
- size = self.info["parameters"]["size"]
45
- for key in "buggy", "fixed":
46
- yield key, f"{size}/{split_name}.buggy-fixed.{key}"
47
-
48
- def _generate_examples(self, split_name, file_paths):
49
- """This function returns the examples in the raw (text) form."""
50
- # Open each file (one for java, and one for c#)
51
- files = {k: open(file_paths[k], encoding="utf-8") for k in file_paths}
52
-
53
- id_ = 0
54
- while True:
55
- # Read a single line from each file
56
- entries = {k: files[k].readline() for k in file_paths}
57
-
58
- empty = self.check_empty(entries)
59
- if empty:
60
- # We are done: end of files
61
- return
62
-
63
- entries["id"] = id_
64
- yield id_, entries
65
- id_ += 1
66
-
67
-
68
- CLASS_MAPPING = {
69
- "CodeXGlueCcCodeRefinement": CodeXGlueCcCodeRefinementImpl,
70
- }
71
-
72
-
73
- class CodeXGlueCcCodeRefinement(datasets.GeneratorBasedBuilder):
74
- BUILDER_CONFIG_CLASS = datasets.BuilderConfig
75
- BUILDER_CONFIGS = [
76
- datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
77
- ]
78
-
79
- def _info(self):
80
- name = self.config.name
81
- info = DEFINITIONS[name]
82
- if info["class_name"] in CLASS_MAPPING:
83
- self.child = CLASS_MAPPING[info["class_name"]](info)
84
- else:
85
- raise RuntimeError(f"Unknown python class for dataset configuration {name}")
86
- ret = self.child._info()
87
- return ret
88
-
89
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
90
- return self.child._split_generators(dl_manager=dl_manager)
91
-
92
- def _generate_examples(self, split_name, file_paths):
93
- return self.child._generate_examples(split_name, file_paths)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
common.py DELETED
@@ -1,75 +0,0 @@
1
- from typing import List
2
-
3
- import datasets
4
-
5
-
6
- # Citation, taken from https://github.com/microsoft/CodeXGLUE
7
- _DEFAULT_CITATION = """@article{CodeXGLUE,
8
- title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
9
- year={2020},}"""
10
-
11
-
12
- class Child:
13
- _DESCRIPTION = None
14
- _FEATURES = None
15
- _CITATION = None
16
- SPLITS = {"train": datasets.Split.TRAIN}
17
- _SUPERVISED_KEYS = None
18
-
19
- def __init__(self, info):
20
- self.info = info
21
-
22
- def homepage(self):
23
- return self.info["project_url"]
24
-
25
- def _info(self):
26
- # This is the description that will appear on the datasets page.
27
- return datasets.DatasetInfo(
28
- description=self.info["description"] + "\n\n" + self._DESCRIPTION,
29
- features=datasets.Features(self._FEATURES),
30
- homepage=self.homepage(),
31
- citation=self._CITATION or _DEFAULT_CITATION,
32
- supervised_keys=self._SUPERVISED_KEYS,
33
- )
34
-
35
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
36
- SPLITS = self.SPLITS
37
- _URL = self.info["raw_url"]
38
- urls_to_download = {}
39
- for split in SPLITS:
40
- if split not in urls_to_download:
41
- urls_to_download[split] = {}
42
-
43
- for key, url in self.generate_urls(split):
44
- if not url.startswith("http"):
45
- url = _URL + "/" + url
46
- urls_to_download[split][key] = url
47
-
48
- downloaded_files = {}
49
- for k, v in urls_to_download.items():
50
- downloaded_files[k] = dl_manager.download(v)
51
-
52
- return [
53
- datasets.SplitGenerator(
54
- name=SPLITS[k],
55
- gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
56
- )
57
- for k in SPLITS
58
- ]
59
-
60
- def check_empty(self, entries):
61
- all_empty = all([v == "" for v in entries.values()])
62
- all_non_empty = all([v != "" for v in entries.values()])
63
-
64
- if not all_non_empty and not all_empty:
65
- raise RuntimeError("Parallel data files should have the same number of lines.")
66
-
67
- return all_empty
68
-
69
-
70
- class TrainValidTestChild(Child):
71
- SPLITS = {
72
- "train": datasets.Split.TRAIN,
73
- "valid": datasets.Split.VALIDATION,
74
- "test": datasets.Split.TEST,
75
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
generated_definitions.py DELETED
@@ -1,24 +0,0 @@
1
- DEFINITIONS = {
2
- "medium": {
3
- "class_name": "CodeXGlueCcCodeRefinement",
4
- "dataset_type": "Code-Code",
5
- "description": "CodeXGLUE code-refinement dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/code-refinement",
6
- "dir_name": "code-refinement",
7
- "name": "medium",
8
- "parameters": {"size": "medium"},
9
- "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/code-refinement",
10
- "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/code-refinement/data",
11
- "sizes": {"test": 6545, "train": 52364, "validation": 6546},
12
- },
13
- "small": {
14
- "class_name": "CodeXGlueCcCodeRefinement",
15
- "dataset_type": "Code-Code",
16
- "description": "CodeXGLUE code-refinement dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/code-refinement",
17
- "dir_name": "code-refinement",
18
- "name": "small",
19
- "parameters": {"size": "small"},
20
- "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/code-refinement",
21
- "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/code-refinement/data",
22
- "sizes": {"test": 5835, "train": 46680, "validation": 5835},
23
- },
24
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
medium/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:776766b34878a999193dae39f0462ed1dc5aec3b7d219d3488db4c0127eca858
3
+ size 1488083
medium/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:955496550f8603dd920aa2a09d1e4a14d878bda9f35f2f3575bae68b9d9493f0
3
+ size 11943277
medium/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9eb660729a7a43df19c714726933a517d0497cefa99cb95328bc964658eaa44c
3
+ size 1498199
small/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be9b78909122cc07a9b31b55c999fe8b8346b1bd775e726b0cbb044f9d1ebc90
3
+ size 588578
small/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e0ddaf02afd36f2827d0f6edcf94b3b6b5908206cbf63fc6ac99ee4a6332472
3
+ size 4715251
small/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b623a6544959b1767eb3082064b3b3896e26f0770dfd2270d14d4e62da0246a5
3
+ size 590633