Datasets:
parquet-converter committed
Commit a11cfd8 • Parent(s): fbc749f
Update parquet files
.gitattributes
DELETED
@@ -1,52 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.json filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
README.md
DELETED
@@ -1,124 +0,0 @@
----
-annotations_creators: []
-language_creators:
-- crowdsourced
-- expert-generated
-language:
-- code
-license:
-- mit
-multilinguality:
-- monolingual
-size_categories:
-- unknown
-source_datasets:
-- original
-task_categories:
-- text2text-generation
-task_ids: []
-pretty_name: CoNaLa
-tags:
-- code-generation
----
-
-## Dataset Description
-- **Repository:** https://conala-corpus.github.io/
-- **Paper:** [Learning to Mine Aligned Code and Natural Language Pairs from Stack Overflow](https://arxiv.org/pdf/1805.08949.pdf)
-
-### Dataset Summary
-[CoNaLa](https://conala-corpus.github.io/) is a benchmark of code and natural language pairs for the evaluation of code generation tasks. The dataset was crawled from Stack Overflow, automatically filtered, then curated by annotators, and split into 2,379 training and 500 test examples. The automatically mined dataset is also available, with almost 600k examples.
-
-
-### Supported Tasks and Leaderboards
-This dataset is used to evaluate code generation.
-
-### Languages
-English - Python code.
-
-## Dataset Structure
-```python
-dataset_curated = load_dataset("neulab/conala")
-DatasetDict({
-    train: Dataset({
-        features: ['question_id', 'intent', 'rewritten_intent', 'snippet'],
-        num_rows: 2379
-    })
-    test: Dataset({
-        features: ['question_id', 'intent', 'rewritten_intent', 'snippet'],
-        num_rows: 500
-    })
-})
-
-dataset_mined = load_dataset("neulab/conala", "mined")
-DatasetDict({
-    train: Dataset({
-        features: ['question_id', 'parent_answer_post_id', 'prob', 'snippet', 'intent', 'id'],
-        num_rows: 593891
-    })
-})
-```
-### Data Instances
-
-#### CoNaLa - curated
-This is the dataset curated by annotators.
-```
-{
-    'question_id': 41067960,
-    'intent': 'How to convert a list of multiple integers into a single integer?',
-    'rewritten_intent': "Concatenate elements of a list 'x' of multiple integers to a single integer",
-    'snippet': 'sum(d * 10 ** i for i, d in enumerate(x[::-1]))'
-}
-```
-
-#### CoNaLa - mined
-This is the automatically mined dataset before curation.
-```
-{
-    'question_id': 34705205,
-    'parent_answer_post_id': 34705233,
-    'prob': 0.8690001442846342,
-    'snippet': 'sorted(l, key=lambda x: (-int(x[1]), x[0]))',
-    'intent': 'Sort a nested list by two elements',
-    'id': '34705205_34705233_0'
-}
-```
-
-### Data Fields
-Curated:
-
-|Field|Type|Description|
-|---|---|---|
-|question_id|int64|Id of the Stack Overflow question|
-|intent|string|Natural language intent (i.e., the title of a Stack Overflow question)|
-|rewritten_intent|string|Crowdsourced revised intent that tries to better reflect the full meaning of the code|
-|snippet|string|Code snippet that implements the intent|
-
-Mined:
-
-|Field|Type|Description|
-|---|---|---|
-|question_id|int64|Id of the Stack Overflow question|
-|parent_answer_post_id|int64|Id of the answer post from which the candidate snippet is extracted|
-|intent|string|Natural language intent (i.e., the title of a Stack Overflow question)|
-|snippet|string|Code snippet that implements the intent|
-|id|string|Unique id for this intent/snippet pair|
-|prob|float64|Probability given by the mining model|
-
-### Data Splits
-There are two versions of the dataset (curated and mined); mined has only a train split, while curated has two splits: train and test.
-
-## Dataset Creation
-The dataset was crawled from Stack Overflow, automatically filtered, then curated by annotators. For more details, please refer to the original [paper](https://arxiv.org/pdf/1805.08949.pdf).
-
-### Citation Information
-
-```
-@inproceedings{yin2018learning,
-  title={Learning to mine aligned code and natural language pairs from stack overflow},
-  author={Yin, Pengcheng and Deng, Bowen and Chen, Edgar and Vasilescu, Bogdan and Neubig, Graham},
-  booktitle={2018 IEEE/ACM 15th international conference on mining software repositories (MSR)},
-  pages={476--486},
-  year={2018},
-  organization={IEEE}
-}
-```
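The card deleted above documents how the two configurations load. As a quick reference, here is a minimal sketch of the same usage with the `datasets` library, assuming the Parquet-converted repo keeps the `curated` (default) and `mined` configs under the `neulab/conala` id:

```python
from datasets import load_dataset

# Curated pairs (train/test splits), as documented in the removed README.
curated = load_dataset("neulab/conala")  # "curated" is the default config
print(curated["train"][0]["snippet"])

# Automatically mined pairs (train split only, ~594k rows per the removed card).
mined = load_dataset("neulab/conala", "mined")
print(mined["train"].num_rows)
```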
conala.py
DELETED
@@ -1,111 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""CoNaLa dataset."""
-
-import json
-import datasets
-
-
-_CITATION = """\
-@inproceedings{yin2018learning,
-  title={Learning to mine aligned code and natural language pairs from stack overflow},
-  author={Yin, Pengcheng and Deng, Bowen and Chen, Edgar and Vasilescu, Bogdan and Neubig, Graham},
-  booktitle={2018 IEEE/ACM 15th international conference on mining software repositories (MSR)},
-  pages={476--486},
-  year={2018},
-  organization={IEEE}
-}
-"""
-
-_DESCRIPTION = """\
-CoNaLa is a dataset of code and natural language pairs crawled from Stack Overflow, for more details please refer to this paper: https://arxiv.org/pdf/1805.08949.pdf or the dataset page https://conala-corpus.github.io/.
-"""
-
-_HOMEPAGE = "https://conala-corpus.github.io/"
-_URLs = {
-    "mined": "data/conala-mined.json",
-    "curated": {"train": "data/conala-paired-train.json", "test": "data/conala-paired-test.json"},
-}
-
-class Conala(datasets.GeneratorBasedBuilder):
-    """CoNaLa Code dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="curated",
-            version=datasets.Version("1.1.0"),
-            description=_DESCRIPTION,
-        ),
-        datasets.BuilderConfig(name="mined", version=datasets.Version("1.1.0"), description=_DESCRIPTION),
-    ]
-
-    DEFAULT_CONFIG_NAME = "curated"
-
-
-    def _info(self):
-        if self.config.name == "curated":
-            features = datasets.Features({"question_id": datasets.Value("int64"),
-                                          "intent": datasets.Value("string"),
-                                          "rewritten_intent": datasets.Value("string"),
-                                          "snippet": datasets.Value("string"),
-                                          })
-        else:
-            features = datasets.Features({"question_id": datasets.Value("int64"),
-                                          "parent_answer_post_id": datasets.Value("int64"),
-                                          "prob": datasets.Value("float64"),
-                                          "snippet": datasets.Value("string"),
-                                          "intent": datasets.Value("string"),
-                                          "id": datasets.Value("string"),
-                                          })
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            citation=_CITATION,
-            homepage=_HOMEPAGE)
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        config_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(config_urls)
-        if self.config.name == "curated":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={"filepath": data_dir["train"], "split": "train"},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={"filepath": data_dir["test"], "split": "test"},
-                ),
-            ]
-        else:
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={"filepath": data_dir, "split": "train"},
-                ),
-            ]
-
-
-    def _generate_examples(self, filepath, split):
-        key = 0
-        for line in open(filepath, encoding="utf-8"):
-            line = json.loads(line)
-            yield key, line
-            key += 1
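For reference, the core of the deleted script's `_generate_examples` is a JSON-lines reader. A minimal standalone sketch of that logic, with the path given purely as an illustration of the old `data/` layout:

```python
import json

def generate_examples(filepath):
    """Yield (key, example) pairs the way the removed conala.py did."""
    with open(filepath, encoding="utf-8") as f:
        for key, line in enumerate(f):
            yield key, json.loads(line)

# Illustrative only; this file is renamed to a Parquet file in this commit.
# examples = list(generate_examples("data/conala-mined.json"))
```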
data/conala-paired-test.json → curated/conala-test.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ef8a9dd97bfbdfe69074a7a3147442098f96bb677f5fda8399fb1cc9e58ad045
+size 48639
data/conala-paired-train.json → curated/conala-train.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1b17bc084479374bfdd4d872160b4521ac5244f3b475cd2e444e4c18b9cb8810
+size 217009
data/conala-mined.json → mined/conala-train.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2c3addf89bd18f3b99fc16eeceb709615ad17e87c389c152da4de606769ecadc
+size 74356952
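With the JSON files above now stored as Parquet, the splits can also be read directly from the converted files. A minimal sketch, assuming `huggingface_hub` and `pandas` are installed and that the Parquet files are reachable in the `neulab/conala` dataset repo:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Fetch one of the Parquet files introduced by this commit.
# (A revision= argument may be needed, depending on which branch holds the Parquet files.)
path = hf_hub_download(
    repo_id="neulab/conala",
    filename="curated/conala-train.parquet",
    repo_type="dataset",
)

df = pd.read_parquet(path)
print(df.columns.tolist())  # expected: question_id, intent, rewritten_intent, snippet
print(len(df))              # expected: 2379 rows, per the removed README
```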