system HF staff committed on
Commit f1a474c
1 Parent(s): 5f6f691

import from S3

Files changed (1)
  1. python-lines.py +95 -0
python-lines.py ADDED
@@ -0,0 +1,95 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Python lines dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ Dataset of single lines of Python code taken from the [CodeSearchNet](https://github.com/github/CodeSearchNet) dataset.
+
+ Context
+
+ This dataset allows checking the validity of Variational-Autoencoder latent spaces by testing what percentage of random/intermediate latent points can be greedily decoded into valid Python code.
+
+ Content
+
+ Each row has a parsable line of source code.
+ {'text': '{python source code line}'}
+
+ Most lines are under 100 characters, and all are under 125 characters.
+
+ Contains 2.6 million lines.
+
+ All code is parsable into a Python 3 AST.
+
+ """
+
+ _CITATION = """\
+ @dataset{dataset,
+     author = {Fraser Greenlee},
+     year = {2020},
+     month = {12},
+     pages = {},
+     title = {Python single line dataset.},
+     doi = {}
+ }
+ """
+
+ _TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/Fraser-Greenlee/my-huggingface-datasets/master/data/python-lines/train.jsonl"
+ _TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/Fraser-Greenlee/my-huggingface-datasets/master/data/python-lines/test.jsonl"
+ _VALIDATION_DOWNLOAD_URL = "https://raw.githubusercontent.com/Fraser-Greenlee/my-huggingface-datasets/master/data/python-lines/valid.jsonl"
+
+
+ class PythonLines(datasets.GeneratorBasedBuilder):
+     """Python lines dataset."""
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     'text': datasets.Value("string"),
+                 }
+             ),
+             homepage="https://github.com/Fraser-Greenlee/my-huggingface-datasets",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
+         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
+         validation_path = dl_manager.download_and_extract(_VALIDATION_DOWNLOAD_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Generate examples."""
+         with open(filepath, encoding="utf-8") as json_lines_file:
+             data = []
+             for line in json_lines_file:
+                 data.append(json.loads(line))
+
+             for id_, row in enumerate(data):
+                 yield id_, row
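
For reference (not part of this commit), a minimal usage sketch. It assumes a datasets version that still runs script-based loaders from a local path; recent releases may require trust_remote_code=True, and the newest ones may not run loading scripts at all.

from datasets import load_dataset

# Point load_dataset at this loader script; it downloads the three JSONL
# splits and builds a DatasetDict.
dataset = load_dataset("python-lines.py")

print(dataset)              # train / test / validation splits
print(dataset["train"][0])  # {'text': '<one line of Python source>'}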
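
And a sketch of the validity check the description mentions: the share of decoded strings that parse into a Python 3 AST. The decoded list is a hypothetical stand-in for greedy decodings of random or interpolated latent points.

import ast

def is_valid_python(line: str) -> bool:
    """True if the line parses into a Python 3 AST."""
    try:
        ast.parse(line)
        return True
    except SyntaxError:
        return False

# Hypothetical decoder outputs; replace with real VAE decodings.
decoded = ["x = y + 1", "def f(:", "print(len(xs))"]
valid_rate = sum(is_valid_python(s) for s in decoded) / len(decoded)
print(f"valid Python rate: {valid_rate:.0%}")  # valid Python rate: 67%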