Modalities: Text
Formats: json
Languages: English
Libraries: Datasets, pandas
yuzhen17 committed
Commit 8076f37 · 1 Parent(s): b390035

init commit

Files changed (2):
  1. cpt.py +85 -0
  2. data.zip +3 -0
cpt.py ADDED
@@ -0,0 +1,85 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import json
+ import os
+
+ import datasets
+ import pandas as pd
+ from datasets import DatasetInfo, DownloadManager
+
+ #
+ _CITATION = """\
+ """
+
+ _DESCRIPTION = """\
+ """
+
+
+ _LICENSE = ""
+
+ _URL = r"https://huggingface.co/datasets/hkust-nlp/cpt/resolve/main/data.zip"
+
+
+ Task_list = [
+     "python",
+     "cc",
+     "arxiv_math",
+ ]
+
+
+ class CptConfig(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         super().__init__(version=datasets.Version("1.0.0"), **kwargs)
+
+
+ class Cpt(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         CptConfig(
+             name=task_name,
+         )
+         for task_name in Task_list
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "content": datasets.Value("string"),
+                 "subset": datasets.Value("string"),
+                 "meta": datasets.features.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+         data_dir = dl_manager.download_and_extract(_URL)
+         task_name = self.config.name
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TEST,
+                                     gen_kwargs={"filepath": os.path.join(data_dir, f"{task_name}.jsonl")})
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         with open(filepath, encoding="utf-8") as f:
+             for id_, row in enumerate(f):
+                 cur_data = json.loads(row)
+                 yield id_, {
+                     "content": cur_data["content"],
+                     "subset": cur_data["subset"],
+                     "meta": str(cur_data["meta"]),
+                 }
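
Note: based on _generate_examples above, each per-config file extracted from data.zip (python.jsonl, cc.jsonl, arxiv_math.jsonl) is expected to hold one JSON object per line with "content", "subset", and "meta" fields, where "meta" may itself be an object that the loader stringifies. A minimal sketch of how one such line is parsed; the record below is an illustrative placeholder, not taken from the actual data:

import json

# Hypothetical JSONL line; the real records ship inside data.zip.
line = '{"content": "some document text", "subset": "arxiv_math", "meta": {"source": "example"}}'
cur_data = json.loads(line)

# Mirror what _generate_examples yields: "meta" is cast to a plain string.
example = {
    "content": cur_data["content"],
    "subset": cur_data["subset"],
    "meta": str(cur_data["meta"]),
}
print(example)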
data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24adcf8f9823b91edadb75f233b2e269fb026463afc44ecd5087bef025c0cb33
+ size 96958877
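
For reference, a minimal usage sketch of this loading script with the datasets library. The config names come from Task_list, only a test split is defined in _split_generators, and trust_remote_code=True is an assumption about recent datasets versions, which require it for script-based datasets:

from datasets import load_dataset

# Pick one of the configs defined in Task_list: "python", "cc", or "arxiv_math".
ds = load_dataset("hkust-nlp/cpt", "arxiv_math", split="test", trust_remote_code=True)

# Each example exposes the three string fields declared in _info().
print(ds[0]["subset"])
print(ds[0]["content"][:200])
print(ds[0]["meta"])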