parquet-converter committed on
Commit
d64012d
1 Parent(s): e90946d

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,51 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.lz4 filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.npy filter=lfs diff=lfs merge=lfs -text
14
- *.npz filter=lfs diff=lfs merge=lfs -text
15
- *.onnx filter=lfs diff=lfs merge=lfs -text
16
- *.ot filter=lfs diff=lfs merge=lfs -text
17
- *.parquet filter=lfs diff=lfs merge=lfs -text
18
- *.pb filter=lfs diff=lfs merge=lfs -text
19
- *.pickle filter=lfs diff=lfs merge=lfs -text
20
- *.pkl filter=lfs diff=lfs merge=lfs -text
21
- *.pt filter=lfs diff=lfs merge=lfs -text
22
- *.pth filter=lfs diff=lfs merge=lfs -text
23
- *.rar filter=lfs diff=lfs merge=lfs -text
24
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
- *.tar.* filter=lfs diff=lfs merge=lfs -text
26
- *.tflite filter=lfs diff=lfs merge=lfs -text
27
- *.tgz filter=lfs diff=lfs merge=lfs -text
28
- *.wasm filter=lfs diff=lfs merge=lfs -text
29
- *.xz filter=lfs diff=lfs merge=lfs -text
30
- *.zip filter=lfs diff=lfs merge=lfs -text
31
- *.zst filter=lfs diff=lfs merge=lfs -text
32
- *tfevents* filter=lfs diff=lfs merge=lfs -text
33
- # Audio files - uncompressed
34
- *.pcm filter=lfs diff=lfs merge=lfs -text
35
- *.sam filter=lfs diff=lfs merge=lfs -text
36
- *.raw filter=lfs diff=lfs merge=lfs -text
37
- # Audio files - compressed
38
- *.aac filter=lfs diff=lfs merge=lfs -text
39
- *.flac filter=lfs diff=lfs merge=lfs -text
40
- *.mp3 filter=lfs diff=lfs merge=lfs -text
41
- *.ogg filter=lfs diff=lfs merge=lfs -text
42
- *.wav filter=lfs diff=lfs merge=lfs -text
43
- # Image files - uncompressed
44
- *.bmp filter=lfs diff=lfs merge=lfs -text
45
- *.gif filter=lfs diff=lfs merge=lfs -text
46
- *.png filter=lfs diff=lfs merge=lfs -text
47
- *.tiff filter=lfs diff=lfs merge=lfs -text
48
- # Image files - compressed
49
- *.jpg filter=lfs diff=lfs merge=lfs -text
50
- *.jpeg filter=lfs diff=lfs merge=lfs -text
51
- *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
default/reddit-topics-targz-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e029c3426df6aff1c4921a4da036fa23ef5003134719fd460e529caabe6604a
3
+ size 1822976
reddit-topics-targz.py DELETED
@@ -1,91 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """SQUAD: The Stanford Question Answering Dataset."""
18
-
19
-
20
- import json
21
-
22
- import datasets
23
- from datasets.tasks import QuestionAnsweringExtractive
24
-
25
-
26
- logger = datasets.logging.get_logger(__name__)
27
-
28
-
29
- _CITATION = """\
30
- @article{2016arXiv160605250R,
31
- author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
32
- Konstantin and {Liang}, Percy},
33
- title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
34
- journal = {arXiv e-prints},
35
- year = 2016,
36
- eid = {arXiv:1606.05250},
37
- pages = {arXiv:1606.05250},
38
- archivePrefix = {arXiv},
39
- eprint = {1606.05250},
40
- }
41
- """
42
-
43
- _DESCRIPTION = """\
44
- Demo...
45
- """
46
-
47
- _URL = "https://github.com/jamescalam/hf-datasets/raw/main/01_builder_script/dataset.tar.gz"
48
-
49
-
50
- class RedditTopicsTargz(datasets.GeneratorBasedBuilder):
51
- """SQUAD: The Stanford Question Answering Dataset. Version 1.1."""
52
- def _info(self):
53
- return datasets.DatasetInfo(
54
- description=_DESCRIPTION,
55
- features=datasets.Features(
56
- {
57
- "sub": datasets.Value("string"),
58
- "title": datasets.Value("string"),
59
- "selftext": datasets.Value("string"),
60
- "upvote_ratio": datasets.Value("float32"),
61
- "id": datasets.Value("string"),
62
- "created_utc": datasets.Value("float32"),
63
- }
64
- ),
65
- # No default supervised_keys (as we have to pass both question
66
- # and context as input).
67
- supervised_keys=None,
68
- homepage="https://github.com/jamescalam/hf-datasets/",
69
- citation=_CITATION,
70
- )
71
-
72
- def _split_generators(self, dl_manager):
73
- path = dl_manager.download_and_extract(_URL)
74
-
75
- return [
76
- datasets.SplitGenerator(
77
- name=datasets.Split.TRAIN,
78
- gen_kwargs={"filepath": path+'/dataset.jsonl'}
79
- ),
80
- ]
81
-
82
- def _generate_examples(self, filepath):
83
- """This function returns the examples in the raw (text) form."""
84
- idx = 0
85
- # open the file and read the lines
86
- with open(filepath, encoding="utf-8") as fp:
87
- for line in fp:
88
- # load json line
89
- obj = json.loads(line)
90
- yield idx, obj
91
- idx += 1