Convert dataset to Parquet #3
opened by albertvillanova (HF Staff)
README.md CHANGED
@@ -20,6 +20,7 @@ pretty_name: JournalistsQuestions
 tags:
 - question-identification
 dataset_info:
+  config_name: plain_text
   features:
   - name: tweet_id
     dtype: string
@@ -31,13 +32,18 @@ dataset_info:
           '1': 'yes'
   - name: label_confidence
     dtype: float32
-  config_name: plain_text
   splits:
   - name: train
-    num_bytes: 342296
+    num_bytes: 342288
     num_examples: 10077
-  download_size: 271039
-  dataset_size: 342296
+  download_size: 195694
+  dataset_size: 342288
+configs:
+- config_name: plain_text
+  data_files:
+  - split: train
+    path: plain_text/train-*
+  default: true
 ---
 
 # Dataset Card for journalists_questions
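
With the configs mapping added above, the Hub serves the train split directly from the Parquet shard, so the dataset loads without the loading script deleted below. A minimal usage sketch; the repository namespace is not shown in this PR, so the repo id below is a placeholder:

import datasets  # pip install datasets

# The namespace is a placeholder; substitute the actual Hub repo id.
ds = datasets.load_dataset("<namespace>/journalists_questions", "plain_text", split="train")

print(ds.num_rows)   # expected 10077, per the metadata above
print(ds.features)   # tweet_id (string), label (ClassLabel: no/yes), label_confidence (float32)
print(ds[0])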
journalists_questions.py DELETED
@@ -1,76 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import csv
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{hasanain2016questions,
-  title={What Questions Do Journalists Ask on Twitter?},
-  author={Hasanain, Maram and Bagdouri, Mossaab and Elsayed, Tamer and Oard, Douglas W},
-  booktitle={Tenth International AAAI Conference on Web and Social Media},
-  year={2016}
-}
-"""
-
-_DESCRIPTION = """\
-The journalists_questions corpus (version 1.0) is a collection of 10K human-written Arabic
-tweets manually labeled for question identification over Arabic tweets posted by journalists.
-"""
-_DATA_URL = "https://drive.google.com/uc?export=download&id=1CBrh-9OrSpKmPQBxTK_ji6mq6WTN_U9U"
-
-
-class JournalistsQuestions(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="plain_text",
-            version=datasets.Version("1.0.0", ""),
-            description="Journalists tweet IDs and annotation by whether the tweet has a question",
-        )
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "tweet_id": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(names=["no", "yes"]),
-                    "label_confidence": datasets.Value("float"),
-                }
-            ),
-            homepage="http://qufaculty.qu.edu.qa/telsayed/datasets/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(_DATA_URL)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_dir}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f, delimiter="\t", fieldnames=["tweet_id", "label", "label_confidence"])
-            for idx, row in enumerate(reader):
-                yield idx, {
-                    "tweet_id": row["tweet_id"],
-                    "label": row["label"],
-                    "label_confidence": float(row["label_confidence"]),
-                }
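
The TSV download-and-parse logic above is superseded by the Parquet shard added below, which standard tooling can read directly. A minimal sketch, assuming the shard has already been fetched to a local path mirroring the repository layout:

import pyarrow.parquet as pq  # pip install pyarrow

# The local path mirrors the repo layout; adjust to wherever the shard was downloaded.
table = pq.read_table("plain_text/train-00000-of-00001.parquet")

print(table.num_rows)  # expected 10077
print(table.schema)    # columns: tweet_id, label, label_confidence
print(table.slice(0, 3).to_pydict())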
plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b54a9ab8032c05555c284618b747e01692835b867aa0c717e60d2834847edaf
+size 195694
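
What git records here is an LFS pointer, not the Parquet bytes themselves; the oid and size fields identify the actual shard. A minimal sketch of checking a locally downloaded copy against this pointer (the local path is an assumption):

import hashlib
from pathlib import Path

# Assumed local path; point it at the downloaded shard.
data = Path("plain_text/train-00000-of-00001.parquet").read_bytes()

assert len(data) == 195694, "size mismatch with the LFS pointer"
assert hashlib.sha256(data).hexdigest() == (
    "7b54a9ab8032c05555c284618b747e01692835b867aa0c717e60d2834847edaf"
), "sha256 mismatch with the LFS pointer"
print("shard matches the LFS pointer")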