Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
albertvillanova (HF staff) committed
Commit: 4271bbd
Parent: d5c484b

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (d6dcac7579df38417858338365e2cf00aafbadd1)
- Delete loading script (e887d1bfd44f95154ef0750b8ed785db962b1449)
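With the Parquet shards and the `configs` mapping in place (see the README diff below), `load_dataset` resolves the data files directly instead of executing `tweet_qa.py`. A minimal sketch; the bare repository id `tweet_qa` is an assumption, adjust it to the dataset's actual namespace on the Hub:

```python
# Minimal sketch, assuming the dataset lives at the Hub id "tweet_qa".
from datasets import load_dataset

ds = load_dataset("tweet_qa")  # reads the Parquet shards; no loading script runs
print(ds)  # DatasetDict with train / validation / test splits
```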

README.md CHANGED
@@ -31,16 +31,25 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 2770036
+    num_bytes: 2769996
     num_examples: 10692
-  - name: test
-    num_bytes: 473730
-    num_examples: 1979
   - name: validation
-    num_bytes: 295435
+    num_bytes: 295415
     num_examples: 1086
-  download_size: 1573980
-  dataset_size: 3539201
+  - name: test
+    num_bytes: 473710
+    num_examples: 1979
+  download_size: 2434334
+  dataset_size: 3539121
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for TweetQA
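The new `configs` block tells the `datasets` library which Parquet shards belong to which split. Loading the files by hand with the generic `parquet` builder would look roughly like this (paths taken from the YAML above):

```python
# Sketch of the split-to-file mapping that the `configs` block encodes.
from datasets import load_dataset

data_files = {
    "train": "data/train-*",
    "validation": "data/validation-*",
    "test": "data/test-*",
}
ds = load_dataset("parquet", data_files=data_files)
```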
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:106b6ffa73c1b4dd55869599e16f86a343fd2282bcc216fe536e7318333df904
+size 244219
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c2c9875c8f4c21fc19de197c145d4b6fd23ae03f2c73cd9a5715b727102a514
+size 2028441
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aff22da6d2aa8fac08a5de041a4813743c1b99cdfdce1e3f6ba7b0fb4864a1b1
+size 161674
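The three files above are Git LFS pointers; the Parquet bytes themselves live in LFS storage. Once a shard has actually been pulled, it can be inspected directly, for example with pyarrow (a sketch, assuming the file sits under `data/` locally):

```python
# Sketch: inspect one pulled shard with pyarrow.
import pyarrow.parquet as pq

table = pq.read_table("data/train-00000-of-00001.parquet")
print(table.schema)    # expected columns: Question, Answer, Tweet, qid
print(table.num_rows)  # expected: 10692, per the README splits
```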
tweet_qa.py DELETED
@@ -1,110 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""TWEETQA: A Social Media Focused Question Answering Dataset"""
-
-
-import json
-import os
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{xiong2019tweetqa,
-    title={TweetQA: A Social Media Focused Question Answering Dataset},
-    author={Xiong, Wenhan and Wu, Jiawei and Wang, Hong and Kulkarni, Vivek and Yu, Mo and Guo, Xiaoxiao and Chang, Shiyu and Wang, William Yang},
-    booktitle={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
-    year={2019}
-}
-"""
-
-_DESCRIPTION = """\
-TweetQA is the first dataset for QA on social media data by leveraging news media and crowdsourcing.
-"""
-
-_HOMEPAGE = "https://tweetqa.github.io/"
-
-_LICENSE = "CC BY-SA 4.0"
-
-_URL = "https://sites.cs.ucsb.edu/~xwhan/datasets/tweetqa.zip"
-
-
-class TweetQA(datasets.GeneratorBasedBuilder):
-    """TweetQA: first large-scale dataset for QA over social media data"""
-
-    VERSION = datasets.Version("1.0.0")
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "Question": datasets.Value("string"),
-                "Answer": datasets.Sequence(datasets.Value("string")),
-                "Tweet": datasets.Value("string"),
-                "qid": datasets.Value("string"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URL)
-        train_path = os.path.join(data_dir, "TweetQA_data", "train.json")
-        test_path = os.path.join(data_dir, "TweetQA_data", "test.json")
-        dev_path = os.path.join(data_dir, "TweetQA_data", "dev.json")
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": train_path,
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": test_path,
-                    "split": "test",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": dev_path,
-                    "split": "dev",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-
-        with open(filepath, encoding="utf-8") as f:
-            tweet_qa = json.load(f)
-            idx = 0
-            for data in tweet_qa:
-                yield idx, {
-                    "Question": data["Question"],
-                    "Answer": [] if split == "test" else data["Answer"],
-                    "Tweet": data["Tweet"],
-                    "qid": data["qid"],
-                }
-                idx += 1
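The only transformation the deleted script applied was blanking `Answer` for the hidden test split (`[] if split == "test"`); since the Parquet shards were generated from this script's output, that convention should carry over. A hedged spot check (Hub id assumed, as above):

```python
# Sketch: confirm test-split answers are still empty lists after the conversion.
from datasets import load_dataset

ds = load_dataset("tweet_qa")  # repository id is an assumption
assert all(len(ex["Answer"]) == 0 for ex in ds["test"])
```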