Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
albertvillanova HF staff committed on
Commit
e887d1b
·
verified ·
1 Parent(s): d6dcac7

Delete loading script

Browse files
Files changed (1) hide show
  1. tweet_qa.py +0 -110
tweet_qa.py DELETED
@@ -1,110 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """TWEETQA: A Social Media Focused Question Answering Dataset"""
16
-
17
-
18
- import json
19
- import os
20
-
21
- import datasets
22
-
23
-
24
# BibTeX entry for the TweetQA paper (Xiong et al., ACL 2019); surfaced via DatasetInfo.citation.
_CITATION = """\
@inproceedings{xiong2019tweetqa,
title={TweetQA: A Social Media Focused Question Answering Dataset},
author={Xiong, Wenhan and Wu, Jiawei and Wang, Hong and Kulkarni, Vivek and Yu, Mo and Guo, Xiaoxiao and Chang, Shiyu and Wang, William Yang},
booktitle={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
year={2019}
}
"""

# One-line summary shown in the dataset card / DatasetInfo.description.
_DESCRIPTION = """\
TweetQA is the first dataset for QA on social media data by leveraging news media and crowdsourcing.
"""

# Official project page for the dataset.
_HOMEPAGE = "https://tweetqa.github.io/"

# License string reported in DatasetInfo.license.
_LICENSE = "CC BY-SA 4.0"

# Zip archive holding the split files (extracts to TweetQA_data/{train,dev,test}.json).
_URL = "https://sites.cs.ucsb.edu/~xwhan/datasets/tweetqa.zip"
43
-
44
class TweetQA(datasets.GeneratorBasedBuilder):
    """TweetQA: first large-scale dataset for QA over social media data."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the DatasetInfo describing each example's features.

        Returns:
            datasets.DatasetInfo with string features ``Question``, ``Tweet``,
            ``qid`` and a sequence-of-strings feature ``Answer``.
        """
        features = datasets.Features(
            {
                "Question": datasets.Value("string"),
                # List of crowd-sourced answer strings; emitted empty for the
                # unlabeled test split (see _generate_examples).
                "Answer": datasets.Sequence(datasets.Value("string")),
                "Tweet": datasets.Value("string"),
                "qid": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the three splits.

        Args:
            dl_manager: datasets download manager used to fetch ``_URL``.

        Returns:
            List of SplitGenerators for train, test, and validation.
        """
        data_dir = dl_manager.download_and_extract(_URL)
        # Archive layout: TweetQA_data/{train,dev,test}.json. Drive the three
        # identical SplitGenerator declarations from one table instead of
        # repeating the boilerplate per split.
        split_specs = [
            (datasets.Split.TRAIN, "train", "train.json"),
            (datasets.Split.TEST, "test", "test.json"),
            (datasets.Split.VALIDATION, "dev", "dev.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "TweetQA_data", filename),
                    "split": split_tag,
                },
            )
            for split_name, split_tag, filename in split_specs
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(index, example)`` pairs from one split's JSON file.

        Args:
            filepath: Path to the split's JSON file (a JSON array of records).
            split: Split tag ("train", "dev", or "test").

        The test split ships without gold answers, so ``Answer`` is
        normalized to an empty list there.
        """
        with open(filepath, encoding="utf-8") as f:
            tweet_qa = json.load(f)
        # enumerate replaces the original hand-maintained idx counter.
        for idx, data in enumerate(tweet_qa):
            yield idx, {
                "Question": data["Question"],
                "Answer": [] if split == "test" else data["Answer"],
                "Tweet": data["Tweet"],
                "qid": data["qid"],
            }