Dataset: tweet_qa
Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
License: CC BY-SA 4.0
albertvillanova (HF staff) committed
Commit: d5c484b
1 parent: 0e7fbdf

Delete legacy JSON metadata (#2)


- Delete legacy JSON metadata (49c14f001e00cc6aaf9b2648035770076449446c)
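The dataset remains loadable after this deletion: with the legacy dataset_infos.json gone, 🤗 Datasets resolves split and feature metadata from the repository's README YAML and Parquet files instead. A minimal sketch, assuming the canonical Hub id tweet_qa:

    from datasets import load_dataset

    # Metadata (splits, features) is now read from the repo's README
    # YAML / Parquet files rather than the deleted dataset_infos.json.
    ds = load_dataset("tweet_qa")
    print(ds)  # DatasetDict with train/validation/test splits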

Files changed (1)
  1. dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
{
  "default": {
    "description": "TweetQA is the first dataset for QA on social media data by leveraging news media and crowdsourcing.\n",
    "citation": "@inproceedings{xiong2019tweetqa,\n title={TweetQA: A Social Media Focused Question Answering Dataset},\n author={Xiong, Wenhan and Wu, Jiawei and Wang, Hong and Kulkarni, Vivek and Yu, Mo and Guo, Xiaoxiao and Chang, Shiyu and Wang, William Yang},\n booktitle={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},\n year={2019}\n}\n",
    "homepage": "https://tweetqa.github.io/",
    "license": "CC BY-SA 4.0",
    "features": {
      "Question": {"dtype": "string", "id": null, "_type": "Value"},
      "Answer": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"},
      "Tweet": {"dtype": "string", "id": null, "_type": "Value"},
      "qid": {"dtype": "string", "id": null, "_type": "Value"}
    },
    "post_processed": null,
    "supervised_keys": null,
    "task_templates": null,
    "builder_name": "tweet_qa",
    "config_name": "default",
    "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "train": {"name": "train", "num_bytes": 2770036, "num_examples": 10692, "dataset_name": "tweet_qa"},
      "test": {"name": "test", "num_bytes": 473730, "num_examples": 1979, "dataset_name": "tweet_qa"},
      "validation": {"name": "validation", "num_bytes": 295435, "num_examples": 1086, "dataset_name": "tweet_qa"}
    },
    "download_checksums": {
      "https://sites.cs.ucsb.edu/~xwhan/datasets/tweetqa.zip": {"num_bytes": 1573980, "checksum": "e0db1b71836598aaea8785f1911369b5bca0d839504b97836eb5cb7427c7e4d9"}
    },
    "download_size": 1573980,
    "post_processing_size": null,
    "dataset_size": 3539201,
    "size_in_bytes": 5113181
  }
}