system HF staff committed on
Commit
d67f9a1
0 Parent(s):

Update files from the datasets library (from 1.2.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
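
Each of these `.gitattributes` lines routes files whose names match the glob through Git LFS instead of regular Git storage. A minimal Python sketch (not part of this commit) of how one could check which repository paths these rules would capture; note that `fnmatch` only approximates gitattributes glob semantics (e.g. `**` and `/` handling differ), and the sample paths are drawn from this repo:

```python
from fnmatch import fnmatch

# Glob patterns copied from the .gitattributes above.
LFS_PATTERNS = [
    "*.7z", "*.arrow", "*.bin", "*.bin.*", "*.bz2", "*.ftz", "*.gz",
    "*.h5", "*.joblib", "*.lfs.*", "*.model", "*.msgpack", "*.onnx",
    "*.ot", "*.parquet", "*.pb", "*.pt", "*.pth", "*.rar",
    "saved_model/**/*", "*.tar.*", "*.tflite", "*.tgz", "*.xz",
    "*.zip", "*.zstandard", "*tfevents*",
]

def tracked_by_lfs(path: str) -> bool:
    """Return True if any LFS pattern matches the path or its basename.

    fnmatch is an approximation: gitattributes matching is path-aware,
    fnmatch's "*" is not.
    """
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(path, p) or fnmatch(name, p) for p in LFS_PATTERNS)

print(tracked_by_lfs("dummy/1.0.0/dummy_data.zip"))  # True  (*.zip)
print(tracked_by_lfs("urdu_fake_news.py"))           # False
```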
README.md ADDED
@@ -0,0 +1,141 @@
+ ---
+ annotations_creators:
+ - expert-generated
+ language_creators:
+ - expert-generated
+ languages:
+ - ur
+ licenses:
+ - unknown
+ multilinguality:
+ - monolingual
+ size_categories:
+ - n<1K
+ source_datasets:
+ - original
+ task_categories:
+ - text-classification
+ task_ids:
+ - fact-checking
+ - intent-classification
+ ---
+
+ # Dataset Card for Bend the Truth (Urdu Fake News)
+
+ ## Table of Contents
+
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** [Github](https://github.com/MaazAmjad/Datasets-for-Urdu-news/)
+ - **Repository:** [Github](https://github.com/MaazAmjad/Datasets-for-Urdu-news/)
+ - **Paper:**
+ - **Leaderboard:**
+ - **Point of Contact:** [Maaz Amjad](https://github.com/MaazAmjad)
+
+ ### Dataset Summary
+
+ [More Information Needed]
+
+ ### Supported Tasks and Leaderboards
+
+ [More Information Needed]
+
+ ### Languages
+
+ [More Information Needed]
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ [More Information Needed]
+
+ ### Data Fields
+
+ - news: the text of the news article, as a string in Urdu
+ - label: the label indicating whether the provided news is real or fake
+ - category: the domain of the news article; the five classes are Sports, Health, Technology, Entertainment, and Business
+
+ ### Data Splits
+
+ [More Information Needed]
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ [More Information Needed]
+
+ ### Citation Information
+
+ [More Information Needed]
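
The data fields in the card map directly onto the features declared by the loading script added below. A minimal usage sketch, assuming the dataset is available under the canonical id `urdu_fake_news` as in datasets 1.2.0:

```python
from datasets import load_dataset

# Assumes the canonical id "urdu_fake_news"; point load_dataset at a
# local copy of urdu_fake_news.py instead if the id is unavailable.
ds = load_dataset("urdu_fake_news")

train = ds["train"]                      # 638 examples per dataset_infos.json
print(train.features["label"].names)     # ['Fake', 'Real']
print(train.features["category"].names)  # ['bus', 'hlth', 'sp', 'tch', 'sbz']

example = train[0]
print(example["news"][:100], example["label"], example["category"])
```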
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "\nUrdu fake news datasets that contain news of 5 different news domains.\nThese domains are Sports, Health, Technology, Entertainment, and Business.\nThe real news are collected by combining manual approaches.\n", "citation": "\n@article{MaazUrdufake2020,\nauthor = {Amjad, Maaz and Sidorov, Grigori and Zhila, Alisa and G\u2019{o}mez-Adorno, Helena and Voronkov, Ilia and Gelbukh, Alexander},\ntitle = {Bend the Truth: A Benchmark Dataset for Fake News Detection in Urdu and Its Evaluation},\njournal={Journal of Intelligent & Fuzzy Systems},\nvolume={39},\nnumber={2},\npages={2457-2469},\ndoi = {10.3233/JIFS-179905},\nyear={2020},\npublisher={IOS Press}\n}\n", "homepage": "https://github.com/MaazAmjad/Datasets-for-Urdu-news", "license": "", "features": {"news": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["Fake", "Real"], "names_file": null, "id": null, "_type": "ClassLabel"}, "category": {"num_classes": 5, "names": ["bus", "hlth", "sp", "tch", "sbz"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "urdu_fake_news", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1762905, "num_examples": 638, "dataset_name": "urdu_fake_news"}, "test": {"name": "test", "num_bytes": 799587, "num_examples": 262, "dataset_name": "urdu_fake_news"}}, "download_checksums": {"https://github.com/MaazAmjad/Datasets-for-Urdu-news/blob/master/Urdu%20Fake%20News%20Dataset.zip?raw=true": {"num_bytes": 1042653, "checksum": "1e1bc010455bcb25d26f65a8759412ef967f2e8106f0c6fd94ef12f1e9cda3e1"}}, "download_size": 1042653, "post_processing_size": null, "dataset_size": 2562492, "size_in_bytes": 3605145}}
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31df3235a55aeefb35695db00ad9f101bce07cf8c25ab893904d4454e76fea95
+ size 10740
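
Because `*.zip` is LFS-tracked by the `.gitattributes` above, the repository stores only this three-line pointer; the 10,740-byte zip itself lives in LFS storage. A small illustrative sketch that parses such a spec-v1 pointer file:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a git-lfs spec v1 pointer into its key/value fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"] == "https://git-lfs.github.com/spec/v1"
    algo, digest = fields["oid"].split(":", 1)
    return {"algorithm": algo, "digest": digest, "size": int(fields["size"])}

pointer = """\
version https://git-lfs.github.com/spec/v1
oid sha256:31df3235a55aeefb35695db00ad9f101bce07cf8c25ab893904d4454e76fea95
size 10740
"""
print(parse_lfs_pointer(pointer))
```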
urdu_fake_news.py ADDED
@@ -0,0 +1,97 @@
+ """Urdu Fake News Dataset"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import glob
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @article{MaazUrdufake2020,
+ author = {Amjad, Maaz and Sidorov, Grigori and Zhila, Alisa and G\'{o}mez-Adorno, Helena and Voronkov, Ilia and Gelbukh, Alexander},
+ title = {Bend the Truth: A Benchmark Dataset for Fake News Detection in Urdu and Its Evaluation},
+ journal={Journal of Intelligent & Fuzzy Systems},
+ volume={39},
+ number={2},
+ pages={2457-2469},
+ doi = {10.3233/JIFS-179905},
+ year={2020},
+ publisher={IOS Press}
+ }
+ """
+
+ _DESCRIPTION = """
+ Urdu fake news datasets that contain news of 5 different news domains.
+ These domains are Sports, Health, Technology, Entertainment, and Business.
+ The real news are collected by combining manual approaches.
+ """
+
+ _URL = "https://github.com/MaazAmjad/Datasets-for-Urdu-news/blob/master/"
+ _URL += "Urdu%20Fake%20News%20Dataset.zip?raw=true"
+
+
+ class UrduFakeNews(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     category_list = [
+         "bus",
+         "hlth",
+         "sp",
+         "tch",
+         "sbz",
+     ]
+
+     def _info(self):
+         labels_list = ["Fake", "Real"]
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "news": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=labels_list),
+                     "category": datasets.ClassLabel(names=self.category_list),
+                 }
+             ),
+             homepage="https://github.com/MaazAmjad/Datasets-for-Urdu-news",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_path = dl_manager.download_and_extract(_URL)
+         input_path = os.path.join(dl_path, "1.Corpus")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"pattern": os.path.join(input_path, "Train", "*", "*.txt")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"pattern": os.path.join(input_path, "Test", "*", "*.txt")},
+             ),
+         ]
+
+     def _generate_examples(self, pattern=None):
+         """Yields examples."""
+         for filename in sorted(glob.glob(pattern)):
+
+             with open(filename, encoding="utf-8") as f:
+                 news = ""
+                 for line in f:
+                     if line == "\n":  # skip blank lines
+                         continue
+                     news += line
+
+             name = os.path.basename(filename)
+             key = name[: -len(".txt")] if name.endswith(".txt") else name  # str.rstrip strips a character set, not a suffix
+
+             _class = 1 if ("Real" in filename) else 0  # label from the "Real"/"Fake" directory in the path
+             category = "".join([i for i in key if not i.isdigit()])  # e.g. "bus12" -> "bus"
+             if category == "":
+                 continue
+             category = self.category_list.index(category)
+
+             yield key, {"news": news, "label": _class, "category": category}
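
The per-example logic above derives everything from the file path: the split from the `Train`/`Test` directory, the label from whether the path contains `Real`, and the category from the non-digit prefix of the file name. A small standalone sketch of that same derivation, using hypothetical file names in the layout the loader expects:

```python
import os

category_list = ["bus", "hlth", "sp", "tch", "sbz"]

def describe(path: str) -> dict:
    """Mirror the loader's path-based labeling for a single file path."""
    name = os.path.basename(path)
    key = name[: -len(".txt")] if name.endswith(".txt") else name
    label = "Real" if "Real" in path else "Fake"
    category = "".join(ch for ch in key if not ch.isdigit())
    return {"key": key, "label": label, "category": category}

# Hypothetical paths:
print(describe("1.Corpus/Train/Real/bus12.txt"))  # category "bus"  (Business)
print(describe("1.Corpus/Test/Fake/hlth3.txt"))   # category "hlth" (Health)
```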