albertvillanova (HF staff) committed
Commit 116216d (1 parent: e8e75d2)

Convert dataset to Parquet (#3)


- Convert dataset to Parquet (1978c528602a86a83e6bca25d7ef20709329b621)
- Delete loading script (919216c60d98865e14eeaa10c3dc6886b694a4c5)

README.md CHANGED
@@ -42,13 +42,20 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 24480402
+    num_bytes: 24480386
     num_examples: 2815
   - name: validation
-    num_bytes: 6752242
+    num_bytes: 6752226
     num_examples: 761
-  download_size: 8569575
-  dataset_size: 31232644
+  download_size: 11831065
+  dataset_size: 31232612
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
 ---
 
 # Dataset Card for Clickbait/Fake News in Bulgarian
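With the new `configs` block in the YAML header, the `datasets` library resolves the Parquet shards directly from `data_files` instead of executing a loading script. A minimal sketch of loading the converted dataset; the Hub repo ID `clickbait_news_bg` is an assumption (adjust to the actual namespace if needed):

```python
# A minimal sketch, assuming the dataset is published on the Hub as
# "clickbait_news_bg" (the repo ID is not stated in this diff).
from datasets import load_dataset

ds = load_dataset("clickbait_news_bg")  # data_files resolved from the YAML `configs` block
print(ds["train"].num_rows)       # expected 2815, per the splits metadata above
print(ds["validation"].num_rows)  # expected 761
```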
clickbait_news_bg.py DELETED
@@ -1,119 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Dataset with clickbait and fake news in Bulgarian."""
-
-
-import openpyxl  # noqa: requires this pandas optional dependency for reading xlsx files
-import pandas as pd
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{clickbait_news_bg,
-title = {Dataset with clickbait and fake news in Bulgarian. Introduced for the Hack the Fake News 2017.},
-authors={Data Science Society},
-year={2017},
-url={https://gitlab.com/datasciencesociety/case_fake_news/}
-}
-"""
-
-# TODO: Add description of the dataset here
-# You can copy an official description
-_DESCRIPTION = """\
-Dataset with clickbait and fake news in Bulgarian. Introduced for the Hack the Fake News 2017.
-"""
-
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = "https://gitlab.com/datasciencesociety/case_fake_news/"
-
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
-
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace datasets library doesn't host the datasets but only points to the original files
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLs = {
-    "default_train": "https://gitlab.com/datasciencesociety/case_fake_news/-/raw/master/data/FN_Training_Set.xlsx",
-    "default_validation": "https://gitlab.com/datasciencesociety/case_fake_news/-/raw/master/data/FN_Validation_Set.xlsx",
-}
-
-
-class ClickbaitNewsBG(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.1.0")
-    DEFAULT_CONFIG_NAME = "default"
-
-    def _info(self):
-        if self.config.name == "default":
-            features = datasets.Features(
-                {
-                    "fake_news_score": datasets.features.ClassLabel(names=["legitimate", "fake"]),
-                    "click_bait_score": datasets.features.ClassLabel(names=["normal", "clickbait"]),
-                    "content_title": datasets.Value("string"),
-                    "content_url": datasets.Value("string"),
-                    "content_published_time": datasets.Value("string"),
-                    "content": datasets.Value("string"),
-                }
-            )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download(_URLs)
-
-        return [
-            datasets.SplitGenerator(
-                name=spl_enum,
-                gen_kwargs={
-                    "filepath": data_dir[f"{self.config.name}_{spl}"],
-                    "split": spl,
-                },
-            )
-            for spl, spl_enum in [
-                ("train", datasets.Split.TRAIN),
-                ("validation", datasets.Split.VALIDATION),
-            ]
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-        keys = [
-            "fake_news_score",
-            "click_bait_score",
-            "content_title",
-            "content_url",
-            "content_published_time",
-            "content",
-        ]
-        with open(filepath, "rb") as f:
-            data = pd.read_excel(f, engine="openpyxl")
-            for id_, row in enumerate(data.itertuples()):
-                row_dict = dict()
-                for key, value in zip(keys, row[1:]):
-                    if key == "fake_news_score":
-                        row_dict[key] = "legitimate" if value == 1 else "fake"
-                    elif key == "click_bait_score":
-                        row_dict[key] = "normal" if value == 1 else "clickbait"
-                    else:
-                        row_dict[key] = str(value)
-                yield id_, row_dict
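With the script gone, the label mapping it performed (1 maps to legitimate/normal, anything else to fake/clickbait) lives on in the Parquet shards as `ClassLabel` integer indices. A minimal sketch of reading a shard directly with pandas and restoring the string labels, assuming a local checkout of this repo with the LFS files pulled; the name lists mirror the deleted script's `_info()` features:

```python
# A minimal sketch: read a converted shard with pandas and map the ClassLabel
# integer indices back to strings. Index order mirrors the `names=[...]` lists
# in the deleted script; the local path assumes a checked-out repo copy.
import pandas as pd

FAKE_NEWS_NAMES = ["legitimate", "fake"]    # ClassLabel indices 0 and 1
CLICK_BAIT_NAMES = ["normal", "clickbait"]

df = pd.read_parquet("data/train-00000-of-00001.parquet")
df["fake_news_score"] = df["fake_news_score"].map(FAKE_NEWS_NAMES.__getitem__)
df["click_bait_score"] = df["click_bait_score"].map(CLICK_BAIT_NAMES.__getitem__)
print(df[["fake_news_score", "click_bait_score", "content_title"]].head())
```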
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d6662718de9e679a3bce60784a126fc4ecb632b391325a6afa87bf677ba70dd
+size 9021963
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:294a967642e26b5f570c3f89f0490ac77d1ef26406025a6f09a0a80d7d629f6a
+size 2809102
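These three-line files are Git LFS pointers, not the Parquet data itself; the actual payloads are stored by their sha256 digests in LFS. A minimal sketch of fetching the real train shard and checking it against the pointer's `size` field, again assuming the Hub repo ID `clickbait_news_bg`:

```python
# A minimal sketch: download the Parquet payload behind the LFS pointer with
# huggingface_hub, then verify it against the pointer's `size` field.
# The repo ID "clickbait_news_bg" is an assumption, not stated in this diff.
import os

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="clickbait_news_bg",
    repo_type="dataset",
    filename="data/train-00000-of-00001.parquet",
)
assert os.path.getsize(path) == 9021963  # the `size` line of the train pointer
```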