Dayyan committed on
Commit
87c821c
1 Parent(s): fba2286

Add dataset

Files changed (6)
  1. .gitattributes +1 -0
  2. .gitignore +2 -0
  3. README.md +3 -0
  4. bwns.py +98 -0
  5. dataset_infos.json +1 -0
  6. train.csv +3 -0
.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ train.csv filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+ /.env
+ .DS_Store
README.md ADDED
@@ -0,0 +1,3 @@
+ # BWNS: The Baha'i World News Service dataset
+
+ BWNS articles from 2000 to 2022.
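A minimal usage sketch for the loading script added in this commit. The Hub id "Dayyan/bwns" is an assumption based on the committer name; substitute the dataset's actual repo path.

    from datasets import load_dataset

    # "Dayyan/bwns" is a hypothetical repo id; the default config is "original".
    ds = load_dataset("Dayyan/bwns")
    print(ds["train"][0]["title"])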
bwns.py ADDED
@@ -0,0 +1,98 @@
+ import csv
+ import datasets
+
+ _DESCRIPTION = "BWNS: The Bahai World News Service dataset."
+ _HOMEPAGE = "https://news.bahai.org/"
+ _URLS = {
+     "train": "./train.csv",
+ }
+
+
+ class BwnsDataset(datasets.GeneratorBasedBuilder):
+     """BWNS: The Bahai World News Service dataset."""
+
+     BUILDER_CONFIG_CLASS = datasets.BuilderConfig
+
+     VERSION = datasets.Version("1.1.0")
+     NAME = "BWNS"
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="original", version=VERSION, description="Original"),
+         datasets.BuilderConfig(name="concat", version=VERSION, description="Title and content concatenated"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "original"
+
+     def _info(self):
+         # Specifies the datasets.DatasetInfo object, which declares the feature names and types for each configuration.
+         if self.config.name == "original":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "date": datasets.Value("timestamp[s]"),
+                     "city": datasets.Value("string"),
+                     "region": datasets.Value("string"),
+                     "country": datasets.Value("string"),
+                     "related_articles": datasets.Sequence(datasets.Value("int32")),
+                     "title": datasets.Value("string"),
+                     "content": datasets.Value("string"),
+                 }
+             )
+         elif self.config.name == "concat":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "date": datasets.Value("timestamp[s]"),
+                     "city": datasets.Value("string"),
+                     "region": datasets.Value("string"),
+                     "country": datasets.Value("string"),
+                     "related_articles": datasets.Sequence(datasets.Value("int32")),
+                     "title_and_content": datasets.Value("string"),
+                 }
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dirs = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs are passed to _generate_examples.
+                 gen_kwargs={
+                     "filepath": data_dirs['train'],
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath, split):
+         with open(filepath, encoding="utf-8") as f:
+             reader = csv.DictReader(f)
+             for row in reader:
+                 if self.config.name == 'original':
+                     yield row['id'], {
+                         "id": int(row['id']),
+                         "date": row['date'],
+                         "city": row['city'],
+                         "region": row['region'],
+                         "country": row['country'],
+                         # related_articles is a stringified list, e.g. "[1, 2]"; strip the brackets and parse the ids.
+                         "related_articles": [int(x) for x in row['related_articles'][1:-1].split(',') if x],
+                         "title": row['title'],
+                         "content": row['content'],
+                     }
+                 elif self.config.name == 'concat':
+                     yield row['id'], {
+                         "id": int(row['id']),
+                         "date": row['date'],
+                         "city": row['city'],
+                         "region": row['region'],
+                         "country": row['country'],
+                         "related_articles": [int(x) for x in row['related_articles'][1:-1].split(',') if x],
+                         "title_and_content": f"{row['title']} {row['content']}",
+                     }
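The `related_articles` column appears to be stored in the CSV as a stringified list, which `_generate_examples` parses by stripping the brackets and splitting on commas. A small self-contained check of that expression:

    # Same parsing expression as used in _generate_examples above.
    row_value = "[101, 102, 103]"
    parsed = [int(x) for x in row_value[1:-1].split(",") if x]
    assert parsed == [101, 102, 103]

    # An empty list "[]" yields [], because the `if x` filter drops the
    # empty string produced by splitting "".
    assert [int(x) for x in "[]"[1:-1].split(",") if x] == []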
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"original": {"description": "BWNS: The Bahai World News Service dataset.", "citation": "", "homepage": "https://news.bahai.org/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "date": {"dtype": "timestamp[s]", "id": null, "_type": "Value"}, "city": {"dtype": "string", "id": null, "_type": "Value"}, "region": {"dtype": "string", "id": null, "_type": "Value"}, "country": {"dtype": "string", "id": null, "_type": "Value"}, "related_articles": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "bwns_dataset", "config_name": "original", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5642343, "num_examples": 1387, "dataset_name": "bwns_dataset"}}, "download_checksums": {"./train.csv": {"num_bytes": 5648505, "checksum": "59dc4afe0f07ec8a3815de47451ae47b38ac10ea5037166dcb8a125ca1df4f28"}}, "download_size": 5648505, "post_processing_size": null, "dataset_size": 5642343, "size_in_bytes": 11290848}, "concat": {"description": "BWNS: The Bahai World News Service dataset.", "citation": "", "homepage": "https://news.bahai.org/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "date": {"dtype": "timestamp[s]", "id": null, "_type": "Value"}, "city": {"dtype": "string", "id": null, "_type": "Value"}, "region": {"dtype": "string", "id": null, "_type": "Value"}, "country": {"dtype": "string", "id": null, "_type": "Value"}, "related_articles": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "title_and_content": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "bwns_dataset", "config_name": "concat", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5638182, "num_examples": 1387, "dataset_name": "bwns_dataset"}}, "download_checksums": {"./train.csv": {"num_bytes": 5648505, "checksum": "59dc4afe0f07ec8a3815de47451ae47b38ac10ea5037166dcb8a125ca1df4f28"}}, "download_size": 5648505, "post_processing_size": null, "dataset_size": 5638182, "size_in_bytes": 11286687}}
train.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59dc4afe0f07ec8a3815de47451ae47b38ac10ea5037166dcb8a125ca1df4f28
+ size 5648505
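The three lines above are a Git LFS pointer, not the CSV data itself; the actual 5.6 MB file is fetched by `git lfs pull`. A sketch of reading the pointer fields, assuming the checkout still holds the pointer text rather than the real file:

    # Parse the "key value" lines of an LFS pointer file.
    pointer = {}
    with open("train.csv", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            pointer[key] = value

    print(pointer["oid"])   # sha256:59dc4afe...
    print(pointer["size"])  # 5648505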