Commit 268f6c1, committed by parquet-converter
Parent: a6f9aa7

Update parquet files
.gitattributes DELETED
@@ -1,38 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- train.csv filter=lfs diff=lfs merge=lfs -text
.gitignore DELETED
@@ -1,2 +0,0 @@
- /.env
- .DS_Store
README.md DELETED
@@ -1,3 +0,0 @@
- # BWNS: The Baha'i World News Service dataset.
-
- BWNS articles from 2000 to 2022.
bwns.py DELETED
@@ -1,108 +0,0 @@
- import csv
- import datasets
-
- _DESCRIPTION = "BWNS: The Bahai World News Service dataset."
- _HOMEPAGE = "https://news.bahai.org/"
- _URLS = {
-     "train": "./train.csv",
- }
-
-
- class BwnsDataset(datasets.GeneratorBasedBuilder):
-     """BWNS: The Bahai World News Service dataset."""
-
-     BUILDER_CONFIG_CLASS=datasets.BuilderConfig
-
-     VERSION = datasets.Version("1.1.0")
-     NAME = "BWNS"
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="original", version=VERSION, description="Original"),
-         datasets.BuilderConfig(name="concat", version=VERSION, description="Title and content concatenated"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "original"
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-         if self.config.name == "original":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("int32"),
-                     "date": datasets.Value("timestamp[s]"),
-                     "city": datasets.Value("string"),
-                     "region": datasets.Value("string"),
-                     "country": datasets.Value("string"),
-                     "related_articles": datasets.Sequence(datasets.Value("int32")),
-                     "title": datasets.Value("string"),
-                     "content": datasets.Value("string"),
-                     "latitude": datasets.Value("string"),
-                     "longitude": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name == "concat":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("int32"),
-                     "date": datasets.Value("timestamp[s]"),
-                     "city": datasets.Value("string"),
-                     "region": datasets.Value("string"),
-                     "country": datasets.Value("string"),
-                     "related_articles": datasets.Sequence(datasets.Value("int32")),
-                     "title": datasets.Value("string"),
-                     "title_and_content": datasets.Value("string"),
-                     "latitude": datasets.Value("string"),
-                     "longitude": datasets.Value("string"),
-                 }
-             )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_dirs = dl_manager.download_and_extract(_URLS)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": data_dirs['train'],
-                     "split": "train",
-                 },
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         with open(filepath, encoding="utf-8") as f:
-             reader = csv.DictReader(f)
-             for row in reader:
-                 if self.config.name == 'original':
-                     yield row['id'], {
-                         "id": int(row['id']),
-                         "date": row['date'],
-                         "city": row['city'],
-                         "region": row['region'],
-                         "country": row['country'],
-                         "latitude": row['latitude'],
-                         "longitude": row['longitude'],
-                         "related_articles": [int(x) for x in row['related_articles'][1:-1].split(' ') if x],
-                         "title": row['title'],
-                         "content": row['content'],
-                     }
-                 elif self.config.name == 'concat':
-                     yield row['id'], {
-                         "id": int(row['id']),
-                         "date": row['date'],
-                         "city": row['city'],
-                         "region": row['region'],
-                         "country": row['country'],
-                         "latitude": row['latitude'],
-                         "longitude": row['longitude'],
-                         "related_articles": [int(x) for x in row['related_articles'][1:-1].split(' ') if x],
-                         "title": row['title'],
-                         "title_and_content": f"{row['title']} {row['content']}",
-                     }
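For reference, the deleted script above was a standard datasets loading script exposing two configurations, "original" and "concat". A minimal sketch of how it would have been invoked before this conversion; the repository id below is a placeholder, not something taken from this commit:

from datasets import load_dataset

# Placeholder repository id; substitute the dataset's actual Hub path.
ds = load_dataset("user/bwns", "concat", split="train")

# The "concat" config joins each article's title and content into one field.
print(ds[0]["title_and_content"][:100])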
train.csv → concat/bwns-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:25f6c516704bfdefe0db723648498a26333191a04f378db36601f5e9ebd3d5ba
- size 5671511
+ oid sha256:34dadea9e3f2505ce38bfaa491c2f3280ceae68ebb81307d23380e4abf0ae83c
+ size 3192433
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"original": {"description": "BWNS: The Bahai World News Service dataset.", "citation": "", "homepage": "https://news.bahai.org/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "date": {"dtype": "timestamp[s]", "id": null, "_type": "Value"}, "city": {"dtype": "string", "id": null, "_type": "Value"}, "region": {"dtype": "string", "id": null, "_type": "Value"}, "country": {"dtype": "string", "id": null, "_type": "Value"}, "related_articles": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}, "latitude": {"dtype": "string", "id": null, "_type": "Value"}, "longitude": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "bwns_dataset", "config_name": "original", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5676943, "num_examples": 1387, "dataset_name": "bwns_dataset"}}, "download_checksums": {"./train.csv": {"num_bytes": 5671511, "checksum": "25f6c516704bfdefe0db723648498a26333191a04f378db36601f5e9ebd3d5ba"}}, "download_size": 5671511, "post_processing_size": null, "dataset_size": 5676943, "size_in_bytes": 11348454}, "concat": {"description": "BWNS: The Bahai World News Service dataset.", "citation": "", "homepage": "https://news.bahai.org/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "date": {"dtype": "timestamp[s]", "id": null, "_type": "Value"}, "city": {"dtype": "string", "id": null, "_type": "Value"}, "region": {"dtype": "string", "id": null, "_type": "Value"}, "country": {"dtype": "string", "id": null, "_type": "Value"}, "related_articles": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "title_and_content": {"dtype": "string", "id": null, "_type": "Value"}, "latitude": {"dtype": "string", "id": null, "_type": "Value"}, "longitude": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "bwns_dataset", "config_name": "concat", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5758374, "num_examples": 1387, "dataset_name": "bwns_dataset"}}, "download_checksums": {"./train.csv": {"num_bytes": 5671511, "checksum": "25f6c516704bfdefe0db723648498a26333191a04f378db36601f5e9ebd3d5ba"}}, "download_size": 5671511, "post_processing_size": null, "dataset_size": 5758374, "size_in_bytes": 11429885}}
original/bwns-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8324ccd86379e38c9a8b578578ed494535dbbcd656c13aa4244183f1579a87ea
+ size 3152519
train.bkp.csv DELETED
The diff for this file is too large to render. See raw diff
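With this commit the data ships as plain parquet files, so it can also be read directly, without any loading script. A minimal sketch, assuming the two parquet files added above have been fetched locally (for example via a git LFS checkout):

import pandas as pd

# Paths mirror the files introduced in this commit.
original = pd.read_parquet("original/bwns-train.parquet")
concat = pd.read_parquet("concat/bwns-train.parquet")

print(len(original), "rows")
print(list(concat.columns))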