albertvillanova HF staff committed on
Commit
1a5d5ee
1 Parent(s): 6b177ce

Convert dataset to Parquet (#2)

Browse files

- Convert dataset to Parquet (cc356cd779eea591c1730fdf9e9d1d52d3b639c8)
- Delete loading script (5bac74b3090904c4abb8d0f6f00f82eb1b4519f4)

README.md CHANGED
@@ -19,7 +19,6 @@ task_categories:
19
  task_ids:
20
  - language-modeling
21
  - masked-language-modeling
22
- paperswithcode_id: null
23
  pretty_name: HebrewSentiment
24
  dataset_info:
25
  features:
@@ -47,10 +46,15 @@ dataset_info:
47
  dtype: string
48
  splits:
49
  - name: train
50
- num_bytes: 678389435
51
  num_examples: 2028
52
- download_size: 678322912
53
- dataset_size: 678389435
 
 
 
 
 
54
  ---
55
 
56
  # Dataset Card for HebrewSentiment
 
19
  task_ids:
20
  - language-modeling
21
  - masked-language-modeling
 
22
  pretty_name: HebrewSentiment
23
  dataset_info:
24
  features:
 
46
  dtype: string
47
  splits:
48
  - name: train
49
+ num_bytes: 678389399
50
  num_examples: 2028
51
+ download_size: 377354225
52
+ dataset_size: 678389399
53
+ configs:
54
+ - config_name: default
55
+ data_files:
56
+ - split: train
57
+ path: data/train-*
58
  ---
59
 
60
  # Dataset Card for HebrewSentiment
data/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:794073638c9336fa5d2fc3836e274bc5147a57261302db7d3d4308e6cd712a32
3
+ size 128569788
data/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29746a10f324e2d8902e38b9a61389d5012a4e0ff1fa65ee7ece70636c324e78
3
+ size 248784437
hebrew_this_world.py DELETED
@@ -1,97 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """HebrewThisWorld: A corpus from https://thisworld.online/."""
18
-
19
-
20
- import csv
21
- import ctypes
22
-
23
- import datasets
24
-
25
-
26
- _DESCRIPTION = """\
27
- HebrewThisWorld is a data set consists of 2028 issues of the newspaper 'This World' edited by Uri Avnery and were published between 1950 and 1989. Released under the AGPLv3 license."""
28
-
29
- csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
30
-
31
- _TRAIN_DOWNLOAD_URLS = [
32
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_0.csv",
33
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_1.csv",
34
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_2.csv",
35
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_3.csv",
36
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_4.csv",
37
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_5.csv",
38
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_6.csv",
39
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_7.csv",
40
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_8.csv",
41
- "https://github.com/imvladikon/datasets_additional/raw/master/data/thisworld1/metadata_9.csv",
42
- ]
43
-
44
-
45
class HebrewThisWorld(datasets.GeneratorBasedBuilder):
    """HebrewThisWorld: Corpus from the newspaper ThisWorld"""

    VERSION = datasets.Version("0.1.0")

    # Column names shared by the metadata CSV files and the feature schema.
    # Order matters: it fixes the order of the declared dataset features.
    _FIELDS = (
        "issue_num",
        "page_count",
        "date",
        "date_he",
        "year",
        "href",
        "pdf",
        "coverpage",
        "backpage",
        "content",
        "url",
    )

    def _info(self):
        """Return the dataset metadata: description, feature schema, homepage."""
        # issue_num and page_count are integers; every other column is a string.
        schema = {
            name: datasets.Value("int64" if name in ("issue_num", "page_count") else "string")
            for name in self._FIELDS
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(schema),
            homepage="https://github.com/thisworld1/thisworld.online/",
        )

    def _split_generators(self, dl_manager):
        """Download all metadata CSV shards and expose them as one train split."""
        downloaded_paths = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_paths}),
        ]

    def _generate_examples(self, filepath):
        """Generate Hebrew ThisWorld examples.

        `filepath` is the list of downloaded CSV shard paths; each row of each
        shard becomes one example, keyed by its issue number.
        """
        for shard_path in filepath:
            with open(shard_path, encoding="utf-8") as handle:
                for row in csv.DictReader(handle):
                    yield row["issue_num"], {name: row[name] for name in self._FIELDS}