Dataset: gutenberg_time

Modalities: Tabular, Text
Formats: Parquet
Languages: English
ArXiv: 2011.04124
Libraries: Datasets, pandas
Commit 7e22f86 by albertvillanova (HF staff)
Parent: a798d2e

Convert dataset to Parquet (#2)

- Convert dataset to Parquet (c0bab4dc01afd66a83d7704596cc127f90aea568)
- Delete loading script (26c68f24d298f26a103f769f483105a3fa0cf41e)

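In `datasets` terms, the conversion this commit performs amounts to materializing the script-built dataset once and writing it back out as a Parquet shard. A minimal sketch, assuming the canonical repo id `gutenberg_time` (the shard path mirrors the file added below):

    from datasets import load_dataset

    # Pre-commit behaviour: load_dataset executed gutenberg_time.py,
    # which downloaded and parsed the upstream CSV.
    ds = load_dataset("gutenberg_time", split="train")

    # Write the materialized split as a single Parquet shard, matching
    # the file this commit adds.
    ds.to_parquet("gutenberg/train-00000-of-00001.parquet")
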
README.md CHANGED
@@ -20,6 +20,7 @@ task_ids:
 paperswithcode_id: gutenberg-time-dataset
 pretty_name: the Gutenberg Time dataset
 dataset_info:
+  config_name: gutenberg
   features:
   - name: guten_id
     dtype: string
@@ -35,13 +36,18 @@ dataset_info:
     dtype: int64
   - name: tok_context
     dtype: string
-  config_name: gutenberg
   splits:
   - name: train
-    num_bytes: 108550391
+    num_bytes: 108550183
     num_examples: 120694
-  download_size: 35853781
-  dataset_size: 108550391
+  download_size: 61311092
+  dataset_size: 108550183
+configs:
+- config_name: gutenberg
+  data_files:
+  - split: train
+    path: gutenberg/train-*
+  default: true
 ---
 
 # Dataset Card for the Gutenberg Time dataset
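The key addition is the `configs:` block, which tells the Hub which Parquet files back each split, so `load_dataset` no longer needs to execute any repository code. A minimal usage sketch (repo id `gutenberg_time` assumed as above):

    from datasets import load_dataset

    # Post-commit: the data is read straight from the Parquet shard
    # listed under configs.data_files; no loading script runs.
    ds = load_dataset("gutenberg_time", split="train")

    print(ds.num_rows)   # 120694, per the updated card
    print(ds.features)   # guten_id, hour_reference, time_phrase, ...
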
gutenberg/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8a21d3093c6b07fde9e14c3371d4efd7cea2d4f36ee1da399ebff3c08f98159
+size 61311092
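The three lines above are a Git LFS pointer, not the data itself: `oid` is the SHA-256 of the actual Parquet file, and `size 61311092` matches the `download_size` in the updated card. A hedged sketch for fetching the shard directly and cross-checking those numbers (repo id assumed as above):

    import pandas as pd
    from huggingface_hub import hf_hub_download

    # Download just the Parquet shard this commit adds.
    path = hf_hub_download(
        repo_id="gutenberg_time",   # assumed repo id
        filename="gutenberg/train-00000-of-00001.parquet",
        repo_type="dataset",
    )

    df = pd.read_parquet(path)
    assert len(df) == 120694        # num_examples from the dataset card
    print(df.dtypes)
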
gutenberg_time.py DELETED
@@ -1,108 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Recognizing the flow of time in a story is a crucial aspect of understanding it. Prior work related to time has primarily focused on identifying temporal expressions or relative sequencing of events, but here we propose computationally annotating each line of a book with wall clock times, even in the absence of explicit time-descriptive phrases. To do so, we construct a data set of hourly time phrases from 52,183 fictional books."""
-
-
-import csv
-import os
-
-import datasets
-
-
-_CITATION = """\
-@misc{kim2020time,
-    title={What time is it? Temporal Analysis of Novels},
-    author={Allen Kim and Charuta Pethe and Steven Skiena},
-    year={2020},
-    eprint={2011.04124},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
-"""
-
-_DESCRIPTION = """\
-A clean data resource containing all explicit time references in a dataset of 52,183 novels whose full text is available via Project Gutenberg.
-"""
-
-_HOMEPAGE = "https://github.com/allenkim/what-time-is-it"
-
-_LICENSE = "[More Information needed]"
-
-# The HuggingFace dataset library don't host the datasets but only point to the original files
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLs = {
-    "gutenberg": "https://github.com/TevenLeScao/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true",
-}
-
-
-class GutenbergTime(datasets.GeneratorBasedBuilder):
-    """Novel extracts with time-of-the-day information"""
-
-    VERSION = datasets.Version("1.1.3")
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="gutenberg", description="Data pulled from the Gutenberg project"),
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "guten_id": datasets.Value("string"),
-                "hour_reference": datasets.Value("string"),
-                "time_phrase": datasets.Value("string"),
-                "is_ambiguous": datasets.Value("bool_"),
-                "time_pos_start": datasets.Value("int64"),
-                "time_pos_end": datasets.Value("int64"),
-                "tok_context": datasets.Value("string"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        my_urls = _URLs[self.config.name]
-        data = dl_manager.download_and_extract(my_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data, "gutenberg_time_phrases.csv"),
-                    "split": "train",
-                },
-            )
-        ]
-
-    def _generate_examples(self, filepath, split):
-
-        with open(filepath, encoding="utf8") as f:
-            data = csv.reader(f)
-            next(data)
-            for id_, row in enumerate(data):
-                yield id_, {
-                    "guten_id": row[0],
-                    "hour_reference": row[1],
-                    "time_phrase": row[2],
-                    "is_ambiguous": row[3],
-                    "time_pos_start": row[4],
-                    "time_pos_end": row[5],
-                    "tok_context": row[6],
-                }
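
For anyone who still wants the raw CSV, the deleted script boils down to: download a zip from GitHub, open `gutenberg_time_phrases.csv`, skip the header, and yield one record per row. A standalone sketch of that same extraction (URL and filename taken from the script; assuming, as its `next(data)` call implies, that the first row is a header):

    import csv
    import io
    import urllib.request
    import zipfile

    URL = "https://github.com/TevenLeScao/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true"

    # Fetch the archive the deleted _split_generators pointed at.
    with urllib.request.urlopen(URL) as resp:
        archive = zipfile.ZipFile(io.BytesIO(resp.read()))

    # Replicate _generate_examples: stream rows, skipping the header.
    with archive.open("gutenberg_time_phrases.csv") as raw:
        reader = csv.reader(io.TextIOWrapper(raw, encoding="utf8"))
        next(reader)  # header row
        for i, row in enumerate(reader):
            guten_id, hour, phrase, ambiguous, start, end, context = row
            if i == 0:
                print(guten_id, hour, phrase)  # first example, for illustration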