albertvillanova committed
Commit 69abdc6
1 parent: 39ed31d

Add loading script

Files changed (1)
  1. gallica_literary_fictions.py +108 -0
gallica_literary_fictions.py ADDED
@@ -0,0 +1,108 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fictions littéraires de Gallica / Literary fictions of Gallica dataset."""
+
+
+import csv
+import time
+
+import requests
+
+import datasets
+
+
+_HOMEPAGE = "https://zenodo.org/record/4751204"
+
+_DESCRIPTION = """\
+The collection "Fictions littéraires de Gallica" includes 19,240 public domain documents from the digital platform of the French National Library that were originally classified as novels or, more broadly, as literary fiction in prose. It consists of 372 tables of data in TSV format, one per year of publication from 1600 to 1996 (all the missing years fall in the 17th and 20th centuries). Each table is structured at the page level of each novel (5,723,986 pages in all) and contains the complete text along with some metadata. It can be opened in Excel or, preferably, with data analysis environments in R or Python (tidyverse, pandas…).
+
+This corpus can be used for large-scale quantitative analyses in computational humanities. The OCR text is presented in a raw format, without any correction or enrichment, so that it can be directly processed for text mining purposes.
+
+The extraction is based on a historical categorization of the novels: the Y2 or Ybis classification. This classification, created in 1730, is the only one that has been continuously applied to the BnF collections now available in the public domain (mainly before 1950). Consequently, the dataset is based on a definition of "novel" that is generally contemporaneous with the publication.
+
+A French data paper (in PDF and HTML) presents the construction process of the Y2 category and describes the structure of the corpus. It also gives several examples of possible uses for computational humanities projects.
+"""
+
+_LICENSE = "Creative Commons Zero v1.0 Universal"
+
+_CITATION = """\
+@dataset{langlais_pierre_carl_2021_4751204,
+  author    = {Langlais, Pierre-Carl},
+  title     = {{Fictions littéraires de Gallica / Literary
+               fictions of Gallica}},
+  month     = apr,
+  year      = 2021,
+  publisher = {Zenodo},
+  version   = 1,
+  doi       = {10.5281/zenodo.4751204},
+  url       = {https://doi.org/10.5281/zenodo.4751204}
+}
+"""
+
+ZENODO_API_URL = "https://zenodo.org/api/records/4751204"
+
+
+class GallicaLiteraryFictions(datasets.GeneratorBasedBuilder):
+
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "main_id": datasets.Value("string"),
+                "catalogue_id": datasets.Value("string"),
+                "titre": datasets.Value("string"),
+                "nom_auteur": datasets.Value("string"),
+                "prenom_auteur": datasets.Value("string"),
+                "date": datasets.Value("uint16"),
+                "document_ocr": datasets.Value("uint8"),
+                "date_enligne": datasets.Value("string"),
+                "gallica": datasets.Value("string"),
+                "page": datasets.Value("uint16"),
+                "texte": datasets.Value("string"),
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # Query the Zenodo API to list the ZIP archives attached to the record
+        zenodo_record = requests.get(ZENODO_API_URL).json()
+        urls = sorted([file["links"]["self"] for file in zenodo_record["files"] if file["type"] == "zip"])
+        data_dirs = []
+        for url in datasets.logging.tqdm(urls):
+            data_dirs.append(dl_manager.download_and_extract(url))
+            time.sleep(1)  # throttle downloads to avoid HTTP 429 (Too Many Requests)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "data_paths": dl_manager.iter_files(data_dirs),
+                },
+            )
+        ]
+
+    def _generate_examples(self, data_paths):
+        key = 0
+        for path in data_paths:
+            with open(path, encoding="utf-8", newline="") as tsv_file:
+                reader = csv.DictReader(tsv_file, delimiter="\t")
+                for row in reader:
+                    yield key, row
+                    key += 1