mstz committed on
Commit f73a092
1 Parent(s): 7c87a98

Upload 3 files

Files changed (3)
  1. README.md +35 -1
  2. epistolas.json +0 -0
  3. medieval_latin.py +82 -0
README.md CHANGED
@@ -1,3 +1,37 @@
  ---
- license: cc-by-4.0
+ language:
+ - la
+ tags:
+ - medieval
+ - latin
+ - text-classification
+ pretty_name: Medieval Latin
+ size_categories:
+ - 100<n<1K
+ task_categories: # Full list at https://github.com/huggingface/hub-docs/blob/main/js/src/lib/interfaces/Types.ts
+ - text-classification
  ---
+ # Medieval Latin
+ The [Medieval Latin](https://openportal.isti.cnr.it/doc?id=people______::37b90c87470ef85c78e72b8a3c753293) authorship attribution dataset.
+ Which Latin author wrote the given text?
+
+ Note: only epistolas are included in this version of the dataset.
+
+ # Configurations and tasks
+ | **Configuration** | **Task**       | **Description**                          |
+ |-------------------|----------------|------------------------------------------|
+ | authorship        | Classification | Which Latin author wrote the given text? |
+
+ # Usage
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("mstz/medieval_latin", "authorship")["train"]
+ ```
+
+ # Features
+ | **Feature** | **Type** |
+ |-------------|----------|
+ | content     | `string` |
+ | epistola    | `string` |
+ | author      | `string` |
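To make the card's Usage and Features sections concrete, here is a minimal sketch (assuming the `authorship` configuration exposes the `epistola`, `author`, and `content` columns listed above; recent versions of `datasets` may additionally require `trust_remote_code=True` for script-based datasets) that inspects one record and tallies texts per author:

```python
from collections import Counter

from datasets import load_dataset

# Load the single training split of the authorship configuration.
dataset = load_dataset("mstz/medieval_latin", "authorship")["train"]

# Each record is a dict with "epistola", "author" and "content" keys.
first = dataset[0]
print(first["epistola"], first["author"], first["content"][:80])

# Count how many epistolas each author contributes.
print(Counter(dataset["author"]))
```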
epistolas.json ADDED
The diff for this file is too large to render. See raw diff
 
medieval_latin.py ADDED
@@ -0,0 +1,82 @@
+ """Medieval Latin."""
+
+ from typing import List
+ from functools import partial
+
+ import datasets
+
+ import pandas
+
+
+ VERSION = datasets.Version("1.0.0")
+ _ORIGINAL_FEATURE_NAMES = [
+     "epistola",
+     "author",
+     "content"
+ ]
+ _BASE_FEATURE_NAMES = [
+     "epistola",
+     "author",
+     "content"
+ ]
+
+ DESCRIPTION = "Medieval Latin (MedLatin) authorship attribution dataset from Corbara et al., 2020."
+ _HOMEPAGE = "https://openportal.isti.cnr.it/doc?id=people______::37b90c87470ef85c78e72b8a3c753293"
+ _URLS = ("https://openportal.isti.cnr.it/doc?id=people______::37b90c87470ef85c78e72b8a3c753293")
+ _CITATION = """
+ @techreport{oai:it.cnr:prodotti:438795,
+     title = {MedLatin1 and MedLatin2: Two Datasets for the Computational Authorship Analysis of Medieval Latin Texts},
+     author = {Corbara S. and Moreo A. and Sebastiani F. and Tavoni M.},
+     institution = {Research report, 2020},
+     year = {2020}
+ }"""
+
+ # Dataset info
+ urls_per_split = {
+     "train": "https://huggingface.co/datasets/mstz/medieval_latin/raw/main/epistolas.json",
+ }
+ features_types_per_config = {
+     "authorship": {
+         "epistola": datasets.Value("string"),
+         "author": datasets.Value("string"),
+         "content": datasets.Value("string")
+     }
+ }
+ features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
+
+
+ class MedievalLatinConfig(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         super(MedievalLatinConfig, self).__init__(version=VERSION, **kwargs)
+         self.features = features_per_config[kwargs["name"]]
+
+
+ class MedievalLatin(datasets.GeneratorBasedBuilder):
+     # dataset versions
+     DEFAULT_CONFIG = "authorship"
+     BUILDER_CONFIGS = [
+         MedievalLatinConfig(name="authorship",
+                             description="authorship"),
+     ]
+
+     def _info(self):
+         info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
+                                     features=features_per_config[self.config.name])
+
+         return info
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         # Download the raw JSON hosted in this repository; only a train split is provided.
+         downloads = dl_manager.download_and_extract(urls_per_split)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
+         ]
+
+     def _generate_examples(self, filepath: str):
+         data = pandas.read_json(filepath)
+
+         # Each JSON record becomes one example, keyed by its row index.
+         for row_id, row in data.iterrows():
+             data_row = dict(row)
+
+             yield row_id, data_row
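For context on how `_generate_examples` consumes the raw file, here is a minimal standalone sketch (assuming a local copy of `epistolas.json` that parses into one record per row, as the builder expects) that mirrors the pandas-based iteration above:

```python
import pandas

# Read the raw JSON exactly as _generate_examples does.
data = pandas.read_json("epistolas.json")

# Reproduce the (key, example) pairs the builder yields.
for row_id, row in data.iterrows():
    example = dict(row)  # expected keys: "epistola", "author", "content"
    print(row_id, example["author"], example["epistola"])
    break  # stop after the first record; drop this to scan the whole file
```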