Delete dataset.py with huggingface_hub
dataset.py  +0 -106
DELETED
@@ -1,106 +0,0 @@
# Config file by Simon Hengchen, https://hengchen.net
import os
import gzip
import datasets

logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """
This is a version of the Kubhist 2 dataset created, curated and made available by Språkbanken Text (SBX) at the University of Gothenburg (Sweden) under the CC BY 4.0 license.
This is a corpus of OCRed newspapers from Sweden spanning the 1640s to the 1900s.
The original data is available with many types of annotation in XML at https://spraakbanken.gu.se/en/resources/kubhist2.
A good description of the data is available in this blog entry by Dana Dannélls: https://spraakbanken.gu.se/blogg/index.php/2019/09/15/the-kubhist-corpus-of-swedish-newspapers/

In a nutshell, this Hugging Face dataset version offers:
- only the OCRed text
- available in decadal subsets

License is CC BY 4.0 with attribution.
"""

_HOMEPAGE = "https://github.com/iguanodon-ai/kubhist2"
_LICENSE = "CC BY 4.0"

_BASE_DIR = "text"

# Use relative paths; the Hugging Face viewer runs in a sandbox
_URLS = {'1640': os.path.join(_BASE_DIR, '1640/1640.txt.gz'),
         '1650': os.path.join(_BASE_DIR, '1650/1650.txt.gz'),
         '1660': os.path.join(_BASE_DIR, '1660/1660.txt.gz'),
         '1670': os.path.join(_BASE_DIR, '1670/1670.txt.gz'),
         '1680': os.path.join(_BASE_DIR, '1680/1680.txt.gz'),
         '1690': os.path.join(_BASE_DIR, '1690/1690.txt.gz'),
         '1700': os.path.join(_BASE_DIR, '1700/1700.txt.gz'),
         '1710': os.path.join(_BASE_DIR, '1710/1710.txt.gz'),
         '1720': os.path.join(_BASE_DIR, '1720/1720.txt.gz'),
         '1730': os.path.join(_BASE_DIR, '1730/1730.txt.gz'),
         '1740': os.path.join(_BASE_DIR, '1740/1740.txt.gz'),
         '1750': os.path.join(_BASE_DIR, '1750/1750.txt.gz'),
         '1760': os.path.join(_BASE_DIR, '1760/1760.txt.gz'),
         '1770': os.path.join(_BASE_DIR, '1770/1770.txt.gz'),
         '1780': os.path.join(_BASE_DIR, '1780/1780.txt.gz'),
         '1790': os.path.join(_BASE_DIR, '1790/1790.txt.gz'),
         '1800': os.path.join(_BASE_DIR, '1800/1800.txt.gz'),
         '1810': os.path.join(_BASE_DIR, '1810/1810.txt.gz'),
         '1820': os.path.join(_BASE_DIR, '1820/1820.txt.gz'),
         '1830': os.path.join(_BASE_DIR, '1830/1830.txt.gz'),
         '1840': os.path.join(_BASE_DIR, '1840/1840.txt.gz'),
         '1850': os.path.join(_BASE_DIR, '1850/1850.txt.gz'),
         '1860': os.path.join(_BASE_DIR, '1860/1860.txt.gz'),
         '1870': os.path.join(_BASE_DIR, '1870/1870.txt.gz'),
         '1880': os.path.join(_BASE_DIR, '1880/1880.txt.gz'),
         '1890': os.path.join(_BASE_DIR, '1890/1890.txt.gz'),
         '1900': os.path.join(_BASE_DIR, '1900/1900.txt.gz'),
         'all': os.path.join(_BASE_DIR, 'all/all.txt.gz'),
         }


class Kubhist2Config(datasets.BuilderConfig):
    def __init__(self, period="all", **kwargs):
        # Fall back to the full corpus if an unknown decade is requested
        if str(period) not in _URLS:
            self.period = "all"
        else:
            self.period = str(period)
        super().__init__(**kwargs)


class Kubhist2(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = Kubhist2Config

    # One configuration per decade, plus "all" for the full corpus
    BUILDER_CONFIGS = [
        Kubhist2Config(
            name=key,
            version=datasets.Version("1.0.0"),
            description=f"Kubhist2: {key}",
            period=key,
        )
        for key in _URLS
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "text": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation="",  # no citation string is defined in this script
        )

    def _split_generators(self, dl_manager):
        # Use `manual_dir` so the user can run `load_dataset(..., data_dir="path")`
        data_path = os.path.join(dl_manager.manual_dir, _URLS[self.config.period])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_path}),
        ]

    def _generate_examples(self, filepath):
        """Yields lines from the compressed .txt.gz file as individual examples."""
        with gzip.open(filepath, "rt", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, {"text": line.strip()}
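
For context, a minimal usage sketch of the script being deleted, assuming the gzipped text files sit under text/ next to dataset.py exactly as laid out in _URLS. The local script path "./dataset.py" and the data_dir value are illustrative, not taken from the commit, and recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets.

from datasets import load_dataset

# "1860" selects the decadal config; data_dir is exposed to the builder as
# dl_manager.manual_dir, so text/1860/1860.txt.gz is resolved relative to it.
kubhist_1860 = load_dataset("./dataset.py", "1860", data_dir=".", split="train")
print(kubhist_1860[0]["text"])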