Datasets:

Languages:
Hindi
ArXiv:
License:
dipteshkanojia committed on
Commit
f0480f1
1 Parent(s): 41675a9
Files changed (2) hide show
  1. .gitignore +1 -1
  2. HiNER-collapsed.py +91 -0
.gitignore CHANGED
@@ -128,4 +128,4 @@ dmypy.json
128
  # Pyre type checker
129
  .pyre/
130
 
131
- IITB_EN_HI_Datasets.ipynb
 
128
  # Pyre type checker
129
  .pyre/
130
 
131
+ *.ipynb
HiNER-collapsed.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import datasets
4
+ from typing import List
5
+ import json
6
+
7
# Module-level logger, routed through the `datasets` logging facilities so it
# honours the library's verbosity settings.
logger = datasets.logging.get_logger(__name__)


# Citation block shown on the dataset card; empty until a paper is published.
_CITATION = """
"""

# Short description surfaced in the DatasetInfo for this loader.
_DESCRIPTION = """
This is the repository for HiNER - a large Hindi Named Entity Recognition dataset.
"""
16
+
17
class HiNERCollapsedConfig(datasets.BuilderConfig):
    """BuilderConfig for the HiNER-collapsed dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for HiNER-collapsed.

        Args:
            **kwargs: keyword arguments forwarded to datasets.BuilderConfig.
        """
        # BUG FIX: the original called super(SDUtestConfig, self).__init__(...),
        # but SDUtestConfig is not defined anywhere in this module (a leftover
        # from a copy-pasted loader), so instantiating this config raised
        # NameError. Zero-argument super() is the correct Python 3 form.
        super().__init__(**kwargs)
class HiNERCollapsedConfig(datasets.GeneratorBasedBuilder):
    """HiNER-collapsed: a large Hindi Named Entity Recognition dataset with
    the tag set collapsed to PERSON / LOCATION / ORGANIZATION.

    NOTE(review): this builder class re-uses the name of the BuilderConfig
    subclass defined above and shadows it at module level once this class
    body finishes executing. BUILDER_CONFIGS below still resolves to the
    config class (the name is looked up while this class body runs, before
    the rebinding), so behavior is preserved — but renaming the builder
    (e.g. to ``HiNERCollapsed``) would be the cleaner long-term fix.
    """

    BUILDER_CONFIGS = [
        HiNERCollapsedConfig(
            name="HiNER-Collapsed",
            version=datasets.Version("0.0.2"),
            description="Hindi Named Entity Recognition Dataset",
        ),
    ]

    def _info(self):
        """Declare the dataset schema: a string id, a token sequence, and a
        parallel sequence of collapsed BIO NER labels."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PERSON",
                                "I-PERSON",
                                "B-LOCATION",
                                "I-LOCATION",
                                "B-ORGANIZATION",
                                "I-ORGANIZATION",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    # Raw-file base URL on the Hub and the three per-split JSON files.
    _URL = "https://huggingface.co/datasets/cfiltnlp/HiNER-collapsed/raw/main/"
    _URLS = {
        "train": _URL + "train.json",
        "validation": _URL + "validation.json",
        "test": _URL + "test.json",
    }

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the three JSON splits and map each to a SplitGenerator."""
        downloaded_files = dl_manager.download_and_extract(self._URLS)

        # BUG FIX: the validation split was looked up as
        # downloaded_files["dev"], but _URLS only defines the key
        # "validation", so loading the dataset raised KeyError.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from one JSON split file.

        Each record is expected to carry 'id', 'tokens', and 'ner_tags'
        keys (presumably written by the preprocessing that produced the
        split files — schema not visible here).
        """
        logger.info("generating examples from = %s", filepath)
        with open(filepath) as f:
            data = json.load(f)
        # 'record' rather than the original loop name 'object', which
        # shadowed the builtin of the same name.
        for record in data:
            id_ = int(record["id"])
            yield id_, {
                "id": str(id_),
                "tokens": record["tokens"],
                "ner_tags": record["ner_tags"],
            }