Datasets:
  Tasks: Token Classification
  Multilinguality: monolingual
  Size Categories: 100K<n<1M
  Language Creators: found
  Annotations Creators: Shivam Mhaskar, Diptesh Kanojia
  Source Datasets: original
  License: unknown
"""IWN-WordLists""" | |
import csv | |
import json | |
import os | |
import datasets | |
_CITATION = """\ | |
@inproceedings{bhattacharyya2010indowordnet, | |
title={IndoWordNet}, | |
author={Bhattacharyya, Pushpak}, | |
booktitle={Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC'10)}, | |
year={2010} | |
} | |
""" | |
_DESCRIPTION = """\ | |
We provide the unique word list form the IndoWordnet (IWN) knowledge base. | |
""" | |
_HOMEPAGE = "https://www.cfilt.iitb.ac.in/indowordnet/" | |
# TODO: Add the licence for the dataset here if you can find it | |
_LICENSE = "" | |
_URLS = { | |
"assamese": "./assamese.json", | |
"bengali": "./bengali.json", | |
"bodo": "./bodo.json", | |
"gujarati": "./gujarati.json", | |
"hindi": "./hindi.json", | |
"kannada": "./kannada.json", | |
"kashmiri": "./kashmiri.json", | |
"konkani": "./konkani.json", | |
"malayalam": "./malayalam.json", | |
"manipuri": "./manipuri.json", | |
"marathi": "./marathi.json", | |
"meitei": "./meitei.json", | |
"nepali": "./nepali.json", | |
"oriya": "./oriya.json", | |
"punjabi": "./punjabi.json", | |
"sanskrit": "./sanskrit.json", | |
"tamil": "./tamil.json", | |
"telugu": "./telugu.json", | |
"urdu": "./urdu.json" | |
} | |
class IWNWordListsConfig(datasets.BuilderConfig):
    """BuilderConfig for IWNWordLists.

    NOTE(review): currently unused -- ``IWNWordLists.BUILDER_CONFIGS``
    instantiates ``datasets.BuilderConfig`` directly. Kept for backward
    compatibility; consider using it there or removing it.
    """

    def __init__(self, name, **kwargs):
        """
        Args:
            name: `string`, name of dataset config (the language name).
            **kwargs: keyword arguments forwarded to super.
        """
        # Python-3 zero-argument super() instead of the redundant
        # super(IWNWordListsConfig, self) form.
        super().__init__(name=name, **kwargs)
class IWNWordLists(datasets.GeneratorBasedBuilder):
    """Unique word lists for 19 Indian languages from IndoWordnet (IWN).

    Each configuration selects one language; every example is a single
    ``word`` string read from that language's JSON file.
    """

    VERSION = datasets.Version("1.1.0")

    # One configuration per language; names must match the keys of _URLS.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="assamese", version=VERSION, description="Assamese Word List"),
        datasets.BuilderConfig(name="bengali", version=VERSION, description="Bengali Word List"),
        datasets.BuilderConfig(name="bodo", version=VERSION, description="Bodo Word List"),
        datasets.BuilderConfig(name="gujarati", version=VERSION, description="Gujarati Word List"),
        datasets.BuilderConfig(name="hindi", version=VERSION, description="Hindi Word List"),
        datasets.BuilderConfig(name="kannada", version=VERSION, description="Kannada Word List"),
        datasets.BuilderConfig(name="kashmiri", version=VERSION, description="Kashmiri Word List"),
        datasets.BuilderConfig(name="konkani", version=VERSION, description="Konkani Word List"),
        # Description typo fixed: "Malyalam" -> "Malayalam".
        datasets.BuilderConfig(name="malayalam", version=VERSION, description="Malayalam Word List"),
        datasets.BuilderConfig(name="manipuri", version=VERSION, description="Manipuri Word List"),
        datasets.BuilderConfig(name="marathi", version=VERSION, description="Marathi Word List"),
        datasets.BuilderConfig(name="meitei", version=VERSION, description="Meitei Word List"),
        datasets.BuilderConfig(name="nepali", version=VERSION, description="Nepali Word List"),
        datasets.BuilderConfig(name="oriya", version=VERSION, description="Oriya Word List"),
        datasets.BuilderConfig(name="punjabi", version=VERSION, description="Punjabi Word List"),
        datasets.BuilderConfig(name="sanskrit", version=VERSION, description="Sanskrit Word List"),
        datasets.BuilderConfig(name="tamil", version=VERSION, description="Tamil Word List"),
        datasets.BuilderConfig(name="telugu", version=VERSION, description="Telugu Word List"),
        datasets.BuilderConfig(name="urdu", version=VERSION, description="Urdu Word List"),
    ]

    def _info(self):
        """Return dataset metadata; the only feature is a `word` string."""
        features = datasets.Features(
            {
                "word": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the selected language's JSON file; single train split."""
        url = _URLS[self.config.name]
        data_path = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_path,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield (index, {"word": ...}) examples from the JSON word list.

        Args:
            filepath: path to a JSON file with a top-level "data" list,
                each entry carrying a "word" field.
            split: unused (only a train split exists); kept so the
                signature matches the gen_kwargs passed above.
        """
        with open(filepath, encoding="utf-8") as f:
            payload = json.load(f)
        for idx, entry in enumerate(payload["data"]):
            yield idx, {
                "word": entry["word"],
            }