"""Brand-Product Relation Extraction Corpora""" |
|
|
|
|
|
import json |
|
|
|
import datasets |
|
|
|
|
|
|
|
|
|
_CITATION = """\
@inproceedings{inproceedings,
    author = {Janz, Arkadiusz and Kopociński, Łukasz and Piasecki, Maciej and Pluwak, Agnieszka},
    year = {2020},
    month = {05},
    pages = {},
    title = {Brand-Product Relation Extraction Using Heterogeneous Vector Space Representations}
}
"""

_DESCRIPTION = """\
Dataset consisting of Polish language texts annotated to recognize brand-product relations.
"""

_HOMEPAGE = "https://clarin-pl.eu/dspace/handle/11321/736"

_LICENSE = ""

_URLs = {
    "tele": "https://minio.clarin-pl.eu/semrel/corpora/ner_export_json/ner_tele_export.json",
    "electro": "https://minio.clarin-pl.eu/semrel/corpora/ner_export_json/ner_electro_export.json",
    "cosmetics": "https://minio.clarin-pl.eu/semrel/corpora/ner_export_json/ner_cosmetics_export.json",
    "banking": "https://minio.clarin-pl.eu/semrel/corpora/ner_export_json/ner_banking_export.json",
}

_CATEGORIES = {
    "tele": "telecommunications",
    "electro": "electronics",
    "cosmetics": "cosmetics",
    "banking": "banking",
}

_ALL_CATEGORIES = "all"

_VERSION = "1.1.0"


class BprecConfig(datasets.BuilderConfig):
    """BuilderConfig for Bprec."""

    def __init__(self, categories=None, **kwargs):
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.categories = categories


class Bprec(datasets.GeneratorBasedBuilder):
    """Brand-Product Relation Extraction Corpora in Polish"""

    BUILDER_CONFIGS = [
        BprecConfig(
            name=_ALL_CATEGORIES,
            categories=_CATEGORIES,
            description="A collection of Polish language texts annotated to recognize brand-product relations",
        )
    ] + [
        BprecConfig(
            name=cat,
            categories=[cat],
            description=f"{_CATEGORIES[cat]} examples from a collection of Polish language texts annotated to recognize brand-product relations",
        )
        for cat in _CATEGORIES
    ]
    BUILDER_CONFIG_CLASS = BprecConfig
    DEFAULT_CONFIG_NAME = _ALL_CATEGORIES

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("int32"),
                "category": datasets.Value("string"),
                "text": datasets.Value("string"),
                "ner": datasets.features.Sequence(
                    {
                        "source": {
                            "from": datasets.Value("int32"),
                            "text": datasets.Value("string"),
                            "to": datasets.Value("int32"),
                            "type": datasets.features.ClassLabel(
                                names=[
                                    "PRODUCT_NAME",
                                    "PRODUCT_NAME_IMP",
                                    "PRODUCT_NO_BRAND",
                                    "BRAND_NAME",
                                    "BRAND_NAME_IMP",
                                    "VERSION",
                                    "PRODUCT_ADJ",
                                    "BRAND_ADJ",
                                    "LOCATION",
                                    "LOCATION_IMP",
                                ]
                            ),
                        },
                        "target": {
                            "from": datasets.Value("int32"),
                            "text": datasets.Value("string"),
                            "to": datasets.Value("int32"),
                            "type": datasets.features.ClassLabel(
                                names=[
                                    "PRODUCT_NAME",
                                    "PRODUCT_NAME_IMP",
                                    "PRODUCT_NO_BRAND",
                                    "BRAND_NAME",
                                    "BRAND_NAME_IMP",
                                    "VERSION",
                                    "PRODUCT_ADJ",
                                    "BRAND_ADJ",
                                    "LOCATION",
                                    "LOCATION_IMP",
                                ]
                            ),
                        },
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # One JSON file per selected category; the corpus ships as a single TRAIN split.
        _my_urls = [_URLs[cat] for cat in self.config.categories]
        downloaded_files = dl_manager.download_and_extract(_my_urls)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filedirs": downloaded_files}),
        ]

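    # Expected shape of each downloaded JSON export, inferred from the parsing
    # logic in `_generate_examples` and the feature schema in `_info` (the exact
    # field set of the raw export is an assumption, not documented here):
    # a top-level JSON object mapping record keys to entries such as
    #   {"id": 1, "text": "...", "ner": [{"source": {"from": ..., "to": ...,
    #    "text": ..., "type": ...}, "target": {...}}, ...]}.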
    def _generate_examples(self, filedirs):
        """Yields examples."""
        cats = list(self.config.categories)
        for cat, filepath in zip(cats, filedirs):
            with open(filepath, "r", encoding="utf-8") as f:
                data = json.load(f)
            for example in data.values():
                id_ = example.get("id")
                text = example.get("text")
                ner = example.get("ner")
                # Prefix the key with the category so that keys stay unique when
                # several domain files are combined by the "all" configuration.
                yield f"{cat}_{id_}", {
                    "id": id_,
                    "category": cat,
                    "text": text,
                    "ner": ner,
                }
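

if __name__ == "__main__":
    # Usage sketch, not part of the original loading script: load one
    # configuration directly from this file with the `datasets` library.
    # Assumes the file is saved locally as `bprec.py`; recent `datasets`
    # releases may additionally require `trust_remote_code=True` (or an older
    # version that still supports script-based datasets).
    from datasets import load_dataset

    dataset = load_dataset("./bprec.py", name="tele", split="train")
    print(dataset)             # column names and number of rows
    print(dataset[0]["text"])  # raw text of the first annotated example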