"""STAN small dataset by Bansal et al.."""
import pickle

import datasets
import pandas as pd  # the downloaded .pkl file unpickles into a pandas DataFrame
_CITATION = """
@misc{bansal2015deep,
title={Towards Deep Semantic Analysis Of Hashtags},
author={Piyush Bansal and Romil Bansal and Vasudeva Varma},
year={2015},
eprint={1501.03210},
archivePrefix={arXiv},
primaryClass={cs.IR}
}
"""
_DESCRIPTION = """
Manually Annotated Stanford Sentiment Analysis Dataset by Bansal et al.
"""
_URLS = {
    "test": "https://github.com/prashantkodali/HashSet/raw/master/datasets/stan-small-bansal_et_al.pkl"
}
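
# The test split is distributed as a single pickled pandas DataFrame in which each
# row pairs a raw hashtag (column "hashtags") with its list of gold segmentations
# (column "goldtruths"); see _generate_examples below.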
class StanSmall(datasets.GeneratorBasedBuilder):
    """Hashtag segmentation dataset builder for the STAN small corpus."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    # Extra gold segmentations beyond the first one.
                    "alternatives": datasets.Sequence(
                        {
                            "segmentation": datasets.Value("string")
                        }
                    )
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/mounicam/hashtag_master",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):

        def get_segmentation(row):
            # The first gold segmentation is used as the primary reference.
            return row["goldtruths"][0]

        def get_alternatives(row):
            # Any remaining gold segmentations are exposed as alternatives.
            segmentations = [{
                "segmentation": x
            } for x in row["goldtruths"]]
            return segmentations[1:]

        with open(filepath, 'rb') as f:
            records = pickle.load(f)
        records = records.to_dict("records")

        for idx, row in enumerate(records):
            yield idx, {
                "index": idx,
                "hashtag": row["hashtags"],
                "segmentation": get_segmentation(row),
                "alternatives": get_alternatives(row)
            }
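
# Minimal usage sketch, assuming the script is published on the Hugging Face Hub
# under a repository id such as "ruanchaves/stan_small" (the exact id is an
# assumption, not part of this file):
#
#     from datasets import load_dataset
#     stan_small = load_dataset("ruanchaves/stan_small", split="test")
#     print(stan_small[0]["hashtag"], stan_small[0]["segmentation"])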