---
Languages: English
Multilinguality: monolingual
Size Categories: unknown
Language Creators: machine-generated
Annotations Creators: expert-generated
Source Datasets: original
ArXiv: 1501.03210
License: unknown
---
"""Test-Stanford dataset by Bansal et al.."""

import datasets
import pandas as pd

# BibTeX entry for the paper this dataset was introduced in.
_CITATION = """
@misc{bansal2015deep,
    title={Towards Deep Semantic Analysis Of Hashtags},
    author={Piyush Bansal and Romil Bansal and Vasudeva Varma},
    year={2015},
    eprint={1501.03210},
    archivePrefix={arXiv},
    primaryClass={cs.IR}
}
"""

_DESCRIPTION = """
Manually Annotated Stanford Sentiment Analysis Dataset by Bansal et al..
"""

# Raw tab-separated source file; the dataset ships only a test split.
_URLS = {
    "test": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/Test-Stanford.txt"
}
class TestStanford(datasets.GeneratorBasedBuilder):
    """Test-Stanford hashtag segmentation dataset (Bansal et al., 2015).

    Each example is a hashtag together with its ranked list of candidate
    segmentations; a label of 1 in the raw file marks the gold candidate.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the per-example schema: hashtag, gold segmentation, and ranked candidates."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "gold_position": datasets.Value("int32"),
                    "rank": datasets.Sequence(
                        {
                            "position": datasets.Value("int32"),
                            "candidate": datasets.Value("string")
                        }
                    )
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the raw file and expose it as the single TEST split."""
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield one example per hashtag, grouping consecutive candidate rows.

        The raw TSV has one line per (id, hashtag, candidate, label) tuple;
        consecutive lines sharing a hashtag form that hashtag's candidate
        list, in rank order. The example's ``id`` is taken from the group's
        first row.
        """
        names = ["id", "hashtag", "candidate", "label"]
        df = pd.read_csv(filepath, sep="\t", skiprows=1, header=None,
                         names=names)
        # Fields are wrapped in single quotes in the raw file; strip them
        # (the label column is already a clean integer and is left alone).
        for col in names[0:-1]:
            df[col] = df[col].apply(lambda x: x.strip("'").strip())

        # Collapse consecutive rows with the same hashtag into one group.
        output = []
        current = None
        for row in df.to_dict('records'):
            if current is None or current["hashtag"] != row["hashtag"]:
                if current is not None:
                    output.append(current)
                current = {
                    "hashtag": row["hashtag"],
                    "candidate": [row["candidate"]],
                    "id": int(row["id"]),
                    "label": [int(row["label"])],
                }
            else:
                current["candidate"].append(row["candidate"])
                current["label"].append(int(row["label"]))
        # BUG FIX: the original only flushed a group when a *new* hashtag
        # appeared, so the file's final hashtag group was silently dropped.
        if current is not None:
            output.append(current)

        def get_gold_position(row):
            # 0-based position of the gold candidate, or None if no row
            # of this group carried label == 1.
            try:
                return row["label"].index(1)
            except ValueError:
                return None

        def get_rank(row):
            # 1-based rank entries preserving the file order of candidates.
            return [
                {"position": idx + 1, "candidate": item}
                for idx, item in enumerate(row["candidate"])
            ]

        def get_segmentation(row):
            # Gold candidate string, or None when no gold label exists.
            gold_idx = get_gold_position(row)
            return None if gold_idx is None else row["candidate"][gold_idx]

        for idx, row in enumerate(output):
            yield idx, {
                "index": row["id"],
                "hashtag": row["hashtag"],
                "segmentation": get_segmentation(row),
                "gold_position": get_gold_position(row),
                "rank": get_rank(row),
            }