"""HashSet dataset."""

import json

import datasets
import pandas as pd

_CITATION = """
@article{kodali2022hashset,
  title={HashSet--A Dataset For Hashtag Segmentation},
  author={Kodali, Prashant and Bhatnagar, Akshala and Ahuja, Naman and Shrivastava, Manish and Kumaraguru, Ponnurangam},
  journal={arXiv preprint arXiv:2201.06741},
  year={2022}
}
"""

|
_DESCRIPTION = """
HashSet is a new dataset consisting of 1.9k manually annotated and 3.3M loosely supervised tweets for testing the
efficiency of hashtag segmentation models. We compare state-of-the-art hashtag segmentation models on HashSet and other
baseline datasets (STAN and BOUN). We compare and analyse the results across the datasets to argue that HashSet can act
as a good benchmark for hashtag segmentation tasks.

HashSet Manual: contains 1.9k manually annotated hashtags. Each row consists of the hashtag, the segmented
hashtag, named entity annotations, and flags storing whether the hashtag contains a mix of Hindi and English
tokens and/or contains non-English tokens.
"""

_URL = "https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/hashset/HashSet-Manual.csv"


class HashSetManual(datasets.GeneratorBasedBuilder):
    """Builder for the manually annotated split of the HashSet dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "spans": datasets.Sequence(
                        {
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32"),
                            "text": datasets.Value("string"),
                        }
                    ),
                    "source": datasets.Value("string"),
                    "gold_position": datasets.Value("int32"),
                    "mix": datasets.Value("bool"),
                    "other": datasets.Value("bool"),
                    "ner": datasets.Value("bool"),
                    "annotator_id": datasets.Value("int32"),
                    "annotation_id": datasets.Value("int32"),
                    "created_at": datasets.Value("timestamp[us]"),
                    "updated_at": datasets.Value("timestamp[us]"),
                    "lead_time": datasets.Value("float64"),
                    "rank": datasets.Sequence(
                        {
                            "position": datasets.Value("int32"),
                            "candidate": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/prashantkodali/HashSet/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):
        def read_language_labels(field):
            # The language-annotation field holds a JSON record with a
            # "choices" list; bare strings are wrapped in the same shape so the
            # membership checks below still apply.
            mix_label = "Hashtag has a mix of english and hindi tokens"
            other_label = "Hashtag has non english token "  # trailing space kept verbatim
            ner_label = "Hashtag has named entities"
            try:
                record = json.loads(field)
            except json.decoder.JSONDecodeError:
                record = {"choices": [field]}

            mix = mix_label in record["choices"]
            other = other_label in record["choices"]
            ner = ner_label in record["choices"]
            return mix, other, ner
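
        # Hedged illustration of read_language_labels (the field value is
        # hypothetical, not copied from the CSV):
        #   read_language_labels('{"choices": ["Hashtag has named entities"]}')
        #   -> (False, False, True)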

        def read_entities(field):
            # Named-entity spans arrive as a JSON list of span dicts; rows
            # without parseable annotations yield an empty list.
            try:
                record = json.loads(field)
            except json.decoder.JSONDecodeError:
                return []
            output = []
            for row in record:
                output.append({
                    "start": row.get("start", None),
                    "end": row.get("end", None),
                    "text": row.get("text", None),
                })
            return output
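
        # Hedged illustration of read_entities (the field value is hypothetical):
        #   read_entities('[{"start": 0, "end": 3, "text": "USA"}]')
        #   -> [{"start": 0, "end": 3, "text": "USA"}]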

        def read_rank(row):
            # Candidate segmentations live in CSV columns "1" through "10";
            # positions are emitted as ints to match the int32 "rank" feature
            # declared in _info.
            output = []
            for i in range(10):
                output.append({
                    "position": i + 1,
                    "candidate": row[str(i + 1)],
                })
            return output
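
        # Hedged illustration: each CSV row is expected to carry candidate
        # columns "1".."10", e.g. {"1": "we love", "2": "welove", ...} (values
        # hypothetical), yielding [{"position": 1, "candidate": "we love"}, ...].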

        def get_gold_position(field):
            # The gold position is wrapped in "$" characters in the CSV;
            # non-numeric values map to None (no gold candidate recorded).
            output = field.strip("$")
            try:
                return int(output)
            except ValueError:
                return None
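
        # Hedged illustration of get_gold_position (field values hypothetical):
        #   get_gold_position("$3$") -> 3
        #   get_gold_position("NA")  -> None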

        records = pd.read_csv(filepath).to_dict("records")
        for idx, row in enumerate(records):
            # "mutlitoken" is the language-annotation column name as it appears
            # (misspelled) in the source CSV; "Unnamed: 0" is pandas' name for
            # the CSV's unnamed index column.
            mix, other, ner = read_language_labels(row["mutlitoken"])
            yield idx, {
                "index": row["Unnamed: 0"],
                "hashtag": row["Hashtag"],
                "segmentation": row["Final Segmentation"],
                "spans": read_entities(row["charner"]),
                "source": row["Source"],
                "gold_position": get_gold_position(row["topk"]),
                "mix": mix,
                "other": other,
                "ner": ner,
                "annotator_id": int(row["annotator"]),
                "annotation_id": int(row["annotation_id"]),
                "created_at": row["created_at"],
                "updated_at": row["updated_at"],
                "lead_time": row["lead_time"],
                "rank": read_rank(row),
            }
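

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original loader: builds the TEST
    # split through the standard datasets API when this file is run directly
    # (e.g. `python hashset.py`). Newer datasets releases may additionally
    # require trust_remote_code=True for script-based datasets.
    ds = datasets.load_dataset(__file__, split="test")
    print(ds[0]["hashtag"], "->", ds[0]["segmentation"])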