best2009 / dataset_infos.json
{
"best2009": {
"description": "`best2009` is a Thai word-tokenization dataset from encyclopedia, novels, news and articles by\n[NECTEC](https://www.nectec.or.th/) (148,995/2,252 lines of train/test). It was created for\n[BEST 2010: Word Tokenization Competition](https://thailang.nectec.or.th/archive/indexa290.html?q=node/10).\nThe test set answers are not provided publicly.\n",
"citation": "@inproceedings{kosawat2009best,\n title={BEST 2009: Thai word segmentation software contest},\n author={Kosawat, Krit and Boriboon, Monthika and Chootrakool, Patcharika and Chotimongkol, Ananlada and Klaithin, Supon and Kongyoung, Sarawoot and Kriengket, Kanyanut and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and others},\n booktitle={2009 Eighth International Symposium on Natural Language Processing},\n pages={83--88},\n year={2009},\n organization={IEEE}\n}\n@inproceedings{boriboon2009best,\n title={Best corpus development and analysis},\n author={Boriboon, Monthika and Kriengket, Kanyanut and Chootrakool, Patcharika and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and Kosawat, Krit},\n booktitle={2009 International Conference on Asian Language Processing},\n pages={322--327},\n year={2009},\n organization={IEEE}\n}\n",
"homepage": "https://aiforthai.in.th/",
"license": "CC-BY-NC-SA 3.0",
"features": {
"fname": {
"dtype": "string",
"_type": "Value"
},
"char": {
"feature": {
"dtype": "string",
"_type": "Value"
},
"_type": "Sequence"
},
"char_type": {
"feature": {
"names": [
"b_e",
"c",
"d",
"n",
"o",
"p",
"q",
"s",
"s_e",
"t",
"v",
"w"
],
"_type": "ClassLabel"
},
"_type": "Sequence"
},
"is_beginning": {
"feature": {
"names": [
"neg",
"pos"
],
"_type": "ClassLabel"
},
"_type": "Sequence"
}
},
"builder_name": "best2009",
"dataset_name": "best2009",
"config_name": "best2009",
"version": {
"version_str": "1.0.0",
"major": 1,
"minor": 0,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 483129698,
"num_examples": 148995,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 10498706,
"num_examples": 2252,
"dataset_name": null
}
},
"download_size": 28084787,
"dataset_size": 493628404,
"size_in_bytes": 521713191
}
}
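
For reference, the schema above can be consumed with the Hugging Face `datasets` library. The snippet below is a minimal, illustrative sketch, not part of this file: the dataset identifier `best2009` and the word-reconstruction loop are assumptions based on the features declared above (`char` as a character sequence and `is_beginning` as pos/neg word-start labels).

# Minimal sketch: load the dataset and rebuild words from character-level labels.
from datasets import load_dataset

ds = load_dataset("best2009")                      # splits: train (148,995) / test (2,252)
print(ds["train"].features)                        # fname, char, char_type, is_beginning

example = ds["train"][0]
# `is_beginning` is a Sequence of ClassLabel("neg", "pos"); "pos" marks a word start.
pos_id = ds["train"].features["is_beginning"].feature.str2int("pos")

words, current = [], ""
for ch, label in zip(example["char"], example["is_beginning"]):
    if label == pos_id and current:
        words.append(current)
        current = ""
    current += ch
if current:
    words.append(current)
print(words[:10])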