Datasets: amttl
Sub-tasks: parsing
Languages: Chinese
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: crowdsourced
Source Datasets: original
Tags:
License:

amttl / dataset_infos.json
Convert dataset to Parquet (53b5b42)
{
  "amttl": {
    "description": "Chinese word segmentation (CWS) trained from open source corpus faces dramatic performance drop\nwhen dealing with domain text, especially for a domain with lots of special terms and diverse\nwriting styles, such as the biomedical domain. However, building domain-specific CWS requires\nextremely high annotation cost. In this paper, we propose an approach by exploiting domain-invariant\nknowledge from high resource to low resource domains. Extensive experiments show that our model\nachieves consistently higher accuracy than the single-task CWS and other transfer learning\nbaselines, especially when there is a large disparity between source and target domains.\n\nThis dataset is the accompanied medical Chinese word segmentation (CWS) dataset.\nThe tags are in BIES scheme.\n\nFor more details see https://www.aclweb.org/anthology/C18-1307/\n",
    "citation": "@inproceedings{xing2018adaptive,\n title={Adaptive multi-task transfer learning for Chinese word segmentation in medical text},\n author={Xing, Junjie and Zhu, Kenny and Zhang, Shaodian},\n booktitle={Proceedings of the 27th International Conference on Computational Linguistics},\n pages={3619--3630},\n year={2018}\n}\n",
    "homepage": "https://www.aclweb.org/anthology/C18-1307/",
    "license": "",
    "features": {
      "id": {
        "dtype": "string",
        "_type": "Value"
      },
      "tokens": {
        "feature": {
          "dtype": "string",
          "_type": "Value"
        },
        "_type": "Sequence"
      },
      "tags": {
        "feature": {
          "names": [
            "B",
            "I",
            "E",
            "S"
          ],
          "_type": "ClassLabel"
        },
        "_type": "Sequence"
      }
    },
    "builder_name": "parquet",
    "dataset_name": "amttl",
    "config_name": "amttl",
    "version": {
      "version_str": "1.0.0",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 1132196,
        "num_examples": 3063,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 324358,
        "num_examples": 822,
        "dataset_name": null
      },
      "test": {
        "name": "test",
        "num_bytes": 328509,
        "num_examples": 908,
        "dataset_name": null
      }
    },
    "download_size": 274351,
    "dataset_size": 1785063,
    "size_in_bytes": 2059414
  }
}
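
The features and splits recorded above can be inspected directly with the datasets library. The following is a minimal sketch, assuming the dataset resolves on the Hub under the "amttl" id shown in the file path above; the printed counts and label names should match the "splits" and "features" entries in dataset_infos.json.

# Minimal sketch; "amttl" as the load_dataset id is assumed from the file path above.
from datasets import load_dataset

ds = load_dataset("amttl")

# Split sizes should match dataset_infos.json: 3063 train / 822 validation / 908 test examples.
print({name: split.num_rows for name, split in ds.items()})

# "tags" is a Sequence of ClassLabel; its names are the BIES tags listed under "features".
label_names = ds["train"].features["tags"].feature.names  # ['B', 'I', 'E', 'S']

# Show one example with its integer tags decoded back to BIES labels.
example = ds["train"][0]
print(example["tokens"])
print([label_names[t] for t in example["tags"]])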