Dataset: amttl

Sub-tasks: parsing
Languages: Chinese
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: crowdsourced
Source Datasets: original
Tags: (none)
License: (unspecified)
Convert dataset to Parquet #3
opened by albertvillanova (HF staff)
README.md CHANGED
@@ -19,6 +19,7 @@ task_ids:
 - parsing
 pretty_name: AMTTL
 dataset_info:
+  config_name: amttl
   features:
   - name: id
     dtype: string
@@ -32,19 +33,28 @@ dataset_info:
           '1': I
           '2': E
           '3': S
-  config_name: amttl
   splits:
   - name: train
-    num_bytes: 1132212
+    num_bytes: 1132196
     num_examples: 3063
   - name: validation
-    num_bytes: 324374
+    num_bytes: 324358
     num_examples: 822
   - name: test
-    num_bytes: 328525
+    num_bytes: 328509
     num_examples: 908
-  download_size: 685534
-  dataset_size: 1785111
+  download_size: 274351
+  dataset_size: 1785063
+configs:
+- config_name: amttl
+  data_files:
+  - split: train
+    path: amttl/train-*
+  - split: validation
+    path: amttl/validation-*
+  - split: test
+    path: amttl/test-*
+  default: true
 ---
 
 # Dataset Card for AMTTL
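With the new `configs` mapping in the YAML header, `datasets` resolves the Parquet shards directly and no loading script is needed. A minimal sanity check, assuming the dataset still resolves under the `amttl` id on the Hub:

```python
from datasets import load_dataset

# Loads train/validation/test from the Parquet files listed under `configs`
ds = load_dataset("amttl")
print(ds)
print(ds["train"][0])  # {'id': '0', 'tokens': [...], 'tags': [...]}
```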
amttl.py DELETED
@@ -1,147 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Introduction to AMTTL CWS Dataset"""
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@inproceedings{xing2018adaptive,
-  title={Adaptive multi-task transfer learning for Chinese word segmentation in medical text},
-  author={Xing, Junjie and Zhu, Kenny and Zhang, Shaodian},
-  booktitle={Proceedings of the 27th International Conference on Computational Linguistics},
-  pages={3619--3630},
-  year={2018}
-}
-"""
-
-_DESCRIPTION = """\
-Chinese word segmentation (CWS) trained from open source corpus faces dramatic performance drop
-when dealing with domain text, especially for a domain with lots of special terms and diverse
-writing styles, such as the biomedical domain. However, building domain-specific CWS requires
-extremely high annotation cost. In this paper, we propose an approach by exploiting domain-invariant
-knowledge from high resource to low resource domains. Extensive experiments show that our mode
-achieves consistently higher accuracy than the single-task CWS and other transfer learning
-baselines, especially when there is a large disparity between source and target domains.
-
-This dataset is the accompanied medical Chinese word segmentation (CWS) dataset.
-The tags are in BIES scheme.
-
-For more details see https://www.aclweb.org/anthology/C18-1307/
-"""
-
-_URL = "https://raw.githubusercontent.com/adapt-sjtu/AMTTL/master/medical_data/"
-_TRAINING_FILE = "forum_train.txt"
-_DEV_FILE = "forum_dev.txt"
-_TEST_FILE = "forum_test.txt"
-
-
-class AmttlConfig(datasets.BuilderConfig):
-    """BuilderConfig for AMTTL"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for AMTTL.
-
-        Args:
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(AmttlConfig, self).__init__(**kwargs)
-
-
-class Amttl(datasets.GeneratorBasedBuilder):
-    """AMTTL Chinese Word Segmentation dataset."""
-
-    BUILDER_CONFIGS = [
-        AmttlConfig(
-            name="amttl",
-            version=datasets.Version("1.0.0"),
-            description="AMTTL medical Chinese word segmentation dataset",
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "tokens": datasets.Sequence(datasets.Value("string")),
-                    "tags": datasets.Sequence(
-                        datasets.features.ClassLabel(
-                            names=[
-                                "B",
-                                "I",
-                                "E",
-                                "S",
-                            ]
-                        )
-                    ),
-                }
-            ),
-            supervised_keys=None,
-            homepage="https://www.aclweb.org/anthology/C18-1307/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        urls_to_download = {
-            "train": f"{_URL}{_TRAINING_FILE}",
-            "dev": f"{_URL}{_DEV_FILE}",
-            "test": f"{_URL}{_TEST_FILE}",
-        }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        logger.info("⏳ Generating examples from = %s", filepath)
-        with open(filepath, encoding="utf-8") as f:
-            guid = 0
-            tokens = []
-            tags = []
-            for line in f:
-                line_stripped = line.strip()
-                if line_stripped == "":
-                    if tokens:
-                        yield guid, {
-                            "id": str(guid),
-                            "tokens": tokens,
-                            "tags": tags,
-                        }
-                        guid += 1
-                        tokens = []
-                        tags = []
-                else:
-                    splits = line_stripped.split("\t")
-                    if len(splits) == 1:
-                        splits.append("O")
-                    tokens.append(splits[0])
-                    tags.append(splits[1])
-            # last example
-            yield guid, {
-                "id": str(guid),
-                "tokens": tokens,
-                "tags": tags,
-            }
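For reference, the deleted `_generate_examples` is plain CoNLL-style parsing: one tab-separated token/tag pair per line, with sentences delimited by blank lines. A standalone sketch of the same loop, detached from the builder API (the `parse_bies` name and local-file usage are illustrative, not part of the original script):

```python
def parse_bies(filepath):
    """Yield {tokens, tags} examples from a blank-line-delimited BIES file."""
    tokens, tags = [], []
    with open(filepath, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                # Blank line ends the current sentence
                if tokens:
                    yield {"tokens": tokens, "tags": tags}
                    tokens, tags = [], []
            else:
                parts = line.split("\t")
                tokens.append(parts[0])
                # The original script falls back to "O" when the tag is missing
                tags.append(parts[1] if len(parts) > 1 else "O")
    if tokens:  # flush the final sentence if the file lacks a trailing blank line
        yield {"tokens": tokens, "tags": tags}
```

Unlike the original generator, the trailing flush here is guarded by `if tokens`, so a file that ends with a blank line does not emit an empty example.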
amttl/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e162219c3d2e9a4b234407072169e58475c70f69a1118c4c92c1cc8bdb7fddcf
+size 51311
amttl/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93ff0e728fa5bf6cf4c32805ac01529c1b022f29b39f28406a5e7fd28b9b6342
+size 172615
amttl/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7992b50bd6d87521937260ed7ebce5a986b8eb52ad0905373fe94d6b155c53e
+size 50425
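The new shards are ordinary Parquet files, so they can also be read without `datasets`, for example with pandas over the Hub's fsspec protocol. The `hf://` path below is an assumption based on the shard names in this PR and the `amttl` repo id:

```python
import pandas as pd

# Requires huggingface_hub installed, which registers the hf:// filesystem
df = pd.read_parquet("hf://datasets/amttl/amttl/train-00000-of-00001.parquet")
print(df.shape)  # expected (3063, 3): id, tokens, tags
```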
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"amttl": {"description": "Chinese word segmentation (CWS) trained from open source corpus faces dramatic performance drop\nwhen dealing with domain text, especially for a domain with lots of special terms and diverse\nwriting styles, such as the biomedical domain. However, building domain-specific CWS requires\nextremely high annotation cost. In this paper, we propose an approach by exploiting domain-invariant\nknowledge from high resource to low resource domains. Extensive experiments show that our mode\nachieves consistently higher accuracy than the single-task CWS and other transfer learning\nbaselines, especially when there is a large disparity between source and target domains.\n\nThis dataset is the accompanied medical Chinese word segmentation (CWS) dataset.\nThe tags are in BIES scheme.\n\nFor more details see https://www.aclweb.org/anthology/C18-1307/\n", "citation": "@inproceedings{xing2018adaptive,\n title={Adaptive multi-task transfer learning for Chinese word segmentation in medical text},\n author={Xing, Junjie and Zhu, Kenny and Zhang, Shaodian},\n booktitle={Proceedings of the 27th International Conference on Computational Linguistics},\n pages={3619--3630},\n year={2018}\n}\n", "homepage": "https://www.aclweb.org/anthology/C18-1307/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tags": {"feature": {"num_classes": 4, "names": ["B", "I", "E", "S"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "amttl", "config_name": "amttl", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1132212, "num_examples": 3063, "dataset_name": "amttl"}, "validation": {"name": "validation", "num_bytes": 324374, "num_examples": 822, "dataset_name": "amttl"}, "test": {"name": "test", "num_bytes": 328525, "num_examples": 908, "dataset_name": "amttl"}}, "download_checksums": {"https://raw.githubusercontent.com/adapt-sjtu/AMTTL/master/medical_data/forum_train.txt": {"num_bytes": 434357, "checksum": "9819373963ea04d1d28844d5bc83b6b0332fad8b5f2e73092bcfc58dc6d6292a"}, "https://raw.githubusercontent.com/adapt-sjtu/AMTTL/master/medical_data/forum_dev.txt": {"num_bytes": 124973, "checksum": "1a2eb461b98d2a9160baad7f76d003cc0917b998e8283bcffa52b71224dd9d17"}, "https://raw.githubusercontent.com/adapt-sjtu/AMTTL/master/medical_data/forum_test.txt": {"num_bytes": 126204, "checksum": "aea1a8cf244cd565e94bd193a1eef7a10b16eeb0b6fbb6ed1d2fefbd55360dd6"}}, "download_size": 685534, "post_processing_size": null, "dataset_size": 1785111, "size_in_bytes": 2470645}}