Datasets:
amttl
Sub-tasks:
parsing
Languages:
Chinese
Multilinguality:
monolingual
Size Categories:
1K<n<10K
Language Creators:
found
Annotations Creators:
crowdsourced
Source Datasets:
original
Tags:
License:
albertvillanova (HF staff) committed on
Commit 2b4c3f2
1 Parent(s): 53b5b42

Delete loading script

Files changed (1)
  1. amttl.py +0 -147
amttl.py DELETED
@@ -1,147 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Introduction to AMTTL CWS Dataset"""
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@inproceedings{xing2018adaptive,
-    title={Adaptive multi-task transfer learning for Chinese word segmentation in medical text},
-    author={Xing, Junjie and Zhu, Kenny and Zhang, Shaodian},
-    booktitle={Proceedings of the 27th International Conference on Computational Linguistics},
-    pages={3619--3630},
-    year={2018}
-}
-"""
-
-_DESCRIPTION = """\
-Chinese word segmentation (CWS) trained from an open-source corpus faces a dramatic performance drop
-when dealing with domain text, especially for a domain with lots of special terms and diverse
-writing styles, such as the biomedical domain. However, building a domain-specific CWS requires
-extremely high annotation cost. In this paper, we propose an approach that exploits domain-invariant
-knowledge from high-resource to low-resource domains. Extensive experiments show that our model
-achieves consistently higher accuracy than the single-task CWS and other transfer learning
-baselines, especially when there is a large disparity between source and target domains.
-
-This dataset is the accompanying medical Chinese word segmentation (CWS) dataset.
-The tags are in the BIES scheme.
-
-For more details see https://www.aclweb.org/anthology/C18-1307/
-"""
-
-_URL = "https://raw.githubusercontent.com/adapt-sjtu/AMTTL/master/medical_data/"
-_TRAINING_FILE = "forum_train.txt"
-_DEV_FILE = "forum_dev.txt"
-_TEST_FILE = "forum_test.txt"
-
-
-class AmttlConfig(datasets.BuilderConfig):
-    """BuilderConfig for AMTTL"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for AMTTL.
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(AmttlConfig, self).__init__(**kwargs)
-
-
-class Amttl(datasets.GeneratorBasedBuilder):
-    """AMTTL Chinese Word Segmentation dataset."""
-
-    BUILDER_CONFIGS = [
-        AmttlConfig(
-            name="amttl",
-            version=datasets.Version("1.0.0"),
-            description="AMTTL medical Chinese word segmentation dataset",
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "tokens": datasets.Sequence(datasets.Value("string")),
-                    "tags": datasets.Sequence(
-                        datasets.features.ClassLabel(
-                            names=[
-                                "B",
-                                "I",
-                                "E",
-                                "S",
-                            ]
-                        )
-                    ),
-                }
-            ),
-            supervised_keys=None,
-            homepage="https://www.aclweb.org/anthology/C18-1307/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        urls_to_download = {
-            "train": f"{_URL}{_TRAINING_FILE}",
-            "dev": f"{_URL}{_DEV_FILE}",
-            "test": f"{_URL}{_TEST_FILE}",
-        }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        logger.info("⏳ Generating examples from = %s", filepath)
-        with open(filepath, encoding="utf-8") as f:
-            guid = 0
-            tokens = []
-            tags = []
-            for line in f:
-                line_stripped = line.strip()
-                if line_stripped == "":
-                    if tokens:
-                        yield guid, {
-                            "id": str(guid),
-                            "tokens": tokens,
-                            "tags": tags,
-                        }
-                        guid += 1
-                        tokens = []
-                        tags = []
-                else:
-                    splits = line_stripped.split("\t")
-                    if len(splits) == 1:
-                        splits.append("O")
-                    tokens.append(splits[0])
-                    tags.append(splits[1])
-            # last example
-            yield guid, {
-                "id": str(guid),
-                "tokens": tokens,
-                "tags": tags,
-            }
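With the loading script deleted, the dataset is served from data files hosted on the Hub, so a plain load_dataset call replaces the builder above. A minimal sketch of post-deletion usage, assuming the dataset stays published under the amttl repo id and keeps the id/tokens/tags features defined by the deleted script (the repo id and split names are carried over from the script, not confirmed by this commit):

from datasets import load_dataset

# Load AMTTL directly from the Hub; no loading script is needed.
# "amttl" is the repo id assumed here; adjust if the dataset lives
# under a namespace.
ds = load_dataset("amttl")

# "tags" is a Sequence of ClassLabel, so values arrive as integer ids;
# the BIES label names come from the features metadata.
tag_names = ds["train"].features["tags"].feature.names  # ["B", "I", "E", "S"]

example = ds["train"][0]
for token, tag_id in zip(example["tokens"], example["tags"]):
    print(token, tag_names[tag_id])

If the Hub copy is ever unavailable, the raw splits the script pointed at (forum_train.txt, forum_dev.txt, forum_test.txt under adapt-sjtu/AMTTL on GitHub) still follow the format the deleted _generate_examples parsed: one token and one BIES tag per line, tab-separated, with blank lines between sentences.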