asahi417 committed
Commit 676ab44
1 Parent(s): de1ea7d
README.md ADDED
@@ -0,0 +1,79 @@
---
language:
- en
license:
- other
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
task_categories:
- token-classification
task_ids:
- named-entity-recognition
pretty_name: BioNLP2004
---

# Dataset Card for "tner/bionlp2004"

## Dataset Description

- **Repository:** [T-NER](https://github.com/asahi417/tner)
- **Paper:** [https://aclanthology.org/W04-1213.pdf](https://aclanthology.org/W04-1213.pdf)
- **Dataset:** BioNLP2004
- **Domain:** Biochemical
- **Number of Entity Types:** 5

### Dataset Summary
BioNLP2004 NER dataset formatted as part of the [TNER](https://github.com/asahi417/tner) project.
The original BioNLP2004 dataset provides only training and test splits; we create a validation split by randomly sampling instances from the training set, equal in number to half the test set.

- Entity Types: `DNA`, `protein`, `cell_type`, `cell_line`, `RNA`

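A minimal loading sketch (not part of the original card) using the `datasets` library; depending on the installed `datasets` version, a script-based dataset such as this one may additionally require `trust_remote_code=True`:

```python
from datasets import load_dataset

# Download and build the dataset via the bionlp2004.py loading script.
dataset = load_dataset("tner/bionlp2004")

# Each split yields records with "tokens" (list of str) and "tags" (list of int).
sample = dataset["train"][0]
print(sample["tokens"])
print(sample["tags"])
```
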
## Dataset Structure

### Data Instances
An example of `train` looks as follows.

```
{
    "tags": [0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    "tokens": ["1", ".", "1", ".", "4", "Borrower", "engages", "in", "criminal", "conduct", "or", "is", "involved", "in", "criminal", "activities", ";"]
}
```

### Label ID
The label2id dictionary can be found [here](https://huggingface.co/datasets/tner/bionlp2004/raw/main/dataset/label.json).
```python
{
    "O": 0,
    "B-DNA": 1,
    "I-DNA": 2,
    "B-protein": 3,
    "I-protein": 4,
    "B-cell_type": 5,
    "I-cell_type": 6,
    "B-cell_line": 7,
    "I-cell_line": 8,
    "B-RNA": 9,
    "I-RNA": 10
}
```

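For inspecting instances or decoding model outputs, the mapping can be inverted. The snippet below is an illustrative sketch (the `decode_tags` helper is hypothetical, not part of T-NER); the dictionary is the one stored in `dataset/label.json` of this repository:

```python
# label2id as stored in dataset/label.json.
label2id = {
    "O": 0, "B-DNA": 1, "I-DNA": 2, "B-protein": 3, "I-protein": 4,
    "B-cell_type": 5, "I-cell_type": 6, "B-cell_line": 7, "I-cell_line": 8,
    "B-RNA": 9, "I-RNA": 10,
}
id2label = {idx: label for label, idx in label2id.items()}


def decode_tags(tags):
    """Hypothetical helper: map integer tag IDs back to IOB2 label strings."""
    return [id2label[t] for t in tags]


print(decode_tags([0, 3, 4, 0]))  # ['O', 'B-protein', 'I-protein', 'O']
```
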
### Data Splits

| name |train|validation|test|
|---------|----:|---------:|---:|
|fin |861 | 303| 303|

### Citation Information

```
@inproceedings{collier-kim-2004-introduction,
    title = "Introduction to the Bio-entity Recognition Task at {JNLPBA}",
    author = "Collier, Nigel  and
      Kim, Jin-Dong",
    booktitle = "Proceedings of the International Joint Workshop on Natural Language Processing in Biomedicine and its Applications ({NLPBA}/{B}io{NLP})",
    month = aug # " 28th and 29th",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://aclanthology.org/W04-1213",
    pages = "73--78",
}
```
bionlp2004.py ADDED
@@ -0,0 +1,81 @@
""" NER dataset compiled by T-NER library https://github.com/asahi417/tner/tree/master/tner """
import json

import datasets

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """[BioNLP2004 NER dataset](https://aclanthology.org/W04-1213.pdf)"""
_NAME = "bionlp2004"
_VERSION = "1.0.0"
_CITATION = """
@inproceedings{collier-kim-2004-introduction,
    title = "Introduction to the Bio-entity Recognition Task at {JNLPBA}",
    author = "Collier, Nigel  and
      Kim, Jin-Dong",
    booktitle = "Proceedings of the International Joint Workshop on Natural Language Processing in Biomedicine and its Applications ({NLPBA}/{B}io{NLP})",
    month = aug # " 28th and 29th",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://aclanthology.org/W04-1213",
    pages = "73--78",
}
"""

_HOME_PAGE = "https://github.com/asahi417/tner"
_URL = f'https://huggingface.co/datasets/tner/{_NAME}/raw/main/dataset'
_URLS = {
    str(datasets.Split.TEST): [f'{_URL}/test.json'],
    str(datasets.Split.TRAIN): [f'{_URL}/train.json'],
    str(datasets.Split.VALIDATION): [f'{_URL}/valid.json'],
}


class BioNLP2004Config(datasets.BuilderConfig):
    """BuilderConfig for BioNLP2004."""

    def __init__(self, **kwargs):
        """BuilderConfig.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(BioNLP2004Config, self).__init__(**kwargs)


class BioNLP2004(datasets.GeneratorBasedBuilder):
    """Dataset."""

    BUILDER_CONFIGS = [
        BioNLP2004Config(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
    ]

    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                # Each split file is JSON lines: one example per non-empty line.
                _list = [i for i in f.read().split('\n') if len(i) > 0]
                for i in _list:
                    data = json.loads(i)
                    yield _key, data
                    _key += 1

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(datasets.Value("int32")),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
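
For reference, `_generate_examples` treats each split file as JSON lines. The sketch below (not part of the loading script; the local path is an assumption for illustration) shows the equivalent direct read:

```python
import json

# Hypothetical local copy of a split file; the loader normally downloads it from
# https://huggingface.co/datasets/tner/bionlp2004/raw/main/dataset
path = "dataset/train.json"

with open(path, encoding="utf-8") as f:
    # One JSON object per non-empty line, each with "tokens" and "tags".
    examples = [json.loads(line) for line in f if line.strip()]

print(examples[0]["tokens"], examples[0]["tags"])
```
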
dataset/label.json ADDED
@@ -0,0 +1 @@
{"O": 0, "B-DNA": 1, "I-DNA": 2, "B-protein": 3, "I-protein": 4, "B-cell_type": 5, "I-cell_type": 6, "B-cell_line": 7, "I-cell_line": 8, "B-RNA": 9, "I-RNA": 10}
dataset/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
dataset/train.json ADDED
The diff for this file is too large to render. See raw diff
 
dataset/valid.json ADDED
The diff for this file is too large to render. See raw diff