File size: 4,869 Bytes
b5d94a4
 
 
 
 
 
 
2658df3
b5d94a4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d7b4d91
b5d94a4
 
cfcb865
b5d94a4
 
1c4dafb
2658df3
1c4dafb
408da74
45f6105
b5d94a4
 
 
 
 
 
 
 
 
 
1c4dafb
b5d94a4
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
""" NER dataset compiled by T-NER library https://github.com/asahi417/tner/tree/master/tner """
import json
from itertools import chain
import datasets

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """[BioNLP2004 NER dataset](https://aclanthology.org/W04-1213.pdf)"""
_NAME = "bionlp2"
_VERSION = "1.0.0"
_CITATION = """
@inproceedings{collier-kim-2004-introduction,
    title = "Introduction to the Bio-entity Recognition Task at {JNLPBA}",
    author = "Collier, Nigel  and
      Kim, Jin-Dong",
    booktitle = "Proceedings of the International Joint Workshop on Natural Language Processing in Biomedicine and its Applications ({NLPBA}/{B}io{NLP})",
    month = aug # " 28th and 29th",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://aclanthology.org/W04-1213",
    pages = "73--78",
}
https://huggingface.co/datasets/chintagunta85/bionlp/raw/main/test_bionlp.json
"""

_HOME_PAGE = "https://huggingface.co/datasets/chintagunta85"
# https://huggingface.co/datasets/chintagunta85/bionlp/raw/main/train_bionlp.json
_URL = f'https://huggingface.co/datasets/chintagunta85/{_NAME}/raw/main'
_URLS = {
    str(datasets.Split.TEST): [f'{_URL}/test_bionlp.json'],
    str(datasets.Split.TRAIN): [f'{_URL}/train_bionlp.json'],
    str(datasets.Split.VALIDATION): [f'{_URL}/valid_bionlp.json'],
}



def map_ner_tags(tlist):
    nlist=[]
    for indx in tlist:
        #if(inv_map[indx]):
            # print(inv_map[indx], custom_names.index(inv_map[indx]), indx)
        nlist.append(custom_names.index(inv_map[indx]))
    return nlist

class BioNLP2004Config(datasets.BuilderConfig):
    """BuilderConfig"""

    def __init__(self, **kwargs):
        """BuilderConfig.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(BioNLP2004Config, self).__init__(**kwargs)


class BioNLP2004(datasets.GeneratorBasedBuilder):
    """Dataset."""

    BUILDER_CONFIGS = [
        BioNLP2004Config(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
    ]



    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]

    def _generate_examples(self, filepaths):
        custom_names = ['O','B-GENE','I-GENE','B-CHEMICAL','I-CHEMICAL','B-DISEASE','I-DISEASE',
         'B-DNA', 'I-DNA', 'B-RNA', 'I-RNA', 'B-CELL_LINE', 'I-CELL_LINE', 'B-CELL_TYPE', 'I-CELL_TYPE', 
         'B-PROTEIN', 'I-PROTEIN', 'B-SPECIES', 'I-SPECIES']

        pre_def = {"O": 0, "B-DNA": 1, "I-DNA": 2, "B-PROTEIN": 3, "I-PROTEIN": 4,
         "B-CELL_TYPE": 5, "I-CELL_TYPE": 6, "B-CELL_LINE": 7, "I-CELL_LINE": 8,
         "B-RNA": 9, "I-RNA": 10}
        inv_map = {0: 'O', 1: 'B-DNA', 2: 'I-DNA', 3: 'B-PROTEIN', 4: 'I-PROTEIN', 
         5: 'B-CELL_TYPE', 6: 'I-CELL_TYPE', 7: 'B-CELL_LINE', 8: 'I-CELL_LINE', 9: 'B-RNA', 10: 'I-RNA'}    
    
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                _list = [i for i in f.read().split('\n') if len(i) > 0]
                for i in _list:
                    data = json.loads(i)
                    #print(data)
                    
                    nlist = []
                    for indx in data['ner_tags']:
                        nlist.append(custom_names.index(inv_map[indx]))
                    #data['ner_tags'] = map_ner_tags(data['ner_tags'])
                    data['ner_tags']=nlist
                    #del data['tags']
                    xstr = str(_key)
                    yield xstr,{"id":xstr,"tokens":data['tokens'], "ner_tags":data['ner_tags']}
                    #yield _key,data
                    _key += 1

    def _info(self):
        custom_names = ['O','B-GENE','I-GENE','B-CHEMICAL','I-CHEMICAL','B-DISEASE','I-DISEASE',
            'B-DNA', 'I-DNA', 'B-RNA', 'I-RNA', 'B-CELL_LINE', 'I-CELL_LINE', 'B-CELL_TYPE', 'I-CELL_TYPE', 
            'B-PROTEIN', 'I-PROTEIN', 'B-SPECIES', 'I-SPECIES']
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=custom_names
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )