Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: expert-generated
Annotations Creators: expert-generated
Source Datasets: original
albertvillanova (HF staff) committed
Commit dc06405 (1 parent: fe41a26)

Convert dataset to Parquet (#5)


- Convert dataset to Parquet (5d70f102c0f52f4ac8a8d14e200c8c39c61496e7)
- Delete loading script (821c1c4984fe61856b44e362f865c751f28e58c5)
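In practice, the conversion means the dataset is now served from the Parquet shards listed in the README rather than by executing bc2gm_corpus.py. A minimal check of the result, assuming the dataset keeps its Hub id "bc2gm_corpus" and a recent version of the datasets library:

from datasets import load_dataset

# Loads the default config ("bc2gm_corpus") from the Parquet shards
# referenced in the README `configs` block added by this commit.
ds = load_dataset("bc2gm_corpus")
print(ds)                       # train / validation / test splits
print(ds["test"].num_rows)      # 5000, matching num_examples in the card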

README.md CHANGED
@@ -42,8 +42,18 @@ dataset_info:
   - name: test
     num_bytes: 2454589
     num_examples: 5000
-  download_size: 4636753
+  download_size: 2154630
   dataset_size: 9765631
+configs:
+- config_name: bc2gm_corpus
+  data_files:
+  - split: train
+    path: bc2gm_corpus/train-*
+  - split: validation
+    path: bc2gm_corpus/validation-*
+  - split: test
+    path: bc2gm_corpus/test-*
+  default: true
 ---
 
 # Dataset Card for bc2gm_corpus
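The added configs block is what tells the datasets library which Parquet shards back each split. A rough hand-written equivalent, assuming the usual Hub URL layout datasets/bc2gm_corpus/resolve/main/<path> (the URLs below are illustrative, not part of this commit):

from datasets import load_dataset

# Point the generic "parquet" builder at the shards this commit adds.
base = "https://huggingface.co/datasets/bc2gm_corpus/resolve/main/bc2gm_corpus"
data_files = {
    "train": f"{base}/train-00000-of-00001.parquet",
    "validation": f"{base}/validation-00000-of-00001.parquet",
    "test": f"{base}/test-00000-of-00001.parquet",
}
ds = load_dataset("parquet", data_files=data_files)
print(ds["train"].num_rows)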
bc2gm_corpus.py DELETED
@@ -1,143 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""BioCreative II gene mention recognition Corpus"""
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@article{smith2008overview,
-  title={Overview of BioCreative II gene mention recognition},
-  author={Smith, Larry and Tanabe, Lorraine K and nee Ando, Rie Johnson and Kuo, Cheng-Ju and Chung, I-Fang and Hsu, Chun-Nan and Lin, Yu-Shi and Klinger, Roman and Friedrich, Christoph M and Ganchev, Kuzman and others},
-  journal={Genome biology},
-  volume={9},
-  number={S2},
-  pages={S2},
-  year={2008},
-  publisher={Springer}
-}
-"""
-
-_DESCRIPTION = """\
-Nineteen teams presented results for the Gene Mention Task at the BioCreative II Workshop.
-In this task participants designed systems to identify substrings in sentences corresponding to gene name mentions.
-A variety of different methods were used and the results varied with a highest achieved F1 score of 0.8721.
-Here we present brief descriptions of all the methods used and a statistical analysis of the results.
-We also demonstrate that, by combining the results from all submissions, an F score of 0.9066 is feasible,
-and furthermore that the best result makes use of the lowest scoring submissions.
-
-For more details, see: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2559986/
-
-The original dataset can be downloaded from: https://biocreative.bioinformatics.udel.edu/resources/corpora/biocreative-ii-corpus/
-This dataset has been converted to CoNLL format for NER using the following tool: https://github.com/spyysalo/standoff2conll
-"""
-
-_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2559986/"
-_URL = "https://github.com/spyysalo/bc2gm-corpus/raw/master/conll/"
-_TRAINING_FILE = "train.tsv"
-_DEV_FILE = "devel.tsv"
-_TEST_FILE = "test.tsv"
-
-
-class Bc2gmCorpusConfig(datasets.BuilderConfig):
-    """BuilderConfig for Bc2gmCorpus"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for Bc2gmCorpus.
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(Bc2gmCorpusConfig, self).__init__(**kwargs)
-
-
-class Bc2gmCorpus(datasets.GeneratorBasedBuilder):
-    """Bc2gmCorpus dataset."""
-
-    BUILDER_CONFIGS = [
-        Bc2gmCorpusConfig(name="bc2gm_corpus", version=datasets.Version("1.0.0"), description="bc2gm corpus"),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "tokens": datasets.Sequence(datasets.Value("string")),
-                    "ner_tags": datasets.Sequence(
-                        datasets.features.ClassLabel(
-                            names=[
-                                "O",
-                                "B-GENE",
-                                "I-GENE",
-                            ]
-                        )
-                    ),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        urls_to_download = {
-            "train": f"{_URL}{_TRAINING_FILE}",
-            "dev": f"{_URL}{_DEV_FILE}",
-            "test": f"{_URL}{_TEST_FILE}",
-        }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        logger.info("⏳ Generating examples from = %s", filepath)
-        with open(filepath, encoding="utf-8") as f:
-            guid = 0
-            tokens = []
-            ner_tags = []
-            for line in f:
-                if line == "" or line == "\n":
-                    if tokens:
-                        yield guid, {
-                            "id": str(guid),
-                            "tokens": tokens,
-                            "ner_tags": ner_tags,
-                        }
-                        guid += 1
-                        tokens = []
-                        ner_tags = []
-                else:
-                    # tokens are tab separated
-                    splits = line.split("\t")
-                    tokens.append(splits[0])
-                    ner_tags.append(splits[1].rstrip())
-            # last example
-            if tokens:
-                yield guid, {
-                    "id": str(guid),
-                    "tokens": tokens,
-                    "ner_tags": ner_tags,
-                }
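The deleted script parsed two-column CoNLL TSV files (one token and tag per line, sentences separated by blank lines) and mapped the tags onto a ClassLabel with names O, B-GENE, I-GENE. A standalone sketch of that parsing, for anyone who still wants to read the original TSVs from https://github.com/spyysalo/bc2gm-corpus/raw/master/conll/ directly (the helper name read_conll is illustrative):

def read_conll(path):
    """Parse a token<TAB>tag file into sentence-level records."""
    examples, tokens, tags = [], [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip() == "":
                # blank line marks a sentence boundary
                if tokens:
                    examples.append({"tokens": tokens, "ner_tags": tags})
                    tokens, tags = [], []
            else:
                token, tag = line.rstrip("\n").split("\t")
                tokens.append(token)
                tags.append(tag)
    if tokens:
        # last sentence if the file does not end with a blank line
        examples.append({"tokens": tokens, "ner_tags": tags})
    return examples

Unlike the deleted loader, this sketch keeps the tags as strings; the Parquet shards store them as ClassLabel integer ids (0 = O, 1 = B-GENE, 2 = I-GENE).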
bc2gm_corpus/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d1be32773cfca844ff5ceb98c127d7e2f9a46eec6beecf49aa65675a39cb760
+size 545335

bc2gm_corpus/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b29a8a328108055b47208374a053af5eb07d235290b497c54d35a6ca6a0732f4
+size 1338046

bc2gm_corpus/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f31dd41c246c43adb1e80e31e8e0cbe37fe0c19bd0a6807b19f2541392ac290
+size 271249
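The three added files are Git LFS pointer stubs; the actual Parquet shards live in LFS storage. A quick way to inspect one of them without the datasets library, assuming pandas with pyarrow and fsspec installed and the usual resolve/main URL layout (the URL is illustrative, not part of this commit):

import pandas as pd

url = ("https://huggingface.co/datasets/bc2gm_corpus/resolve/main/"
       "bc2gm_corpus/test-00000-of-00001.parquet")
df = pd.read_parquet(url)           # columns: id, tokens, ner_tags
print(len(df))                      # 5000 rows for the test split
print(df.loc[0, "tokens"][:5])      # tokens stored as lists of strings
print(df.loc[0, "ner_tags"][:5])    # ner_tags stored as ClassLabel integer ids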