lcampillos committed on
Commit
0204cf5
1 Parent(s): 89a1b45

Added python conversion script

Files changed (1)
  1. clinical_trials.py +102 -0
clinical_trials.py ADDED
@@ -0,0 +1,102 @@
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _LICENSE = "Creative Commons Attribution 4.0 International"
+
+ _VERSION = "1.1.0"
+
+ _URL = "https://huggingface.co/datasets/plncmm/clinical_trials/resolve/main/"
+ _TRAINING_FILE = "train.conll"
+ _DEV_FILE = "dev.conll"
+ _TEST_FILE = "test.conll"
+
+ class ClinicalTrialsConfig(datasets.BuilderConfig):
+     """BuilderConfig for ClinicalTrials dataset."""
+
+     def __init__(self, **kwargs):
+         super(ClinicalTrialsConfig, self).__init__(**kwargs)
+
+
+ class ClinicalTrials(datasets.GeneratorBasedBuilder):
+     """ClinicalTrials dataset."""
+
+     BUILDER_CONFIGS = [
+         ClinicalTrialsConfig(
+             name="ClinicalTrials",
+             version=datasets.Version(_VERSION),
+             description="ClinicalTrials dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-ANAT",
+                                 "B-CHEM",
+                                 "B-DISO",
+                                 "B-PROC",
+                                 "I-ANAT",
+                                 "I-CHEM",
+                                 "I-DISO",
+                                 "I-PROC",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             tokens = []
+             ner_tags = []
+             for line in f:
+                 if line == "" or line == "\n":
+                     if tokens:
+                         yield guid, {
+                             "id": str(guid),
+                             "tokens": tokens,
+                             "ner_tags": ner_tags,
+                         }
+                         guid += 1
+                         tokens = []
+                         ner_tags = []
+                 else:
+                     splits = line.split(" ")
+                     tokens.append(splits[0])
+                     ner_tags.append(splits[-1].rstrip())
+             # last example
+             if tokens:
+                 yield guid, {
+                     "id": str(guid),
+                     "tokens": tokens,
+                     "ner_tags": ner_tags,
+                 }
+
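With this loading script in place, the splits can be consumed through the datasets library. A minimal usage sketch, not part of this commit: it assumes the script is loaded from the plncmm/clinical_trials repository referenced by _URL above, and recent datasets releases may additionally require trust_remote_code=True for script-based datasets.

    from datasets import load_dataset

    # Downloads train.conll / dev.conll / test.conll and runs _generate_examples
    ds = load_dataset("plncmm/clinical_trials")

    # Map the integer ner_tags back to the BIO label names declared in _info
    labels = ds["train"].features["ner_tags"].feature.names
    example = ds["train"][0]
    print(example["tokens"])
    print([labels[i] for i in example["ner_tags"]])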