lcampillos committed on
Commit 116e53c
1 Parent(s): 7d7712e

Delete clinical_trials.py

Files changed (1)
  1. clinical_trials.py +0 -120
clinical_trials.py DELETED
@@ -1,120 +0,0 @@
-'''
-Process the data in the terminal as follows:
-
-import json
-
-from clinical_trials import ClinicalTrials
-
-# the first argument only stands in for the unused `self` parameter
-train_json = ClinicalTrials._generate_examples('train.json', 'train.conll')
-
-x = json.dumps([item for item in train_json])
-
-outFile = open("train.json", 'w', encoding="utf8")
-print(x, file=outFile)
-outFile.close()
-'''
-
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_LICENSE = "Creative Commons Attribution 4.0 International"
-
-_VERSION = "1.1.0"
-
-_URL = "https://huggingface.co/datasets/lcampillos/CT-EBM-ES"
-_TRAINING_FILE = "train.conll"
-_DEV_FILE = "dev.conll"
-_TEST_FILE = "test.conll"
-
-class ClinicalTrialsConfig(datasets.BuilderConfig):
-    """BuilderConfig for ClinicalTrials dataset."""
-
-    def __init__(self, **kwargs):
-        super(ClinicalTrialsConfig, self).__init__(**kwargs)
-
-
-class ClinicalTrials(datasets.GeneratorBasedBuilder):
-    """ClinicalTrials dataset."""
-
-    BUILDER_CONFIGS = [
-        ClinicalTrialsConfig(
-            name="ClinicalTrials",
-            version=datasets.Version(_VERSION),
-            description="ClinicalTrials dataset"),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "tokens": datasets.Sequence(datasets.Value("string")),
-                    "ner_tags": datasets.Sequence(
-                        datasets.features.ClassLabel(
-                            names=[
-                                "O",
-                                "B-ANAT",
-                                "B-CHEM",
-                                "B-DISO",
-                                "B-PROC",
-                                "I-ANAT",
-                                "I-CHEM",
-                                "I-DISO",
-                                "I-PROC",
-                            ]
-                        )
-                    ),
-                }
-            ),
-            supervised_keys=None,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # join with "/" since _URL has no trailing slash
-        urls_to_download = {
-            "train": f"{_URL}/{_TRAINING_FILE}",
-            "dev": f"{_URL}/{_DEV_FILE}",
-            "test": f"{_URL}/{_TEST_FILE}",
-        }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        logger.info("⏳ Generating examples from = %s", filepath)
-        with open(filepath, encoding="utf-8") as f:
-            guid = 0
-            tokens = []
-            ner_tags = []
-            for line in f:
-                # a blank line marks a sentence boundary (lines keep their
-                # trailing newline, so compare against the stripped line)
-                if line.strip() == "":
-                    if tokens:
-                        yield guid, {
-                            "id": str(guid),
-                            "tokens": tokens,
-                            "ner_tags": ner_tags,
-                        }
-                        guid += 1
-                        tokens = []
-                        ner_tags = []
-                else:
-                    # CoNLL columns: token first, NER tag last
-                    splits = line.split(" ")
-                    tokens.append(splits[0])
-                    ner_tags.append(splits[-1].rstrip())
-            # last example, if the file has no trailing blank line
-            if tokens:
-                yield guid, {
-                    "id": str(guid),
-                    "tokens": tokens,
-                    "ner_tags": ner_tags,
-                }
-
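
Note: before this commit, calling datasets.load_dataset("lcampillos/CT-EBM-ES") on a datasets version that supports loading scripts would have executed the script above to build the train/validation/test splits. With the script deleted, the CoNLL files can still be parsed directly. Below is a minimal standalone sketch of the same parsing logic as the removed _generate_examples; read_conll is a hypothetical helper (not part of the original script), and the file name train.conll is assumed from the diff.

import json

def read_conll(filepath):
    # Yield (guid, example) pairs, treating blank lines as sentence
    # boundaries, mirroring the deleted _generate_examples method.
    guid = 0
    tokens, ner_tags = [], []
    with open(filepath, encoding="utf-8") as f:
        for line in f:
            if line.strip() == "":
                if tokens:
                    yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}
                    guid += 1
                    tokens, ner_tags = [], []
            else:
                splits = line.split(" ")
                tokens.append(splits[0])              # token is the first column
                ner_tags.append(splits[-1].rstrip())  # NER tag is the last column
    if tokens:  # final sentence if the file lacks a trailing blank line
        yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}

examples = [example for _, example in read_conll("train.conll")]
with open("train.json", "w", encoding="utf8") as out_file:
    json.dump(examples, out_file, ensure_ascii=False)

This reproduces the recipe from the docstring at the top of the deleted file without requiring the datasets library.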