inoid committed on
Commit
219ab4d
1 Parent(s): b0f0299

Upload 2 files

barr2/process_dataset.py ADDED
@@ -0,0 +1,53 @@
+ import os
+ import re
+ import shutil
+ import tarfile
+ from pathlib import Path
+ from urllib import request
+
+ # Raw-text background set of the BARR2 corpus
+ URL = 'https://temu.bsc.es/BARR2/downloads/background_set.raw_text.tar.bz2'
+
+ FILE_PATH = "BARR2.tar.bz2"
+
+ # Resolve paths relative to this script
+ path = Path(__file__).parent.absolute()
+
+ FILE_ZIP = str(path) + os.sep + FILE_PATH
+ FILE_ZIP_EXTRAC = str(path) + os.sep + "BARR2"
+
+ # Download the archive only if it is not already present
+ if not os.path.exists(FILE_ZIP):
+     request.urlretrieve(URL, FILE_ZIP)
+
+ # Recreate the extraction directory from scratch
+ if os.path.exists(FILE_ZIP_EXTRAC):
+     shutil.rmtree(FILE_ZIP_EXTRAC)
+ os.makedirs(FILE_ZIP_EXTRAC)
+
+ # Extract the whole tar.bz2 archive into the target directory
+ with tarfile.open(FILE_ZIP, 'r:bz2') as archive:
+     archive.extractall(FILE_ZIP_EXTRAC)
+
+ # with open( str(path) + os.sep + 'example.txt', encoding='utf8') as file:
+ #     """
+ #     Build a dictionary with ICD-O-3 codes associated with
+ #     healthcare problems
+ #     """
+ #     linesInFile = file.readlines()
+
+ #     for index, iLine in enumerate(linesInFile):
+ #         print([linesInFile[index]]) if len(linesInFile[index]) > 1 else print('**************') if linesInFile[index] == '\n' else print('******* ERROR ********')
+
+ #         if re.match('^Las dilataciones bronquiales', iLine):
+ #             break
+
+ #         code = listOfData[0]
+ #         description = reduce(lambda a, b: a + " " + b, listOfData[1:2], "")
+ #         royalListOfCode[code.strip()] = description.strip()
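A quick sanity check of the extraction above before running the loader in the next file; a minimal sketch, assuming the archive unpacks into a txt/ subfolder (that layout is taken from BASE_DIR in using_dataset_hugginface.py, not guaranteed by this script):

# Hedged check: list the extracted .txt files the loader script expects to find.
from pathlib import Path

extract_dir = Path(__file__).parent / "BARR2" / "txt"   # assumed layout, mirrors BASE_DIR below
txt_files = sorted(p for p in extract_dir.iterdir() if p.suffix == ".txt")
print(f"Found {len(txt_files)} .txt files under {extract_dir}")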
barr2/using_dataset_hugginface.py ADDED
@@ -0,0 +1,181 @@
+ # -*- coding: utf-8 -*-
+ """using_dataset_hugginface.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
+ """
+
+ """**Hugging Face login for pushing to the Hub**"""
+ ###
+ #
+ # References used:
+ #   https://huggingface.co/learn/nlp-course/chapter5/5
+ #
+ ###
+
+ import os
+ import pathlib
+ from functools import reduce
+ from pathlib import Path
+
+ import pandas as pd
+ from huggingface_hub import login
+ from datasets import load_dataset, concatenate_datasets
+ # Load the tokenizer directly
+ from transformers import AutoTokenizer
+
+ HF_TOKEN = ''  # Hugging Face access token (fill in before running)
+ DATASET_TO_LOAD = 'bigbio/distemist'
+ DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
+ DATASET_SOURCE_ID = '11'
+ BASE_DIR = "BARR2" + os.sep + "txt"
+
+ # Log in to Hugging Face
+ login(token = HF_TOKEN)
+
+ # Reference dataset and code map (only used by the commented-out inspection code below)
+ dataset_CODING = load_dataset(DATASET_TO_LOAD)
+ royalListOfCode = {}
+ issues_path = 'dataset'
+ tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")
+
+ # Read the current path
+ path = Path(__file__).parent.absolute()
+ MAIN_FILE_ADRESS = str(path) + os.sep + BASE_DIR
+ #print ( os.listdir(str(path) + os.sep + BASE_DIR))
+
+ # Collect every .txt file extracted under BARR2/txt
+ files = [
+     str(path) + os.sep + BASE_DIR + os.sep + f
+     for f in os.listdir(MAIN_FILE_ADRESS)
+     if os.path.isfile(str(path) + os.sep + BASE_DIR + os.sep + f)
+     and pathlib.Path(MAIN_FILE_ADRESS + os.sep + f).suffix == ".txt"
+ ]
+
+ #print (files)
+ # First pass over the corpus files (the same text is rebuilt in the main loop below)
+ for iFile in files:
+     with open(iFile, encoding='utf8') as file:
+         linesInFile = file.readlines()
+         text = reduce(lambda a, b: a + " " + b, linesInFile, "")
+
+ #print (dataset_CODING)
+
+ # with open( str(path) + os.sep + 'ICD-O-3_valid-codes.txt', encoding='utf8') as file:
+ #     """
+ #     Build a dictionary with ICD-O-3 codes associated with
+ #     healthcare problems
+ #     """
+ #     linesInFile = file.readlines()
+ #     for iLine in linesInFile:
+ #         listOfData = iLine.split('\t')
+
+ #         code = listOfData[0]
+ #         description = reduce(lambda a, b: a + " " + b, listOfData[1:2], "")
+ #         royalListOfCode[code.strip()] = description.strip()
+
+
+ # def getCodeDescription(labels_of_type, royalListOfCode):
+ #     """
+ #     Search for the description associated with a given code
+ #     in royalListOfCode
+ #     """
+ #     classification = []
+
+ #     for iValue in labels_of_type:
+ #         if iValue in royalListOfCode.keys():
+ #             classification.append(royalListOfCode[iValue])
+ #     return classification
+
+
+ # # raw_text: Text associated with the document, question, clinical case or other type of information.
+
+ # # topic: (can be healthcare_treatment, healthcare_diagnosis, a topic, the answer to a question, or empty, e.g. for open text)
+
+ # # speciality: (medical specialty related to the raw_text, e.g. cardiology, surgery, others)
+
+ # # raw_text_type: (can be clinical case, open_text, question)
+
+ # # topic_type: (can be medical_topic, medical_diagnostic, answer, natural_medicine_topic, other, or empty)
+
+ # # source: Identifier of the source associated with the document, as it appears in the README and in the dataset description.
+
+ # # country: Identifier of the country of origin of the source (e.g. ch, es) using the ISO 3166-1 alpha-2 standard (two-letter country codes).
+ cantemistDstDict = {
+     'raw_text': '',
+     'topic': '',
+     'speciallity': '',
+     'raw_text_type': 'clinic_case',
+     'topic_type': '',
+     'source': DATASET_SOURCE_ID,
+     'country': 'es',
+     'document_id': ''
+ }
+
+ totalOfTokens = 0
+ corpusToLoad = []
+ countCopySeveralDocument = 0
+ counteOriginalDocument = 0
+
+ #print (dataset_CODING['train'][5]['entities'])
+
+ # Main loop: build one dataset row per BARR2 text file
+ for iFile in files:
+     with open(iFile, encoding='utf8') as file:
+         linesInFile = file.readlines()
+         text = reduce(lambda a, b: a + " " + b, linesInFile, "")
+         #print ("Element in dataset")
+
+         # Find the topic or diagnostic classification of the text
+         counteOriginalDocument += 1
+
+         # Count the tokens of the document with the Spanish GPT-2 tokenizer
+         listOfTokens = tokenizer.tokenize(text)
+         currentSizeOfTokens = len(listOfTokens)
+         totalOfTokens += currentSizeOfTokens
+         newCorpusRow = cantemistDstDict.copy()
+
+         newCorpusRow['raw_text'] = text
+         newCorpusRow['document_id'] = str(counteOriginalDocument)
+         corpusToLoad.append(newCorpusRow)
+
+ df = pd.DataFrame.from_records(corpusToLoad)
+
+ # Recreate the output JSONL file from scratch
+ os.makedirs(f"{str(path)}/{issues_path}", exist_ok=True)
+ if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
+     os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
+
+ df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
+ print(
+     f"Processed all the documents! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
+ )
+
+ print('Number of documents in the dataset: ', counteOriginalDocument)
+ print('Number of duplicated documents in the dataset: ', countCopySeveralDocument)
+ print('Total number of tokens in the dataset: ', totalOfTokens)
+ file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
+ size = file.stat().st_size
+ print('File size in kilobytes (KB)', size >> 10)
+ print('File size in megabytes (MB)', size >> 20)
+ print('File size in gigabytes (GB)', size >> 30)
+
+ # Once the JSONL file is written we can load it locally with load_dataset
+ local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")
+
+
+ ## Merge the local dataset with the dataset already on the Hub
+ try:
+     spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
+     print("=== Before ====")
+     print(spanish_dataset)
+     spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
+ except Exception:
+     spanish_dataset = local_spanish_dataset
+
+ spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
+
+ print("=== After ====")
+ print(spanish_dataset)
+
+ # Augmenting the dataset
+
+ # Important: if elements already exist in DATASET_TO_UPDATE we must update those
+ # elements in the list and check whether there are repeated elements
+
+
+
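The closing comment flags that documents already present in DATASET_TO_UPDATE should be updated and repeated elements reviewed before pushing. A minimal de-duplication sketch, assuming exact duplicates can be detected on the raw_text column (the column name comes from cantemistDstDict; using any other key, or fuzzy matching, would be a further assumption):

# Hedged sketch: drop exact duplicates on 'raw_text' before push_to_hub.
from datasets import Dataset

def deduplicate_by_raw_text(dataset):
    # Round-trip through pandas to reuse drop_duplicates on the text column.
    df = dataset.to_pandas()
    df = df.drop_duplicates(subset="raw_text", keep="first")
    return Dataset.from_pandas(df, preserve_index=False)

# spanish_dataset = deduplicate_by_raw_text(spanish_dataset)
# spanish_dataset.push_to_hub(DATASET_TO_UPDATE)

Whether the existing copy or the newly loaded one should win (keep="first" vs keep="last") depends on how updates are meant to propagate to the Hub dataset.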