Commit 6f4a453 by inoid
1 Parent(s): cc9e37d

Upload 3 files

cares/README.md ADDED
@@ -0,0 +1,24 @@
+ ## Important sites to use
+
+ Blog with solutions
+ https://community.open-emr.org/t/icd10-spanish-version/8317/18
+
+ Control page for the Spanish ICD-10
+ https://www.eciemaps.sanidad.gob.es/browser/diagnosticos
+
+ https://ais.paho.org/classifications/Chapters/
+
+
+ Check the Hugging Face dataset https://huggingface.co/datasets/chizhikchi/CARES
+
+
+ The database codes were taken from
+
+ https://eciemaps.mscbs.gob.es/ecieMaps/documentation/documentation.html
+
+
+ ## Size of the added rows
+
+ - Original documents in the dataset: 966
+ - Documents duplicated because they carry several ICD-10 codes: 389
+ - Total number of tokens: 322353
cares/corpus_create.py ADDED
@@ -0,0 +1,27 @@
+ import mysql.connector
+
+ '''
+ Bibliography:
+ https://www.w3schools.com/python/python_mysql_getstarted.asp
+ https://www.w3schools.com/python/python_mysql_select.as
+
+ '''
+ # Connect to the local MySQL database that holds the ICD-10 diagnosis codes
+ mydb = mysql.connector.connect(
+     host="localhost",
+     user="root",
+     password="",
+     database="icd10_dx_hackatonnlp"
+ )
+
+ icd10CodeDict = {}
+ mycursor = mydb.cursor()
+ codeIcd10 = 'C72.0'
+ # Fetch the code and its long description for a single ICD-10 code
+ mycursor.execute(f"SELECT dx_code, long_desc FROM `icd10_dx_order_code` WHERE dx_code = '{codeIcd10}';")
+
+ myresult = mycursor.fetchall()
+
+ # Build a {code: description} dictionary from the query result
+ for x in myresult:
+     code, description = x
+     icd10CodeDict[code] = description
+
+ print(icd10CodeDict)
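The query above interpolates the code directly into the SQL string. A safer variant (a sketch, using the same table and connection settings as the script) passes the code as a bound parameter so the driver handles escaping:

```python
import mysql.connector

mydb = mysql.connector.connect(
    host="localhost", user="root", password="", database="icd10_dx_hackatonnlp"
)
mycursor = mydb.cursor()

# Bound parameter (%s) instead of f-string interpolation.
codeIcd10 = 'C72.0'
mycursor.execute(
    "SELECT dx_code, long_desc FROM `icd10_dx_order_code` WHERE dx_code = %s;",
    (codeIcd10,),
)
print(dict(mycursor.fetchall()))
```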
cares/using_dataset_hugginface.py ADDED
@@ -0,0 +1,188 @@
+ # -*- coding: utf-8 -*-
+ """using_dataset_hugginface.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+ https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
+ """
+
+ """**Hugging Face login for pushing to the Hub**"""
+ ###
+ #
+ # Bibliography used:
+ # https://huggingface.co/learn/nlp-course/chapter5/5
+ #
+ ###
+
+ import os
+ import time
+ import math
+ from huggingface_hub import login
+ from datasets import load_dataset, concatenate_datasets
+ from functools import reduce
+ from pathlib import Path
+ import pandas as pd
+ import mysql.connector
+
+ # Load the tokenizer/model classes directly
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ HF_TOKEN = ''
+ DATASET_TO_LOAD = 'chizhikchi/CARES'
+ DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
+
+ # Log in to Hugging Face
+ login(token = HF_TOKEN)
+
+ dataset_CODING = load_dataset(DATASET_TO_LOAD)
+ dataset_CODING
+ royalListOfCode = {}
+ issues_path = 'dataset'
+ DATASET_SOURCE_ID = '5'
+
+ tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")
+
+ # Read the current path
+ path = Path(__file__).parent.absolute()
+
+ '''
+ Bibliography:
+ https://www.w3schools.com/python/python_mysql_getstarted.asp
+ https://www.w3schools.com/python/python_mysql_select.as
+
+ '''
+ # Connect to the local MySQL database with the ICD-10 diagnosis codes
+ mydb = mysql.connector.connect(
+     host="localhost",
+     user="root",
+     password="",
+     database="icd10_dx_hackatonnlp"
+ )
+
+
+
+ def getCodeDescription(labels_of_type):
+     """
+     Look up the long description associated with each ICD-10 code
+     in labels_of_type, querying the icd10_dx_order_code table.
+     """
+     icd10CodeDict = {}
+     mycursor = mydb.cursor()
+     codeIcd10 = ''
+
+     for iValue in labels_of_type:
+         codeIcd10 = iValue
+
+         # Append '.0' to codes that have no decimal part so they match the table format
+         if codeIcd10.find('.') == -1:
+             codeIcd10 += '.0'
+
+         mycursor.execute(f"SELECT dx_code, long_desc FROM `icd10_dx_order_code` WHERE dx_code = '{codeIcd10}' LIMIT 1;")
+
+         myresult = mycursor.fetchall()
+
+         for x in myresult:
+             code, description = x
+             icd10CodeDict[code] = description
+
+     return icd10CodeDict
+
+
+ # raw_text: Text associated with the document, question, clinical case, or other kind of information.
+
+ # topic: (can be healthcare_treatment, healthcare_diagnosis, a topic, an answer to a question, or empty, e.g. for open text)
+
+ # speciality: (medical specialty the raw_text relates to, e.g. cardiology, surgery, others)
+
+ # raw_text_type: (can be clinic_case, open_text, question)
+
+ # topic_type: (can be medical_topic, medical_diagnostic, answer, natural_medicine_topic, other, or empty)
+
+ # source: Identifier of the source associated with the document, as it appears in the README and dataset description.
+
+ # country: Identifier of the country of origin of the source (e.g. ch, es) using the ISO 3166-1 alpha-2 standard (two-letter country codes).
+ # Template row for the corpus; raw_text, topic and document_id are filled in per record below.
+ cantemistDstDict = {
+     'raw_text': '',
+     'topic': '',
+     'speciallity': '',
+     'raw_text_type': 'clinic_case',
+     'topic_type': 'medical_diagnostic',
+     'source': DATASET_SOURCE_ID,
+     'country': 'es',
+     'document_id': ''
+ }
+
+ totalOfTokens = 0
+ corpusToLoad = []
+ countCopySeveralDocument = 0
+ counteOriginalDocument = 0
+
+ for iDataset in dataset_CODING:
+     if iDataset == 'test':
+         for item in dataset_CODING[iDataset]:
+             #print ("Element in dataset")
+             idFile = str(item['iddoc'])
+             text = item['full_text']
+             labels_of_type = item['icd10']
+
+             # Find the topic or diagnostic classification for the text
+             diagnostyc_types = getCodeDescription( labels_of_type)
+             counteOriginalDocument += 1
+             classFileSize = len(diagnostyc_types)
+
+             # If the document has more than one classification, count the extra copies
+
+             if classFileSize > 1:
+                 countCopySeveralDocument += classFileSize - 1
+
+             listOfTokens = tokenizer.tokenize(text)
+             currentSizeOfTokens = len(listOfTokens)
+             totalOfTokens += currentSizeOfTokens
+
+             # One corpus row per classification found for this document
+             for key, iTypes in diagnostyc_types.items():
+                 #print(iTypes)
+                 newCorpusRow = cantemistDstDict.copy()
+
+                 #print('Current text has ', currentSizeOfTokens)
+                 #print('Total of tokens is ', totalOfTokens)
+                 newCorpusRow['raw_text'] = text
+                 newCorpusRow['document_id'] = idFile
+                 newCorpusRow['topic'] = iTypes
+                 corpusToLoad.append(newCorpusRow)
+
+ df = pd.DataFrame.from_records(corpusToLoad)
+
+ # Remove any previous export before writing a fresh JSONL file
+ if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
+     os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
+
+
+ df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
+ print(
+     f"Processed all the records from {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
+ )
+
+ print('Original documents in the dataset: ', counteOriginalDocument)
+ print('Duplicated documents (several classifications) in the dataset: ', countCopySeveralDocument)
+ print('Total number of tokens in the dataset: ', totalOfTokens)
+ file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
+ size = file.stat().st_size
+ print('File size in kilobytes (kB)', size >> 10)  # right shift by 10 bits = divide by 1024
+ print('File size in megabytes (MB)', size >> 20)
+ print('File size in gigabytes (GB)', size >> 30)
+
+ # Once the records are written we can load them locally as a Hugging Face dataset
+ # and update the dataset on the Hub with the new local records
+
+ local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")
+
+ print (' Local Dataset ==> ')
+ print(local_spanish_dataset)
+ try:
+     # Append the local records to the existing dataset on the Hub, if it exists
+     spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
+     spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
+ except Exception:
+     spanish_dataset = local_spanish_dataset
+
+ spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
+
+ print(spanish_dataset)
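A quick follow-up check (a sketch, assuming the push above succeeded and the token grants read access) is to reload somosnlp/spanish_medica_llm from the Hub and confirm the row count grew by the number of exported records:

```python
from datasets import load_dataset

# Reload the dataset that was just pushed and inspect its size.
updated = load_dataset("somosnlp/spanish_medica_llm", split="train")
print(updated)
print("Rows on the Hub after the update:", len(updated))
```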