# -*- coding: utf-8 -*-
"""using_dataset_hugginface.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
"""
"""**Hugginface loggin for push on Hub**"""
###
#
# Bibliography used:
# https://huggingface.co/learn/nlp-course/chapter5/5
#
###
import os
import time
import math
from huggingface_hub import login
from datasets import load_dataset, concatenate_datasets
from functools import reduce
from pathlib import Path
import pandas as pd
import mysql.connector
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
HF_TOKEN = ''
DATASET_TO_LOAD = 'PlanTL-GOB-ES/pharmaconer'
DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
# Log in to Hugging Face
login(token=HF_TOKEN)
dataset_CODING = load_dataset(DATASET_TO_LOAD)
print(dataset_CODING)
royalListOfCode = {}
issues_path = 'dataset'
tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")
DATASET_SOURCE_ID = '3'
# Directory where this script is located
path = Path(__file__).parent.absolute()
'''
Bibliography:
https://www.w3schools.com/python/python_mysql_getstarted.asp
https://www.w3schools.com/python/python_mysql_select.asp
'''
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="",
    database="icd10_dx_hackatonnlp"
)
def getCodeDescription(labels_of_type):
    """
    Look up the long description associated with each ICD-10 code
    in the local icd10_dx_order_code table.
    """
    icd10CodeDict = {}
    mycursor = mydb.cursor()
    for iValue in labels_of_type:
        codeIcd10 = iValue
        # Codes without a decimal part are stored as 'XXX.0' in the table
        if codeIcd10.find('.') == -1:
            codeIcd10 += '.0'
        # Parameterized query avoids SQL injection and quoting issues
        mycursor.execute(
            "SELECT dx_code, long_desc FROM `icd10_dx_order_code` WHERE dx_code = %s LIMIT 1;",
            (codeIcd10,)
        )
        myresult = mycursor.fetchall()
        for x in myresult:
            code, description = x
            icd10CodeDict[code] = description
    return icd10CodeDict
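# Note: getCodeDescription() is defined but not invoked below. A minimal usage
# sketch (assuming the NER labels have already been mapped to ICD-10 codes;
# the example codes here are hypothetical):
#
#   example_codes = ['I10', 'E11.9']
#   descriptions = getCodeDescription(example_codes)
#   print(descriptions)  # e.g. {'I10.0': '...', 'E11.9': '...'}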
# raw_text: Text associated with the document, question, clinical case, or other kind of information.
# topic: (can be healthcare_treatment, healthcare_diagnosis, a topic, an answer to a question, or empty, e.g. for open text)
# speciality: (medical speciality the raw_text relates to, e.g. cardiology, surgery, others)
# raw_text_type: (can be clinical case, open_text, question)
# topic_type: (can be medical_topic, medical_diagnostic, answer, natural_medicine_topic, other, or empty)
# source: Identifier of the source associated with the document, as listed in the README and dataset description.
# country: Identifier of the country of origin of the source (e.g. ch, es) using the ISO 3166-1 alpha-2 standard (two-letter country codes).
cantemistDstDict = {
    'raw_text': '',
    'topic': '',
    'speciallity': '',
    'raw_text_type': 'clinic_case',
    'topic_type': '',
    'source': DATASET_SOURCE_ID,
    'country': 'es',
    'document_id': ''
}
totalOfTokens = 0
corpusToLoad = []
countCopySeveralDocument = 0
counteOriginalDocument = 0
for iDataset in dataset_CODING:
    if iDataset == 'train':
        for item in dataset_CODING[iDataset]:
            #print("Element in dataset")
            idFile = str(item['id'])
            # Join the token list back into a single text string
            text = '' if len(item['tokens']) == 0 else reduce(lambda a, b: a + " " + b, item['tokens'], "")
            # Find the topic or diagnostic classification associated with the text
            counteOriginalDocument += 1
            newCorpusRow = cantemistDstDict.copy()
            #print('Current text has ', currentSizeOfTokens)
            #print('Total of tokens is ', totalOfTokens)
            listOfTokens = tokenizer.tokenize(text)
            currentSizeOfTokens = len(listOfTokens)
            totalOfTokens += currentSizeOfTokens
            newCorpusRow['raw_text'] = text
            newCorpusRow['document_id'] = idFile
            corpusToLoad.append(newCorpusRow)
df = pd.DataFrame.from_records(corpusToLoad)
os.makedirs(f"{str(path)}/{issues_path}", exist_ok=True)
if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
    os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
print(
    f"Processed all the documents from {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)
print('Number of original documents in the dataset: ', counteOriginalDocument)
print('Number of duplicated documents in the dataset: ', countCopySeveralDocument)
print('Total number of tokens in the dataset: ', totalOfTokens)
file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
size = file.stat().st_size
print('File size in kilobytes (KiB)', size >> 10)  # size // 1024
print('File size in megabytes (MiB)', size >> 20)  # size // 1024**2
print('File size in gigabytes (GiB)', size >> 30)  # size // 1024**3
## Merge the local dataset with the dataset already on the Hub
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")
print (' Local Dataset ==> ')
print(local_spanish_dataset)
try:
    # If the remote dataset already exists, append the new local rows to it
    spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
    spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
    # Otherwise start from the local dataset only
    spanish_dataset = local_spanish_dataset
spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
print(spanish_dataset)
# Augmenting the dataset
# Important: if elements already exist in DATASET_TO_UPDATE we must update them
# in the list and check whether there are repeated elements.
#spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
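# A minimal deduplication sketch for the note above (an assumption, not the
# project's confirmed behaviour): drop rows whose ('document_id', 'source')
# pair has already been seen before pushing again. Kept commented out so the
# script's behaviour is unchanged; the dedup key is hypothetical.
#
# seen = set()
# def _is_new(row):
#     key = (row['document_id'], row['source'])  # hypothetical dedup key
#     if key in seen:
#         return False
#     seen.add(key)
#     return True
#
# deduplicated_dataset = spanish_dataset.filter(_is_new)
# deduplicated_dataset.push_to_hub(DATASET_TO_UPDATE)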