|
|
|
"""using_dataset_hugginface.ipynb |
|
|
|
Automatically generated by Colaboratory. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT |
|
""" |
|
|
|
"""**Hugginface loggin for push on Hub**""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os |
|
import time |
|
import math |
|
from huggingface_hub import login |
|
from datasets import load_dataset, concatenate_datasets |
|
from functools import reduce |
|
from pathlib import Path |
|
import pandas as pd |
|
|
|
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
|
HF_TOKEN = ''  # Hugging Face access token with write permission (left empty here)

DATASET_TO_LOAD = 'bigbio/cantemist'  # source corpus: CANTEMIST Spanish oncology clinical cases

DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'  # destination dataset on the Hub

DATASET_SOURCE_ID = '1'  # value stored in the 'source' column for rows coming from this corpus
|
|
|
|
|
# Authenticate so that push_to_hub() below can write to the Hub.
login(token=HF_TOKEN)
|
|
|
dataset_CODING = load_dataset(DATASET_TO_LOAD)

royalListOfCode = {}  # ICD-O-3 code -> description

issues_path = 'dataset'  # local folder where the JSONL export is written

tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")  # used only to count tokens
|
|
|
|
|
path = Path(__file__).parent.absolute()  # directory of this script (__file__ requires running it as a .py file)
|
|
|
# Build a dictionary mapping each ICD-O-3 code to the healthcare problem it describes.
with open(str(path) + os.sep + 'ICD-O-3_valid-codes.txt', encoding='utf8') as file:
    linesInFile = file.readlines()
    for iLine in linesInFile:
        listOfData = iLine.split('\t')

        code = listOfData[0]
        description = reduce(lambda a, b: a + " " + b, listOfData[1:2], "")
        royalListOfCode[code.strip()] = description.strip()
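
# The block above assumes ICD-O-3_valid-codes.txt is tab-separated, with the code in the
# first column and its description in the second, e.g. (illustrative line, not taken from
# the real file): "8000/3\tNeoplasm, malignant".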
|
|
|
|
|
def getCodeDescription(labels_of_type, royalListOfCode):
    """
    Return the descriptions associated with the given ICD-O-3 codes,
    looked up in royalListOfCode. Unknown codes are skipped.
    """
    classification = []

    for iValue in labels_of_type:
        if iValue in royalListOfCode:
            classification.append(royalListOfCode[iValue])
    return classification
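
# Illustrative usage (hypothetical entries, not taken from the real code list):
# if royalListOfCode were {'8000/3': 'Neoplasm, malignant'}, then
#   getCodeDescription(['8000/3', '0000/0'], royalListOfCode)
# would return ['Neoplasm, malignant'], silently skipping the unknown code.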
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
cantemistDstDict = {
    'raw_text': '',
    'topic': '',
    'speciallity': '',
    'raw_text_type': 'clinic_case',
    'topic_type': 'medical_diagnostic',
    'source': DATASET_SOURCE_ID,
    'country': 'es',
    'document_id': ''
}
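
# Every exported row starts as a copy of this template; 'raw_text', 'topic' and
# 'document_id' are filled in per document, so a document with several ICD-O-3 codes
# yields several rows that differ only in 'topic'.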
|
|
|
totalOfTokens = 0              # running total of tokens over all processed documents
corpusToLoad = []              # rows that will form the new dataset
countCopySeveralDocument = 0   # extra rows created when a document carries several codes
counteOriginalDocument = 0     # number of original documents read from the corpus
|
|
|
# Iterate over the dataset splits; only documents from the 'test' split are used.
for iDataset in dataset_CODING:
    if iDataset == 'test':
        for item in dataset_CODING[iDataset]:

            idFile = item['id']
            text = item['text']
            list_of_type = item['text_bound_annotations']  # read but not used below
            labels_of_type = item['labels']

            # Map the document's ICD-O-3 codes to human-readable diagnoses.
            diagnostic_types = getCodeDescription(labels_of_type, royalListOfCode)
            counteOriginalDocument += 1
            classFileSize = len(diagnostic_types)

            # A document with several diagnoses is duplicated, one row per diagnosis.
            if classFileSize > 1:
                countCopySeveralDocument += classFileSize - 1

            # Count tokens with the Spanish GPT-2 tokenizer to estimate corpus size.
            listOfTokens = tokenizer.tokenize(text)
            currentSizeOfTokens = len(listOfTokens)
            totalOfTokens += currentSizeOfTokens

            for iTypes in diagnostic_types:
                newCorpusRow = cantemistDstDict.copy()

                newCorpusRow['raw_text'] = text
                newCorpusRow['document_id'] = str(idFile)
                newCorpusRow['topic'] = iTypes
                corpusToLoad.append(newCorpusRow)
|
|
|
df = pd.DataFrame.from_records(corpusToLoad) |
|
|
|
# Remove any previous export before writing the fresh JSON Lines file.
if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
    os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")

df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
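
# The JSON Lines export holds one record per line with the keys of cantemistDstDict,
# e.g. (illustrative, shortened): {"raw_text": "...", "topic": "...", "source": "1", ...}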
|
print(
    f"Processed all the documents from {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)
|
|
|
print('Original documents in the dataset: ', counteOriginalDocument)
print('Duplicated documents (several diagnoses): ', countCopySeveralDocument)
print('Total number of tokens: ', totalOfTokens)

file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
size = file.stat().st_size
# Right-shifting by 10/20/30 bits divides by 1024, 1024**2 and 1024**3 respectively.
print('File size in kilobytes (KB):', size >> 10)
print('File size in megabytes (MB):', size >> 20)
print('File size in gigabytes (GB):', size >> 30)
|
|
|
|
|
# Read the JSONL export back as a datasets.Dataset so it can be concatenated and pushed.
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")
|
|
|
|
|
try:
    # If the Hub dataset already exists, append the new rows to it.
    spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
    spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
    # Fall back to the local data alone (e.g. when the remote dataset does not exist yet).
    spanish_dataset = local_spanish_dataset
|
|
|
spanish_dataset.push_to_hub(DATASET_TO_UPDATE) |
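
# push_to_hub() uploads the concatenated dataset to somosnlp/spanish_medica_llm,
# creating the repository if it does not exist yet; the token passed to login()
# must therefore have write access.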
|
|
|
print(spanish_dataset) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|