|
|
|
"""using_dataset_hugginface.ipynb |
|
|
|
Automatically generated by Colaboratory. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT |
|
""" |
|
|
|
"""**Hugginface loggin for push on Hub**""" |
|
|
import os |
|
import time |
|
import math |
|
from huggingface_hub import login |
|
from datasets import load_dataset, concatenate_datasets |
|
from functools import reduce |
|
from pathlib import Path |
|
import pandas as pd |
|
import numpy as np |
|
|
|
|
|
|
|
from transformers import AutoTokenizer |
|
|
|
# Configuration: Hub access token, local JSON source file, and the Hub dataset
# repository that the new records will be appended to.
HF_TOKEN = ''
DATASET_TO_LOAD = 'spanish_health_output.json'
DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
|
# Substrings that mark U.S. government website boilerplate; any paragraph
# containing one of them is discarded by verifyRepetelyChain() below.
BAD_CHAIN = [
|
'es como usted puede verificarlo', |
|
'Un sitio oficial del Gobierno de Estados Unidos', |
|
'lo en sitios web oficiales y seguros.', |
|
'forma segura a un sitio web .gov. Comparta informaci', |
|
'Gobierno de Estados Unidos.', |
|
'pertenece a una organizaci', |
|
'(\r\n \n ) o ', |
|
'Un sitio\r\n' |
|
] |
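# Note: some of the markers above are cut off just before accented characters
# (e.g. 'informaci', 'organizaci'); since the check below uses substring find(),
# the truncated form still matches the full boilerplate sentence.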
|
|
|
login(token = HF_TOKEN) |
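# Note (an assumption, not part of the original notebook): instead of hard-coding
# HF_TOKEN above, the token could be read from the environment, e.g.
#   login(token=os.environ.get("HF_TOKEN", ""))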
|
|
|
|
|
|
|
royalListOfCode = {}
issues_path = 'dataset'  # output sub-folder for the generated JSONL file
tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")  # Spanish GPT-2 tokenizer, used only to count tokens
DATASET_SOURCE_ID = '2'  # value stored in the 'source' field of every generated row
|
|
|
# Resolve paths relative to this script and load the raw JSON corpus into a DataFrame.
path = Path(__file__).parent.absolute()

dataset_CODING = pd.read_json(str(path) + os.sep + DATASET_TO_LOAD, encoding="utf8")
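# The loaded DataFrame is expected to expose at least the columns referenced later:
# 'Healthtopics Name', 'titles', 'subtitles' and 'paragraphs'.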
|
|
# Template for one corpus row; each document gets a copy of this dict with
# 'topic', 'raw_text' and 'document_id' filled in.
cantemistDstDict = {
    'raw_text': '',
    'topic': '',
    'speciallity': '',
    'raw_text_type': 'open_text',
    'topic_type': 'other',
    'source': DATASET_SOURCE_ID,
    'country': 'es',
    'document_id': ''
}
|
|
|
def getExtraTexInformation(item, data_top_columname):
    """Concatenate every non-empty column of a row, except the structural ones, into one text block."""
    optionalTag = ["Healthtopics Name", "titles", "subtitles", "paragraphs"]
    text = ""

    for key in data_top_columname:
        if key not in optionalTag:
            # pd.notna() handles both missing values and strings, unlike np.isnan(),
            # which raises a TypeError on string cells.
            if pd.notna(item[key]) and len(str(item[key])) > 1:
                text += str(item[key]) + '\n'

    return text
|
|
|
totalOfTokens = 0
corpusToLoad = []
countCopySeveralDocument = 0
counteOriginalDocument = 0
data_top_columname = dataset_CODING.columns  # column names of the source DataFrame
|
|
|
def verifyRepetelyChain(paragraph):
    """Return the paragraph unchanged, or '' if it contains any boilerplate marker from BAD_CHAIN."""
    return '' if len([x for x in BAD_CHAIN if paragraph.find(x) != -1]) > 0 else paragraph
|
|
|
|
|
# Build one corpus row per source document.
for index, item in dataset_CODING.iterrows():

    if len(item['paragraphs']) > 1:
        # Join the paragraphs, dropping any that contain boilerplate.
        text = reduce(lambda a, b: a + "\n " + verifyRepetelyChain(b), item['paragraphs'], "")
    else:
        # Fall back to the remaining columns when there is no usable paragraph list.
        text = getExtraTexInformation(item, data_top_columname)

    counteOriginalDocument += 1
    newCorpusRow = cantemistDstDict.copy()

    # Tokenize only to keep track of the total corpus size in tokens.
    listOfTokens = []
    try:
        listOfTokens = tokenizer.tokenize(text)
    except Exception as error:
        raise Exception(f'Tokenization failed for document {index}') from error

    currentSizeOfTokens = len(listOfTokens)
    totalOfTokens += currentSizeOfTokens

    # Use the health-topic name as the topic when available, otherwise the joined titles.
    newCorpusRow['topic'] = item['Healthtopics Name'] if item['Healthtopics Name'] else reduce(lambda a, b: a + "\n " + b, item['titles'], "")
    newCorpusRow['raw_text'] = text
    idFile = counteOriginalDocument
    newCorpusRow['document_id'] = str(idFile)
    corpusToLoad.append(newCorpusRow)
|
|
|
|
|
df = pd.DataFrame.from_records(corpusToLoad) |
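# Persist the cleaned corpus as JSON Lines, overwriting any previous export.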
|
|
|
if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"): |
|
os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl") |
|
|
|
df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True) |
|
print(
    f"Processed all documents from {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)
|
|
|
print('Documents in the dataset: ', counteOriginalDocument)
print('Duplicated documents in the dataset: ', countCopySeveralDocument)
print('Total number of tokens: ', totalOfTokens)
|
# Report the size of the generated JSONL file.
file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")
size = file.stat().st_size
print('File size in kilobytes (KB):', size >> 10)
print('File size in megabytes (MB):', size >> 20)
print('File size in gigabytes (GB):', size >> 30)
|
|
|
|
|
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train") |
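# local_spanish_dataset now holds the freshly exported JSONL as a datasets.Dataset,
# ready to be concatenated with the existing Hub dataset below.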
|
|
|
|
|
# Fetch the current Hub dataset and append the freshly built local split.
try:
    spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
    new_spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
    print('<== Failed to load or extend the Hub dataset ==>')
    raise
|
|
|
|
|
# Publish the merged dataset back to the Hub.
new_spanish_dataset.push_to_hub(DATASET_TO_UPDATE)
|
|
|
print(new_spanish_dataset) |
|