inoid committed on
Commit 75663d4
1 Parent(s): a3effaa

Upload using_dataset_hugginface.py

Files changed (1):
  1. distemist/using_dataset_hugginface.py +173 -0
distemist/using_dataset_hugginface.py ADDED
@@ -0,0 +1,173 @@
# -*- coding: utf-8 -*-
"""using_dataset_hugginface.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1soGxkZu4antYbYG23GioJ6zoSt_GhSNT
"""

"""**Hugging Face login for pushing to the Hub**"""
###
#
# Bibliography used:
# https://huggingface.co/learn/nlp-course/chapter5/5
#
###
import os
import time
import math
from huggingface_hub import login
from datasets import load_dataset, concatenate_datasets
from functools import reduce
from pathlib import Path
import pandas as pd

# Load the tokenizer directly (no model is needed here, only token counts)
from transformers import AutoTokenizer

HF_TOKEN = ''
DATASET_TO_LOAD = 'bigbio/distemist'
DATASET_TO_UPDATE = 'somosnlp/spanish_medica_llm'
DATASET_SOURCE_ID = '9'

# Log in to Hugging Face
login(token=HF_TOKEN)

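# A safer alternative (a sketch, not part of the original script) is to read
# the token from the environment instead of hardcoding it:
#
#   HF_TOKEN = os.environ.get("HF_TOKEN", "")  # assumes an HF_TOKEN env var is set
#   login(token=HF_TOKEN)
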
dataset_CODING = load_dataset(DATASET_TO_LOAD)
royalListOfCode = {}
issues_path = 'dataset'
tokenizer = AutoTokenizer.from_pretrained("DeepESP/gpt2-spanish-medium")

# Read the current path
path = Path(__file__).parent.absolute()

#print (dataset_CODING)

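# For reference (an illustrative sketch, not in the original): tokenize()
# returns a list of subword strings, so len() gives the token count used in
# the loop below. The exact subwords depend on the model vocabulary:
#
#   tokens = tokenizer.tokenize("Paciente con fiebre y tos")
#   print(len(tokens))
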
# with open(str(path) + os.sep + 'ICD-O-3_valid-codes.txt', encoding='utf8') as file:
#     """
#     Build a dictionary with ICD-O-3 codes associated with
#     healthcare problems
#     """
#     linesInFile = file.readlines()
#     for iLine in linesInFile:
#         listOfData = iLine.split('\t')
#
#         code = listOfData[0]
#         description = reduce(lambda a, b: a + " " + b, listOfData[1:2], "")
#         royalListOfCode[code.strip()] = description.strip()


# def getCodeDescription(labels_of_type, royalListOfCode):
#     """
#     Search for the description associated with a code
#     in royalListOfCode
#     """
#     classification = []
#
#     for iValue in labels_of_type:
#         if iValue in royalListOfCode.keys():
#             classification.append(royalListOfCode[iValue])
#     return classification


# raw_text: text associated with the document, question, clinical case, or other kind of information.

# topic: (can be healthcare_treatment, healthcare_diagnosis, a topic, an answer to a question, or empty, e.g. for open text)

# speciality: (medical specialty that the raw_text relates to, e.g. cardiology, surgery, others)

# raw_text_type: (can be clinic_case, open_text, question)

# topic_type: (can be medical_topic, medical_diagnostic, answer, natural_medicine_topic, other, or empty)

# source: identifier of the source associated with the document, as it appears in the README and in the dataset description.

# country: identifier of the source's country of origin (e.g. ch, es) using the ISO 3166-1 alpha-2 standard (two-letter country codes).
cantemistDstDict = {
    'raw_text': '',
    'topic': '',
    'speciallity': '',
    'raw_text_type': 'clinic_case',
    'topic_type': '',
    'source': DATASET_SOURCE_ID,
    'country': 'es',
    'document_id': ''
}

totalOfTokens = 0
corpusToLoad = []
countCopySeveralDocument = 0
counteOriginalDocument = 0

#print (dataset_CODING['train'][5]['entities'])

# for item in dataset_CODING['train']:
#     for passage in item['passages']:
#         print("Keys " + str(passage.keys()))
#         print("Clinical case type " + str(passage['text']))

for iDataset in dataset_CODING:
    for item in dataset_CODING[iDataset]:
        for passageItem in item['passages']:
            #print ("Element in dataset")
            idFile = passageItem['id'] + '_' + str(iDataset)
            text = passageItem['text'][0]

            # Find the topic or diagnostic classification for the text
            counteOriginalDocument += 1

            listOfTokens = tokenizer.tokenize(text)
            currentSizeOfTokens = len(listOfTokens)
            totalOfTokens += currentSizeOfTokens
            newCorpusRow = cantemistDstDict.copy()

            #print('Current text has ', currentSizeOfTokens)
            #print('Total of tokens is ', totalOfTokens)
            newCorpusRow['raw_text'] = text
            newCorpusRow['document_id'] = str(idFile)
            corpusToLoad.append(newCorpusRow)

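# For orientation (an assumption inferred from the keys accessed above, not
# documented in this script): each bigbio item looks roughly like
#
#   {'passages': [{'id': '...', 'text': ['...'], ...}, ...]}
#
# which is why the loop reads passageItem['text'][0].
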
df = pd.DataFrame.from_records(corpusToLoad)

if os.path.exists(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl"):
    os.remove(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")

df.to_json(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", orient="records", lines=True)
print(
    f"Downloaded all the issues for {DATASET_TO_LOAD}! Dataset stored at {issues_path}/spanish_medical_llms.jsonl"
)
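
# If the 'dataset' directory does not exist yet, to_json raises an error; a
# defensive addition (not in the original) would be to create it beforehand:
#
#   os.makedirs(f"{str(path)}/{issues_path}", exist_ok=True)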

print('Original documents in the dataset: ', counteOriginalDocument)
print('Copied documents in the dataset: ', countCopySeveralDocument)
print('Total size in tokens: ', totalOfTokens)
file = Path(f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl")  # or Path('./doc.txt')
size = file.stat().st_size
print('File size in kilobytes (kB)', size >> 10)  # e.g. a 5 GiB file -> 5242880
print('File size in megabytes (MB)', size >> 20)  # e.g. a 5 GiB file -> 5120
print('File size in gigabytes (GB)', size >> 30)  # e.g. a 5 GiB file -> 5

# Once the issues are downloaded we can load them locally with the JSON loader
local_spanish_dataset = load_dataset("json", data_files=f"{str(path)}/{issues_path}/spanish_medical_llms.jsonl", split="train")

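# A quick sanity check (an optional addition, not in the original): the JSONL
# round-trip should preserve the row count.
#
#   assert len(local_spanish_dataset) == len(df)
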
## Update the local dataset with the dataset already on the Hub
try:
    spanish_dataset = load_dataset(DATASET_TO_UPDATE, split="train")
    spanish_dataset = concatenate_datasets([spanish_dataset, local_spanish_dataset])
except Exception:
    # If the Hub dataset does not exist yet (or cannot be loaded), start from the local data only
    spanish_dataset = local_spanish_dataset

# Note: push_to_hub requires the login token to have write access to DATASET_TO_UPDATE
spanish_dataset.push_to_hub(DATASET_TO_UPDATE)

print(spanish_dataset)

# Augmenting the dataset

# Important: if elements already exist on DATASET_TO_UPDATE we must update them
# in the list, and review whether there are repeated elements
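
# A minimal sketch of the deduplication the comment above calls for (an
# assumed approach, not part of the original script): drop rows sharing a
# document_id before pushing.
#
#   from datasets import Dataset
#   merged_df = spanish_dataset.to_pandas()
#   merged_df = merged_df.drop_duplicates(subset="document_id", keep="first")
#   spanish_dataset = Dataset.from_pandas(merged_df, preserve_index=False)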