Update E3C.py
E3C.py
CHANGED
@@ -174,31 +174,15 @@ class E3C(datasets.GeneratorBasedBuilder):
 
     def get_parsed_data(self, filepath: str):
 
-        print("--- get_parsed_data ---")
-
-        print(filepath)
-
-        print("---")
-
-        print(list(os.walk(filepath)))
-
         for root, _, files in os.walk(filepath):
 
-            print(files)
-
             for file in files:
 
-                print(file)
-
                 with open(f"{root}/{file}") as soup_file:
 
-                    print(soup_file)
-
                     soup = BeautifulSoup(soup_file, "xml")
                     text = soup.find("cas:Sofa").get("sofaString")
 
-                    print(text)
-
                     yield {
                         "CLINENTITY": self.get_clinical_annotations(soup.find_all("custom:CLINENTITY"), text),
                         "EVENT": self.get_annotations(soup.find_all("custom:EVENT"), text),
@@ -210,25 +194,16 @@ class E3C(datasets.GeneratorBasedBuilder):
                         "TOKENS": self.get_annotations(soup.find_all("type4:Token"), text),
                     }
 
-        print("+++ get_parsed_data out +++")
-
     def _generate_examples(self, filepath, split):
 
-        print(filepath)
-        print(split)
-
         all_res = []
 
         key = 0
 
         parsed_content = self.get_parsed_data(filepath)
-
-        print(parsed_content)
 
         for content in parsed_content:
 
-            print(content)
-
             for sentence in content["SENTENCE"]:
 
                 tokens = [(
@@ -297,8 +272,6 @@ class E3C(datasets.GeneratorBasedBuilder):
                 })
 
                 key += 1
-
-            print("Length: ", len(all_res))
 
         if self.config.name.find("clinical") != -1:
 
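The commit only strips leftover debug prints, so the generator logic is untouched. A minimal smoke test, as a sketch: load the script locally with the datasets library and pull one example. The config name below is a placeholder (the diff only shows that some config names contain "clinical"), the "train" split is assumed, and depending on the datasets version you may need to pass trust_remote_code=True.

import datasets

# Hypothetical config name -- replace with one of the configs actually defined in E3C.py.
dataset = datasets.load_dataset("./E3C.py", name="clinical_placeholder_config")

print(dataset)              # split names and feature schema
print(dataset["train"][0])  # first example yielded by _generate_examples (assuming a "train" split)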