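"""Segment FaQuAD paragraph contexts into sentences and export the spans.

Downloads the FaQuAD dataset (a SQuAD-style JSON file), segments every
paragraph context into sentences with spaCy, and writes one row per sentence
(document/paragraph indices, start/end character offsets, and a
train/validation/test split) to spans.csv.
"""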

import json

import pandas as pd
import requests
import spacy

# Pinned commit of the FaQuAD dataset, so every run fetches the same file.
DATASET_URL = "https://raw.githubusercontent.com/liafacom/faquad/6ad978f20672bb41625b3b71fbe4a88b893d0a86/data/dataset.json"

class SentenceModel:
    """Thin wrapper around a spaCy pipeline, used here only for sentence segmentation."""

    def __init__(self, model_name):
        self.nlp = spacy.load(model_name)

    def parse(self, text):
        """Return (start_char, end_char) offsets for each sentence in text."""
        # Enable only the components needed for sentence boundaries; the rest
        # of the pipeline is skipped for speed.
        with self.nlp.select_pipes(enable=["tok2vec", "parser", "senter"]):
            doc = self.nlp(text)
        sentences = [(sentence.start_char, sentence.end_char) for sentence in doc.sents]
        return sentences
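# Illustrative usage (not part of the pipeline below):
#   SentenceModel("pt_core_news_sm").parse(text) -> [(start_char, end_char), ...]
# where each pair indexes directly into the original text.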

def context_generator(url):
    """Download the SQuAD-style JSON and yield (document_index, paragraph_index, context)."""
    response = requests.get(url)
    response.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
    data = json.loads(response.text)["data"]
    for idx, row in enumerate(data):
        for paragraph_idx, paragraph_row in enumerate(row["paragraphs"]):
            context = paragraph_row["context"]
            yield idx, paragraph_idx, context

def define_split(document_index):
    """Assign a deterministic 60/20/20 train/validation/test split by document index."""
    if document_index % 5 == 0:
        return "test"
    elif document_index % 5 == 1:
        return "validation"
    else:
        return "train"

def main():
    """Build the sentence-span table and write it to spans.csv."""
    rows = []
    model = SentenceModel("pt_core_news_sm")
    for idx, paragraph_idx, context in context_generator(DATASET_URL):
        for start_char, end_char in model.parse(context):
            rows.append({
                "document_index": idx,
                "paragraph_index": paragraph_idx,
                "sentence_start_char": start_char,
                "sentence_end_char": end_char,
                "split": define_split(idx),
            })
    df = pd.DataFrame(rows)
    df.to_csv("spans.csv", index=False)

if __name__ == "__main__":
    main()
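# To run (the spaCy Portuguese model must be installed first):
#   python -m spacy download pt_core_news_sm
#   python extract_spans.py  # hypothetical filename; the script writes spans.csv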