import warnings
from pathlib import Path

import spacy
import srsly
import typer
from spacy.tokens import DocBin

def convert(lang: str, input_paths: list[Path], output_dir: Path, spans_key: str = "sc"):
    """Convert JSONL span annotations into sentence-level .spacy DocBin files."""
    nlp = spacy.blank(lang)
    # A sentencizer is enough here: we only need sentence boundaries, not a full parse.
    nlp.add_pipe("sentencizer")

    output_dir.mkdir(parents=True, exist_ok=True)

    # Count the sentences that end up with at least one aligned span.
    total_sentences = 0
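
    # Each input file is read with srsly.read_jsonl below, i.e. one JSON record
    # per line carrying document-level character offsets. A minimal sketch of
    # such a record (the text, offsets, and label are made-up examples, not
    # data that ships with this script):
    #
    #   {"text": "Take two aspirin daily.", "spans": [{"start": 9, "end": 16, "label": "DRUG"}]}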
    for input_path in input_paths:
        print(f"Processing file: {input_path}")
        doc_bin = DocBin()
        for annotation in srsly.read_jsonl(input_path):
            text = annotation["text"]
            # Run the pipeline so the sentencizer can mark sentence boundaries.
            doc = nlp(text)
            for sent in doc.sents:
                # Tokenize the sentence on its own; the offsets below are
                # re-computed relative to the sentence start.
                sent_doc = nlp.make_doc(sent.text)
                spans = []
                for item in annotation["spans"]:
                    # Shift document-level character offsets to sentence-level offsets.
                    start = item["start"] - sent.start_char
                    end = item["end"] - sent.start_char
                    label = item["label"]
                    # Keep only spans that fall entirely inside this sentence.
                    if start >= 0 and end <= len(sent.text):
                        span = sent_doc.char_span(start, end, label=label, alignment_mode="contract")
                        if span is None:
                            msg = (
                                f"Skipping span [{start}, {end}, {label}] because the text "
                                f"'{sent.text[start:end]}' does not align with token boundaries."
                            )
                            warnings.warn(msg)
                        else:
                            spans.append(span)

                # Only keep sentences that contain at least one usable span.
                if spans:
                    sent_doc.spans[spans_key] = spans
                    doc_bin.add(sent_doc)
                    total_sentences += 1
        # Write one .spacy file per input file, named after its stem.
        output_file = output_dir / f"{input_path.stem}.spacy"
        doc_bin.to_disk(output_file)

    print(f"Total sentences with spans: {total_sentences}")

if __name__ == "__main__":
    typer.run(convert)
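
# Usage sketch, assuming typer exposes the list[Path] parameter as variadic
# positional arguments (the file and directory names are made-up examples):
#
#   python convert.py en assets/annotations.jsonl corpus
#
# The resulting corpus/annotations.spacy can then be loaded back for inspection:
#
#   import spacy
#   from spacy.tokens import DocBin
#   docs = DocBin().from_disk("corpus/annotations.spacy").get_docs(spacy.blank("en").vocab)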