# spacy-project/scripts/convert_sents.py
import warnings
from pathlib import Path

import spacy
import srsly
import typer
from spacy.tokens import DocBin
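
# Expected input: JSONL with one record per line. The field names ("text",
# "spans", "start", "end", "label") match what the code reads below; the
# concrete values here are only an illustration:
#
#   {"text": "Apple was founded in Cupertino.",
#    "spans": [{"start": 0, "end": 5, "label": "ORG"},
#              {"start": 21, "end": 30, "label": "GPE"}]}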


def convert(lang: str, input_paths: list[Path], output_dir: Path, spans_key: str = "sc"):
    """Split annotated JSONL texts into sentences and save them as .spacy DocBin files."""
    nlp = spacy.blank(lang)
    nlp.add_pipe("sentencizer")

    # Ensure the output directory exists
    output_dir.mkdir(parents=True, exist_ok=True)

    total_sentences = 0
    # Process each input file into its own DocBin
    for input_path in input_paths:
        print(f"Processing file: {input_path}")
        doc_bin = DocBin()
        for annotation in srsly.read_jsonl(input_path):
            text = annotation["text"]
            doc = nlp(text)  # Run the sentencizer to split the text into sentences
            for sent in doc.sents:
                # Create a fresh Doc for the sentence
                sent_doc = nlp.make_doc(sent.text)
                spans = []
                for item in annotation["spans"]:
                    # Shift span offsets from document to sentence coordinates
                    start = item["start"] - sent.start_char
                    end = item["end"] - sent.start_char
                    label = item["label"]
                    # Only keep spans that fall entirely within this sentence
                    if start >= 0 and end <= len(sent.text):
                        span = sent_doc.char_span(start, end, label=label, alignment_mode="contract")
                        if span is None:
                            msg = (
                                f"Skipping entity [{start}, {end}, {label}] because the "
                                f"character span '{sent.text[start:end]}' does not align "
                                f"with token boundaries."
                            )
                            warnings.warn(msg)
                        else:
                            spans.append(span)
                # Add the sentence to the DocBin only if it contains spans
                if spans:
                    sent_doc.spans[spans_key] = spans
                    doc_bin.add(sent_doc)
                    total_sentences += 1
        # Write this file's sentences to the output directory
        output_file = output_dir / f"{input_path.stem}.spacy"
        doc_bin.to_disk(output_file)

    print(f"Total sentences with spans: {total_sentences}")


if __name__ == "__main__":
    typer.run(convert)
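
# Example invocation (a sketch; the concrete file and directory names are
# assumptions, not part of this project). Typer treats `input_paths` as a
# variadic positional argument, so several JSONL files can be passed at once:
#
#   python scripts/convert_sents.py en assets/train.jsonl assets/dev.jsonl corpus/ --spans-key sc
#
# To inspect a converted file, the DocBin can be loaded back with a matching
# blank pipeline:
#
#   import spacy
#   from spacy.tokens import DocBin
#   docs = list(DocBin().from_disk("corpus/train.spacy").get_docs(spacy.blank("en").vocab))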