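"""Convert JSONL span annotations into sentence-level spaCy DocBin files.

Each input record must provide a "text" field and a list of character-offset
"spans". The script splits every text into sentences, re-anchors the spans to
each sentence, keeps only sentences that contain at least one span, and writes
one .spacy file per input file.
"""
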
import warnings
from pathlib import Path

import spacy
import srsly
import typer
from spacy.tokens import DocBin


def convert(lang: str, input_paths: list[Path], output_dir: Path, spans_key: str = "sc"):
    nlp = spacy.blank(lang)
    nlp.add_pipe("sentencizer")
    # Ensure the output directory exists
    output_dir.mkdir(parents=True, exist_ok=True)
    total_sentences = 0
    # Process each input file
    for input_path in input_paths:
        print(f"Processing file: {input_path}")
        doc_bin = DocBin()
        for annotation in srsly.read_jsonl(input_path):
            text = annotation["text"]
            doc = nlp(text)  # Process the document to split it into sentences
            for sent in doc.sents:
                # Create a new Doc for the sentence
                sent_doc = nlp.make_doc(sent.text)
                spans = []
                for item in annotation["spans"]:
                    # Shift span offsets so they are relative to the sentence
                    start = item["start"] - sent.start_char
                    end = item["end"] - sent.start_char
                    label = item["label"]
                    # Only consider spans that fall within the sentence
                    if start >= 0 and end <= len(sent.text):
                        # "contract" snaps misaligned offsets inward to token boundaries
                        span = sent_doc.char_span(start, end, label=label, alignment_mode="contract")
                        if span is None:
                            msg = (
                                f"Skipping entity [{start}, {end}, {label}] because the "
                                f"character span '{sent.text[start:end]}' does not align "
                                f"with token boundaries."
                            )
                            warnings.warn(msg)
                        else:
                            spans.append(span)
                # Add the sentence to the DocBin only if it contains spans
                if spans:
                    sent_doc.spans[spans_key] = spans
                    doc_bin.add(sent_doc)
                    total_sentences += 1
        # Write the converted sentences to a .spacy file in the output directory
        output_file = output_dir / f"{input_path.stem}.spacy"
        doc_bin.to_disk(output_file)
    print(f"Total sentences with spans: {total_sentences}")


if __name__ == "__main__":
    typer.run(convert)
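
A minimal usage sketch, assuming the script is saved as convert.py and the input matches the JSONL shape the code reads (a "text" field plus character-offset "spans"); the file names, the "en" language code, and the DRUG label below are hypothetical placeholders:

# Example input line (annotations.jsonl, hypothetical):
# {"text": "Take two aspirin daily.", "spans": [{"start": 9, "end": 16, "label": "DRUG"}]}
#
# Typer turns the function signature into a CLI, so a run could look like:
# python convert.py en annotations.jsonl corpus --spans-key sc

To verify the output, the resulting .spacy files can be loaded back with a blank pipeline of the same language; a sketch under the same assumptions:

import spacy
from spacy.tokens import DocBin

nlp = spacy.blank("en")
doc_bin = DocBin().from_disk("corpus/annotations.spacy")  # hypothetical path
for doc in doc_bin.get_docs(nlp.vocab):
    print(doc.text, doc.spans["sc"])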