from collections import Counter
from pathlib import Path

import srsly
import toml
import typer
from sklearn.model_selection import train_test_split

def count_labels(data):
    """Count how often each span label appears across a list of annotations."""
    label_counter = Counter()
    for annotation in data:
        labels = [span['label'] for span in annotation['spans']]
        label_counter.update(labels)
    return label_counter
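
# Illustrative input record (an assumption based on the fields this script reads:
# each JSONL line needs a "spans" list whose items carry a "label"; other keys
# such as "text", "start", and "end" are shown only for context):
#
#   {"text": "...", "spans": [{"start": 0, "end": 5, "label": "PERSON"}]}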

def split_data(input_file: Path, train_ratio: float = 0.7, dev_ratio: float = 0.15,
               train_output: Path = Path("assets/train.jsonl"),
               dev_output: Path = Path("assets/dev.jsonl"),
               test_output: Path = Path("assets/test.jsonl"),
               random_state: int = 1):
    """Split a JSONL annotation file into train/dev/test sets and report label counts."""
    # Read data from JSONL
    data = list(srsly.read_jsonl(input_file))

    # Split data
    test_ratio = 1 - train_ratio - dev_ratio
    train_data, temp_data = train_test_split(data, test_size=(dev_ratio + test_ratio), random_state=random_state)
    dev_data, test_data = train_test_split(temp_data, test_size=test_ratio / (dev_ratio + test_ratio), random_state=random_state)
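    # Worked example of the two-stage split above (a sketch of the arithmetic, not extra logic):
    # with the defaults train_ratio=0.7 and dev_ratio=0.15, test_ratio = 0.15, so the first
    # split holds out 30% of the records, and the second split's test_size is
    # 0.15 / 0.30 = 0.5, dividing that 30% evenly into 15% dev and 15% test.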

    # Count labels in each dataset
    train_labels = count_labels(train_data)
    dev_labels = count_labels(dev_data)
    test_labels = count_labels(test_data)

    # Write split data to JSONL files
    srsly.write_jsonl(train_output, train_data)
    srsly.write_jsonl(dev_output, dev_data)
    srsly.write_jsonl(test_output, test_data)

    # Combine label counts into a dictionary
    all_labels = sorted(set(train_labels.keys()) | set(dev_labels.keys()) | set(test_labels.keys()))
    annotations_data = {
        label: {'Train': train_labels.get(label, 0), 'Dev': dev_labels.get(label, 0), 'Test': test_labels.get(label, 0)}
        for label in all_labels
    }

    # Print the table
    print(f"{'Label':<20}{'Train':<10}{'Dev':<10}{'Test':<10}")
    for label in all_labels:
        print(f"{label:<20}{train_labels.get(label, 0):<10}{dev_labels.get(label, 0):<10}{test_labels.get(label, 0):<10}")

    # Save the table in annotations.toml
    with open("annotations.toml", "w") as toml_file:
        toml.dump(annotations_data, toml_file)
    print("Annotations data saved in annotations.toml")

if __name__ == "__main__":
    typer.run(split_data)
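
# Usage sketch (assumptions: this file is saved as split_data.py and the annotated
# JSONL lives at assets/annotations.jsonl; substitute your own paths and ratios):
#
#   python split_data.py assets/annotations.jsonl --train-ratio 0.8 --dev-ratio 0.1
#
# typer.run exposes each keyword argument as a --kebab-case option, so
# --train-output, --dev-output, --test-output, and --random-state can be
# overridden the same way.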