# spacy-project/scripts/build-table.py
import os
import json
import pandas as pd
from tabulate import tabulate
import typer


def generate_detailed_markdown_chart():
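    """Collect spancat metrics from each trained spaCy pipeline and write
    them to a Markdown comparison file, model_comparison.md."""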
    # Directories for each model type, in the desired order
    model_dirs = [
        ('small', 'training/sm'),
        ('medium', 'training/md'),
        ('large', 'training/lg'),
        ('transformer', 'training/trf'),
    ]

    # Rows for the overall and per-type tables, collected as plain lists
    # because DataFrame.append was removed in pandas 2.0
    overall_rows = []
    per_type_rows = []

    for model_name, dir_path in model_dirs:
        metrics_file = os.path.join(dir_path, 'metrics.json')
        # Check if the file exists
        if os.path.exists(metrics_file):
            with open(metrics_file, 'r') as file:
                metrics = json.load(file)
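            # spaCy reports span-categorization scores as spans_<spans_key>_p/r/f;
            # the "sc" keys below match spancat's default spans_key.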
            # Extract overall metrics
            overall_rows.append({
                'Model': model_name.capitalize(),
                'Precision': round(metrics['spans_sc_p'] * 100, 1),
                'Recall': round(metrics['spans_sc_r'] * 100, 1),
                'F-Score': round(metrics['spans_sc_f'] * 100, 1),
            })
            # Extract per-type metrics
            for label, scores in metrics.get('spans_sc_per_type', {}).items():
                per_type_rows.append({
                    'Model': model_name.capitalize(),
                    'Label': label,
                    'Precision': round(scores['p'] * 100, 1),
                    'Recall': round(scores['r'] * 100, 1),
                    'F-Score': round(scores['f'] * 100, 1),
                })

    # Build the DataFrames from the collected rows
    overall_df = pd.DataFrame(overall_rows, columns=['Model', 'Precision', 'Recall', 'F-Score'])
    per_type_df = pd.DataFrame(per_type_rows, columns=['Model', 'Label', 'Precision', 'Recall', 'F-Score'])

    # Define the display order for models, then sort first by Label, then by Model
    model_order = ['Small', 'Medium', 'Large', 'Transformer']
    per_type_df['Model'] = pd.Categorical(per_type_df['Model'], categories=model_order, ordered=True)
    per_type_df.sort_values(by=['Label', 'Model'], inplace=True)

    # Convert the DataFrames to Markdown
    overall_markdown = tabulate(overall_df, headers='keys', tablefmt='pipe', showindex=False)
    per_type_markdown = tabulate(per_type_df, headers='keys', tablefmt='pipe', showindex=False)
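    # Note: tablefmt='pipe' yields GitHub-flavored Markdown pipe tables.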

    # Write the Markdown tables to a file
    with open('model_comparison.md', 'w') as md_file:
        md_file.write("# Overall Model Performance\n")
        md_file.write(overall_markdown)
        md_file.write("\n\n# Performance per Label\n")
        md_file.write(per_type_markdown)

    print("Markdown chart created as 'model_comparison.md'")
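

# Typer exposes the function as a simple CLI; run with e.g.:
#   python scripts/build-table.py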
if __name__ == "__main__":
    typer.run(generate_detailed_markdown_chart)