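"""Generate README.md model cards for the trained spaCy models.

For each trained pipeline (sm, md, lg, trf) this script reads the evaluation
scores from metrics.json and the pipeline metadata from model-best/meta.json,
renders them as Markdown tables, and writes a README.md into the model's
training directory.
"""
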
import os
import json
import pandas as pd
from tabulate import tabulate
import typer


def create_readme_for_model(model_dir: str, project_url: str):
    # Paths to the metrics and meta files
    metrics_file = os.path.join(model_dir, 'metrics.json')
    meta_file = os.path.join(model_dir, 'model-best', 'meta.json')

    # DataFrame for the model's overall performance metrics
    overall_df = pd.DataFrame(columns=['Metric', 'Value'])
    # DataFrame for the model's per-label performance metrics
    per_label_df = pd.DataFrame(columns=['Label', 'Precision', 'Recall', 'F-Score'])
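    # Expected shape of metrics.json for a span-categorizer pipeline (field
    # names match what this script reads; values shown are illustrative only):
    #   {
    #     "spans_sc_p": 0.85, "spans_sc_r": 0.80, "spans_sc_f": 0.82,
    #     "spans_sc_per_type": {"<label>": {"p": ..., "r": ..., "f": ...}, ...}
    #   }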
    # Read and add metrics data
    if os.path.exists(metrics_file):
        with open(metrics_file, 'r') as file:
            metrics = json.load(file)

        # Overall span-categorizer scores. DataFrame.append was removed in
        # pandas 2.0, so build the rows first and construct the frame once.
        overall_df = pd.DataFrame(
            [
                {'Metric': 'Precision', 'Value': round(metrics['spans_sc_p'] * 100, 1)},
                {'Metric': 'Recall', 'Value': round(metrics['spans_sc_r'] * 100, 1)},
                {'Metric': 'F-Score', 'Value': round(metrics['spans_sc_f'] * 100, 1)},
            ],
            columns=['Metric', 'Value'],
        )

        # Extract and add per-type metrics
        per_label_df = pd.DataFrame(
            [
                {
                    'Label': label,
                    'Precision': round(scores['p'] * 100, 1),
                    'Recall': round(scores['r'] * 100, 1),
                    'F-Score': round(scores['f'] * 100, 1),
                }
                for label, scores in metrics.get('spans_sc_per_type', {}).items()
            ],
            columns=['Label', 'Precision', 'Recall', 'F-Score'],
        )

    # Sort the per_label_df by Label
    per_label_df.sort_values(by='Label', inplace=True)
    # Convert the DataFrames to Markdown tables
    overall_markdown = tabulate(overall_df, headers='keys', tablefmt='pipe', showindex=False)
    per_label_markdown = tabulate(per_label_df, headers='keys', tablefmt='pipe', showindex=False)
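    # meta.json is the metadata spaCy writes alongside a trained pipeline
    # (typically keys such as "lang", "name", "version", "description");
    # every key/value pair is listed verbatim in the README below.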
    # Read meta.json file
    meta_info = ""
    if os.path.exists(meta_file):
        with open(meta_file, 'r') as file:
            meta_data = json.load(file)
        for key, value in meta_data.items():
            meta_info += f"- **{key}**: {value}\n"
    # README content
    readme_content = f"""
# Placing the Holocaust spaCy Model - {os.path.basename(model_dir).capitalize()}

This is a spaCy model trained as part of the placingholocaust spaCy project. Training and evaluation code, along with the dataset, can be found at the following URL: [Placingholocaust SpaCy Project]({project_url})

## Model Performance

{overall_markdown}

## Performance per Label

{per_label_markdown}

## Meta Information

{meta_info}
"""
    # Write the README content to a file
    readme_file = os.path.join(model_dir, 'README.md')
    with open(readme_file, 'w') as file:
        file.write(readme_content)

    print(f"README created in {model_dir}")


def create_all_readmes(project_url: str):
    # Directories for each model type
    model_dirs = ['training/sm', 'training/md', 'training/lg', 'training/trf']
    for model_dir in model_dirs:
        create_readme_for_model(model_dir, project_url)


if __name__ == "__main__":
    project_url = "https://huggingface.co/datasets/placingholocaust/spacy-project"
    typer.run(lambda: create_all_readmes(project_url))
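# Usage: run from the project root so the relative 'training/...' paths
# resolve, e.g. `python scripts/create_readmes.py` (the path shown is a
# placeholder; adjust it to wherever this file lives in the repo).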