import os
import json

import pandas as pd
from tabulate import tabulate
import typer


def create_readme_for_model(model_dir: str, project_url: str):
    # Paths to the metrics and meta files
    metrics_file = os.path.join(model_dir, 'metrics.json')
    meta_file = os.path.join(model_dir, 'model-best', 'meta.json')

    # Rows for the model's overall performance metrics
    overall_rows = []
    # Rows for the model's per-label performance metrics
    per_label_rows = []

    # Read and add metrics data
    if os.path.exists(metrics_file):
        with open(metrics_file, 'r') as file:
            metrics = json.load(file)
        overall_rows.append({'Metric': 'Precision', 'Value': round(metrics['spans_sc_p'] * 100, 1)})
        overall_rows.append({'Metric': 'Recall', 'Value': round(metrics['spans_sc_r'] * 100, 1)})
        overall_rows.append({'Metric': 'F-Score', 'Value': round(metrics['spans_sc_f'] * 100, 1)})

        # Extract and add per-type metrics
        for label, scores in metrics.get('spans_sc_per_type', {}).items():
            per_label_rows.append({
                'Label': label,
                'Precision': round(scores['p'] * 100, 1),
                'Recall': round(scores['r'] * 100, 1),
                'F-Score': round(scores['f'] * 100, 1),
            })

    # Build the DataFrames from the collected rows
    # (DataFrame.append was removed in pandas 2.0, so rows are gathered in lists first)
    overall_df = pd.DataFrame(overall_rows, columns=['Metric', 'Value'])
    per_label_df = pd.DataFrame(per_label_rows, columns=['Label', 'Precision', 'Recall', 'F-Score'])

    # Sort the per_label_df by Label
    per_label_df.sort_values(by='Label', inplace=True)

    # Convert the DataFrames to Markdown tables
    overall_markdown = tabulate(overall_df, headers='keys', tablefmt='pipe', showindex=False)
    per_label_markdown = tabulate(per_label_df, headers='keys', tablefmt='pipe', showindex=False)

    # Read meta.json file
    meta_info = ""
    if os.path.exists(meta_file):
        with open(meta_file, 'r') as file:
            meta_data = json.load(file)
        for key, value in meta_data.items():
            meta_info += f"- **{key}**: {value}\n"

    # README content
    readme_content = f"""
# Placing the Holocaust spaCy Model - {os.path.basename(model_dir).capitalize()}

This is a spaCy model trained as part of the placingholocaust spaCy project. Training and evaluation code, along with the dataset, can be found at the following URL: [Placingholocaust SpaCy Project]({project_url})

## Model Performance

{overall_markdown}

## Performance per Label

{per_label_markdown}

## Meta Information

{meta_info}
"""

    # Write the README content to a file
    readme_file = os.path.join(model_dir, 'README.md')
    with open(readme_file, 'w') as file:
        file.write(readme_content)

    print(f"README created in {model_dir}")


def create_all_readmes(
    project_url: str = "https://huggingface.co/datasets/placingholocaust/spacy-project",
):
    # Directories for each model type
    model_dirs = ['training/sm', 'training/md', 'training/lg', 'training/trf']
    for model_dir in model_dirs:
        create_readme_for_model(model_dir, project_url)


if __name__ == "__main__":
    # Pass the function directly to typer so --project-url is exposed as a CLI option;
    # typer cannot reliably build a CLI from an unannotated lambda.
    typer.run(create_all_readmes)