import os
import shutil
import zipfile

import gradio as gr
import pandas as pd

from evaluation import evaluate_model  # Import your evaluation function
# Define the path where you want to save the leaderboard data
leaderboard_file = "leaderboard.csv"

# Check if the leaderboard file exists, otherwise create an empty DataFrame
if os.path.exists(leaderboard_file):
    leaderboard = pd.read_csv(leaderboard_file)
else:
    leaderboard = pd.DataFrame(columns=["Model Name", "Score"])
print("file ok")
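# For reference, an assumed example of the leaderboard.csv layout implied by the
# columns above (the example row is illustrative only, not real data):
#
#     Model Name,Score
#     example-model,0.5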
def extract_model(model_file, extract_dir="models"):
    """
    Extracts the uploaded model file if it's a zip archive,
    otherwise copies it into the models directory.
    """
    os.makedirs(extract_dir, exist_ok=True)  # Ensure the directory exists
    # model_file is the uploaded file object; .name holds its temporary path
    model_path = os.path.join(extract_dir, os.path.basename(model_file.name))

    if model_file.name.endswith(".zip"):
        with zipfile.ZipFile(model_file.name, "r") as zip_ref:
            zip_ref.extractall(extract_dir)
        print(f"Extracted model to: {extract_dir}")
        return extract_dir
    else:
        # Copy the file into the models directory if it's not a zip
        shutil.copy(model_file.name, model_path)
        return model_path
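# Rough usage sketch for extract_model (illustrative only, assuming a Gradio
# upload object whose .name points at the temporary file): a ".zip" upload is
# unpacked into "models/" and that directory is returned; any other supported
# file (.pt, .bin, .h5) is copied to "models/<filename>" and that path is
# returned.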
# Submit the evaluation and update the leaderboard
def submit_evaluation(model_name, model_file):
    """
    Handles the model submission, evaluates it, and updates the leaderboard.
    """
    global leaderboard
    try:
        # Extract or save the uploaded model
        model_path = extract_model(model_file)
        print(f"Model saved or extracted to: {model_path}")
        print("Starting evaluation...")

        # Example test data (replace with your actual test dataset)
        test_data = [
            ("negative", 0),  # (text, label)
            ("positive", 1),
        ]

        # Evaluate the model using your custom evaluation code
        score = evaluate_model(model_path, test_data)
        print(f"Model evaluated successfully. Score: {score}")

        # Update the leaderboard (DataFrame.append was removed in pandas 2.x,
        # so build a one-row DataFrame and concatenate instead)
        new_entry = pd.DataFrame([{"Model Name": model_name, "Score": score}])
        leaderboard = pd.concat([leaderboard, new_entry], ignore_index=True)
        leaderboard_sorted = leaderboard.sort_values(by="Score", ascending=False)

        # Save the updated leaderboard
        leaderboard_sorted.to_csv(leaderboard_file, index=False)
        print("Leaderboard updated.")

        # Return the sorted leaderboard
        return leaderboard_sorted, "Model submitted successfully!"
    except Exception as e:
        print(f"Error during evaluation: {str(e)}")
        return leaderboard, f"Error: {str(e)}"
# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Model Evaluation Leaderboard")

    # User inputs for model name and file upload
    with gr.Row():
        model_name_input = gr.Textbox(label="Model Name", placeholder="Enter the model name")
        model_file_input = gr.File(
            label="Upload Model (Supported Formats: .pt, .bin, .h5, .zip)",
            file_types=[".pt", ".bin", ".h5", ".zip"]
        )

    submit_button = gr.Button("Submit Evaluation")

    # Leaderboard display and status message
    leaderboard_display = gr.Dataframe(leaderboard, label="Leaderboard")
    status_message = gr.Textbox(label="Status", interactive=False)

    # Link the submit button to the evaluation function
    submit_button.click(
        submit_evaluation,
        inputs=[model_name_input, model_file_input],
        outputs=[leaderboard_display, status_message]
    )

# Launch the Gradio app
demo.launch(share=True)
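# ---------------------------------------------------------------------------
# The evaluation module imported above is not part of this file. As a rough,
# hypothetical sketch only (assumed names and behaviour, not the real
# implementation), evaluation.py could look something like this:
#
#     # evaluation.py (hypothetical placeholder)
#     def evaluate_model(model_path, test_data):
#         """Return a score in [0, 1] for the model stored at model_path,
#         given test_data as a list of (text, label) pairs."""
#         # A real version would load the checkpoint from model_path and run
#         # inference; this stub just scores a constant "positive" prediction.
#         if not test_data:
#             return 0.0
#         predictions = [1 for _text, _label in test_data]
#         correct = sum(int(pred == label)
#                       for pred, (_text, label) in zip(predictions, test_data))
#         return correct / len(test_data)
# ---------------------------------------------------------------------------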