import gradio as gr
from transformers import pipeline
import torch

from monitoring import PerformanceMonitor, measure_time

# Model IDs
MODEL_OPTIONS = {
    "Base Model": "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    "Fine-tuned Model": "Joash2024/Math-SmolLM2-1.7B"
}

# Initialize performance monitor
monitor = PerformanceMonitor()


def format_prompt(problem):
    """Format the input problem according to the model's expected format."""
    return f"Given a mathematical function, find its derivative.\n\nFunction: {problem}\nThe derivative of this function is:"


@measure_time
def get_model_response(problem, model_id):
    """Get a response from the specified model."""
    try:
        # Initialize a pipeline for each request
        pipe = pipeline(
            "text-generation",
            model=model_id,
            torch_dtype=torch.float16,
            device_map="auto",
            model_kwargs={"low_cpu_mem_usage": True}
        )

        # Format prompt and generate response
        prompt = format_prompt(problem)
        response = pipe(
            prompt,
            max_new_tokens=50,       # Keep responses short
            do_sample=False,         # Deterministic (greedy) decoding
            num_return_sequences=1,
            return_full_text=False   # Only return the newly generated text
        )[0]["generated_text"]

        return response.strip()
    except Exception as e:
        return f"Error: {str(e)}"


def solve_problem(problem, problem_type, model_type):
    """Solve a math problem using the selected model."""
    if not problem:
        return "Please enter a problem", None

    # Record problem type
    monitor.record_problem_type(problem_type)

    # Add problem type context if provided
    if problem_type != "Custom":
        problem = f"{problem_type}: {problem}"

    # Get response from the selected model
    model_id = MODEL_OPTIONS[model_type]
    response, time_taken = get_model_response(problem, model_id)

    # Format the response with verification steps
    output = f"""Solution: {response}

Let's verify this step by step:
1. Starting with f(x) = {problem}
2. Applying differentiation rules
3. We get f'(x) = {response}"""

    # Record metrics
    monitor.record_response_time(model_type, time_taken)
    monitor.record_success(model_type, not response.startswith("Error"))

    # Get updated statistics
    stats = monitor.get_statistics()

    # Format statistics for display
    stats_display = f"""
### Performance Metrics

#### Response Times (seconds)
- {model_type}: {stats.get(f'{model_type}_avg_response_time', 0):.2f} avg

#### Success Rates
- {model_type}: {stats.get(f'{model_type}_success_rate', 0):.1f}%

#### Problem Types Used
"""
    for ptype, percentage in stats.get('problem_type_distribution', {}).items():
        stats_display += f"- {ptype}: {percentage:.1f}%\n"

    return output, stats_display


# Create Gradio interface
with gr.Blocks(title="Mathematics Problem Solver") as demo:
    gr.Markdown("# Mathematics Problem Solver")
    gr.Markdown("Test our models on mathematical problems")

    with gr.Row():
        with gr.Column():
            problem_type = gr.Dropdown(
                choices=["Addition", "Root Finding", "Derivative", "Custom"],
                value="Derivative",
                label="Problem Type"
            )
            model_type = gr.Dropdown(
                choices=list(MODEL_OPTIONS.keys()),
                value="Fine-tuned Model",
                label="Model to Use"
            )
            problem_input = gr.Textbox(
                label="Enter your math problem",
                placeholder="Example: x^2 + 3x"
            )
            solve_btn = gr.Button("Solve", variant="primary")

    with gr.Row():
        solution_output = gr.Textbox(label="Solution", lines=5)

    # Performance metrics display
    with gr.Row():
        metrics_display = gr.Markdown("### Performance Metrics\n*Solve a problem to see metrics*")

    # Example problems
    gr.Examples(
        examples=[
            ["x^2 + 3x", "Derivative", "Fine-tuned Model"],
            ["144", "Root Finding", "Fine-tuned Model"],
            ["235 + 567", "Addition", "Fine-tuned Model"],
            ["\\sin{\\left(x\\right)}", "Derivative", "Fine-tuned Model"],
            ["e^x", "Derivative", "Fine-tuned Model"],
            ["\\frac{1}{x}", "Derivative", "Fine-tuned Model"],
            ["x^3 + 2x", "Derivative", "Fine-tuned Model"],
            ["\\cos{\\left(x^2\\right)}", "Derivative", "Fine-tuned Model"]
        ],
        inputs=[problem_input, problem_type, model_type],
        outputs=[solution_output, metrics_display],
        fn=solve_problem,
        cache_examples=True,
    )

    # Connect the interface
    solve_btn.click(
        fn=solve_problem,
        inputs=[problem_input, problem_type, model_type],
        outputs=[solution_output, metrics_display]
    )


if __name__ == "__main__":
    demo.launch()
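
# Note on the `monitoring` import at the top of this file: it is a local helper
# module that is not included here. The sketch below is only an assumption about
# its interface, inferred from how it is used above (unpacking a (result, time)
# tuple from the decorated function and reading specific keys from
# get_statistics()); it is not the module's actual code:
#
#   import time
#
#   def measure_time(fn):
#       """Decorator assumed to make fn return (result, elapsed_seconds)."""
#       def wrapper(*args, **kwargs):
#           start = time.perf_counter()
#           result = fn(*args, **kwargs)
#           return result, time.perf_counter() - start
#       return wrapper
#
#   class PerformanceMonitor:
#       def record_problem_type(self, problem_type): ...
#       def record_response_time(self, model_type, seconds): ...
#       def record_success(self, model_type, success): ...
#       def get_statistics(self):
#           # Assumed to return a dict with keys such as
#           # "<model_type>_avg_response_time", "<model_type>_success_rate",
#           # and "problem_type_distribution".
#           ...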