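"""LLM-Adaptive Python Coding Challenge -- a Gradio app for Hugging Face Spaces.

Serves Python coding challenges, evaluates submissions with the Gemini API,
and adapts the difficulty level to the user's demonstrated skill.
"""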

import os
import random
import json
import gradio as gr
import google.generativeai as genai

# Configure the Gemini API - for Hugging Face deployment the key comes from a Space secret
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)
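
# Gemini often wraps JSON answers in a ```json ... ``` markdown fence even when
# asked for bare JSON, so a direct json.loads on the raw response text can fail.
# This small defensive helper strips any fence before parsing.
def extract_json(text):
    """Strip markdown code fences from a model response and parse it as JSON."""
    cleaned = text.strip()
    if cleaned.startswith("```"):
        # Drop the opening fence line (e.g. ```json) and the closing fence
        cleaned = cleaned.split("\n", 1)[1] if "\n" in cleaned else ""
        cleaned = cleaned.rsplit("```", 1)[0]
    return json.loads(cleaned.strip())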

# Challenge database with different difficulty levels
challenges = {
    "easy": [
        {
            "id": "e1",
            "title": "Sum of Two Numbers",
            "description": "Write a function that takes two numbers as input and returns their sum.",
            "example_input": "5, 3",
            "example_output": "8",
            "test_cases": [
                {"input": "5, 3", "output": "8"},
                {"input": "10, -5", "output": "5"},
                {"input": "0, 0", "output": "0"}
            ]
        },
        {
            "id": "e2",
            "title": "Even or Odd",
            "description": "Write a function that determines if a number is even or odd.",
            "example_input": "4",
            "example_output": "Even",
            "test_cases": [
                {"input": "4", "output": "Even"},
                {"input": "7", "output": "Odd"},
                {"input": "0", "output": "Even"}
            ]
        },
        {
            "id": "e3",
            "title": "String Reversal",
            "description": "Write a function that reverses a string.",
            "example_input": "hello",
            "example_output": "olleh",
            "test_cases": [
                {"input": "hello", "output": "olleh"},
                {"input": "python", "output": "nohtyp"},
                {"input": "a", "output": "a"}
            ]
        }
    ],
    "medium": [
        {
            "id": "m1",
            "title": "Palindrome Check",
            "description": "Write a function that checks if a string is a palindrome (reads the same backward as forward).",
            "example_input": "racecar",
            "example_output": "True",
            "test_cases": [
                {"input": "racecar", "output": "True"},
                {"input": "hello", "output": "False"},
                {"input": "A man a plan a canal Panama", "output": "True"}
            ]
        },
        {
            "id": "m2",
            "title": "List Comprehension",
            "description": "Write a function that returns a list of all even numbers from 1 to n using list comprehension.",
            "example_input": "10",
            "example_output": "[2, 4, 6, 8, 10]",
            "test_cases": [
                {"input": "10", "output": "[2, 4, 6, 8, 10]"},
                {"input": "5", "output": "[2, 4]"},
                {"input": "1", "output": "[]"}
            ]
        },
        {
            "id": "m3",
            "title": "Fibonacci Sequence",
            "description": "Write a function that returns the nth number in the Fibonacci sequence.",
            "example_input": "6",
            "example_output": "8",
            "test_cases": [
                {"input": "6", "output": "8"},
                {"input": "1", "output": "1"},
                {"input": "10", "output": "55"}
            ]
        }
    ],
    "hard": [
        {
            "id": "h1",
            "title": "Anagram Check",
            "description": "Write a function that determines if two strings are anagrams of each other.",
            "example_input": "listen, silent",
            "example_output": "True",
            "test_cases": [
                {"input": "listen, silent", "output": "True"},
                {"input": "hello, world", "output": "False"},
                {"input": "Astronomer, Moon starer", "output": "True"}
            ]
        },
        {
            "id": "h2",
            "title": "Prime Number Generator",
            "description": "Write a function that generates all prime numbers up to n using the Sieve of Eratosthenes algorithm.",
            "example_input": "30",
            "example_output": "[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]",
            "test_cases": [
                {"input": "30", "output": "[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]"},
                {"input": "10", "output": "[2, 3, 5, 7]"},
                {"input": "2", "output": "[2]"}
            ]
        },
        {
            "id": "h3",
            "title": "Recursive Binary Search",
            "description": "Write a recursive function that performs binary search on a sorted list.",
            "example_input": "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 7",
            "example_output": "6",
            "test_cases": [
                {"input": "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 7", "output": "6"},
                {"input": "[1, 2, 3, 4, 5], 1", "output": "0"},
                {"input": "[1, 3, 5, 7, 9], 4", "output": "-1"}
            ]
        }
    ]
}

# User session data
user_data = {
    "current_challenge": None,
    "difficulty_level": "easy",
    "correct_answers": 0,
    "total_attempts": 0,
    "solution_history": []  # Store previous solutions for LLM analysis
}

def get_challenge():
    """Get a random challenge at the current difficulty level"""
    level = user_data["difficulty_level"]
    available_challenges = challenges[level]
    challenge = random.choice(available_challenges)
    user_data["current_challenge"] = challenge
    return challenge

def evaluate_code_with_gemini(user_code, challenge):
    """Evaluate the user's code using the Gemini API"""
    try:
        # Check if the API key is available
        if not GEMINI_API_KEY:
            return {
                "test_results": [],
                "overall_assessment": "API Key Missing",
                "feedback": "The Gemini API key is not configured. Please check the Hugging Face Space settings.",
                "is_correct": False,
                "code_quality_score": 5,
                "algorithm_efficiency_score": 5
            }

        # Construct the prompt for Gemini
        prompt = f"""
Evaluate the following Python code solution for the challenge:

Challenge: {challenge['title']}
Description: {challenge['description']}

Test Cases:
{json.dumps(challenge['test_cases'], indent=2)}

User's Solution:
```python
{user_code}
```

Evaluate whether the solution correctly solves the challenge based on the test cases.
Consider:
1. Correctness (does it produce the expected output for all test cases?)
2. Efficiency (is the solution reasonably efficient?)
3. Code quality (is the code well-structured and readable?)

For each test case, indicate whether the solution passes or fails, with a brief explanation.
Finally, provide an overall assessment: is the solution correct (does it pass all test cases)?

Return your response in the following JSON format:
{{
  "test_results": [
    {{"test_case": "input", "expected": "output", "result": "pass/fail", "explanation": "brief explanation"}}
  ],
  "overall_assessment": "pass/fail",
  "feedback": "brief feedback for the user",
  "is_correct": true/false,
  "code_quality_score": 1-10,
  "algorithm_efficiency_score": 1-10
}}
Ensure your response is valid JSON.
"""

        # Generate content with Gemini and parse the response
        model = genai.GenerativeModel('gemini-1.5-pro')
        response = model.generate_content(prompt)
        try:
            return extract_json(response.text)
        except json.JSONDecodeError:
            # If Gemini doesn't return valid JSON, provide a fallback response
            return {
                "test_results": [],
                "overall_assessment": "Unable to evaluate",
                "feedback": "There was an issue evaluating your code. Please try again.",
                "is_correct": False,
                "code_quality_score": 5,
                "algorithm_efficiency_score": 5
            }
    except Exception as e:
        return {
            "test_results": [],
            "overall_assessment": f"Error: {str(e)}",
            "feedback": "There was an error evaluating your code. Please check your syntax and try again.",
            "is_correct": False,
            "code_quality_score": 5,
            "algorithm_efficiency_score": 5
        }

def adjust_difficulty_with_llm(user_code, evaluation, challenge):
    """Use the LLM to adjust difficulty based on code quality and approach"""
    # Fall back to the success-rate heuristic if the API key is unavailable
    if not GEMINI_API_KEY:
        return fallback_difficulty_adjustment()

    # Store the solution in history
    solution_entry = {
        "challenge_id": challenge["id"],
        "difficulty": user_data["difficulty_level"],
        "code": user_code,
        "is_correct": evaluation.get("is_correct", False),
        "code_quality_score": evaluation.get("code_quality_score", 5),
        "algorithm_efficiency_score": evaluation.get("algorithm_efficiency_score", 5)
    }
    user_data["solution_history"].append(solution_entry)

    # Format the prompt for Gemini
    success_rate = user_data["correct_answers"] / user_data["total_attempts"] if user_data["total_attempts"] > 0 else 0
    prompt = f"""
Analyze the user's solution and programming skill level to recommend an appropriate difficulty level.

Current Difficulty Level: {user_data["difficulty_level"]}
Challenge: {challenge["title"]}
Description: {challenge["description"]}

User's Solution:
```python
{user_code}
```

Evaluation Summary:
- Correctness: {"Correct" if evaluation.get("is_correct", False) else "Incorrect"}
- Code Quality Score: {evaluation.get("code_quality_score", 5)}/10
- Algorithm Efficiency Score: {evaluation.get("algorithm_efficiency_score", 5)}/10

User's History:
- Total Attempts: {user_data["total_attempts"]}
- Correct Solutions: {user_data["correct_answers"]}
- Success Rate: {success_rate:.2%}

Based on this information, recommend the next difficulty level (easy, medium, or hard).
Consider the following factors:
1. Whether the solution is correct
2. The quality and efficiency of the code
3. The user's historical performance
4. The current difficulty level

Provide your recommendation in the following JSON format:
{{
  "recommended_difficulty": "easy/medium/hard",
  "explanation": "brief explanation for the recommendation",
  "skill_assessment": "brief assessment of the user's skill level"
}}
Ensure your response is valid JSON.
"""
    try:
        # Generate content with Gemini and parse the response
        model = genai.GenerativeModel('gemini-1.5-pro')
        response = model.generate_content(prompt)
        try:
            result = extract_json(response.text)
            new_difficulty = result.get("recommended_difficulty", user_data["difficulty_level"])
            # Only accept a valid difficulty level; otherwise keep the current one
            if new_difficulty in ("easy", "medium", "hard"):
                user_data["difficulty_level"] = new_difficulty
            return result
        except json.JSONDecodeError:
            # If Gemini doesn't return valid JSON, use the fallback heuristic
            return fallback_difficulty_adjustment()
    except Exception as e:
        return {
            "recommended_difficulty": user_data["difficulty_level"],
            "explanation": f"Error in difficulty adjustment: {str(e)}. Maintaining current difficulty.",
            "skill_assessment": "Unable to assess skill level due to an error."
        }

def fallback_difficulty_adjustment():
    """Fallback: adjust difficulty from the success rate alone when the LLM is unavailable.

    The attempt counters are updated once in handle_submission(), so they are only read here.
    """
    success_rate = user_data["correct_answers"] / user_data["total_attempts"] if user_data["total_attempts"] > 0 else 0
    levels = ["easy", "medium", "hard"]
    old_level = user_data["difficulty_level"]

    # Move up one level on a high success rate, down one level on a low one
    if success_rate > 0.7 and old_level != "hard":
        user_data["difficulty_level"] = levels[levels.index(old_level) + 1]
    elif success_rate < 0.3 and old_level != "easy":
        user_data["difficulty_level"] = levels[levels.index(old_level) - 1]

    new_level = user_data["difficulty_level"]
    if new_level == old_level:
        direction = "maintaining"
    elif levels.index(new_level) > levels.index(old_level):
        direction = "increasing"
    else:
        direction = "decreasing"

    return {
        "recommended_difficulty": new_level,
        "explanation": f"Based on your success rate of {success_rate:.2%}, {direction} difficulty.",
        "skill_assessment": "Skill assessment based on success rate only."
    }

def handle_submission(user_code):
    """Handle a user code submission"""
    if not user_data["current_challenge"]:
        return "Please get a challenge first."

    challenge = user_data["current_challenge"]

    # Evaluate the code
    evaluation = evaluate_code_with_gemini(user_code, challenge)

    # Track correctness
    is_correct = evaluation.get("is_correct", False)
    if is_correct:
        user_data["correct_answers"] += 1
    user_data["total_attempts"] += 1

    # Adjust difficulty using the LLM
    difficulty_adjustment = adjust_difficulty_with_llm(user_code, evaluation, challenge)

    # Format the response
    response = "## Evaluation Results\n\n"
    response += f"**Challenge:** {challenge['title']}\n\n"
    if evaluation.get("test_results"):
        response += "**Test Results:**\n"
        for test in evaluation["test_results"]:
            result = test.get("result", "N/A")
            input_val = test.get("test_case", "N/A")
            expected = test.get("expected", "N/A")
            explanation = test.get("explanation", "N/A")
            response += f"- Input: `{input_val}`, Expected: `{expected}`, Result: **{result}**\n"
            response += f"  {explanation}\n\n"
    response += f"**Overall Assessment:** {evaluation.get('overall_assessment', 'N/A')}\n\n"
    response += f"**Code Quality:** {evaluation.get('code_quality_score', 'N/A')}/10\n"
    response += f"**Algorithm Efficiency:** {evaluation.get('algorithm_efficiency_score', 'N/A')}/10\n\n"
    response += f"**Feedback:** {evaluation.get('feedback', 'N/A')}\n\n"
    response += "**Difficulty Adjustment:**\n"
    response += f"- New Difficulty: {difficulty_adjustment.get('recommended_difficulty', user_data['difficulty_level'])}\n"
    response += f"- Reason: {difficulty_adjustment.get('explanation', 'N/A')}\n"
    response += f"- Skill Assessment: {difficulty_adjustment.get('skill_assessment', 'N/A')}\n"
    return response

def display_challenge():
    """Get and display a challenge"""
    challenge = get_challenge()
    response = f"## {challenge['title']}\n\n"
    response += f"**Difficulty:** {user_data['difficulty_level']}\n\n"
    response += f"**Description:** {challenge['description']}\n\n"
    response += f"**Example Input:** {challenge['example_input']}\n"
    response += f"**Example Output:** {challenge['example_output']}\n\n"
    response += "Write your solution in Python and submit it when ready."
    return response

def reset_session():
    """Reset the user session"""
    user_data["current_challenge"] = None
    user_data["difficulty_level"] = "easy"
    user_data["correct_answers"] = 0
    user_data["total_attempts"] = 0
    user_data["solution_history"] = []
    return "Session reset. Your progress has been cleared and difficulty has been reset to easy."

def check_api_key():
    """Return the header markdown, depending on whether the API key is configured"""
    # Return plain markdown text; Gradio renders it into the header component
    if not GEMINI_API_KEY:
        return """## ⚠️ API Key Not Found

The Gemini API key is not configured. Please add it in the Space secrets with the name `GEMINI_API_KEY`.

### How to add a secret:
1. Go to the Settings tab of your Space
2. Navigate to the "Repository secrets" section
3. Add a new secret with the name `GEMINI_API_KEY` and your API key as the value
4. Restart the Space
"""
    return "# LLM-Adaptive Python Coding Challenge\nThis application provides Python coding challenges that adapt to your skill level using AI."

# Set up the Gradio interface
with gr.Blocks(title="LLM-Adaptive Python Coding Challenge", theme=gr.themes.Base()) as app:
    header = gr.Markdown("Checking API configuration...")
    with gr.Row():
        with gr.Column(scale=2):
            challenge_display = gr.Markdown("Click 'Get Challenge' to start")
            with gr.Row():
                get_challenge_btn = gr.Button("Get Challenge")
                reset_btn = gr.Button("Reset Progress")
            code_input = gr.Code(language="python", lines=15, label="Your Solution")
            submit_btn = gr.Button("Submit Solution")
        with gr.Column(scale=3):
            result_display = gr.Markdown("Results will appear here")
            gr.Markdown("### How it works")
            gr.Markdown("1. Get a challenge by clicking 'Get Challenge'")
            gr.Markdown("2. Write your solution in Python")
            gr.Markdown("3. Submit your solution for evaluation")
            gr.Markdown("4. The AI will analyze your code and adjust the difficulty based on your coding style, efficiency, and correctness")

    # Check the API key on load
    app.load(check_api_key, [], [header])
    get_challenge_btn.click(display_challenge, inputs=[], outputs=challenge_display)
    reset_btn.click(reset_session, inputs=[], outputs=result_display)
    submit_btn.click(handle_submission, inputs=[code_input], outputs=result_display)

# Launch the app
if __name__ == "__main__":
    app.launch()
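
# To run locally (assuming the gradio and google-generativeai packages are
# installed, and that this file uses the conventional Space entry-point name app.py):
#   GEMINI_API_KEY=<your-key> python app.py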