from typing import List, Literal, Optional, Dict, Any
import asyncio
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urljoin, urlparse
from pydantic import ConfigDict, Field, HttpUrl
from atomic_agents.agents.base_agent import BaseIOSchema
from atomic_agents.lib.base.base_tool import BaseTool, BaseToolConfig
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from BasicTools.BehaviorQuantModel.UserEvaluation import UserEvaluator
from BasicTools.BehaviorQuantModel.DataModels import UserPerformanceMetrics
class GetUserEvaluationInputSchema(BaseIOSchema):
    """Input schema for the GetUserEvaluation tool.

    Attributes:
        evaluation_user_id: ID of the user whose evaluation is requested.
    """
    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "evaluation_user_id": "user_001"
            }
        }
    )
    evaluation_user_id: str = Field(
        ...,
        description="The ID of the user to get evaluation for.",
        # Pydantic's `examples` parameter expects a list of example values;
        # the original passed a bare string, which produces a malformed
        # `examples` entry in the generated JSON schema.
        examples=["user_001"],
    )
class GetUserEvaluationOutputSchema(BaseIOSchema):
    """Output schema for the GetUserEvaluation tool.

    Attributes:
        evaluation: Full performance metrics computed for the requested user.
    """
    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                # Kept consistent with the UserPerformanceMetrics shape used
                # in the field-level example below (the original showed an
                # unrelated {"score", "comments"} shape).
                "evaluation": {
                    "user_id": "user_12345",
                    "overall_score": 83.4,
                    "performance_rank": 5
                }
            }
        }
    )
    evaluation: UserPerformanceMetrics = Field(
        ...,
        description="The evaluation for the user.",
        # Pydantic's `examples` parameter expects a list of example values;
        # the original passed a bare dict, which produces a malformed
        # `examples` entry in the generated JSON schema.
        examples=[
            {
                "user_id": "user_12345",
                "total_questions": 20,
                "solved_questions": 15,
                "solve_rate": 0.75,
                "average_resolution_time": "0:25:30",
                "total_attempts": 27,
                "efficiency_score": 82.5,
                "consistency_score": 76.0,
                "writeup_quality_score": 88.0,
                "technical_skill_score": 91.0,
                "progress_velocity_score": 79.5,
                "overall_score": 83.4,
                "performance_rank": 5,
                "solved_question_ids": [
                    "q001",
                    "q002",
                    "q003",
                    "q005",
                    "q006",
                    "q007",
                    "q009",
                    "q010",
                ],
            }
        ],
    )
class GetUserEvaluationTool(BaseTool):
    """
    Tool to get user evaluation. It uses the UserEvaluator class to calculate
    the evaluation for a specific user and returns the resulting performance
    metrics.
    """
    name = "GetUserEvaluation"
    description = "Get the evaluation for a specific user."
    input_schema = GetUserEvaluationInputSchema
    output_schema = GetUserEvaluationOutputSchema
    # Lazily created on first run(); see the note inside run().
    evaluator: UserEvaluator

    def run(self, input: GetUserEvaluationInputSchema) -> GetUserEvaluationOutputSchema:
        """Compute and return the evaluation for the requested user.

        Args:
            input: Schema carrying the ID of the user to evaluate.

        Returns:
            GetUserEvaluationOutputSchema wrapping the computed
            UserPerformanceMetrics.
        """
        # The original re-created a UserEvaluator on every call, which
        # discarded any state the evaluator holds and made the class-level
        # `evaluator` annotation misleading. Create it once and reuse it.
        # NOTE(review): assumes UserEvaluator is safe to reuse across calls —
        # confirm it does not cache per-user data that must be refreshed.
        if getattr(self, "evaluator", None) is None:
            self.evaluator = UserEvaluator()
        evaluation = self.evaluator.calculate_user_metrics(input.evaluation_user_id)
        return GetUserEvaluationOutputSchema(evaluation=evaluation)
