from typing import List, Literal, Optional, Dict, Any
import asyncio
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urljoin, urlparse
from pydantic import ConfigDict, Field, HttpUrl
from atomic_agents.agents.base_agent import BaseIOSchema
from atomic_agents.lib.base.base_tool import BaseTool, BaseToolConfig
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from BasicTools.BehaviorQuantModel.QuestionEvaluation import QuestionEvaluator
from BasicTools.BehaviorQuantModel.DataModels import QuestionDifficultyMetrics
class GetQuestionEvaluationInputSchema(BaseIOSchema):
    """Input schema for the GetQuestionEvaluation tool.

    Carries the single identifier of the question whose difficulty
    evaluation should be computed.
    """
    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "evaluation_question_id": "q001"
            }
        }
    )
    # ID of the question to evaluate (e.g. "q001").
    evaluation_question_id: str = Field(
        ...,
        description="The ID of the question to get evaluation for.",
        # `examples` must be a list of example values, not a bare string —
        # a bare string would be emitted as-is and mis-typed in JSON schema.
        examples=["q001"]
    )
class GetQuestionEvaluationOutputSchema(BaseIOSchema):
    """Output schema for the GetQuestionEvaluation tool.

    Wraps the computed difficulty metrics for the requested question.
    """
    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "evaluation": {
                    "question_id": "q001",
                    "pass_rate": 0.65,
                    "average_attempts": 2.3,
                    "average_resolution_time": "0:18:45",
                    "difficulty_level": "Medium",
                    "difficulty_score": 45.2
                }
            }
        }
    )
    # Difficulty metrics produced by QuestionEvaluator for the question.
    evaluation: QuestionDifficultyMetrics = Field(
        ...,
        description="The evaluation for the question.",
        # `examples` must be a list of example values; the original passed a
        # single dict, which JSON-schema consumers would misinterpret.
        examples=[
            {
                "question_id": "q001",
                "pass_rate": 0.65,
                "average_attempts": 2.3,
                "average_resolution_time": "0:18:45",
                "difficulty_level": "Medium",
                "difficulty_score": 45.2
            }
        ]
    )
class GetQuestionEvaluationTool(BaseTool):
    """
    Tool to get question evaluation. It uses the QuestionEvaluator class to calculate the evaluation for a specific question.
    Return the evaluation score based on the question's difficulty metrics.
    """
    name = "GetQuestionEvaluation"
    description = "Get the evaluation for a specific question."
    input_schema = GetQuestionEvaluationInputSchema
    output_schema = GetQuestionEvaluationOutputSchema
    # Lazily-created evaluator; may also be injected by the caller. The
    # original re-created a fresh QuestionEvaluator on every run() call,
    # discarding any pre-set instance and paying construction cost each time.
    evaluator: Optional[QuestionEvaluator] = None

    def run(self, input: GetQuestionEvaluationInputSchema) -> GetQuestionEvaluationOutputSchema:
        """Compute the difficulty evaluation for one question.

        Args:
            input: Schema holding ``evaluation_question_id``, the ID of the
                question to evaluate. (Parameter name shadows the builtin
                ``input`` but is kept for backward compatibility with
                keyword callers.)

        Returns:
            GetQuestionEvaluationOutputSchema wrapping the
            QuestionDifficultyMetrics computed by the evaluator.
        """
        # Create the evaluator only once, on first use.
        if self.evaluator is None:
            self.evaluator = QuestionEvaluator()
        evaluation = self.evaluator.calculate_question_difficulty(input.evaluation_question_id)
        return GetQuestionEvaluationOutputSchema(evaluation=evaluation)