# src/agents/jira_agent.py
import asyncio
import concurrent.futures
import json
import logging
import traceback
from typing import Dict, Any, List

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import AIMessage, BaseMessage

from src.agents.base import BaseAgent
from src.tools.jira_connector import JiraConnector
from src.tools.rag_tool import RAGTool
from src.core.state import AgentState
from config import config

# Set up logging
# NOTE(review): logging.basicConfig at import time configures the root logger
# as a module side effect; normally this belongs in the application entry
# point. Kept as-is to preserve current behavior — confirm before moving.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class JiraAgent(BaseAgent):
    """
    Jira Agent that handles Jira-related operations.

    At construction time it connects to an MCP server (on a dedicated
    thread, so it cannot collide with any event loop already running in
    the caller's thread) and caches the advertised tools. Incoming user
    requests are then mapped onto those tools by the LLM; when no tool
    fits, a keyword-based fallback produces a best-effort reply.
    """

    # Seconds to wait for the initial MCP connection before giving up.
    _MCP_CONNECT_TIMEOUT = 30

    def __init__(self):
        # LLM used to choose a tool and extract its parameters.
        # temperature=0 keeps tool selection deterministic.
        self.llm = ChatOpenAI(
            model=config.llm_model,
            openai_api_key=config.llm_api_key,
            openai_api_base=config.llm_api_base,
            temperature=0,
            request_timeout=30
        )
        self.jira_connector = JiraConnector()
        self.rag_tool = RAGTool()
        self.available_tools = []       # raw tool specs returned by the MCP server
        self.tool_descriptions = {}     # tool name -> human-readable description
        self._mcp_tools_loaded = False

        # Connect to the MCP server and discover tools at startup.
        # Failure leaves the agent usable in a degraded (no-tools) state.
        self._initialize_mcp_tools()

        logger.info("JiraAgent initialized with model: %s", config.llm_model)
        logger.info("MCP server: %s", config.mcp_url)

    def _initialize_mcp_tools(self):
        """
        Initialize connection to MCP server and fetch available tools.

        Called once during agent initialization. Populates
        ``self.available_tools`` and ``self.tool_descriptions`` and sets
        ``self._mcp_tools_loaded`` on success; logs and degrades
        gracefully on any failure instead of raising.
        """
        try:
            logger.info("Initializing MCP tools...")
            connected = self._connect_in_thread()
            logger.info("MCP connection status: %s", connected)

            if not connected:
                logger.error("Failed to connect to MCP server during initialization")
                return

            # Get list of available tools and index their descriptions.
            self.available_tools = self.jira_connector.list_tools()
            for tool in self.available_tools:
                if isinstance(tool, dict) and "name" in tool and "description" in tool:
                    self.tool_descriptions[tool["name"]] = tool["description"]

            logger.info("Available MCP tools: %s", list(self.tool_descriptions.keys()))
            self._mcp_tools_loaded = True
        except Exception as e:
            logger.error("Error initializing MCP tools: %s", e)
            logger.error("Traceback: %s", traceback.format_exc())

    def _connect_in_thread(self) -> bool:
        """
        Run the connector's async ``connect()`` on a dedicated worker thread.

        A fresh event loop is created (and closed) by ``asyncio.run`` inside
        the worker, so this never conflicts with an event loop that may
        already be running in the calling thread.

        Returns:
            True if the connection succeeded, False otherwise.
        """
        def connect_sync():
            # asyncio.run creates, runs, and closes a new event loop
            # scoped to this worker thread.
            return asyncio.run(self.jira_connector.connect())

        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
                future = executor.submit(connect_sync)
                return bool(future.result(timeout=self._MCP_CONNECT_TIMEOUT))
        except Exception as e:
            logger.error("Error connecting in thread: %s", e)
            return False

    def process(self, state: AgentState) -> Dict[str, Any]:
        """
        Process Jira-related requests by interacting with MCP server.

        If the state requests RAG and no RAG response exists yet, the
        knowledge base is consulted first; otherwise the request is
        treated as a Jira operation. Returns a partial state update
        (new messages plus either ``rag_response`` or ``agent_response``).
        """
        messages = state["messages"]
        user_input = messages[-1].content if messages else ""

        logger.info("JiraAgent processing request: %s", user_input)

        # Check if RAG was already used
        if state.get("rag_needed", False) and not state.get("rag_response"):
            logger.info("Consulting RAG knowledge base")
            rag_result = self.rag_tool.execute(user_input)
            logger.info("RAG response: %s", rag_result["result"])

            new_messages: List[BaseMessage] = list(messages) + [AIMessage(content=f"RAG Response: {rag_result['result']}")]
            return {
                "rag_response": rag_result["result"],
                "messages": new_messages
            }

        # Otherwise, process as a Jira operation using MCP tools
        try:
            jira_response = self._process_jira_operation(user_input)
        except Exception as e:
            logger.error("Error processing Jira operation: %s", e)
            jira_response = f"Error processing request: {str(e)}"

        logger.info("Jira operation response: %s", jira_response)

        new_messages: List[BaseMessage] = list(messages) + [AIMessage(content=jira_response)]
        return {
            "agent_response": jira_response,
            "messages": new_messages
        }

    def route(self, state: AgentState) -> str:
        """
        Determine the next step for the Jira agent.

        For simplicity, the agent always ends after processing.
        """
        logger.info("JiraAgent routing to end")
        return "end"

    @staticmethod
    def _extract_json(text: str) -> str:
        """
        Strip optional markdown code fences from an LLM response.

        Many models wrap JSON replies in ```json ... ``` despite explicit
        instructions not to; this normalizes such output so json.loads
        can parse it. Plain JSON passes through unchanged.
        """
        text = text.strip()
        if text.startswith("```"):
            # Drop the opening fence line (which may read ```json).
            text = text.split("\n", 1)[1] if "\n" in text else ""
            stripped = text.rstrip()
            if stripped.endswith("```"):
                text = stripped[:-3]
        return text.strip()

    def _process_jira_operation(self, user_input: str) -> str:
        """
        Process a Jira operation based on user input and available MCP tools.

        Asks the LLM to pick one of the cached MCP tools (and its
        parameters) as a JSON decision, executes the chosen tool via the
        connector, and falls back to keyword-based handling when parsing
        fails or no suitable tool exists.
        """
        try:
            logger.info("Determining Jira operation using available MCP tools")
            logger.info("Available tools count: %d", len(self.tool_descriptions))

            # Format available tools for the LLM prompt.
            tools_info = "\n".join([
                f"- {name}: {desc}"
                for name, desc in self.tool_descriptions.items()
            ])

            if not tools_info:
                tools_info = "No tools available"

            # Create a prompt that includes available tools
            prompt = ChatPromptTemplate.from_messages([
                ("system", f"""You are a Jira expert that can identify what kind of operation the user wants to perform.
                
Available MCP tools that can be used:
{tools_info}

Based on the user request and available tools, determine the best approach. If a suitable tool exists, use it. Otherwise, provide a general response."""),
                ("human", """Based on this request and the available tools, what should we do?

Request: {input}
                 
CRITICAL INSTRUCTIONS:
- Respond ONLY with a valid JSON object
- Do NOT include any text before or after the JSON
- Ensure the JSON is properly formatted

Respond in JSON format:
{{
    "thought": "Your reasoning process - explain why you chose this approach",
    "tool_to_use": "exact_tool_name_or_none_if_no_suitable_tool",
    "parameters": {{
        // required parameters for the tool, based on the user request
    }},
    "operation_type": "CREATE|READ|UPDATE|DELETE|SEARCH|OTHER"
}}

Example responses:
1. If using a tool:
{{
    "thought": "User wants to create a new issue, and we have jira_create_issue tool available",
    "tool_to_use": "jira_create_issue",
    "parameters": {{
        "summary": "Implement new feature",
        "project_key": "PROJ",
        "issue_type":  "Story",
        "description": "User requested to create a new story"
    }},
    "operation_type": "CREATE"
}}

2. If no suitable tool:
{{
    "thought": "User is asking about something we don't have tools for",
    "tool_to_use": null,
    "parameters": {{}},
    "operation_type": "OTHER"
}}""")
            ])

            chain = prompt | self.llm
            llm_response = chain.invoke({"input": user_input}).content.strip()
            logger.info("LLM response: %s", llm_response)

            # Parse LLM response and execute appropriate action.
            try:
                # Tolerate markdown-fenced JSON (a common LLM quirk).
                decision = json.loads(self._extract_json(llm_response))
                tool_name = decision.get("tool_to_use")
                parameters = decision.get("parameters", {})
                thought = decision.get("thought", "No reasoning provided")

                logger.info("LLM decision: %s", thought)

                # If a known tool is recommended, execute it synchronously.
                if tool_name and tool_name in self.tool_descriptions:
                    logger.info("Executing MCP tool: %s with parameters: %s", tool_name, parameters)
                    result = self.jira_connector.execute_tool_sync(tool_name, parameters)
                    return f"Successfully executed {tool_name}: {result.get('result', result) if isinstance(result, dict) else result}"
                else:
                    # No suitable tool found, use fallback processing
                    operation_type = decision.get("operation_type", "UNKNOWN")
                    return self._fallback_processing(user_input, operation_type)

            except json.JSONDecodeError as parse_error:
                logger.error("Error parsing LLM response as JSON: %s", parse_error)
                return self._fallback_processing(user_input, "UNKNOWN")

        except Exception as e:
            logger.error("Error processing Jira operation with LLM: %s", e)
            return self._fallback_processing(user_input, "UNKNOWN")

    def _fallback_processing(self, user_input: str, operation_type: str) -> str:
        """
        Fallback processing when LLM-based approach fails or no suitable tool is found.

        Classifies the request by keyword when the operation type is
        unknown, then returns a short guidance message mentioning the
        available MCP tools (if any).
        """
        logger.info("Using fallback processing for operation type: %s", operation_type)

        # Keyword-based detection as fallback; order mirrors priority
        # (CREATE checked before READ, etc.).
        user_input_lower = user_input.lower()
        if operation_type == "UNKNOWN":
            keyword_map = [
                ("CREATE", ("create", "new")),
                ("READ", ("get", "show", "view")),
                ("UPDATE", ("update", "edit", "modify")),
                ("DELETE", ("delete", "remove")),
                ("SEARCH", ("search", "find")),
            ]
            operation_type = next(
                (op for op, keywords in keyword_map
                 if any(kw in user_input_lower for kw in keywords)),
                "OTHER",
            )

        # Try to use MCP tools even in fallback mode
        if self.tool_descriptions:
            available_tool_names = ", ".join(list(self.tool_descriptions.keys())[:3])  # Show first 3 tools
            return f"Understood request as {operation_type} operation. Available MCP tools: {available_tool_names}. Please be more specific about what you want to do."
        else:
            return f"Understood request as {operation_type} operation. Currently unable to connect to MCP tools. {user_input}"

    def cleanup(self):
        """
        Cleanup resources when agent is no longer needed.

        NOTE(review): ``self.event_loop`` is no longer created in
        ``__init__`` (that code was removed), so the guard below only
        matters for legacy instances that set it externally. Best-effort:
        never raises.
        """
        try:
            loop = getattr(self, "event_loop", None)
            if loop is not None and loop.is_running():
                loop.stop()
        except Exception:
            # Cleanup must never propagate errors.
            pass