"""
Ollama provider implementation for Hawthorn MCP.

This module provides integration with Ollama models for text generation and tool usage,
including proper formatting of tool calls and their arguments.
"""

import json
import logging
import sys
import traceback
import copy
from typing import Dict, List, Any, Optional, Union, TypeVar

# Import Ollama types for response parsing
from ollama import Client, AsyncClient
from ollama._types import ChatResponse, Message

from .response import ResponseMessage
from .utils import ContentBlock, ToolUseBlock, TextBlock

# Configure logging
logger = logging.getLogger('hawthorn.mcp.providers.ollama')

# Constants
DEFAULT_API_HOST = "http://localhost:11434"
DEFAULT_TEMPERATURE = 0.7
DEFAULT_MAX_TOKENS = 1024
EMPTY_JSON_VALUES = ('', '{}')

DEFAULT_MAX_CONVERSATION_ROUND=3

# Type definitions
JsonDict = Dict[str, Any]
OllamaToolType = Dict[str, Any]
MessageType = Dict[str, Any]
T = TypeVar('T')

# Global mapping to track original tool names
tool_name_mapping: Dict[str, str] = {}

def parse_json_safely(json_str: Union[str, Any]) -> "JsonDict":
    """
    Parse a JSON string safely, returning an empty dict for invalid inputs.

    Args:
        json_str: String containing JSON or any other value

    Returns:
        Parsed JSON object as a dict, or {} when the input is not a string,
        is blank, fails to parse, or parses to a non-object JSON value
        (array/scalar) — callers feed the result into tool-call arguments,
        which must be objects.
    """
    # Only strings can be parsed; any other type maps to the empty dict.
    if not isinstance(json_str, str):
        return {}

    # Blank / whitespace-only input is treated as "no arguments".
    # (No special-case needed for "{}": json.loads('{}') already yields {}.)
    json_str = json_str.strip()
    if not json_str:
        return {}

    try:
        parsed = json.loads(json_str)
    except json.JSONDecodeError:
        logger.warning(f"Failed to parse JSON string: {json_str}")
        return {}

    # Enforce the documented contract: reject valid JSON that is not an object.
    if not isinstance(parsed, dict):
        logger.warning(f"Expected JSON object but got {type(parsed).__name__}: {json_str}")
        return {}
    return parsed

def preprocess_messages(messages: "List[MessageType]") -> "List[MessageType]":
    """
    Preprocess conversation messages so tool_calls.function.arguments are
    dictionaries. The Ollama server expects arguments as objects, not strings.

    Args:
        messages: List of message objects from the conversation history

    Returns:
        Deep copy of messages with string tool-call arguments parsed to dicts;
        string arguments that are empty or unparseable have the 'arguments'
        key removed entirely. The input list is never mutated.
    """
    if not messages:
        return messages

    # Deep copy so the caller's conversation history is never mutated.
    msgs_copy = copy.deepcopy(messages)
    modified_count = 0

    for msg in msgs_copy:
        # Guard clauses keep the happy path flat; skip anything malformed.
        if not (isinstance(msg, dict) and msg.get('tool_calls')):
            continue
        for tool_call in msg['tool_calls']:
            if not (isinstance(tool_call, dict) and 'function' in tool_call):
                continue
            function = tool_call['function']
            arguments = function.get('arguments')
            # Only string arguments need conversion; dicts pass through as-is.
            if not isinstance(arguments, str):
                continue
            # parse_json_safely never raises: it returns {} on any bad input,
            # so no extra try/except is needed here.
            parsed = parse_json_safely(arguments)
            if parsed:
                function['arguments'] = parsed
            else:
                # Empty/invalid JSON: drop the key rather than send "" or "{}".
                del function['arguments']
                logger.debug("Removed empty arguments key during preprocessing.")
            modified_count += 1

    if modified_count > 0:
        logger.debug(f"Preprocessed {modified_count} tool call arguments from strings to dicts")

    return msgs_copy

class OllamaProvider:
    """Provider that routes chat and tool-call requests to an Ollama server."""

    def __init__(self, model_cfg: Dict):
        """
        Initialize the provider from a model configuration.

        Args:
            model_cfg: Configuration dict. Recognized keys: "model",
                "temperature", "top_k", "repetition_penalty", "max_tokens",
                "keep_alive_seconds" and "client" (the Ollama host URL).
        """
        self._validate_ollama_version()
        self.model_cfg = model_cfg
        self.options, self.host, self.keep_alive_seconds = self._parse_options(model_cfg)
        self.client = Client(host=self.host)
        self.asyncClient = AsyncClient(host=self.host)

    def _parse_options(self, model_cfg: Dict) -> tuple[Dict[str, Any], str, str]:
        """
        Parse options for the Ollama API call.

        Args:
            model_cfg: Model configuration

        Returns:
            Tuple of (options dict, host URL, keep_alive value such as "30s";
            "0" means "do not keep the model loaded").
        """
        options = {}
        keep_alive_seconds = "0"
        # Map generic config keys onto Ollama option names.
        if "temperature" in model_cfg:
            options["temperature"] = model_cfg.get("temperature", DEFAULT_TEMPERATURE)
        if "top_k" in model_cfg:
            options["top_k"] = model_cfg.get("top_k")
        if "repetition_penalty" in model_cfg:
            options["repeat_penalty"] = model_cfg.get("repetition_penalty")
        if "max_tokens" in model_cfg:
            options["num_predict"] = model_cfg.get("max_tokens", DEFAULT_MAX_TOKENS)

        if "keep_alive_seconds" in model_cfg:
            # str() so both int and str config values work (plain "+ 's'"
            # raised TypeError when the config held an integer).
            keep_alive_seconds = str(model_cfg.get("keep_alive_seconds")) + "s"
        host = model_cfg.get("client", DEFAULT_API_HOST)
        return options, host, keep_alive_seconds

    def metrics(self, response: ChatResponse) -> Dict[str, Any]:
        """
        Build a metrics dict (token counts and tokens/second rates) from a
        ChatResponse. Ollama reports durations in nanoseconds, hence * 10**9.
        """
        def _rate(tokens: Optional[int], duration_ns: Optional[int]) -> float:
            # Guard against missing/zero fields: partially populated responses
            # previously caused ZeroDivisionError or TypeError here.
            if not tokens or not duration_ns:
                return 0.0
            return round(tokens / duration_ns * 10**9, 2)

        metrics = {
            "created_at": response.created_at,
            "model": response.model,
            "prompt": {
                "token": response.prompt_eval_count,
                "rate": _rate(response.prompt_eval_count, response.prompt_eval_duration),
            },
            "generate": {
                "token": response.eval_count,
                "rate": _rate(response.eval_count, response.eval_duration),
            },
            "total_token": (response.prompt_eval_count or 0) + (response.eval_count or 0),
        }
        logger.debug(f"ollama metrics: {metrics}")
        return metrics

    def log_conversation_sample(self, conversation: List[MessageType]) -> None:
        """
        Log abbreviated first/last messages of the conversation for debugging.

        Args:
            conversation: The conversation to log
        """
        if not conversation:
            return

        try:
            first_msg = json.dumps(conversation[0])[:150]
            logger.debug(f"First message (abbreviated): {first_msg}...")

            if len(conversation) > 1:
                last_msg = json.dumps(conversation[-1])[:150]
                logger.debug(f"Last message (abbreviated): {last_msg}...")
        except Exception as e:
            # Best-effort debug output: never let logging break a request.
            logger.debug(f"Could not log conversation sample: {e}")

    def _validate_ollama_version(self):
        """
        Log the installed Ollama SDK version, if it can be determined.

        Purely informational — no minimum version is enforced.
        """
        try:
            import importlib.metadata
            ollama_version = importlib.metadata.version('ollama')
            logger.info(f"Ollama SDK version: {ollama_version}")
        except (ImportError, importlib.metadata.PackageNotFoundError):
            logger.debug("Could not determine Ollama SDK version")

    def parse_think(self, content: str) -> Dict[str, Any]:
        """
        Split model output into "<think>...</think>" reasoning and the answer.

        Returns:
            {"content": content} when no think block is present, otherwise
            {"think": reasoning_text, "content": text_after_the_block}.
        """
        open_tag, close_tag = "<think>", "</think>"
        start = content.find(open_tag)
        if start == -1:
            return {"content": content}
        end = content.find(close_tag)
        if end == -1:
            # Malformed output (no closing tag): the original sliced with
            # end == -1 and produced garbage. Treat the remainder as thinking.
            think_text = content[start + len(open_tag):]
            assistant_text = ""
        else:
            think_text = content[start + len(open_tag):end]
            assistant_text = content[end + len(close_tag):]
        logger.debug(f"think: {think_text},  assistant: {assistant_text}")
        return {"think": think_text, "content": assistant_text}

    def convert_mcp_tool(self, idx: int, tool: Dict[str, Any]) -> Optional[OllamaToolType]:
        """
        Convert a single MCP tool definition into Ollama tool format.

        Args:
            idx: Index for logging purposes
            tool: MCP tool definition

        Returns:
            Tool in Ollama format or None if processing fails
        """
        try:
            original_name = tool["name"]
            logger.debug(f"Processing tool [{idx}]: {original_name}")

            # Record the (currently identity) mapping so callers can translate
            # Ollama tool names back to the original MCP names later.
            tool_name_mapping[original_name] = original_name

            # Extract the parameter schema, tolerating missing/malformed data.
            properties: Dict[str, Any] = {}
            required: List[Any] = []
            if "parameters" in tool:
                params = tool["parameters"]
                if isinstance(params, dict):
                    properties = params.get("properties", {})
                    required = params.get("required", [])
                    logger.debug(f"Tool {original_name} has parameters: properties={list(properties.keys())}, required={required}")
                else:
                    logger.warning(f"Tool {original_name} parameters not a dict: {type(tool['parameters'])}")
            else:
                logger.debug(f"Tool {original_name} has no parameters defined")

            # Shape expected by the Ollama SDK's "tools" parameter.
            ollama_tool = {
                "type": "function",
                "function": {
                    "name": original_name,
                    "description": tool.get("description", ""),
                    "parameters": {
                        "type": "object",
                        "properties": properties,
                        "required": required,
                    },
                },
            }
            logger.debug(f"Convert tool {original_name} to Ollama format successfully")
            return ollama_tool
        except Exception as e:
            logger.error(f"Error processing tool: {e}")
            return None

    def convert_mcp_tools(self, mcp_tools: List[Any]) -> List[OllamaToolType]:
        """
        Convert MCP tools format to Ollama tool format per the Ollama SDK docs.

        Args:
            mcp_tools: List of tools in MCP format (dicts with at least
                "name" and "description")

        Returns:
            List of tools formatted for Ollama's API; tools missing required
            attributes or failing conversion are skipped.
        """
        logger.debug(f"Converting {len(mcp_tools)} MCP tools to Ollama format")

        # Clear the global mapping before processing a fresh tool set.
        tool_name_mapping.clear()

        logger.debug(f"origin mcp tools: {json.dumps(mcp_tools, indent=2)}")
        ollama_tools = []
        for idx, tool in enumerate(mcp_tools):
            if "name" in tool and "description" in tool:
                ollama_tool = self.convert_mcp_tool(idx, tool)
                if ollama_tool:
                    ollama_tools.append(ollama_tool)
            else:
                logger.warning(f"Tool missing required attributes: has name = {'name' in tool}, has description = {'description' in tool}")

        logger.debug(f"Ollama format tools: {json.dumps(ollama_tools, indent=2)}")
        return ollama_tools

    def add_content_to_response(self, response: ResponseMessage, message: Message):
        """
        Fold an Ollama message into the accumulated ResponseMessage content.

        Tool calls are appended as ToolUseBlocks. Plain text REPLACES the last
        content block when that block is also text (only the final round's text
        survives), but is appended after a tool_use block.
        """
        if message.tool_calls is not None:
            logger.debug(f"tool calls: {message.tool_calls}")
            for tc in message.tool_calls:
                block: ContentBlock = ToolUseBlock()
                block.name = tc.function.name
                block.input = tc.function.arguments
                response.content.append(block)
            return

        block: ContentBlock = TextBlock()
        block.text = message.content
        if not response.content:
            response.content.append(block)
            return
        last = response.content[-1]
        logger.info(f"last content: {last}, content info: {message.content}")
        if last.type != "tool_use":
            # Overwrite the previous text block instead of accumulating text.
            response.content[-1] = block
        else:
            response.content.append(block)

    async def generate_response(
        self,
        conversation: List[MessageType],
        all_functions: Union[List[Any], Dict[str, Any], Any],
        **args
    ) -> Union["ResponseMessage", Dict[str, Any]]:
        """
        Generate text using Ollama's API.

        Args:
            conversation: The conversation history as a list of message objects
            all_functions: Available functions (MCP tools) for the model to call
            **args: Optional settings; "max_round" caps how many chat rounds
                are attempted while waiting for a tool call.

        Returns:
            A ResponseMessage with content blocks and metrics; on unexpected
            errors its "additional" field carries the error payload.
            NOTE(review): the missing-model-name path still returns a plain
            dict for backward compatibility with existing callers.
        """
        logger.debug("===== Starting generate_response in provider ollama =====")
        max_round = args.get("max_round", DEFAULT_MAX_CONVERSATION_ROUND)
        model_name = self.model_cfg.get("model", "")
        if not model_name:
            error_msg = "Model name is required but was not provided in configuration"
            logger.error(error_msg)
            return {"assistant_text": error_msg, "tool_calls": []}

        logger.info(f"Using model: {model_name} chat with conversation max_round {max_round}")
        # Convert tools to Ollama format (None means "no tools available").
        ollama_tools = self.convert_mcp_tools(all_functions) if all_functions is not None else None
        # Normalize tool-call arguments to dicts, as the Ollama server requires.
        processed_conversation = preprocess_messages(conversation)
        logger.debug(f"Chat parameters prepared with {ollama_tools} tools")
        # keep_alive_seconds of "0" means "do not keep the model loaded": omit it.
        keep_alive = self.keep_alive_seconds if self.keep_alive_seconds != "0" else None
        # Log conversation for debugging (abbreviated)
        self.log_conversation_sample(processed_conversation)
        logger.info(f"conversation messages: {processed_conversation}")

        response_message: ResponseMessage = ResponseMessage(
            model=model_name,
            provider="ollama",
        )
        try:
            conversation_round = 0
            while conversation_round < max_round:
                conversation_round += 1
                # BUGFIX: send the preprocessed conversation — the original
                # passed the raw `conversation`, silently discarding the
                # argument normalization computed above.
                response: ChatResponse = await self.asyncClient.chat(
                    model=model_name,
                    messages=processed_conversation,
                    options=self.options,
                    stream=False,
                    tools=ollama_tools,
                    keep_alive=keep_alive,
                )
                # Metrics from the latest round overwrite earlier rounds'.
                response_message.metrics = self.metrics(response)
                content = self.parse_think(response.message.content)
                self.add_content_to_response(response_message, response.message)
                if response.message.tool_calls is not None:
                    logger.info(f"find response.message.tool_calls: {response.message} at conversation round: {conversation_round}")
                    break
                logger.debug(f"content {content}, conversation {conversation}")
            logger.debug(f"ResponseMessage is {response_message.content}")
            return response_message
        except Exception as e:
            logger.error(f"Unexpected error in generate_response: {e}")
            traceback.print_exc()
            response_message.additional = {"assistant_text": f"Unexpected error: {str(e)}", "tool_calls": []}
            return response_message