"""
Ollama provider implementation for Hawthorn MCP.

This module provides integration with Ollama models for text generation and tool usage,
including proper formatting of tool calls and their arguments.
"""

import json
import traceback
import copy
import uuid
from typing import override, Dict, List, Any, AsyncIterator, Union, TypeVar
from pprint import pformat
from mcp.types import Tool
from datetime import datetime

# Import Ollama types for response parsing
from ollama import Client, AsyncClient
from ollama._types import ChatResponse, Message

from ..schemas import  ModelResponse,Choice, ChoiceDelta,MessageChunk, MessageToolCall, Message as XMessage, MessageMetrics
from .provider import BaseProvider
from .utils import parse_json_safely, format_conversation, parse_content, parse_json

# Configure logging
from ..common import (
    ModelParameter, 
    get_logger
)
logger = get_logger(__name__)

# Constants
DEFAULT_API_HOST = "http://localhost:11434"  # Ollama's default local server address
DEFAULT_TEMPERATURE = 0.7  # fallback sampling temperature when the model config has none
DEFAULT_MAX_TOKENS = 1024  # fallback generation cap when the model config has none

# Type definitions
OllamaToolType = Dict[str, Any]  # a tool definition in Ollama's "function" format
MessageType = Dict[str, Any]  # one chat message dict (role/content/tool_calls/...)
T = TypeVar('T')  # NOTE(review): not referenced in this module — confirm before removing

# Global mapping to track original tool names
# NOTE(review): never read or written in this file — presumably mutated by other
# modules importing it; confirm before removing.
tool_name_mapping: Dict[str, str] = {}

def preprocess_messages(messages: List[MessageType]) -> List[MessageType]:
    """
    Normalize tool_calls.function.arguments in a conversation to dictionaries.

    The Ollama server expects tool-call arguments as JSON objects, not strings,
    so any string-valued ``arguments`` field is parsed. Empty or unparseable
    arguments are dropped entirely.

    Args:
        messages: Message objects from the conversation history.

    Returns:
        A deep copy of ``messages`` with string arguments converted to dicts
        (the input list is returned unchanged when empty).
    """
    if not messages:
        return messages

    # Work on a deep copy so the caller's history is never mutated.
    processed = copy.deepcopy(messages)
    converted = 0

    for message in processed:
        if not isinstance(message, dict) or not message.get('tool_calls'):
            continue
        for call in message['tool_calls']:
            if not isinstance(call, dict) or 'function' not in call:
                continue
            function = call['function']
            if 'arguments' not in function or not isinstance(function['arguments'], str):
                continue
            try:
                parsed = parse_json_safely(function['arguments'])
            except Exception as exc:
                logger.error(f"Error parsing tool call arguments: {exc}")
                # Unparseable arguments are removed rather than sent as strings.
                if 'arguments' in function:
                    del function['arguments']
                continue
            if parsed:
                function['arguments'] = parsed
            else:
                # An empty dict is dropped instead of being forwarded.
                del function['arguments']
                logger.debug("Removed empty arguments key during preprocessing.")
            converted += 1

    if converted > 0:
        logger.debug(f"Preprocessed {converted} tool call arguments from strings to dicts")

    return processed

class OllamaProvider(BaseProvider):
    """
    Provider backed by a local or remote Ollama server.

    Uses the official ``ollama`` async client for chat completions (with
    optional tool calling and streaming) and for embeddings.
    """

    def __init__(self, model_param: ModelParameter):
        """
        Initialize the provider and its async Ollama client.

        Args:
            model_param: Model configuration. If ``keep_alive_seconds`` is set
                and non-zero, it controls how long the model stays loaded on
                the server; ``0`` is normalized to the server default.
        """
        super().__init__(model_param)
        self._validate_ollama_version()
        self.keep_alive = None
        if hasattr(model_param, "keep_alive_seconds") and model_param.keep_alive_seconds:
            self.keep_alive = f"{model_param.keep_alive_seconds}s"
            # "0s" means "use the server default", so normalize it away.
            if self.keep_alive == "0s":
                self.keep_alive = None
        if self.base_url is None:
            self.base_url = DEFAULT_API_HOST
        self.client = AsyncClient(host=self.base_url)

    def _validate_ollama_version(self) -> None:
        """Log the installed Ollama SDK version, if it can be determined."""
        try:
            import importlib.metadata
            ollama_version = importlib.metadata.version('ollama')
            logger.info(f"Ollama SDK version: {ollama_version}")
        except (ImportError, importlib.metadata.PackageNotFoundError):
            logger.debug("Could not determine Ollama SDK version")

    @override
    def format_tool(self, tool: Tool) -> OllamaToolType | None:
        """
        Convert an MCP tool definition into Ollama's function format.

        Args:
            tool: MCP tool definition.

        Returns:
            The tool in Ollama's ``{"type": "function", ...}`` format, or
            None if conversion fails.
        """
        try:
            required = tool.inputSchema.get("required", [])
            properties = tool.inputSchema.get("properties", {})
            return {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": {
                        "type": "object",
                        "properties": properties,
                        "required": required
                    }
                }
            }
        except Exception as e:
            logger.error(f"error processing convert mcp tool to ollama function: {e}")
            return None

    @override
    async def chat(self, conversation: List[Dict],
                formatted_tools: List[Dict], stream: bool = False) -> Union[ChatResponse, AsyncIterator[ChatResponse]]:
        """
        Send the conversation to the Ollama chat endpoint.

        Args:
            conversation: Messages in OpenAI-style chat format.
            formatted_tools: Tools in Ollama function format, or None.
            stream: When True, return an async iterator of partial responses.

        Returns:
            A ChatResponse (non-streaming), an async iterator of ChatResponse
            chunks (streaming), or an error dict with ``assistant_text`` and
            ``tool_calls`` keys if the request fails.
        """
        model_param = self.model_parameter
        options = {
            "temperature": DEFAULT_TEMPERATURE,
            # NOTE(review): Ollama's options use "num_predict" to cap output;
            # "max_tokens" is likely ignored by the server — confirm.
            "max_tokens": DEFAULT_MAX_TOKENS,
        }
        # Only forward parameters the caller actually configured (truthy).
        if hasattr(model_param, "temperature") and model_param.temperature:
            options["temperature"] = model_param.temperature
        if hasattr(model_param, "max_tokens") and model_param.max_tokens:
            options["max_tokens"] = model_param.max_tokens
        if hasattr(model_param, "top_k") and model_param.top_k:
            options["top_k"] = model_param.top_k
        if hasattr(model_param, "repetition_penalty") and model_param.repetition_penalty:
            options["repeat_penalty"] = model_param.repetition_penalty
        if hasattr(model_param, "num_predict") and model_param.num_predict:
            options["num_predict"] = model_param.num_predict

        try:
            response = await self.client.chat(
                model=self.model,
                messages=conversation,
                options=options,
                stream=stream,
                tools=formatted_tools,
                keep_alive=self.keep_alive,
            )
            return response
        except Exception as e:
            # Deliberate best-effort: surface the failure as an assistant
            # message rather than raising into the caller.
            return {"assistant_text": f"Unexpected Ollama error: {str(e)}", "tool_calls": []}

    def metrics(self, response: ChatResponse) -> Dict[str, Any]:
        """
        Summarize token counts and throughput from a completed chat response.

        Ollama reports durations in nanoseconds; rates are tokens/second.
        Missing counts or durations yield a rate of 0.0 instead of raising
        (previously None counts / zero durations caused TypeError or
        ZeroDivisionError).

        Args:
            response: A completed (non-streaming) chat response.

        Returns:
            Dict with created_at, model, prompt/generate token+rate, and
            total_token.
        """
        def _rate(tokens, duration_ns) -> float:
            # Guard against None/zero values coming back from the server.
            if not tokens or not duration_ns:
                return 0.0
            return round(tokens / duration_ns * 10**9, 2)

        metrics = {
            "created_at": response.created_at,
            "model": response.model,
            "prompt": {
                "token": response.prompt_eval_count,
                "rate": _rate(response.prompt_eval_count, response.prompt_eval_duration),
            },
            "generate": {
                "token": response.eval_count,
                "rate": _rate(response.eval_count, response.eval_duration),
            },
            "total_token": (response.prompt_eval_count or 0) + (response.eval_count or 0),
        }
        logger.debug(f"ollama metrics: {metrics}")
        return metrics

    def log_conversation_sample(self, conversation: List[MessageType]) -> None:
        """
        Log abbreviated first/last messages of the conversation for debugging.

        Args:
            conversation: The conversation to log; no-op when empty.
        """
        if not conversation:
            return

        try:
            first_msg = json.dumps(conversation[0])[:150]
            logger.debug(f"First message (abbreviated): {first_msg}...")

            if len(conversation) > 1:
                last_msg = json.dumps(conversation[-1])[:150]
                logger.debug(f"Last message (abbreviated): {last_msg}...")
        except Exception as e:
            logger.debug(f"Could not log conversation sample: {e}")

    @override
    async def response(self,
        messages: List[Message] | None = None,
        tools: List[Tool] | None = None,
        stream: bool = False,
        **args
    ) -> Union[ModelResponse, Dict[str, Any], Any]:
        """
        Generate a model response for the conversation using Ollama's API.

        Args:
            messages: The conversation history as message objects.
            tools: MCP tools available for the model to call, or None.
            stream: When True, return an async iterator of MessageChunk.
            **args: Extra keyword arguments (currently unused).

        Returns:
            A ModelResponse for non-streaming calls, an async iterator of
            MessageChunk for streaming calls, or an error dict with
            ``assistant_text`` (and possibly ``tool_calls``) on failure.
        """
        logger.debug("===== Starting generate_response in provider ollama =====")
        # Convert MCP tools to Ollama function format, dropping any that fail.
        # None (rather than []) is passed downstream when no tools were given.
        formatted_functions = None
        if tools:
            formatted_functions = [
                f for f in (self.format_tool(tool) for tool in tools) if f
            ]
        if formatted_functions:
            logger.info(f"format mcp tools: {[t.name for t in tools]} successfully.")

        conversation = format_conversation(messages, "openai")
        self.log_conversation_sample(conversation)
        logger.info(f"conversation messages: {pformat(conversation)}")

        try:
            response = await self.chat(
                conversation=conversation,
                formatted_tools=formatted_functions,
                stream=stream
            )
            if stream:
                return self._format_stream_response(response)
            if isinstance(response, ChatResponse):
                return self._format_response(response)
            return {"assistant_text": f"Unexpected error: unsupported data type {type(response)} ollama response "}
        except Exception as e:
            logger.error(f"Unexpected error in generate_response: {e}")
            traceback.print_exc()
            return {"assistant_text": f"Unexpected error: {str(e)}", "tool_calls": []}

    async def _format_stream_response(self, response: AsyncIterator[ChatResponse]) -> AsyncIterator[MessageChunk]:
        """
        Convert a stream of Ollama ChatResponse chunks into MessageChunk objects.

        Yields one MessageChunk per upstream chunk. On the final chunk
        (``chunk.done``), finish_reason is "tool_calls" if that chunk carried
        tool calls, otherwise "stop".
        """
        role = 'assistant'
        # One synthetic completion id shared by every chunk of this stream.
        chunk_id = f"chatcmpl-{uuid.uuid4().hex}"
        model = self.model
        async for chunk in response:
            # Prefer the model name the server actually reports.
            if chunk.model and model != chunk.model:
                model = chunk.model
            tool_calls: List[MessageToolCall] = []
            if chunk.message.tool_calls:
                for tc in chunk.message.tool_calls:
                    payload = tc.model_dump()
                    payload["type"] = 'function'
                    tool_calls.append(MessageToolCall(**payload))
            delta = ChoiceDelta(
                content=chunk.message.content,
                role=role,
                tool_calls=tool_calls if tool_calls else None,
            )

            finish_reason = None
            if chunk.done:
                finish_reason = 'tool_calls' if tool_calls else 'stop'
            yield MessageChunk(
                id=chunk_id,
                provider="ollama",
                model=model,
                role=role,
                choice=Choice(
                    delta=delta,
                    finish_reason=finish_reason,
                    index=0,
                ),
                created_at=chunk.created_at,
            )

    def _format_response(self, response: ChatResponse) -> Union[ModelResponse, Dict]:
        """
        Convert a non-streaming Ollama ChatResponse into a ModelResponse.

        Splits the assistant content into thinking/answer parts via
        ``parse_content``, wraps tool calls as MessageToolCall objects, and
        attaches token metrics.
        """
        raw_message = response.message
        tool_calls: List[MessageToolCall] = []
        if getattr(raw_message, 'tool_calls', None):
            logger.info(f"tool calls: {[tc.function.name for tc in raw_message.tool_calls]}")
            for tc in raw_message.tool_calls:
                payload = tc.model_dump()
                payload["type"] = 'function'
                tool_calls.append(MessageToolCall(**payload))

        # Bug fix: thinking/assistant_text were previously assigned only when
        # content was present, raising NameError on content-less responses.
        thinking, assistant_text = None, None
        if getattr(raw_message, 'content', None) is not None:
            thinking, assistant_text = parse_content(raw_message.content)
        message = XMessage(
            id="index-0",
            model=self.model,
            provider="ollama",
            role="assistant",
            created_at=response.created_at,
            thinking=thinking,
            content=assistant_text,
            tool_calls=tool_calls,
        )
        logger.info(f"{message.provider}/{message.model}: generate message\n {pformat(message.model_dump(exclude_none=True))}")
        # Counts may be None on error responses; treat missing as zero.
        total_count = 0
        if response.prompt_eval_count:
            total_count = int(response.prompt_eval_count)
        if response.eval_count:
            total_count = total_count + int(response.eval_count)
        metrics = MessageMetrics(
            created_at=response.created_at,
            prompt_eval_count=response.prompt_eval_count,
            eval_count=response.eval_count,
            total_count=total_count,
        )
        return ModelResponse(
            id=message.id,
            provider=message.provider,
            created_at=response.created_at,
            message=message,
            metrics=metrics,
        )

    async def get_embedding(self, text: str) -> List[float]:
        """
        Get the embedding vector for a single text.

        Args:
            text: The text to embed.

        Returns:
            The embedding vector for ``text``.

        Raises:
            Exception: Re-raises any client/server error after logging it.
        """
        try:
            response = await self.client.embed(
                model=self.model,
                input=text
            )
            # TODO: record token cost
        except Exception as e:
            logger.error(f"生成嵌入向量失败: {e}")
            raise
        return response.embeddings[0]

    async def get_embeddings(self, texts: List[str]) -> List[List[float]]:
        """
        Get embedding vectors for a batch of texts.

        Args:
            texts: The texts to embed.

        Returns:
            One embedding vector per input text, in order.

        Raises:
            Exception: Re-raises any client/server error after logging it.
        """
        try:
            response = await self.client.embed(
                model=self.model,
                input=texts
            )
            # TODO: record token cost
        except Exception as e:
            logger.error(f"生成嵌入向量失败: {e}")
            raise

        return response.embeddings