import json
from typing import override, Dict, List, Any, AsyncGenerator, Union, Tuple

from datetime import datetime
from pprint import pformat
from openai import AsyncOpenAI, APIError, RateLimitError, AsyncStream
from openai.types.chat import ChatCompletion, ChatCompletionChunk
from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
# Function, FunctionParameters
from openai.types.shared_params.function_definition import FunctionDefinition
from mcp.types import Tool


from ..schemas import Message, MessageToolCall, MessageMetrics, MessageChunk, Choice
from ..schemas import ModelResponse
from .provider import BaseProvider
from .utils import parse_json, format_conversation, parse_content

from ..common import (
    ModelParameter, 
    get_logger
)
logger = get_logger(__name__)

class OpenAIProvider(BaseProvider):
    """Chat and embedding provider backed by the OpenAI (or OpenAI-compatible) API."""

    def __init__(self, model_param: ModelParameter):
        super().__init__(model_param)
        # Only pass base_url when one is configured so the SDK default endpoint is kept.
        if self.base_url:
            client = AsyncOpenAI(
                api_key=self.api_key,
                base_url=self.base_url,
                max_retries=self.max_retries)
        else:
            client = AsyncOpenAI(api_key=self.api_key, max_retries=self.max_retries)
        self.client = client

    @override
    def format_tool(self, tool: Tool) -> ChatCompletionToolParam | None:
        """
        Format a tool from its MCP definition into an OpenAI ChatCompletionToolParam.

        Args:
            tool: MCP tool definition
        Returns:
            OpenAI ChatCompletionToolParam, or None if conversion fails
        """
        try:
            param = ChatCompletionToolParam(
                function=FunctionDefinition(
                    name=tool.name,
                    description=tool.description,
                    parameters=tool.inputSchema
                ),
                type="function",
            )
            return param
        except Exception as e:
            logger.error(f"error processing convert mcp tool to openai function: {e}")
            return None

    @override
    async def chat(self, conversation: List[Dict],
                formatted_tools: List[Dict], stream: bool = False) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
        """
        Call the OpenAI chat-completions endpoint.

        Args:
            conversation: Messages already formatted for the OpenAI API
            formatted_tools: Tools already converted to ChatCompletionToolParam
            stream: Whether to request a streaming response

        Returns:
            ChatCompletion (stream=False) or AsyncStream[ChatCompletionChunk]
            (stream=True); on error, a dict with "assistant_text" and "tool_calls".
        """
        model_param = self.model_parameter
        try:
            # NOTE: `n` is left at its default of 1; generating more choices
            # multiplies token cost across all choices.
            kwargs: Dict[str, Any] = dict(
                messages=conversation,
                model=self.model,
                temperature=model_param.temperature,
                top_p=model_param.top_p,
                max_tokens=model_param.max_tokens,
                tools=formatted_tools,
                extra_body=self.extra_body,
                stream=stream,
            )
            if stream:
                # stream_options (e.g. include_usage) only applies to streaming calls.
                kwargs["stream_options"] = self.stream_options
            return await self.client.chat.completions.create(**kwargs)
        except APIError as e:
            if isinstance(e, RateLimitError):
                return {"assistant_text": f"OpenAI rate limit: {str(e)}", "tool_calls": []}
            else:
                return {"assistant_text": f"OpenAI API error: {str(e)}", "tool_calls": []}
        except Exception as e:
            return {"assistant_text": f"Unexpected OpenAI error: {str(e)}", "tool_calls": []}

    @override
    async def response(self,
            messages: List[Message] = None,
            tools: List[Tool] = None,
            stream: bool = False,
            **args
        ) -> Union[Dict, AsyncGenerator, ModelResponse]:
        """
        Generate text using OpenAI's API.

        Args:
            messages: The conversation history as project Message objects
            tools: Available MCP tools for the model to call
            stream: Whether to stream the response

        Returns:
            If stream=False: ModelResponse (or an error Dict with assistant_text/tool_calls)
            If stream=True: AsyncGenerator yielding MessageChunk objects
        """
        # Convert MCP tool definitions into the OpenAI function-calling format,
        # silently dropping any tool that fails conversion.
        formatted_tools: List[ChatCompletionToolParam] = []
        if tools is not None:
            for tool in tools:
                formatted_tool = self.format_tool(tool)
                if formatted_tool:
                    formatted_tools.append(formatted_tool)
        if len(formatted_tools) > 0:
            logger.info(f"Format mcp tools: {[t['function']['name'] for t in formatted_tools]} successfully.")

        conversation = format_conversation(messages, "openai")
        logger.info(f"conversation messages: {pformat(conversation)}")
        # call client.chat.completions.create
        response = await self.chat(
            conversation=conversation,
            formatted_tools=formatted_tools,
            stream=stream
        )
        # chat() returns a plain error dict on failure; pass it through unchanged
        # instead of feeding it to a formatter that expects an SDK object.
        if isinstance(response, dict):
            return response

        if stream:
            return self._format_response_with_stream(response)
        # Non-stream responses are complete ChatCompletion objects and must go
        # through the synchronous formatter, not the stream formatter.
        return self._format_response(response)

    def _format_response(self, response: ChatCompletion) -> Union[ModelResponse, Dict]:
        """
        Convert a non-streaming ChatCompletion into a project ModelResponse.

        Returns an error Dict (assistant_text/tool_calls) if conversion fails.
        """
        try:
            created_at = datetime.fromtimestamp(response.created)
            # Only the first choice is used; `n` is kept at 1 in chat().
            choice = response.choices[0]
            usage = response.usage
            metrics = MessageMetrics(
                created_at=created_at.isoformat(),
                prompt_eval_count=usage.prompt_tokens,
                eval_count=usage.completion_tokens,
                total_count=usage.total_tokens,
            )

            tool_calls: List[MessageToolCall] = []
            if hasattr(choice.message, 'tool_calls') and choice.message.tool_calls:
                logger.info(f"tool calls: {choice.message.tool_calls}")
                for tc in choice.message.tool_calls:
                    tool_call = MessageToolCall(**tc.model_dump())
                    tool_calls.append(tool_call)

            # Default to None so a tool-call-only message (content is None)
            # does not leave these names unbound.
            thinking, assistant_text = None, None
            if hasattr(choice.message, 'content') and choice.message.content is not None:
                # parse_content splits out any <think>-style reasoning section.
                thinking, assistant_text = parse_content(choice.message.content)

            message = Message(
                id=f"index-{choice.index}",
                provider="openai",
                model=self.model,
                role=choice.message.role,
                content=assistant_text,
                thinking=thinking,
                tool_calls=tool_calls,
                created_at=created_at.isoformat(),
            )
            result = ModelResponse(
                id=response.id,
                provider=message.provider,
                created_at=datetime.fromtimestamp(response.created),
                message=message,
                metrics=metrics,
            )
            logger.info(f"{result.provider}/{result.message.model}: generate query result successfully with request created at {result.created_at}.")
            return result

        except APIError as e:
            if isinstance(e, RateLimitError):
                return {"assistant_text": f"OpenAI rate limit: {str(e)}", "tool_calls": []}
            else:
                return {"assistant_text": f"OpenAI API error: {str(e)}", "tool_calls": []}
        except Exception as e:
            return {"assistant_text": f"Unexpected OpenAI error: {str(e)}", "tool_calls": []}

    async def _stream(self, response: AsyncStream[ChatCompletionChunk]) -> AsyncGenerator[MessageChunk, None]:
        """Yield each raw SDK chunk re-wrapped as a project MessageChunk."""
        async for chunk in response:
            choice = Choice(**chunk.choices[0].model_dump())
            _chunk = MessageChunk(
                provider="openai",
                model=self.model,
                role="assistant",
                choice=choice,
                created_at=datetime.fromtimestamp(chunk.created).isoformat(),
            )
            yield _chunk

    async def _format_response_with_stream(self, response: AsyncStream[ChatCompletionChunk]) -> AsyncGenerator[MessageChunk, None]:
        """Internal function for streaming generation: adapts SDK chunks to MessageChunk."""

        logger.info(f"start to format stream response from model {self.model}")
        role = 'assistant'
        id = None
        async for chunk in response:
            # Skip keep-alive/usage-only chunks that carry no delta.
            if not chunk.choices or not chunk.choices[0].delta:
                continue
            choice = chunk.choices[0]
            # The role is only present on the first delta; remember it for later chunks.
            if choice.delta.role != role:
                role = choice.delta.role
            if id is None:
                id = chunk.id
            _choice = Choice(**choice.model_dump())
            finish_reason = choice.finish_reason
            _chunk = MessageChunk(
                id=id,
                provider="openai",
                model=self.model,
                role=role,
                choice=_choice,
                created_at=datetime.fromtimestamp(chunk.created).isoformat(),
            )
            # Yield a chunk in the project's unified format.
            yield _chunk
            # Stop once the model signals completion.
            if finish_reason:
                logger.info(f"finish reason: {finish_reason}")
                break

    async def get_embedding(self, input: str) -> List[List[float]]:
        """Embed a single text; thin wrapper around get_embeddings."""
        # Must await: returning the bare coroutine would hand the caller an
        # un-awaited coroutine instead of the embedding list.
        return await self.get_embeddings(input=input)

    async def get_embeddings(self, input: Union[str, List[str]]) -> List[List[float]]:
        """
        Asynchronously fetch embedding vectors for the given text(s).

        Args:
            input: A single text or a list of texts

        Returns:
            List of embedding vectors, one per input text

        Raises:
            Exception: re-raises any error from the embeddings API after logging.
        """
        try:
            response = await self.client.embeddings.create(
                model=self.model,
                input=input
            )

            embeddings = [item.embedding for item in response.data]
            return embeddings

        except Exception as e:
            logger.error(f"OpenAI async embedding generation failed: {e}")
            raise
    
        