# -*- coding: utf-8 -*-
"""
火山引擎API服务模块
实现火山引擎AI模型调用功能

主要功能:
- 火山引擎API集成
- 流式响应处理
- 对话管理
- 错误处理和重试
"""

import os
import json
import asyncio
import aiohttp
from typing import Dict, Any, Optional, AsyncGenerator, List
from datetime import datetime, timezone
import logging
from dataclasses import dataclass
from enum import Enum

# 配置日志
logger = logging.getLogger(__name__)


class ModelType(Enum):
    """Closed set of Doubao endpoint identifiers accepted by the Ark API.

    The enum value is the literal model id sent in the request body.
    """
    DOUBAO_PRO_4K = "doubao-pro-4k"      # Pro tier, 4k context
    DOUBAO_PRO_32K = "doubao-pro-32k"    # Pro tier, 32k context
    DOUBAO_PRO_128K = "doubao-pro-128k"  # Pro tier, 128k context
    DOUBAO_LITE_4K = "doubao-lite-4k"    # Lite tier, 4k context
    DOUBAO_LITE_32K = "doubao-lite-32k"  # Lite tier, 32k context


@dataclass
class ChatMessage:
    """One conversation turn exchanged with the model."""
    role: str  # expected values: system, user, assistant
    content: str
    timestamp: Optional[datetime] = None  # local bookkeeping only; not sent to the API

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to the wire format the API expects (role and content only)."""
        return dict(role=self.role, content=self.content)


@dataclass
class ChatResponse:
    """Parsed result of a chat-completion API call."""
    content: str           # assistant message text
    model: str             # model id echoed by the API
    usage: Dict[str, int]  # token accounting as returned by the API
    finish_reason: str     # e.g. 'stop', 'length' (empty if absent)
    created_at: datetime   # local receipt time (UTC), not the API timestamp

    @classmethod
    def from_api_response(cls, response_data: Dict[str, Any]) -> 'ChatResponse':
        """Build a ChatResponse from a raw API response dict.

        Tolerates a missing, empty, or malformed 'choices' list by falling
        back to empty-string fields instead of raising IndexError (the
        previous `.get('choices', [{}])[0]` crashed on `"choices": []`).

        Args:
            response_data: Decoded JSON body of a /chat/completions response.

        Returns:
            ChatResponse: Parsed instance; missing fields default to ''/{}.
        """
        # `or [{}]` also covers the present-but-empty list case.
        choice = (response_data.get('choices') or [{}])[0]
        message = choice.get('message', {})

        return cls(
            content=message.get('content', ''),
            model=response_data.get('model', ''),
            usage=response_data.get('usage', {}),
            finish_reason=choice.get('finish_reason', ''),
            created_at=datetime.now(timezone.utc)
        )


class VolcengineService:
    """Async client for the Volcengine (Ark) chat-completion API.

    Configuration comes from environment variables (names consistent with
    settings.py). A single aiohttp session is created lazily and reused;
    call close() when the service is no longer needed.
    """

    def __init__(self):
        """Read configuration from the environment and validate it.

        Raises:
            ValueError: if the ARK_API_KEY environment variable is unset.
        """
        # Core API configuration (names match settings.py).
        self.api_key = os.getenv('ARK_API_KEY')
        self.base_url = os.getenv('ARK_BASE_URL', 'https://ark.cn-beijing.volces.com/api/v3')
        self.default_model = os.getenv('ARK_MODEL_ID', ModelType.DOUBAO_PRO_4K.value)

        # Request/retry behaviour.
        self.timeout = int(os.getenv('VOLCENGINE_TIMEOUT', '60'))
        self.max_retries = int(os.getenv('VOLCENGINE_MAX_RETRIES', '3'))
        self.retry_delay = float(os.getenv('VOLCENGINE_RETRY_DELAY', '1.0'))

        # Default sampling parameters.
        self.default_temperature = float(os.getenv('VOLCENGINE_TEMPERATURE', '0.7'))
        self.default_max_tokens = int(os.getenv('VOLCENGINE_MAX_TOKENS', '2048'))
        self.default_top_p = float(os.getenv('VOLCENGINE_TOP_P', '0.9'))

        # Fail fast when the API key is missing.
        if not self.api_key:
            raise ValueError("ARK_API_KEY环境变量未设置")

        # Shared HTTP session, created on first request.
        self._session: Optional[aiohttp.ClientSession] = None

    async def _get_session(self) -> aiohttp.ClientSession:
        """Return the shared HTTP session, (re)creating it if absent or closed."""
        if self._session is None or self._session.closed:
            headers = {
                'Authorization': f'Bearer {self.api_key}',
                'Content-Type': 'application/json',
                'User-Agent': 'AI-MarketIQ/1.0.0'
            }

            timeout = aiohttp.ClientTimeout(total=self.timeout)

            self._session = aiohttp.ClientSession(
                headers=headers,
                timeout=timeout,
                connector=aiohttp.TCPConnector(limit=100, limit_per_host=30)
            )

        return self._session

    async def close(self):
        """Close the shared HTTP session, if one is open."""
        if self._session and not self._session.closed:
            await self._session.close()
            self._session = None

    async def chat_completion(
        self,
        messages: List[ChatMessage],
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        stream: bool = False,
        **kwargs
    ) -> ChatResponse:
        """Call the chat-completion endpoint and return the parsed response.

        Args:
            messages: Conversation history.
            model: Model/endpoint id; defaults to self.default_model.
            temperature: Sampling temperature; defaults to self.default_temperature.
            max_tokens: Response token cap; defaults to self.default_max_tokens.
            top_p: Nucleus-sampling parameter; defaults to self.default_top_p.
            stream: Forwarded to the API verbatim. NOTE(review): this method
                always parses the body as one JSON document, so stream=True is
                unlikely to work here — use chat_completion_stream instead.
            **kwargs: Extra request fields forwarded verbatim.

        Returns:
            ChatResponse: Parsed completion.
        """
        # Explicit None checks so callers may legitimately pass 0 / 0.0
        # (the previous `x or default` silently replaced falsy values).
        request_data = {
            "model": model or self.default_model,
            "messages": [msg.to_dict() for msg in messages],
            "temperature": self.default_temperature if temperature is None else temperature,
            "max_tokens": self.default_max_tokens if max_tokens is None else max_tokens,
            "top_p": self.default_top_p if top_p is None else top_p,
            "stream": stream
        }

        # Merge caller-supplied extras (may override the defaults above).
        request_data.update(kwargs)

        response_data = await self._make_request(
            method='POST',
            endpoint='/chat/completions',
            data=request_data
        )

        return ChatResponse.from_api_response(response_data)

    async def chat_completion_stream(
        self,
        messages: List[ChatMessage],
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        **kwargs
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Call the chat-completion endpoint in streaming (SSE) mode.

        Args:
            messages: Conversation history.
            model: Model/endpoint id; defaults to self.default_model.
            temperature: Sampling temperature; defaults to self.default_temperature.
            max_tokens: Response token cap; defaults to self.default_max_tokens.
            top_p: Nucleus-sampling parameter; defaults to self.default_top_p.
            **kwargs: Extra request fields forwarded verbatim.

        Yields:
            Dict: One decoded SSE chunk per event.
        """
        # Same explicit None checks as chat_completion: 0 / 0.0 are valid.
        request_data = {
            "model": model or self.default_model,
            "messages": [msg.to_dict() for msg in messages],
            "temperature": self.default_temperature if temperature is None else temperature,
            "max_tokens": self.default_max_tokens if max_tokens is None else max_tokens,
            "top_p": self.default_top_p if top_p is None else top_p,
            "stream": True
        }

        request_data.update(kwargs)

        async for chunk in self._make_stream_request(
            method='POST',
            endpoint='/chat/completions',
            data=request_data
        ):
            yield chunk

    async def _make_request(
        self,
        method: str,
        endpoint: str,
        data: Optional[Dict[str, Any]] = None,
        params: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Execute an HTTP request with retries.

        Network errors, timeouts, and 5xx responses are retried up to
        self.max_retries times with a fixed delay; 4xx responses are raised
        immediately, since retrying cannot fix a client error.

        Args:
            method: HTTP method.
            endpoint: API path appended to self.base_url.
            data: JSON request body.
            params: Query-string parameters.

        Returns:
            Dict: Decoded JSON response body.

        Raises:
            aiohttp.ClientResponseError: for non-200 responses.
            aiohttp.ClientError | asyncio.TimeoutError: after retries are exhausted.
            RuntimeError: defensive fallback if the loop exits without result.
        """
        url = f"{self.base_url}{endpoint}"
        session = await self._get_session()

        last_exception = None

        for attempt in range(self.max_retries + 1):
            try:
                logger.debug(f"发起API请求: {method} {url} (第{attempt + 1}次尝试)")

                async with session.request(
                    method=method,
                    url=url,
                    json=data,
                    params=params
                ) as response:
                    response_text = await response.text()

                    if response.status == 200:
                        response_data = json.loads(response_text)
                        logger.debug(f"API请求成功: {response.status}")
                        return response_data

                    error_msg = f"API请求失败: {response.status} - {response_text}"
                    logger.error(error_msg)

                    # Prefer the structured error message when the body is JSON.
                    try:
                        error_data = json.loads(response_text)
                        error_msg = error_data.get('error', {}).get('message', error_msg)
                    except json.JSONDecodeError:
                        pass

                    raise aiohttp.ClientResponseError(
                        request_info=response.request_info,
                        history=response.history,
                        status=response.status,
                        message=error_msg
                    )

            except (aiohttp.ClientError, asyncio.TimeoutError) as e:
                # 4xx means the request itself is wrong (bad key, bad payload);
                # retrying cannot help, so surface it immediately.
                if isinstance(e, aiohttp.ClientResponseError) and 400 <= e.status < 500:
                    raise
                last_exception = e
                if attempt < self.max_retries:
                    logger.warning(f"API请求失败，{self.retry_delay}秒后重试: {e}")
                    await asyncio.sleep(self.retry_delay)
                else:
                    logger.error(f"API请求失败，已达到最大重试次数: {e}")
                    break
            except Exception as e:
                logger.error(f"API请求发生未知错误: {e}")
                raise  # bare raise preserves the original traceback

        # All retries failed: re-raise the last transport-level exception.
        if last_exception:
            raise last_exception

        raise RuntimeError("API请求失败")

    async def _make_stream_request(
        self,
        method: str,
        endpoint: str,
        data: Optional[Dict[str, Any]] = None,
        params: Optional[Dict[str, Any]] = None
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Execute a streaming HTTP request and yield decoded SSE chunks.

        No retries are attempted: a partially-consumed stream cannot be
        safely restarted from here.

        Args:
            method: HTTP method.
            endpoint: API path appended to self.base_url.
            data: JSON request body.
            params: Query-string parameters.

        Yields:
            Dict: One decoded JSON object per `data:` SSE line, until '[DONE]'.

        Raises:
            aiohttp.ClientResponseError: for non-200 responses.
        """
        url = f"{self.base_url}{endpoint}"
        session = await self._get_session()

        try:
            logger.debug(f"发起流式API请求: {method} {url}")

            async with session.request(
                method=method,
                url=url,
                json=data,
                params=params
            ) as response:
                if response.status != 200:
                    error_text = await response.text()
                    error_msg = f"流式API请求失败: {response.status} - {error_text}"
                    logger.error(error_msg)
                    raise aiohttp.ClientResponseError(
                        request_info=response.request_info,
                        history=response.history,
                        status=response.status,
                        message=error_msg
                    )

                # aiohttp's StreamReader iterates line by line, matching SSE framing.
                async for line in response.content:
                    line = line.decode('utf-8').strip()

                    if not line:
                        continue

                    # SSE payload lines are prefixed with 'data: '.
                    if line.startswith('data: '):
                        data_str = line[6:]  # strip the 'data: ' prefix

                        if data_str == '[DONE]':
                            logger.debug("流式响应结束")
                            break

                        try:
                            chunk_data = json.loads(data_str)
                            yield chunk_data
                        except json.JSONDecodeError as e:
                            # Skip malformed chunks rather than aborting the stream.
                            logger.warning(f"解析流式响应数据失败: {e} - {data_str}")
                            continue

        except (aiohttp.ClientError, asyncio.TimeoutError) as e:
            logger.error(f"流式API请求失败: {e}")
            raise
        except Exception as e:
            logger.error(f"流式API请求发生未知错误: {e}")
            raise

    async def health_check(self) -> Dict[str, Any]:
        """Probe the API with a minimal completion and report status.

        Returns:
            Dict: {'status': 'healthy'|'unhealthy', 'service', 'timestamp',
            'details'} where details carries latency/model info on success
            or the error string on failure. Never raises.
        """
        health_info = {
            'status': 'unhealthy',
            'service': 'volcengine_api',
            'timestamp': datetime.now(timezone.utc).isoformat(),
            'details': {}
        }

        try:
            # Smallest possible round-trip: one message, 10-token cap.
            test_messages = [
                ChatMessage(role='user', content='Hello')
            ]

            loop = asyncio.get_running_loop()
            start_time = loop.time()
            response = await self.chat_completion(
                messages=test_messages,
                max_tokens=10
            )
            response_time = (loop.time() - start_time) * 1000

            health_info.update({
                'status': 'healthy',
                'details': {
                    'response_time_ms': round(response_time, 2),
                    'model': response.model,
                    'api_key_configured': bool(self.api_key),
                    'base_url': self.base_url
                }
            })

        except Exception as e:
            # Health checks must report, not propagate, failures.
            health_info['details']['error'] = str(e)
            logger.error(f"火山引擎API健康检查失败: {e}")

        return health_info

    def get_available_models(self) -> List[str]:
        """Return the model ids declared in ModelType.

        Returns:
            List[str]: Known model identifiers.
        """
        return [model.value for model in ModelType]

    def validate_model(self, model: str) -> bool:
        """Check whether a model id is among the known ModelType values.

        Args:
            model: Model identifier to check.

        Returns:
            bool: True if the model is recognized.
        """
        return model in self.get_available_models()


# Global service instance, lazily created by get_volcengine_service()
volcengine_service: Optional[VolcengineService] = None


async def get_volcengine_service() -> VolcengineService:
    """Return the module-level VolcengineService, creating it on first use.

    Returns:
        VolcengineService: The shared, lazily-initialized instance.
    """
    global volcengine_service
    if volcengine_service is not None:
        return volcengine_service
    volcengine_service = VolcengineService()
    return volcengine_service


async def close_volcengine_service():
    """Shut down the shared service instance and forget it, if one exists."""
    global volcengine_service
    if volcengine_service is None:
        return
    await volcengine_service.close()
    volcengine_service = None


# 便捷函数
async def chat_with_ai(
    messages: List[Dict[str, str]],
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
    stream: bool = False
) -> ChatResponse:
    """Run a chat completion from plain dict messages (convenience wrapper).

    Args:
        messages: Items shaped like {'role': 'user', 'content': '...'}.
        model: Optional model override.
        temperature: Optional sampling temperature.
        max_tokens: Optional response token cap.
        stream: Forwarded to chat_completion.

    Returns:
        ChatResponse: The completion result.
    """
    converted = [
        ChatMessage(role=entry['role'], content=entry['content'])
        for entry in messages
    ]

    service = await get_volcengine_service()
    return await service.chat_completion(
        messages=converted,
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        stream=stream
    )


async def chat_with_ai_stream(
    messages: List[Dict[str, str]],
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None
) -> AsyncGenerator[Dict[str, Any], None]:
    """Stream a chat completion from plain dict messages (convenience wrapper).

    Args:
        messages: Items shaped like {'role': 'user', 'content': '...'}.
        model: Optional model override.
        temperature: Optional sampling temperature.
        max_tokens: Optional response token cap.

    Yields:
        Dict: One decoded streaming chunk per SSE event.
    """
    converted = [
        ChatMessage(role=entry['role'], content=entry['content'])
        for entry in messages
    ]

    service = await get_volcengine_service()
    async for chunk in service.chat_completion_stream(
        messages=converted,
        model=model,
        temperature=temperature,
        max_tokens=max_tokens
    ):
        yield chunk