#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
模型接口层
提供统一的模型访问接口，支持不同的模型后端实现
"""

import os
import sys
import asyncio
import logging
import httpx
from typing import Optional, Dict, Any, Union, List
from abc import ABC, abstractmethod
from enum import Enum

# Add the project root directory to the Python path so sibling packages
# (e.g. `middleware`) resolve when this file is executed directly.
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(__file__))))

# Configure logging at import time.
# NOTE(review): basicConfig() here mutates the process-wide root logger as a
# side effect of importing this module — confirm this is intended rather than
# leaving configuration to the application entry point.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s | %(levelname)-8s | %(module)s:%(funcName)s:%(lineno)d | %(message)s'
)
logger = logging.getLogger(__name__)

# Model backend enumeration
class ModelBackend(Enum):
    """Identifiers for the supported model backends."""

    VLLM = "vllm"
    OLLAMA = "ollama"
    LLAMA_CPP = "llama_cpp"
    FALLBACK = "fallback"


# Abstract base class for model clients
class BaseModelClient(ABC):
    """Abstract base class for model clients.

    Subclasses implement `generate` and `chat_completion`; `initialize`
    wraps the subclass hook `_initialize` with error capture.
    """

    def __init__(self):
        # Concrete subclasses overwrite model_type with their own backend.
        self.model_type = ModelBackend.FALLBACK
        self.is_initialized = False
        # Message of the most recent failure, or None.
        self.last_error = None

    async def initialize(self):
        """Run the subclass initialization hook.

        Returns:
            True on success; False when `_initialize` raised (the error
            text is stored in `last_error`).
        """
        try:
            await self._initialize()
        except Exception as exc:
            self.last_error = str(exc)
            logger.error(f"Failed to initialize client: {exc}")
            return False
        self.is_initialized = True
        return True

    async def _initialize(self):
        """Subclass hook holding the concrete initialization logic."""

    @abstractmethod
    async def generate(self, prompt: str, **kwargs) -> str:
        """Generate text.

        Args:
            prompt: The prompt text.
            **kwargs: Backend-specific options.

        Returns:
            The generated text.
        """

    @abstractmethod
    async def chat_completion(self, messages: List[Dict[str, str]], **kwargs) -> str:
        """Produce a chat reply.

        Args:
            messages: List of {"role": ..., "content": ...} dicts.
            **kwargs: Backend-specific options.

        Returns:
            The generated reply text.
        """

    async def health_check(self) -> bool:
        """Default health check: reports whether initialization succeeded."""
        return self.is_initialized


# VLLM client implementation
class VllmClient(BaseModelClient):
    """Client for a vLLM server exposing an OpenAI-compatible HTTP API."""

    def __init__(self, config: Dict[str, Any] = None):
        """Create the client.

        Args:
            config: Optional settings; recognized keys are "base_url",
                "model" and "api_key". Missing keys fall back to the
                defaults below. (Previously a partial config raised
                KeyError when building the Authorization header.)
        """
        super().__init__()
        self.model_type = ModelBackend.VLLM
        defaults = {
            "base_url": "http://localhost:8000/v1",
            "model": "vllm-7b",
            "api_key": "dummy_key"
        }
        # Merge user-supplied keys over the defaults instead of replacing
        # them wholesale, so partial configs are safe.
        self.config = {**defaults, **(config or {})}
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.config['api_key']}"
        }

    async def _initialize(self):
        """Initialize the VLLM client (no connection is attempted here)."""
        logger.info(f"Initializing VLLM client with config: {self.config}")

    async def generate(self, prompt: str, **kwargs) -> str:
        """Generate text via the /completions endpoint.

        Args:
            prompt: The prompt text.
            **kwargs: Extra request fields (max_tokens, temperature, stop, ...).

        Returns:
            The first completion's text.

        Raises:
            httpx.HTTPError: on transport errors or non-2xx responses.
        """
        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    f"{self.config['base_url']}/completions",
                    headers=self.headers,
                    json={
                        "model": self.config["model"],
                        "prompt": prompt,
                        "max_tokens": kwargs.get("max_tokens", 1024),
                        "temperature": kwargs.get("temperature", 0.7),
                        "stop": kwargs.get("stop", []),
                        **kwargs
                    }
                )
                response.raise_for_status()
                return response.json()["choices"][0]["text"]
        except Exception as e:
            self.last_error = str(e)
            logger.error(f"VLLM generate error: {e}")
            raise

    async def chat_completion(self, messages: List[Dict[str, str]], **kwargs) -> str:
        """Chat completion via the /chat/completions endpoint.

        Args:
            messages: List of {"role": ..., "content": ...} dicts.
            **kwargs: Extra request fields (max_tokens, temperature, ...).

        Returns:
            The first choice's message content.

        Raises:
            httpx.HTTPError: on transport errors or non-2xx responses.
        """
        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    f"{self.config['base_url']}/chat/completions",
                    headers=self.headers,
                    json={
                        "model": self.config["model"],
                        "messages": messages,
                        "max_tokens": kwargs.get("max_tokens", 1024),
                        "temperature": kwargs.get("temperature", 0.7),
                        **kwargs
                    }
                )
                response.raise_for_status()
                return response.json()["choices"][0]["message"]["content"]
        except Exception as e:
            self.last_error = str(e)
            logger.error(f"VLLM chat completion error: {e}")
            raise

    async def health_check(self) -> bool:
        """Health check: GET /models and expect HTTP 200."""
        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                response = await client.get(f"{self.config['base_url']}/models")
                return response.status_code == 200
        except Exception:
            return False


# Ollama client implementation
class OllamaClient(BaseModelClient):
    """Ollama model client.

    Talks to a local Ollama server over its REST API. Synchronous and
    asynchronous entry points are provided; the async methods delegate to
    the sync ones to avoid nested-event-loop problems.
    """

    def __init__(self, config: Dict[str, Any] = None):
        """Create the client.

        Args:
            config: Optional settings ("service_url", "model", "keep_alive",
                "num_threads", "max_tokens"). Missing keys fall back to the
                memory-friendly defaults below. (Previously a partial config
                raised KeyError when "service_url" was read.)
        """
        super().__init__()
        self.model_type = ModelBackend.OLLAMA
        # Memory-optimized defaults; user-supplied keys override them
        # individually rather than replacing the whole dict.
        defaults = {
            "service_url": "http://localhost:11434",
            "model": "phi3:3.8b-instruct-q4_K_M",  # lightweight quantized model
            "keep_alive": "1m",  # short keep-alive window to reduce memory use
            "num_threads": 2,  # limit threads to reduce memory use
            "max_tokens": 512  # cap the maximum generation length
        }
        self.config = {**defaults, **(config or {})}
        # Normalize to a base URL without the trailing /api segment.
        if self.config['service_url'].endswith('/api'):
            self.config['service_url'] = self.config['service_url'][:-4]

    async def _initialize(self):
        """Initialize the Ollama client and verify the server is reachable.

        Raises:
            ConnectionError: when the health check fails.
        """
        logger.info(f"Initializing Ollama client with config: {self.config}")
        # Use the synchronous health check to avoid async re-entrancy issues.
        if not self.health_check_sync():
            raise ConnectionError(f"Cannot connect to Ollama server at {self.config['service_url']}")

    def generate_sync(self, prompt: str, **kwargs) -> str:
        """Generate text (synchronous).

        Tries the chat API first, then falls back to the generate API.
        Errors are returned as "[Error] ..." strings rather than raised.
        """
        try:
            # Bail out early when the service is down.
            if not self.health_check_sync():
                return "[Error] Ollama service is not available or not responding correctly"

            # Prefer the chat endpoint.
            logger.info("Trying chat completion API first as preferred method")
            result = self.chat_completion_sync([{"role": "user", "content": prompt}], **kwargs)
            if not result.startswith("[Error]"):
                return result
            # Chat failed; fall back to the plain generate endpoint.
            logger.info("Chat completion failed, falling back to generate API")

            # Use the synchronous httpx client.
            with httpx.Client() as client:
                service_url = self.config['service_url']
                # Standard Ollama API path.
                endpoint = f"{service_url}/api/generate"
                logger.info(f"Attempting to call Ollama generate API at: {endpoint}")

                # Verbose debug information.
                logger.info(f"Using model: {self.config['model']}")
                logger.info(f"Prompt length: {len(prompt)} characters")
                logger.info(f"Prompt preview: {prompt[:100]}...")

                # Build the request, applying the memory-saving parameters.
                request_data = {
                    "model": self.config["model"],
                    "prompt": prompt,
                    "stream": False,
                    "max_tokens": min(kwargs.get("max_tokens", self.config.get("max_tokens", 512)), 512),  # hard cap on generation length
                    "keep_alive": self.config.get("keep_alive", "1m"),
                    "num_threads": self.config.get("num_threads", 2),
                    **kwargs
                }
                logger.info(f"Request data keys: {list(request_data.keys())}")

                response = client.post(endpoint, json=request_data)
                logger.info(f"Ollama generate API response status: {response.status_code}")

                # Surface HTTP-level failures with the server's error text.
                if response.status_code != 200:
                    logger.error(f"Ollama API returned non-200 status code: {response.status_code}")
                    try:
                        error_data = response.json()
                        if "error" in error_data:
                            return f"[Error] HTTP {response.status_code}: {error_data['error']}"
                        else:
                            return f"[Error] HTTP {response.status_code}: {response.text}"
                    except ValueError:
                        return f"[Error] HTTP {response.status_code}: {response.text}"

                # Parse the body and extract the generated text.
                try:
                    response_json = response.json()
                    logger.info(f"Response JSON keys: {list(response_json.keys())}")

                    # An "error" field means the model rejected the request.
                    if "error" in response_json:
                        error_msg = response_json["error"]
                        logger.error(f"Ollama API returned error: {error_msg}")
                        return f"[Error] Ollama API error: {error_msg}"

                    return response_json.get("response", "No response field in API response")
                except Exception as parse_error:
                    logger.error(f"Could not parse response as JSON: {str(parse_error)}, Response text: {response.text[:200]}...")
                    return f"[Error] Failed to parse response. Status: {response.status_code}, Raw: {response.text[:100]}..."
        except Exception as e:
            self.last_error = str(e)
            logger.error(f"Ollama generate error: {e}")
            # Return the error as text so callers/tests do not crash.
            return f"[Error] {str(e)}"

    def chat_completion_sync(self, messages: list, **kwargs) -> str:
        """Chat completion (synchronous).

        Errors are returned as "[Error] ..." strings rather than raised.
        """
        try:
            with httpx.Client() as client:
                service_url = self.config['service_url']
                # Standard Ollama chat API path.
                endpoint = f"{service_url}/api/chat"
                logger.info(f"Attempting to call Ollama chat API at: {endpoint}")

                # Debug information.
                logger.info(f"Using model: {self.config['model']}")
                logger.info(f"Message count: {len(messages)}")

                # Build the request, applying the memory-saving parameters.
                request_data = {
                    "model": self.config["model"],
                    "messages": messages,
                    "stream": False,
                    "max_tokens": min(kwargs.get("max_tokens", self.config.get("max_tokens", 512)), 512),  # hard cap on generation length
                    "keep_alive": self.config.get("keep_alive", "1m"),
                    "num_threads": self.config.get("num_threads", 2),
                    **kwargs
                }
                logger.info(f"Request data keys: {list(request_data.keys())}")

                response = client.post(endpoint, json=request_data)
                logger.info(f"Ollama chat API response status: {response.status_code}")

                # Surface HTTP-level failures with the server's error text.
                if response.status_code != 200:
                    logger.error(f"Ollama API returned non-200 status code: {response.status_code}")
                    try:
                        error_data = response.json()
                        if "error" in error_data:
                            return f"[Error] HTTP {response.status_code}: {error_data['error']}"
                        else:
                            return f"[Error] HTTP {response.status_code}: {response.text}"
                    except ValueError:
                        return f"[Error] HTTP {response.status_code}: {response.text}"

                # Parse the body; an "error" field means the request failed.
                try:
                    response_json = response.json()
                    logger.info(f"Response JSON keys: {list(response_json.keys())}")

                    if "error" in response_json:
                        error_msg = response_json["error"]
                        logger.error(f"Ollama chat API returned error: {error_msg}")
                        return f"[Error] Ollama API error: {error_msg}"
                except Exception as parse_error:
                    logger.error(f"Could not parse response as JSON: {str(parse_error)}, Response text: {response.text[:200]}...")
                    return f"[Error] Failed to parse response. Status: {response.status_code}, Raw: {response.text[:100]}..."

                # Log the raw body before extracting the reply.
                logger.info(f"Raw chat response text: {response.text[:300]}...")

                try:
                    return response_json.get("message", {}).get("content", "No content in API response")
                except Exception as e:
                    logger.error(f"Error processing response: {e}")
                    return f"[Error] Failed to process response: {str(e)}"
        except Exception as e:
            self.last_error = str(e)
            logger.error(f"Ollama chat completion error: {e}")
            return f"[Error] {str(e)}"

    async def generate(self, prompt: str, **kwargs) -> str:
        """Generate text (async wrapper around generate_sync)."""
        return self.generate_sync(prompt, **kwargs)

    async def chat_completion(self, messages: List[Dict[str, str]], **kwargs) -> str:
        """Chat completion: flatten the history into a prompt and generate."""
        chat_prompt = "\n".join([
            f"{'Human' if msg['role'] == 'user' else 'Assistant'}: {msg['content']}"
            for msg in messages
        ])
        chat_prompt += "\nAssistant: "
        return await self.generate(chat_prompt, **kwargs)

    def health_check_sync(self) -> bool:
        """Health check (synchronous): GET /api/tags and expect HTTP 200."""
        try:
            with httpx.Client(timeout=5.0) as client:
                service_url = self.config['service_url']
                # Standard Ollama API path for listing models.
                endpoint = f"{service_url}/api/tags"
                logger.info(f"Performing health check on: {endpoint}")

                response = client.get(endpoint)
                logger.info(f"Health check response status: {response.status_code}")

                # On success, log which models the server advertises.
                if response.status_code == 200:
                    try:
                        response_json = response.json()
                        models = response_json.get("models", [])
                        logger.info(f"Available models: {len(models)}")
                        for model in models[:3]:  # log at most the first 3 models
                            logger.info(f"  - {model.get('name', 'Unknown')}")
                    # Was a bare `except:`; narrowed so SystemExit and
                    # KeyboardInterrupt are no longer swallowed.
                    except Exception:
                        logger.error("Could not parse health check response as JSON")

                return response.status_code == 200
        except Exception as e:
            logger.error(f"Health check error: {e}")
            return False

    async def health_check(self) -> bool:
        """Health check (async wrapper around health_check_sync)."""
        return self.health_check_sync()


# LlamaCpp client implementation
class LlamaCppClient(BaseModelClient):
    """Client for a llama.cpp HTTP server (single /completion endpoint)."""

    def __init__(self, config: Dict[str, Any] = None):
        """Create the client.

        Args:
            config: Optional settings ("base_url", "model"). Missing keys
                fall back to the defaults below. (Previously a partial
                config raised KeyError when "base_url" was read.)
        """
        super().__init__()
        self.model_type = ModelBackend.LLAMA_CPP
        defaults = {
            "base_url": "http://localhost:8080/completion",
            "model": "llama-cpp-7b"
        }
        # Merge user-supplied keys over the defaults.
        self.config = {**defaults, **(config or {})}

    async def _initialize(self):
        """Initialize the LlamaCpp client (no connection is attempted here)."""
        logger.info(f"Initializing LlamaCpp client with config: {self.config}")

    async def generate(self, prompt: str, **kwargs) -> str:
        """Generate text via the server's completion endpoint.

        Args:
            prompt: The prompt text.
            **kwargs: Extra request fields (max_tokens, temperature, ...).

        Returns:
            The "content" field of the JSON response.

        Raises:
            httpx.HTTPError: on transport errors or non-2xx responses.
        """
        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    self.config["base_url"],
                    json={
                        "prompt": prompt,
                        "max_tokens": kwargs.get("max_tokens", 1024),
                        "temperature": kwargs.get("temperature", 0.7),
                        **kwargs
                    }
                )
                response.raise_for_status()
                return response.json()["content"]
        except Exception as e:
            self.last_error = str(e)
            logger.error(f"LlamaCpp generate error: {e}")
            raise

    async def chat_completion(self, messages: List[Dict[str, str]], **kwargs) -> str:
        """LlamaCpp chat completion implementation (simulated based on generation interface)"""
        # Flatten the chat history into a single prompt.
        chat_prompt = "\n".join([
            f"{'Human' if msg['role'] == 'user' else 'Assistant'}: {msg['content']}"
            for msg in messages
        ])
        chat_prompt += "\nAssistant: "

        # Use generate for chat completion
        return await self.generate(chat_prompt, **kwargs)

    async def health_check(self) -> bool:
        """Health check: GET the completion URL and expect HTTP 200."""
        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                response = await client.get(self.config["base_url"])
                return response.status_code == 200
        except Exception:
            return False


# Fallback client implementation
class FallbackClient(BaseModelClient):
    """Degraded-mode client that answers with a canned summary of its input."""

    def __init__(self, strategy: str = "simple_concat"):
        super().__init__()
        self.model_type = ModelBackend.FALLBACK
        # Strategy label is informational only; generation is always canned.
        self.strategy = strategy
        logger.info(f"Fallback client initialized: strategy={strategy}")

    async def generate(self, prompt: str, **kwargs) -> str:
        """Echo a truncated summary of *prompt* as the response."""
        logger.info(f"Fallback generate called with prompt length: {len(prompt)} chars")
        shown = f"{prompt[:50]}... (truncated)" if len(prompt) > 50 else prompt
        return f"[Fallback Response] Processed input: {shown}"

    async def chat_completion(self, messages: List[Dict[str, str]], **kwargs) -> str:
        """Echo the tail of the last message as the reply."""
        logger.info(f"Fallback chat_completion called with {len(messages)} messages")
        latest = messages[-1]["content"] if messages else ""
        return f"[Fallback Chat Response] I received your message: {latest[:100]}..."


# Model client factory class
class ModelClientFactory:
    """Factory that creates, caches, and auto-selects model clients."""

    # Cache of initialized clients, keyed by "<backend>_client".
    _clients: Dict[str, BaseModelClient] = {}
    # The client most recently created or explicitly selected.
    _current_client: Optional[BaseModelClient] = None

    @classmethod
    async def get_client(cls, backend: Optional[ModelBackend] = None, config: Optional[Dict[str, Any]] = None) -> BaseModelClient:
        """Return a usable model client.

        Args:
            backend: model backend type, use current configured backend if None
            config: configuration dictionary

        Returns:
            Model client instance
        """
        # An explicitly requested backend wins when it can be created.
        if backend:
            requested = await cls._create_client(backend, config)
            if requested:
                cls._current_client = requested
                return requested

        # Reuse the current client while it stays healthy.
        if cls._current_client and await cls._current_client.health_check():
            return cls._current_client

        # Otherwise probe known backends in preference order.
        for candidate in (ModelBackend.OLLAMA, ModelBackend.LLAMA_CPP, ModelBackend.VLLM):
            try:
                probe = await cls._create_client(candidate, config)
                if probe and await probe.health_check():
                    cls._current_client = probe
                    return probe
            except Exception as e:
                logger.warning(f"Failed to create {candidate} client: {e}")

        # Nothing is reachable — degrade gracefully.
        fallback = FallbackClient()
        cls._current_client = fallback
        return fallback

    @classmethod
    async def _create_client(cls, backend: ModelBackend, config: Optional[Dict[str, Any]] = None) -> Optional[BaseModelClient]:
        """Create (or reuse a cached, healthy) client for *backend*; None on failure."""
        try:
            cache_key = f"{backend.value}_client"

            # Serve from the cache when the cached client is still healthy.
            cached = cls._clients.get(cache_key)
            if cached is not None and await cached.health_check():
                return cached

            # Map each backend to its constructor.
            builders = {
                ModelBackend.VLLM: lambda: VllmClient(config),
                ModelBackend.OLLAMA: lambda: OllamaClient(config),
                ModelBackend.LLAMA_CPP: lambda: LlamaCppClient(config),
                ModelBackend.FALLBACK: lambda: FallbackClient(),
            }
            make = builders.get(backend)
            if make is None:
                raise ValueError(f"Unsupported backend: {backend}")
            new_client = make()

            # Only cache clients that initialized successfully.
            if await new_client.initialize():
                cls._clients[cache_key] = new_client
                return new_client

            return None
        except Exception as e:
            logger.error(f"Failed to create {backend} client: {e}")
            return None

    @classmethod
    def set_current_client(cls, client: BaseModelClient):
        """Record *client* as the current default client."""
        cls._current_client = client
        logger.info(f"Set current model client: {client.model_type.value}")


# Global convenience function
def get_model_client(
    backend: Optional[ModelBackend] = None,
    config_name: Optional[str] = None
) -> BaseModelClient:
    """Utility function to get model client.

    Tries a list of common Ollama model names in priority order and returns
    a client for the first model that passes a health check and answers a
    test generation without error; otherwise returns a FallbackClient.

    Args:
        backend: currently unused — kept for interface compatibility.
        config_name: currently unused — kept for interface compatibility.

    Returns:
        A working OllamaClient, or a FallbackClient when none is available.
    """
    # Pre-create the fallback so every failure path returns the same object.
    # (The redundant `from middleware.model_interface import ...` self-imports
    # were removed; these names live in this module.)
    fallback = FallbackClient()

    try:
        # Model names to try, in priority order.
        model_names = ["llama2", "gemma", "phi3", "mistral", "llama3"]

        for model_name in model_names:
            logger.info(f"Trying Ollama model: {model_name}")
            ollama_client = OllamaClient({"service_url": "http://localhost:11434", "model": model_name})
            logger.info(f"Created Ollama client with config: {ollama_client.config}")

            if ollama_client.health_check_sync():
                # Verify the model actually answers before committing to it.
                test_prompt = "Hello, are you working?"
                test_response = ollama_client.generate_sync(test_prompt)
                # BUG FIX: the previous condition
                #     not startswith("[Error]") or "not found" not in response
                # accepted almost every error response (any error message not
                # containing "not found" passed). A model is usable only when
                # the test generation is not an error at all.
                if not test_response.startswith("[Error]"):
                    logger.info(f"Model {model_name} is working correctly")
                    return ollama_client
                logger.warning(f"Model {model_name} exists but returned error: {test_response}")

        # No model answered successfully.
        logger.warning("No available Ollama models found, using fallback client")
        return fallback

    except Exception as e:
        logger.warning(f"Failed to create Ollama client: {e}, using fallback")
        return fallback


# Public API of this module.
__all__ = [
    'BaseModelClient',
    'VllmClient',
    'OllamaClient',
    'LlamaCppClient',
    'FallbackClient',
    'ModelClientFactory',
    'get_model_client',
    'ModelBackend'
]
