import dotenv
dotenv.load_dotenv()
from langchain_community.embeddings import BedrockEmbeddings
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import OpenAIEmbeddings
import boto3
import os
import requests
import numpy as np
from typing import List, Optional
import logging
from utils.embedding_config import EmbeddingProvider, EmbeddingConfig

# Configure module-level logging (INFO and above to the root handler)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class XinferenceEmbeddings:
    """Text-embedding client backed by a Xinference server.

    Provides the LangChain embeddings interface (``embed_query`` /
    ``embed_documents``) by calling the server's OpenAI-compatible
    ``/v1/embeddings`` HTTP endpoint.
    """

    # Dimensionality of the zero-vector fallback used when the server fails
    # or returns a malformed item. NOTE(review): assumes a 1024-dim model
    # (e.g. bge-large) — confirm against the actually deployed model.
    _FALLBACK_DIM = 1024

    # Seconds to wait on any HTTP call to the server. Without an explicit
    # timeout, requests would block forever on a hung server.
    _REQUEST_TIMEOUT = 30

    def __init__(self, model_name: str, xinference_url: Optional[str] = None):
        """
        Initialize the Xinference embedding handler.

        Args:
            model_name: Name of the embedding model deployed in Xinference.
            xinference_url: Xinference server URL; when None, falls back to
                the XINFERENCE_SERVER_URL environment variable (default
                ``http://localhost:9997``).
        """
        self.model_name = model_name
        # Use model_name directly as the model id and skip the lookup in
        # _get_model_id (the embeddings endpoint accepts the model name).
        self.model_id = model_name

        # Resolve the server URL (explicit arg wins over the environment).
        if xinference_url is None:
            self.base_url = os.environ.get("XINFERENCE_SERVER_URL", "http://localhost:9997")
        else:
            self.base_url = xinference_url

        # Normalize with a trailing slash so endpoint paths append cleanly.
        if not self.base_url.endswith('/'):
            self.base_url += '/'

        logger.info(f"初始化Xinference嵌入，模型: {model_name}, 使用ID: {self.model_id}, 服务URL: {self.base_url}")

    def _get_model_id(self):
        """Resolve ``self.model_id`` by querying the server's model list.

        Matching strategy, in order: exact (case-insensitive) name match,
        partial name match, then the first model whose type is "embedding".

        Raises:
            ValueError: if no usable embedding model is found.
            requests.RequestException: on HTTP/timeout failures.

        NOTE(review): currently unused — __init__ assigns model_id directly.
        Also assumes the /v1/models response is iterable as a list of dicts;
        verify against the server's actual response schema.
        """
        try:
            # Fetch the full model list from the server.
            response = requests.get(f"{self.base_url}v1/models", timeout=self._REQUEST_TIMEOUT)
            response.raise_for_status()
            models = response.json()

            # Pass 1: exact (case-insensitive) name match.
            self.model_id = None
            for model in models:
                if isinstance(model, dict):
                    if model.get("name", "").lower() == self.model_name.lower():
                        self.model_id = model.get("id")
                        logger.info(f"找到Xinference模型: {self.model_name}, ID: {self.model_id}")
                        break

            if self.model_id is None:
                # Log what is actually available to aid debugging.
                available_models = []
                for m in models:
                    if isinstance(m, dict):
                        available_models.append(m.get('name', 'unknown'))
                    else:
                        available_models.append(str(m))

                logger.warning(f"未找到名为 {self.model_name} 的模型，可用模型: {available_models}")
                # Pass 2: substring (partial) name match.
                for model in models:
                    if isinstance(model, dict) and model.get("name") and self.model_name.lower() in model.get("name", "").lower():
                        self.model_id = model.get("id")
                        logger.info(f"找到部分匹配的Xinference模型: {model.get('name')}, ID: {self.model_id}")
                        break

            if self.model_id is None:
                # Pass 3: last resort — take the first embedding-type model.
                for model in models:
                    if isinstance(model, dict) and model.get("type") == "embedding":
                        self.model_id = model.get("id")
                        logger.warning(f"未找到名为 {self.model_name} 的模型，使用第一个嵌入模型: {model.get('name')}, ID: {self.model_id}")
                        break

            if self.model_id is None:
                raise ValueError("在Xinference中未找到任何可用的嵌入模型")

        except Exception as e:
            logger.error(f"获取Xinference模型ID时出错: {str(e)}")
            raise

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string and return its vector."""
        return self.embed_documents([text])[0]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed multiple documents in one request.

        Args:
            texts: Document strings to embed.

        Returns:
            One vector per input text. On any failure, returns zero vectors
            of ``_FALLBACK_DIM`` as a best-effort fallback instead of raising.

        Raises:
            ValueError: if ``self.model_id`` was never set.
        """
        if not self.model_id:
            raise ValueError("模型ID未初始化")

        try:
            endpoint = f"{self.base_url}v1/embeddings"

            payload = {
                "model": self.model_id,
                "input": texts
            }

            response = requests.post(endpoint, json=payload, timeout=self._REQUEST_TIMEOUT)
            response.raise_for_status()
            result = response.json()

            # Extract vectors from the OpenAI-style {"data": [{"embedding": ...}]} shape.
            if isinstance(result, dict) and "data" in result and len(result["data"]) > 0:
                embeddings = []
                for item in result["data"]:
                    if isinstance(item, dict) and "embedding" in item:
                        embeddings.append(item["embedding"])
                    else:
                        logger.warning(f"嵌入结果项格式不正确: {item}")
                        # Substitute a zero vector so positions stay aligned with inputs.
                        embeddings.append([0.0] * self._FALLBACK_DIM)

                if embeddings:
                    return embeddings
                else:
                    raise ValueError("未能从响应中提取任何有效的嵌入向量")
            else:
                logger.error(f"嵌入结果中没有有效数据: {result}")
                raise ValueError(f"嵌入结果中没有有效数据: {result}")

        except Exception as e:
            logger.error(f"Xinference嵌入处理失败: {str(e)}")
            # Best-effort fallback: zero vectors rather than propagating the error.
            if len(texts) > 0:
                return [[0.0] * self._FALLBACK_DIM for _ in range(len(texts))]
            return []

class EmbeddingFactory:
    """Builds a LangChain-compatible embeddings object from an EmbeddingConfig."""

    @staticmethod
    def create_embedding_function(config: EmbeddingConfig):
        """Return an embeddings instance for the provider selected in *config*.

        For HUGGINGFACE, a load failure triggers a fallback to a
        Xinference-served "bge-large" model before giving up.

        Raises:
            ValueError: when config.provider is not a supported provider.
        """
        provider = config.provider

        if provider == EmbeddingProvider.BEDROCK:
            # AWS credentials come from the environment; region from config.
            runtime_client = boto3.client(
                service_name='bedrock-runtime',
                region_name=config.aws_region,
                aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
                aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
            )
            return BedrockEmbeddings(client=runtime_client, model_id=config.model_name)

        if provider == EmbeddingProvider.OPENAI:
            return OpenAIEmbeddings(
                model=config.model_name,
                openai_api_key=os.getenv('OPENAI_API_KEY'),
            )

        if provider == EmbeddingProvider.HUGGINGFACE:
            try:
                logger.info(f"尝试加载HuggingFace嵌入模型: {config.model_name}")
                return HuggingFaceEmbeddings(model_name=config.model_name)
            except Exception as e:
                logger.error(f"加载HuggingFace嵌入模型失败: {str(e)}，尝试使用Xinference替代")
                # Local load failed — fall back to a Xinference-served model.
                try:
                    xinference_url = os.environ.get("XINFERENCE_SERVER_URL")
                    logger.info(f"尝试使用Xinference作为后备，URL: {xinference_url}")
                    return XinferenceEmbeddings(
                        model_name="bge-large",
                        xinference_url=xinference_url,
                    )
                except Exception as xe:
                    logger.error(f"Xinference后备也失败: {str(xe)}")
                    raise

        if provider == EmbeddingProvider.XINFERENCE:
            logger.info(f"使用Xinference嵌入模型: {config.model_name}")
            return XinferenceEmbeddings(
                model_name=config.model_name,
                xinference_url=config.xinference_url,
            )

        raise ValueError(f"Unsupported embedding provider: {config.provider}")