"""
文本嵌入API - 支持多种embedding模型
"""
import os
import torch
from volcenginesdkarkruntime import Ark
import requests
import json
import numpy as np
from typing import List, Optional, Dict, Any
import logging
from openai import OpenAI
from mia_app.infrastructure.external.llm_config import get_qwen_api_key

logger = logging.getLogger(__name__)


class QwenEmbeddingAPI:
    """Alibaba Cloud DashScope (Bailian) Qwen embedding API, OpenAI-compatible mode."""

    def __init__(self, api_key: Optional[str] = None, base_url: Optional[str] = None):
        """
        Initialize the Qwen embedding client.

        Args:
            api_key: API key. Falls back to the DASHSCOPE_API_KEY environment
                variable, then to the project's configured Qwen key.
            base_url: API base URL. Defaults to DashScope's OpenAI-compatible
                endpoint.

        Raises:
            ValueError: if no API key can be resolved from any source.
        """
        # Chain with `or` so get_qwen_api_key() is evaluated only when needed;
        # the original os.getenv(name, default) form called it unconditionally.
        self.api_key = api_key or os.getenv("DASHSCOPE_API_KEY") or get_qwen_api_key()
        if not self.api_key:
            raise ValueError("DASHSCOPE API密钥未设置，请设置DASHSCOPE_API_KEY环境变量")

        # DashScope's OpenAI-compatible endpoint.
        self.base_url = base_url or "https://dashscope.aliyuncs.com/compatible-mode/v1"

        # OpenAI SDK client pointed at the DashScope endpoint.
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url,
        )

    def get_embedding(self, text: str, model: str = "text-embedding-v4", dimensions: int = 1024) -> Optional[List[float]]:
        """
        Get the embedding for a single text.

        Args:
            text: input text.
            model: model name, defaults to text-embedding-v4.
            dimensions: output vector dimensionality, defaults to 1024.

        Returns:
            The embedding vector, or None on failure.
        """
        # Delegate to the batch method so the request/error-handling logic
        # lives in exactly one place.
        embeddings = self.get_embeddings_batch([text], model, dimensions)
        return embeddings[0] if embeddings else None

    def get_embeddings_batch(self, texts: List[str], model: str = "text-embedding-v4", dimensions: int = 1024) -> Optional[List[List[float]]]:
        """
        Get embeddings for a batch of texts.

        Args:
            texts: list of input texts.
            model: model name, defaults to text-embedding-v4.
            dimensions: output vector dimensionality, defaults to 1024.

        Returns:
            A list of embedding vectors (one per input), or None on failure.
            An empty input list returns [] without an API call.
        """
        if not texts:
            # Avoid a pointless (and likely rejected) API call for empty input;
            # [] is falsy, so `if embeddings:` callers behave as before.
            return []
        try:
            response = self.client.embeddings.create(
                model=model,
                input=texts,
                dimensions=dimensions,
                encoding_format="float",
            )

            if response.data:
                return [item.embedding for item in response.data]
            logger.error("API响应中没有embedding数据")
            return None

        except Exception as e:
            logger.error(f"批量获取embedding失败: {e}")
            return None

    def get_embedding_with_metadata(self, text: str, model: str = "text-embedding-v4", dimensions: int = 1024) -> Optional[Dict[str, Any]]:
        """
        Get the embedding for a text along with request metadata.

        Args:
            text: input text.
            model: model name.
            dimensions: output vector dimensionality.

        Returns:
            A dict with keys "embedding", "usage", "model", "dimensions" and
            "text", or None on failure.
        """
        try:
            response = self.client.embeddings.create(
                model=model,
                input=[text],
                dimensions=dimensions,
                encoding_format="float",
            )

            if response.data and len(response.data) > 0:
                return {
                    "embedding": response.data[0].embedding,
                    # usage may be absent depending on the backend; default to {}.
                    "usage": response.usage.model_dump() if response.usage else {},
                    "model": model,
                    "dimensions": dimensions,
                    "text": text,
                }
            logger.error("API响应中没有embedding数据")
            return None

        except Exception as e:
            logger.error(f"获取embedding元数据失败: {e}")
            return None

    def get_embedding_with_custom_dimensions(self, text: str, dimensions: int, model: str = "text-embedding-v4") -> Optional[List[float]]:
        """
        Get an embedding with an explicit dimensionality.

        Args:
            text: input text.
            dimensions: vector dimensionality (e.g. 1024 or 1536).
            model: model name.

        Returns:
            The embedding vector, or None on failure.
        """
        return self.get_embedding(text, model, dimensions)


class TextEmbeddingDoubaoLarge:
    """Doubao (Volcengine Ark) large text-embedding model client."""

    # SECURITY: the original default argument hard-coded an API key in source.
    # The default now falls back to the ARK_API_KEY environment variable; any
    # key previously committed here should be rotated.
    def __init__(self, api_key: Optional[str] = None):
        self.api_key = api_key or os.getenv("ARK_API_KEY")
        self.client = Ark(
            api_key=self.api_key,
        )

    def encode(
        self, client, inputs: List[str], is_query: bool = False, mrl_dim: Optional[int] = None
    ):
        """
        Embed a list of texts with the Doubao embedding model.

        Args:
            client: an Ark client (kept as a parameter for compatibility;
                callers in this class pass self.client).
            inputs: texts to embed.
            is_query: if True, wrap each text in the retrieval instruction
                prompt recommended for query-side encoding.
            mrl_dim: optional Matryoshka truncation dimension
                (one of 2048/1024/512/256).

        Returns:
            A float32 numpy array of L2-normalized embeddings, one row per input.

        Raises:
            ValueError: if mrl_dim is not a supported dimension.
        """
        if is_query:
            # Use the instruction prefix for optimal retrieval performance; feel
            # free to tune it per task. For reproducing MTEB results see
            # https://github.com/embeddings-benchmark/mteb/blob/main/mteb/models/seed_models.py
            # NOTE: the original chained .format(i) onto the f-string — a no-op
            # normally, but a latent bug for inputs containing literal braces;
            # the f-string alone suffices.
            inputs = [
                f"Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery: {text}"
                for text in inputs
            ]
        resp = client.embeddings.create(
            model="doubao-embedding-large-text-250515",
            input=inputs,
            encoding_format="float",
        )
        embedding = torch.tensor([d.embedding for d in resp.data], dtype=torch.bfloat16)
        if mrl_dim is not None:
            # Matryoshka representation learning: keep only a supported prefix.
            # Raise instead of assert so validation survives `python -O`.
            if mrl_dim not in (2048, 1024, 512, 256):
                raise ValueError(f"unsupported mrl_dim: {mrl_dim}")
            embedding = embedding[:, :mrl_dim]
        # L2-normalize so dot products equal cosine similarity.
        embedding = torch.nn.functional.normalize(embedding, dim=1, p=2).float().numpy()
        return embedding

    def get_embedding(self, text):
        """Return the 1024-dim normalized embedding for a single text."""
        return self.encode(self.client, [text], is_query=False, mrl_dim=1024)[0]

    def get_embedding_by_batch(self, batch_text):
        """Return 1024-dim normalized embeddings for a batch of texts."""
        return self.encode(self.client, batch_text, is_query=False, mrl_dim=1024)


class EmbeddingFactory:
    """Factory that builds embedding backends by name."""

    @staticmethod
    def create_embedding(embedding_type: str = "qwen", **kwargs):
        """
        Construct an embedding instance for the requested backend.

        Args:
            embedding_type: backend name, "qwen" or "doubao" (case-insensitive).
            **kwargs: forwarded to the backend constructor.

        Returns:
            A new embedding client instance.

        Raises:
            ValueError: if embedding_type is not a supported backend.
        """
        normalized = embedding_type.lower()
        if normalized == "qwen":
            return QwenEmbeddingAPI(**kwargs)
        if normalized == "doubao":
            return TextEmbeddingDoubaoLarge(**kwargs)
        raise ValueError(f"不支持的embedding类型: {embedding_type}")


# 使用示例
def demo_qwen_embedding():
    """Demo of the Qwen embedding API: single text, batch, custom dims, metadata."""
    try:
        # Build the default Qwen client (key resolved from env/config).
        api = QwenEmbeddingAPI()

        # Single-text embedding.
        sample = "这是一个测试文本"
        vector = api.get_embedding(sample)

        if vector:
            print(f"✅ 成功获取embedding，维度: {len(vector)}")
            print(f"前5个值: {vector[:5]}")
        else:
            print("❌ 获取embedding失败")

        # Batch embedding over a few poem lines.
        poems = ['风急天高猿啸哀', '渚清沙白鸟飞回', '无边落木萧萧下', '不尽长江滚滚来']
        vectors = api.get_embeddings_batch(poems)

        if vectors:
            print(f"✅ 成功获取批量embedding，数量: {len(vectors)}")
            for idx, vec in enumerate(vectors, start=1):
                print(f"文本{idx} embedding维度: {len(vec)}")
                print(f"前3个值: {vec[:3]}")

        # Custom output dimensionality.
        wide_vector = api.get_embedding_with_custom_dimensions(sample, 1536)
        if wide_vector:
            print(f"✅ 1536维度embedding: {len(wide_vector)}")

        # Embedding with usage metadata.
        meta = api.get_embedding_with_metadata(sample)
        if meta:
            print(f"✅ 带元数据的embedding:")
            print(f"   维度: {len(meta['embedding'])}")
            print(f"   使用量: {meta['usage']}")
            print(f"   模型: {meta['model']}")

    except Exception as e:
        print(f"❌ 演示失败: {e}")


# 集成到向量搜索服务中的示例
def create_qwen_embeddings_for_vector_search():
    """Build Qwen embeddings for sample dialogs and print pairwise similarity."""
    try:
        api = QwenEmbeddingAPI()

        # Sample dialog turns to embed.
        dialogs = [
            "用户: 如何学习Python编程？",
            "助手: Python是一门很好的入门编程语言，建议从基础语法开始学习。",
            "用户: 数据库设计有什么最佳实践？",
            "助手: 数据库设计需要考虑范式化、索引优化、数据完整性等因素。"
        ]

        embeddings = api.get_embeddings_batch(dialogs)

        if embeddings:
            print(f"✅ 成功为 {len(dialogs)} 个对话创建embeddings")

            # Local import keeps sklearn optional for callers that skip the demo.
            from sklearn.metrics.pairwise import cosine_similarity

            # Similarity of the first dialog against all the others.
            anchor = embeddings[0]
            scores = cosine_similarity([anchor], embeddings[1:])[0]

            print("相似度分析:")
            for rank, sim in enumerate(scores, start=2):
                print(f"  对话1 vs 对话{rank}: {sim:.4f}")

        return embeddings

    except Exception as e:
        print(f"❌ 创建embeddings失败: {e}")
        return None


if __name__ == "__main__":
    # Run both demos, separated by a divider line.
    demo_qwen_embedding()
    print("\n" + "=" * 50)
    create_qwen_embeddings_for_vector_search()