# opensearch 混合检索示例
import requests
import json
import numpy as np
from typing import List, Dict, Any, Optional
from opensearchpy import OpenSearch
import time
import unittest
from unittest.mock import patch, MagicMock
from datetime import datetime, timezone


class EmbeddingClient:
    """Client for the Silicon Flow text-embedding HTTP API.

    Defaults target the BAAI/bge-m3 model (1024-dim vectors), but the model,
    endpoint, and timeout are now parameters so the client generalizes to
    other Silicon Flow embedding models without code changes.
    """

    def __init__(self, api_key: str,
                 model: str = "BAAI/bge-m3",
                 base_url: str = "https://api.siliconflow.cn/v1/embeddings",
                 timeout: int = 30):
        """
        Args:
            api_key: Bearer token for the Silicon Flow API.
            model: Embedding model identifier (default matches the original
                hard-coded value).
            base_url: Embeddings endpoint URL.
            timeout: Per-request timeout in seconds.
        """
        self.api_key = api_key
        self.base_url = base_url
        self.model = model
        self.timeout = timeout
        self.headers = {
            'Authorization': f'Bearer {api_key}',
            'Content-Type': 'application/json'
        }

    def get_embedding(self, text: str, encoding_format: str = "float") -> List[float]:
        """Return the embedding vector for a single text.

        Raises:
            requests.exceptions.RequestException: on network/HTTP failure.
            KeyError, IndexError: if the response JSON lacks the expected
                ``data[0].embedding`` structure.
        """
        payload = {
            "model": self.model,
            "input": text,
            "encoding_format": encoding_format
        }

        try:
            response = requests.post(
                self.base_url,
                headers=self.headers,
                json=payload,
                timeout=self.timeout
            )
            response.raise_for_status()

            result = response.json()
            embedding = result["data"][0]["embedding"]

            print(f"✅ 获取embedding成功，维度: {len(embedding)}")
            return embedding

        except requests.exceptions.RequestException as e:
            print(f"❌ 请求embedding失败: {e}")
            raise
        except (KeyError, IndexError) as e:
            print(f"❌ 解析embedding响应失败: {e}")
            raise

    def get_embeddings_batch(self, texts: List[str], encoding_format: str = "float") -> List[List[float]]:
        """Fetch embeddings for many texts, one request per text.

        Best-effort by design: a text whose request fails gets a random
        1024-dim fallback vector so the output list always stays aligned
        with (and the same length as) ``texts``.
        """
        embeddings: List[List[float]] = []

        for i, text in enumerate(texts):
            try:
                embedding = self.get_embedding(text, encoding_format)
                embeddings.append(embedding)
                print(f"✅ 处理文本 {i + 1}/{len(texts)}")

                # Small delay between requests to avoid hammering the API.
                if i < len(texts) - 1:
                    time.sleep(0.1)

            except Exception as e:
                print(f"❌ 处理文本 {i + 1} 失败: {e}")
                # Fallback keeps list positions aligned with the input texts.
                fallback_embedding = np.random.normal(0, 1, 1024).tolist()
                embeddings.append(fallback_embedding)

        return embeddings


class ESVectorManager:
    """Manager for an OpenSearch k-NN vector index.

    Handles index creation, document insertion (single and bulk) with
    automatically generated embeddings, and semantic / hybrid search.
    """

    def __init__(self, embedding_client: Optional["EmbeddingClient"] = None,
                 host: str = "localhost", port: int = 9200,
                 username: str = "admin", password: str = "StrongPass123!@"):
        """
        Args:
            embedding_client: Optional embedding provider; when absent,
                random vectors are used as a fallback.
            host, port, username, password: Connection settings. Defaults
                keep the original hard-coded demo values for backward
                compatibility.

        Raises:
            Exception: re-raised if the initial ``info()`` ping fails.
        """
        # NOTE(review): default credentials are demo values hard-coded in
        # source; for real deployments pass them in (or load from env/config).
        self.host = host
        self.port = port
        self.username = username
        self.password = password

        # Embedding client (may be None -> random-vector fallback).
        self.embedding_client = embedding_client

        # opensearch-py expects `http_auth`; the previous `basic_auth` kwarg
        # is an elasticsearch-py 8.x name and is not the documented auth
        # parameter for opensearch-py.
        self.es = OpenSearch(
            [f"http://{self.host}:{self.port}"],
            http_auth=(self.username, self.password),
            verify_certs=False,
            ssl_show_warn=False
        )

        # Fail fast on an unreachable cluster.
        try:
            info = self.es.info()
            print(f"✅ ES连接成功！版本: {info['version']['number']}")
        except Exception as e:
            print(f"❌ ES连接失败: {e}")
            raise

    def create_vector_index(self, index_name: str, vector_dim: int = 1024):
        """Create a k-NN enabled index with an HNSW vector field.

        No-op (with a warning) if the index already exists.
        """
        mapping = {
            "settings": {
                "number_of_shards": 1,
                "number_of_replicas": 0,
                "index": {
                    "knn": True,
                    "max_result_window": 10000
                }
            },
            "mappings": {
                "properties": {
                    "id": {"type": "keyword"},
                    "title": {"type": "text", "analyzer": "standard"},
                    "content": {"type": "text", "analyzer": "standard"},
                    "vector": {
                        "type": "knn_vector",
                        "dimension": vector_dim,
                        "method": {
                            "name": "hnsw",
                            "engine": "nmslib"
                        }
                    },
                    "timestamp": {"type": "date"},
                    "metadata": {"type": "object"}
                }
            }
        }

        try:
            if self.es.indices.exists(index=index_name):
                print(f"⚠️  索引 '{index_name}' 已存在")
                return

            self.es.indices.create(index=index_name, body=mapping)
            print(f"✅ 索引 '{index_name}' 创建成功")
        except Exception as e:
            print(f"❌ 创建索引失败: {e}")
            raise

    def generate_random_vector(self, dim: int = 1024) -> List[float]:
        """Return a random unit-norm vector (test/fallback use only)."""
        vector = np.random.normal(0, 1, dim)
        # Normalize so cosine-style scoring behaves sensibly.
        vector = vector / np.linalg.norm(vector)
        return vector.tolist()

    def insert_document_with_embedding(self, index_name: str, doc_id: str,
                                       title: str, content: str,
                                       metadata: Optional[Dict] = None,
                                       custom_vector: Optional[List[float]] = None):
        """Index one document, generating its embedding automatically.

        Vector precedence: explicit ``custom_vector`` > embedding client
        (fed "title content") > random fallback.
        """
        # `is not None` (not truthiness) so an explicitly passed vector is
        # honored even if it happens to be empty or all zeros.
        if custom_vector is not None:
            vector = custom_vector
        elif self.embedding_client:
            # Embed the title and content together as one text.
            text_for_embedding = f"{title} {content}"
            vector = self.embedding_client.get_embedding(text_for_embedding)
        else:
            # Random-vector fallback keeps the demo runnable without an API key.
            vector = self.generate_random_vector()
            print("⚠️  使用随机向量，建议配置embedding客户端")

        document = {
            "id": doc_id,
            "title": title,
            "content": content,
            "vector": vector,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "metadata": metadata or {}
        }

        try:
            response = self.es.index(
                index=index_name,
                id=doc_id,
                body=document
            )
            print(f"✅ 文档插入成功: {doc_id}")
            return response
        except Exception as e:
            print(f"❌ 插入文档失败: {e}")
            raise

    def batch_insert_with_embeddings(self, index_name: str, documents: List[Dict]):
        """Bulk-index documents, generating one embedding per document.

        Each dict needs "id", "title", "content" and may carry "metadata".
        Returns the ``(success_count, failed_items)`` pair from the bulk helper.
        """
        from opensearchpy.helpers import bulk

        # Generate all embeddings first (batch call, or random fallback).
        if self.embedding_client:
            texts = [f"{doc['title']} {doc['content']}" for doc in documents]
            embeddings = self.embedding_client.get_embeddings_batch(texts)
        else:
            embeddings = [self.generate_random_vector() for _ in documents]
            print("⚠️  使用随机向量，建议配置embedding客户端")

        # Build bulk actions, pairing each doc with its embedding by position.
        actions = []
        for i, doc in enumerate(documents):
            doc_with_vector = {
                "id": doc["id"],
                "title": doc["title"],
                "content": doc["content"],
                "vector": embeddings[i],
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "metadata": doc.get("metadata", {})
            }

            action = {
                "_index": index_name,
                "_id": doc["id"],
                "_source": doc_with_vector
            }
            actions.append(action)

        try:
            success, failed = bulk(self.es, actions)
            print(f"✅ 批量插入完成: 成功 {success} 条")
            return success, failed
        except Exception as e:
            print(f"❌ 批量插入失败: {e}")
            raise

    def semantic_search(self, index_name: str, query_text: str,
                        size: int = 10, min_score: float = 0.5):
        """Pure k-NN (vector similarity) search for ``query_text``.

        Returns a list of {id, score, title, content, metadata} dicts.
        NOTE(review): ``min_score`` is currently not applied to the query —
        kept for interface compatibility; wire it up if filtering is needed.
        """
        # Embed the query (or fall back to a random vector).
        if self.embedding_client:
            query_vector = self.embedding_client.get_embedding(query_text)
        else:
            query_vector = self.generate_random_vector()
            print("⚠️  使用随机向量进行搜索，建议配置embedding客户端")

        query = {
            "query": {
                "knn": {
                    "vector": {
                        "vector": query_vector,
                        "k": size
                    }
                }
            }
        }

        try:
            response = self.es.search(
                index=index_name,
                body=query
            )

            results = []
            for hit in response["hits"]["hits"]:
                result = {
                    "id": hit["_id"],
                    "score": hit["_score"],
                    "title": hit["_source"].get("title", ""),
                    "content": hit["_source"].get("content", ""),
                    "metadata": hit["_source"].get("metadata", {})
                }
                results.append(result)

            print(f"✅ 语义搜索完成，找到 {len(results)} 条相似结果")
            return results
        except Exception as e:
            print(f"❌ 语义搜索失败: {e}")
            raise

    def hybrid_search(self, index_name: str, query_text: str,
                      size: int = 10, text_boost: float = 1.0, vector_boost: float = 2.0):
        """Hybrid search: BM25 text match OR-combined with k-NN similarity.

        ``text_boost`` weights the multi_match clause, ``vector_boost`` the
        knn clause; title matches get an extra 2x via ``title^2``.
        Returns a list of {id, score, title, content, metadata} dicts.
        """
        # Embed the query (or fall back to a random vector).
        if self.embedding_client:
            query_vector = self.embedding_client.get_embedding(query_text)
        else:
            query_vector = self.generate_random_vector()
            print("⚠️  使用随机向量进行搜索，建议配置embedding客户端")

        query = {
            "query": {
                "bool": {
                    "should": [
                        {
                            "multi_match": {
                                "query": query_text,
                                "fields": ["title^2", "content"],
                                "boost": text_boost
                            }
                        },
                        {
                            "knn": {
                                "vector": {
                                    "vector": query_vector,
                                    "k": size,
                                    "boost": vector_boost
                                }
                            }
                        }
                    ]
                }
            }
        }

        try:
            response = self.es.search(
                index=index_name,
                body=query
            )

            results = []
            for hit in response["hits"]["hits"]:
                result = {
                    "id": hit["_id"],
                    "score": hit["_score"],
                    "title": hit["_source"].get("title", ""),
                    "content": hit["_source"].get("content", ""),
                    "metadata": hit["_source"].get("metadata", {})
                }
                results.append(result)

            print(f"✅ 混合搜索完成，找到 {len(results)} 条结果")
            return results
        except Exception as e:
            print(f"❌ 混合搜索失败: {e}")
            raise

    def get_document(self, index_name: str, doc_id: str):
        """Fetch one document's source by id; return None on any failure."""
        try:
            response = self.es.get(index=index_name, id=doc_id)
            return response["_source"]
        except Exception as e:
            print(f"❌ 获取文档失败: {e}")
            return None

    def delete_index(self, index_name: str):
        """Delete the index if it exists (best-effort; errors are printed)."""
        try:
            if self.es.indices.exists(index=index_name):
                self.es.indices.delete(index=index_name)
                print(f"✅ 索引 '{index_name}' 删除成功")
            else:
                print(f"⚠️  索引 '{index_name}' 不存在")
        except Exception as e:
            print(f"❌ 删除索引失败: {e}")


def main():
    """Demo driver: hybrid search on OpenSearch with Silicon Flow embeddings.

    Assumes the index already exists and has been seeded (see the setup note
    below); it then runs a batch of hybrid-search queries and prints results.
    """
    import os

    # Security: read the API key from the environment. The original value is
    # kept only as a fallback so the demo still runs unchanged.
    # NOTE(review): any real key that was committed here should be rotated.
    API_KEY = os.environ.get(
        "SILICONFLOW_API_KEY",
        "sk-dclqncyqicbugmulfwhnnwgrrnnkxldzjyigpsaetwtjsmrj"
    )

    # Embedding client + ES manager (manager pings the cluster on init).
    embedding_client = EmbeddingClient(API_KEY)
    es_manager = ESVectorManager(embedding_client)

    index_name = "semantic_search_demo"

    print("\n" + "=" * 60)
    print("ES + Silicon Flow Embedding 语义搜索演示")
    print("=" * 60)

    # Sample corpus (brands / categories / platforms). One-time setup — run
    # once to (re)create and seed the index, then keep it commented out:
    #   es_manager.create_vector_index(index_name, vector_dim=1024)
    #   es_manager.batch_insert_with_embeddings(index_name, sample_docs)
    #   es_manager.es.indices.refresh(index=index_name)
    sample_docs = [
        {
            "id": "doc_6",
            "title": "京东 狗东 jd JD 360buy",
            "content": "京东 狗东 jd JD 360buy",
            "metadata": {"category": "general", "difficulty": "beginner"}
        },
        {
            "id": "doc_7",
            "title": "西装品类",
            "content": "西装品类",
            "metadata": {"category": "fashion", "difficulty": "beginner"}
        },
        {
            "id": "doc_8",
            "title": "跑步服品类",
            "content": "跑步服品类",
            "metadata": {"category": "sports", "difficulty": "beginner"}
        },
        {
            "id": "doc_9",
            "title": "男装品类",
            "content": "男装品类",
            "metadata": {"category": "fashion", "difficulty": "beginner"}
        },
        {
            "id": "doc_10",
            "title": "西游记",
            "content": "西游记",
            "metadata": {"category": "literature", "difficulty": "beginner"}
        },
        {
            "id": "doc_11",
            "title": "MAIA 玛伊娅玛娅 MAIA ACTIVE",
            "content": "MAIA 玛伊娅玛娅 MAIA ACTIVE",
            "metadata": {"category": "brand", "difficulty": "beginner"}
        },
        {
            "id": "doc_12",
            "title": "斐乐品牌",
            "content": "斐乐品牌",
            "metadata": {"category": "brand", "difficulty": "beginner"}
        },
        {
            "id": "doc_13",
            "title": "天猫平台",
            "content": "天猫平台",
            "metadata": {"category": "e-commerce", "difficulty": "beginner"}
        },
        {
            "id": "doc_14",
            "title": "小野和子品牌",
            "content": "小野和子品牌",
            "metadata": {"category": "brand", "difficulty": "beginner"}
        },
        {
            "id": "doc_15",
            "title": "阿里 ali tm 天猫 淘宝 tb",
            "content": "阿里 ali tm 天猫 淘宝 tb",
            "metadata": {"category": "e-commerce", "difficulty": "beginner"}
        },
        {
            "id": "doc_16",
            "title": "厦门中山路巴黎春天旗舰店",
            "content": "厦门中山路巴黎春天旗舰店",
            "metadata": {"category": "retail", "difficulty": "beginner"}
        },
        {
            "id": "doc_17",
            "title": "巴黎的春天",
            "content": "巴黎的春天",
            "metadata": {"category": "literature", "difficulty": "beginner"}
        },
        {
            "id": "doc_18",
            "title": "萨洛蒙品牌名称： SALOMON/萨洛蒙",
            "content": "萨洛蒙品牌名称： SALOMON/萨洛蒙",
            "metadata": {"category": "brand", "difficulty": "beginner"}
        },
        {
            "id": "doc_19",
            "title": "露露品牌名称： lululemon",
            "content": "露露品牌名称： lululemon",
            "metadata": {"category": "brand", "difficulty": "beginner"}
        },
        {
            "id": "doc_20",
            "title": "硬糖鞋 糖豆鞋 品类",
            "content": "硬糖鞋 糖豆鞋 品类",
            "metadata": {"category": "footwear", "difficulty": "beginner"}
        },
        {
            "id": "doc_21",
            "title": "母婴鞋",
            "content": "母婴鞋",
            "metadata": {"category": "footwear", "difficulty": "beginner"}
        },
        {
            "id": "doc_22",
            "title": "鬼冢虎品牌",
            "content": "鬼冢虎品牌",
            "metadata": {"category": "brand", "difficulty": "beginner"}
        }
    ]

    print("\n4. 语义搜索...")

    print("\n5. 混合搜索...")
    hybrid_query = "Python数据科学"
    print(f"\n🔍 混合搜索: '{hybrid_query}'")

    print("\n" + "=" * 60)
    print("演示完成！")
    print("=" * 60)

    # Run a batch of hybrid-search queries mixing aliases, pinyin-style
    # abbreviations, and brand names to exercise text + vector matching.
    query_texts = ["玛伊娅", "ali", "狗东", "FILA", "巴春店", "SALOMON", "lululemon", "儿童鞋", "Onitsuka Tiger"]
    for query_text in query_texts:
        rrr = es_manager.hybrid_search(
            index_name=index_name,
            query_text=query_text,
            size=3,
            text_boost=1.5,
            vector_boost=2.0
        )
        print(f'关键词：{query_text}, 搜索结果：{rrr}')





if __name__ == "__main__":
    # Script entry point (the previously unused `import sys` was removed).
    main()
