from typing import List, Dict, Any
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
from langchain.embeddings import HuggingFaceEmbeddings
import numpy as np

class VectorStore:
    """Milvus-backed vector store for PDF document chunks.

    Text is embedded with the sentence-transformers/all-MiniLM-L6-v2 model
    (384-dimensional vectors, which fixes the schema's FLOAT_VECTOR dim).
    Each stored entity carries the raw content, a content type tag, and the
    source page number alongside its embedding.
    """

    # Output dimension of all-MiniLM-L6-v2; must match the collection schema.
    EMBEDDING_DIM = 384

    def __init__(
        self,
        collection_name: str = "pdf_documents",
        host: str = "192.168.1.20",
        port: str = "19530",
    ):
        """Connect to Milvus and create (or open) the target collection.

        Args:
            collection_name: Name of the Milvus collection to use.
            host: Milvus server host (defaults to the previously hard-coded
                address, so existing callers are unaffected).
            port: Milvus server port.
        """
        self.collection_name = collection_name
        self.host = host
        self.port = port
        self.embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-MiniLM-L6-v2"
        )
        self._connect()
        self._create_collection()

    def _connect(self):
        """Connect to the Milvus server using the configured host/port."""
        connections.connect(alias="default", host=self.host, port=self.port)

    def _create_collection(self):
        """Open the collection if it exists; otherwise create it with an index."""
        if utility.has_collection(self.collection_name):
            self.collection = Collection(self.collection_name)
            return

        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="content", dtype=DataType.VARCHAR, max_length=65535),
            FieldSchema(name="content_type", dtype=DataType.VARCHAR, max_length=100),  # "text" or "image"
            FieldSchema(name="page_num", dtype=DataType.INT64),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=self.EMBEDDING_DIM),
        ]

        schema = CollectionSchema(fields=fields, description="PDF文档向量存储")
        self.collection = Collection(name=self.collection_name, schema=schema)

        # IVF_FLAT index with L2 distance; nprobe at search time must pair
        # with this metric_type.
        index_params = {
            "metric_type": "L2",
            "index_type": "IVF_FLAT",
            "params": {"nlist": 1024},
        }
        self.collection.create_index(field_name="embedding", index_params=index_params)

    def add_documents(self, documents: List[Dict[str, Any]]):
        """Embed and insert documents into the vector store.

        Args:
            documents: Dicts each providing 'content' (str), 'type'
                ("text"/"image" tag), and 'page_num' (int).
        """
        if not documents:
            # Nothing to insert; skip the empty Milvus insert/flush round-trip.
            return

        contents = [doc['content'] for doc in documents]
        content_types = [doc['type'] for doc in documents]
        page_nums = [doc['page_num'] for doc in documents]
        # Batch all texts in a single model call instead of one
        # embed_query() invocation per document.
        embeddings = self.embeddings.embed_documents(contents)

        # Column-ordered entities must match the schema's non-auto fields.
        self.collection.insert([contents, content_types, page_nums, embeddings])
        self.collection.flush()  # make the inserts durable and searchable

    def search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """Return the top_k documents most similar to the query.

        Args:
            query: Natural-language query text.
            top_k: Maximum number of hits to return.

        Returns:
            Dicts with 'content', 'type', 'page_num', and 'score'
            (L2 distance — smaller means more similar).
        """
        query_embedding = self.embeddings.embed_query(query)

        search_params = {
            "metric_type": "L2",  # must match the index's metric_type
            "params": {"nprobe": 10},
        }
        self.collection.load()  # collection must be loaded before searching
        results = self.collection.search(
            data=[query_embedding],
            anns_field="embedding",
            param=search_params,
            limit=top_k,
            output_fields=["content", "content_type", "page_num"],
        )

        # results[0] is the hit list for the single query vector.
        return [
            {
                'content': hit.entity.get('content'),
                'type': hit.entity.get('content_type'),
                'page_num': hit.entity.get('page_num'),
                'score': hit.score,
            }
            for hit in results[0]
        ]