from pymilvus import connections, db
import os
from pymilvus import (
    connections,
    utility,
    FieldSchema,
    CollectionSchema,
    DataType,
    Collection,
)

from embeddings import BaseEmbedding

## Milvus connection setup.
# Read connection settings from the environment, falling back to local
# defaults (same effect as the previous `== None` checks, since os.getenv
# returns None only when the variable is unset).
MILVUS_HOST = os.getenv("MILVUS_HOST", "127.0.0.1")
MILVUS_PORT = os.getenv("MILVUS_PORT", "19530")
MILVUS_DATABASE = os.getenv("MILVUS_DATABASE", "cbgpt")

# Establish the module-wide connection and select the working database.
connections.connect(host=MILVUS_HOST, port=MILVUS_PORT)
db.using_database(MILVUS_DATABASE)

class VectorStore():
    """Milvus-backed vector store for one knowledge hub.

    Each hub gets its own collection named ``knowledge_<hub_id>`` that holds
    text chunks, their source-file metadata, and their embedding vectors.
    """

    def __init__(self, hub_id: int, maxSize: int = 2000, dims: int = 1024):
        """Prepare the schema and index parameters for one hub's collection.

        Args:
            hub_id: Numeric hub identifier; determines the collection name.
            maxSize: Maximum chunk length; the VARCHAR ``text`` field is sized
                at 4x this value to leave headroom for multi-byte characters.
            dims: Dimensionality of the embedding vectors.
        """
        # Cheap connectivity check: raises if the Milvus connection is down.
        db.list_database()

        self.fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="file_id", dtype=DataType.INT64),
            FieldSchema(name="file_name", dtype=DataType.VARCHAR, max_length=500),
            FieldSchema(name="index", dtype=DataType.INT32),
            FieldSchema(name="page_number", dtype=DataType.INT32),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=maxSize * 4),
            FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dims),
        ]

        self.schema = CollectionSchema(
            fields=self.fields,
            description="Knowledge Hub",
            enable_dynamic_field=False,
        )

        # Cosine similarity over an IVF_FLAT index.
        self.index_param = {
            "metric_type": "COSINE",
            "index_type": "IVF_FLAT",
            "params": {"nlist": 1024},
        }

        self.maxSize = maxSize
        self.dims = dims
        self.name = "knowledge_" + str(hub_id)

    def create(self):
        """Create the collection plus its vector and scalar indexes."""
        self.collection = Collection(name=self.name, schema=self.schema)
        self.collection.create_index("embeddings", self.index_param)
        # Scalar indexes speed up the file_id/index filters used by query().
        self.collection.create_index("file_id")
        self.collection.create_index("index")

    def load(self, replica_number: int = 1):
        """Load the collection into memory so it can be searched.

        Args:
            replica_number: Number of in-memory replicas to load.
        """
        self.collection = Collection(self.name)
        # BUG FIX: replica_number was accepted but never forwarded to Milvus.
        self.collection.load(replica_number=replica_number)

    def insert(self, file_id: int, file_name: str, page_number: list, text: list, embeddings: list):
        """Insert one file's chunks.

        ``page_number``, ``text`` and ``embeddings`` are parallel lists, one
        entry per chunk; the chunk's position becomes its ``index`` field.
        """
        count = len(page_number)
        data = [
            [file_id] * count,
            [file_name] * count,
            list(range(count)),  # sequential chunk index within the file
            page_number,
            text,
            embeddings,
        ]

        self.collection.insert(data)

        # Flush so the new rows are persisted and searchable.
        self.collection.flush()

    def queryQuestion(self, question: str, embed, n: int = 3, padding: int = 0):
        """Embed *question* with the callable *embed* and run a similarity query."""
        embeddings = embed(question)
        return self.query(embeddings, n, padding)

    def query(self, embeddings: list, n: int, padding: int):
        """Similarity-search for the top *n* chunks, optionally with context.

        Args:
            embeddings: The query vector.
            n: Number of nearest chunks to retrieve.
            padding: Number of neighbouring chunks (by index, same file) to
                pull in on each side of every hit; 0 disables context.

        Returns:
            A list of ``[file_id, text, file_name, page_number, distance]``
            rows. With padding, overlapping context windows from the same
            file are merged and their texts concatenated.
        """
        search_params = {
            "metric_type": "COSINE",
            "offset": 0,
            "ignore_growing": False,
            "params": {"nprobe": 10},
        }
        # Single-vector search (one query embedding, so one result list).
        results = self.collection.search(
            data=[embeddings],
            anns_field="embeddings",
            param=search_params,
            limit=n,
            expr=None,
            output_fields=["file_id", "index", "file_name", "text", "page_number"],
            consistency_level="Strong",
        )

        hits = results[0]

        # Row layout: [file_id, index, text, file_name, page_number, distance]
        data = []
        for hit in hits:
            entity = hit.entity
            data.append([
                entity.get("file_id"),
                entity.get("index"),
                entity.get("text"),
                entity.get("file_name"),
                entity.get("page_number"),
                hit.distance,
            ])

        # Order hits by chunk index so overlapping windows can be merged below.
        data.sort(key=lambda row: row[1])

        if padding == 0:
            return [[row[0], row[2], row[-3], row[-2], row[-1]] for row in data]

        # For each hit, fetch the surrounding [index-padding, index+padding]
        # chunks of the same file as a context window.
        extendedData = []
        for item in data:
            file_id, index, distance = item[0], item[1], item[-1]
            min_index = max(index - padding, 0)
            max_index = index + padding
            window = self.collection.query(
                expr=f"file_id == {file_id} && index >= {min_index} && index <= {max_index}",
                offset=0,
                # BUG FIX: the limit must cover the whole window; the old
                # hard-coded 10 silently truncated context when padding > 4.
                limit=2 * padding + 1,
                output_fields=["file_id", "index", "file_name", "text", "page_number"],
            )
            tmp = [
                [row.get("file_id"), row.get("index"), row.get("text"),
                 row.get("file_name"), row.get("page_number"), distance]
                for row in window
            ]
            tmp.sort(key=lambda row: row[1])
            extendedData.append(tmp)

        # De-duplicate: merge adjacent windows whose index ranges overlap
        # within the same file.
        size = len(extendedData)
        i = 0
        while i < size - 1:
            last = extendedData[i][-1]       # last chunk of the current window
            first = extendedData[i + 1][0]   # first chunk of the next window

            # Overlap requires same file AND index ranges touching/overlapping.
            if last[1] >= first[1] and last[0] == first[0]:
                # BUG FIX: overlap length is last_index - first_index (the old
                # code computed the negated value, re-appending duplicate
                # chunks whenever the overlap exceeded one chunk). Skip the
                # duplicated prefix and append the remainder of the next window.
                diff = last[1] - first[1]
                extendedData[i].extend(extendedData[i + 1][diff + 1:])
                extendedData.pop(i + 1)
                size -= 1
                continue
            i += 1

        # Concatenate each merged window's chunk texts into one passage.
        res = []
        for window in extendedData:
            file_id = window[0][0]
            text = "".join(chunk[2] for chunk in window)
            res.append([file_id, text, window[0][-3], window[0][-2], window[0][-1]])

        return res

    def release(self):
        """Release the collection from memory."""
        self.collection.release()

    def listCollection(self):
        """Return the names of all collections in the current database."""
        return utility.list_collections()
    