import os
import uuid
from typing import List, Dict

import pandas as pd
from pymilvus import Collection, FieldSchema, DataType, CollectionSchema

from bot.openai_bot import OpenAIBot
from conf.config import logger, BASE_DIR
from db.milvus.milvus_db import MilvusClient


class BaseTextChunkDAO(object):
    """Data-access object for the demo "base_text_chunk" Milvus collection.

    Provides collection/index creation, CSV-to-embedding conversion, and
    vector similarity search.
    """

    def __init__(self, *args, **kwargs):
        super(BaseTextChunkDAO, self).__init__(*args, **kwargs)
        # Unique name for this Milvus connection; the alias is required later
        # when the connection is released.
        self.alias = str(uuid.uuid4())
        self.collection_name = "base_text_chunk"

    def create_milvus_conn(self):
        """Open and return a Milvus client registered under ``self.alias``."""
        milvus_client = MilvusClient(alias=self.alias)
        return milvus_client

    def create_collection(self, dim: int):
        """
        Create the collection and an IP/FLAT index on its vector field.

        :param dim: dimensionality of the embedding vectors
        :return: None
        """
        milvus_client = self.create_milvus_conn()
        try:
            fields = [
                FieldSchema(name="text_chunk_id", dtype=DataType.INT64, is_primary=True, auto_id=False, description="文本块id"),
                FieldSchema(name="text_chunk_vector", dtype=DataType.FLOAT_VECTOR, dim=dim, description="文本块对应的向量"),
            ]
            schema = CollectionSchema(fields=fields, description='基本文本块')
            collection = Collection(name=self.collection_name, schema=schema, using=self.alias)

            # Inner-product metric with a brute-force FLAT index (exact search).
            index_params = {
                'metric_type': "IP",
                'index_type': "FLAT",
                'params': {}
            }
            collection.create_index(field_name='text_chunk_vector', index_params=index_params)
        finally:
            # BUGFIX: the original guarded on the fragile string check
            # `"milvus_client" in dir()`. `milvus_client` is always bound
            # before the try block, so disconnect unconditionally.
            milvus_client.disconnect()

    @staticmethod
    def data_to_embeddings(csv_filename: str):
        """
        Embed the ``text_chunk`` column of a CSV and write a new CSV with vectors.

        Reads ``docs/<name>.csv`` under BASE_DIR and writes
        ``docs/<name>_within_vector.csv``.

        :param csv_filename: CSV file name (with or without the ``.csv`` suffix)
        :return: None
        """
        # Accept names both with and without the ".csv" suffix.
        if csv_filename.endswith(".csv"):
            csv_filename = csv_filename[:-4]

        csv_path = os.path.join(BASE_DIR, f'docs/{csv_filename}.csv')
        csv_output_path = os.path.join(BASE_DIR, f'docs/{csv_filename}_within_vector.csv')
        df = pd.read_csv(filepath_or_buffer=csv_path, usecols=[0, 1],
                         converters={'text_chunk_id': int})

        openai_bot = OpenAIBot()
        text_chunk_vectors = openai_bot.embeddings(df['text_chunk'].tolist())

        # Drop the raw text column and store the stringified vectors instead.
        df = df.drop(labels=['text_chunk'], axis=1)
        df = df.assign(text_chunk_vector=[str(vector) for vector in text_chunk_vectors])

        logger.info(f"输出路径：{csv_output_path}")
        df.to_csv(path_or_buf=csv_output_path, index=False)

    def get_ids(self, vector: List[float], top_k: int) -> List[Dict]:
        """
        Search the collection and return the nearest-neighbour ids and distances.

        :param vector: query embedding
        :param top_k: number of results to return
        :return: list of ``{"id": ..., "distance": ...}`` dicts
        """
        milvus_client = self.create_milvus_conn()
        try:
            # BUGFIX: Collection() is opened inside the try block so the
            # connection is released even when this call raises.
            # (collection.load() only needs to be called once, in advance.)
            collection = Collection(self.collection_name, using=self.alias)
            results = collection.search(
                data=[vector],
                anns_field="text_chunk_vector",
                param={"metric_type": "IP"},
                limit=top_k
            )

            # results[0] holds the hits for the single query vector.
            return [
                {"id": hit_id, "distance": hit_distance}
                for hit_id, hit_distance in zip(results[0].ids, results[0].distances)
            ]
        finally:
            milvus_client.disconnect()


def main():
    """Script entry point: run the data-vectorization step."""
    # # Step 1: create the collection
    # text_chunk_dao = BaseTextChunkDAO()
    # text_chunk_dao.create_collection(dim=1536)

    # Step 2: vectorize the data
    dao = BaseTextChunkDAO()
    dao.data_to_embeddings(csv_filename="template.csv")

    # # Step 3: search for a vector
    # openai_bot = OpenAIBot()
    # vector = openai_bot.embeddings(text_list=["嘉盛嘉和园的容积率是多少？", ])[0]
    # text_chunk_dao = BaseTextChunkDAO()
    # res = text_chunk_dao.get_ids(vector=vector, top_k=5)
    # logger.info(f"{res}")


if __name__ == '__main__':
    main()
