from typing import List
from datetime import datetime
import asyncio
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial

from pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility, \
    Partition

from utils.log_tool.custom_log import debug_logger


class MilvusConfig:
    # Connection settings for the Milvus server. Values are intentionally blank
    # here and are expected to be filled in by deployment configuration.
    # Host of the local Milvus instance (empty = not configured).
    MILVUS_HOST_LOCAL = ''
    # Port of the Milvus service.
    MILVUS_PORT = ''
    # Credentials used by connections.connect().
    MILVUS_USER = ''
    MILVUS_PASSWORD = ''
    # Name of the Milvus database to attach to.
    MILVUS_DB_NAME = ''


class MilvusManager:
    """Manage one Milvus collection per user, with one partition per knowledge base (kb).

    The collection is named after ``user_id``; each id in ``kb_ids`` becomes a
    partition of that collection. An embedding index is built on creation and
    the collection is loaded for search.
    """

    def __init__(self, user_id: str, kb_ids: List[str], mode="local", milvus_lite=True):
        """
        :param user_id: used as the Milvus collection name
        :param kb_ids: knowledge-base ids; each one becomes a partition
        :param mode: 'local' builds a CPU IVF_FLAT index, any other value GPU_IVF_FLAT
        :param milvus_lite: when True, connect to an embedded Milvus Lite instance
                            instead of the remote server
        """
        self.milvus_lite = milvus_lite
        self.host = MilvusConfig.MILVUS_HOST_LOCAL
        self.port = MilvusConfig.MILVUS_PORT
        self.user = MilvusConfig.MILVUS_USER
        self.password = MilvusConfig.MILVUS_PASSWORD
        self.db_name = MilvusConfig.MILVUS_DB_NAME
        # Collection handle ("table" in database terms); populated by init().
        self.sess: Collection = None
        # Partitions of the collection; a collection may have one or more.
        self.partitions: List[Partition] = []
        # Index-build parameters used when the collection is first created.
        if mode == 'local':
            self.create_params = {"metric_type": "L2", "index_type": "IVF_FLAT", "params": {"nlist": 2048}}
        else:
            self.create_params = {"metric_type": "L2", "index_type": "GPU_IVF_FLAT", "params": {"nlist": 2048}}
        # Query-time parameters.
        self.search_params = {"metric_type": "L2", "params": {"nprobe": 256}}

        self.user_id = user_id
        # A user may own several kbs identified by name; kb_ids are generated
        # when the kbs are created and act as partition names in Milvus.
        self.kb_ids = kb_ids

        # Thread pool used to off-load blocking Milvus calls from the event loop.
        # NOTE(review): never shut down explicitly — relies on interpreter exit.
        self.executor = ThreadPoolExecutor(max_workers=10)
        # Initialize the collection/partitions for this user.
        self.init()

    @property
    def fields(self):
        """Schema of the collection: one text chunk plus its 768-dim embedding."""
        fields = [
            FieldSchema(name='chunk_id', dtype=DataType.VARCHAR, max_length=64, is_primary=True),
            FieldSchema(name='file_id', dtype=DataType.VARCHAR, max_length=64),
            FieldSchema(name='file_name', dtype=DataType.VARCHAR, max_length=640),
            FieldSchema(name='file_path', dtype=DataType.VARCHAR, max_length=640),
            FieldSchema(name='timestamp', dtype=DataType.VARCHAR, max_length=64),
            FieldSchema(name='content', dtype=DataType.VARCHAR, max_length=4000),
            FieldSchema(name='embedding', dtype=DataType.FLOAT_VECTOR, dim=768)
        ]
        return fields

    def init(self):
        """
        Connect to Milvus and make sure the user's collection, its embedding
        index and the kb partitions exist, then load the collection for search.
        Errors are logged and swallowed (best-effort initialization).
        """
        try:
            if self.milvus_lite:
                # Embedded Milvus Lite: default local connection, no server creds.
                connections.connect("")
            else:
                # Fix: originally this connect ran unconditionally, opening a
                # second (remote) connection even in milvus_lite mode.
                connections.connect(host=self.host, port=self.port, user=self.user,
                                    password=self.password, db_name=self.db_name)  # timeout=3 [cannot set]

            if utility.has_collection(self.user_id):
                self.sess = Collection(self.user_id)
                debug_logger.info(f'collection {self.user_id} exists')
            else:
                schema = CollectionSchema(self.fields)
                debug_logger.info(f'create collection {self.user_id} {schema}')
                self.sess = Collection(self.user_id, schema)
                # Build the vector index on the embedding field.
                self.sess.create_index(field_name="embedding", index_params=self.create_params)
            # Create partitions for any kb that does not have one yet.
            for kb_id in self.kb_ids:
                if not self.sess.has_partition(kb_id):
                    self.sess.create_partition(kb_id)
            self.partitions = [Partition(self.sess, kb_id) for kb_id in self.kb_ids]
            debug_logger.info('partitions: %s', self.kb_ids)
            self.sess.load()
        except Exception as e:
            debug_logger.error(e)

    async def insert_files(self, file_id, file_name, file_path, docs, embs, batch_size=1000):
        """
        Write a file's chunks and their embeddings into Milvus in batches,
        using Partition.insert() off-loaded to the thread pool.

        :param file_id: id of the source file; also the chunk_id prefix
        :param file_name: display name of the file (logged)
        :param file_path: path of the file
        :param docs: chunk objects; each must expose ``page_content``
        :param embs: embeddings aligned index-for-index with ``docs``
        :param batch_size: rows per insert call
        :return: True when every batch was inserted, False on the first failure
        """
        loop = asyncio.get_running_loop()
        total_len = len(docs)
        for batch_start in range(0, total_len, batch_size):
            batch_end = min(batch_start + batch_size, total_len)

            # Column-oriented container matching self.fields order:
            # chunk_id, file_id, file_name, file_path, timestamp, content, embedding.
            # Fix: original `[[] * batch_size]` produced a single empty list, the
            # slice used tuple indexing (docs[a, b] -> TypeError), and values were
            # assigned (data[0] = ...) instead of appended, keeping only one row.
            data = [[] for _ in range(len(self.fields))]
            for idx in range(batch_start, batch_end):
                doc = docs[idx]
                # Use the global index so chunk ids stay unique across batches
                # and embs[idx] picks the matching embedding (the original
                # restarted at 0 every batch).
                chunk_id = f"{file_id}_{idx}"
                timestamp = datetime.now().strftime("%Y%m%d%H%M")
                data[0].append(chunk_id)
                data[1].append(file_id)
                data[2].append(file_name)
                data[3].append(file_path)
                data[4].append(timestamp)
                data[5].append(doc.page_content)
                data[6].append(embs[idx])

            # Run the blocking insert in the executor so the event loop stays free.
            # NOTE(review): always inserts into the first partition (kb_ids[0]) —
            # confirm this is intended when a user has multiple kbs.
            try:
                mr = await loop.run_in_executor(self.executor, partial(self.partitions[0].insert, data=data))
                # Fix: original called debug_logger(...) — the logger object is
                # not callable; use .info().
                debug_logger.info(f"{file_name}:{mr}")
            except Exception as e:
                debug_logger.error(f"Milvus insert 失败，file_name: {file_name},e:{e}")
                return False
        return True









