from pymilvus import CollectionSchema, FieldSchema, DataType, Collection, connections


class MilvusMuster:
    """Thin wrapper around a Milvus collection used for paragraph-vector search.

    Connects to Milvus on construction, loads the target collection into
    memory, and exposes insert / search / delete helpers plus a one-time
    schema builder (``build``).
    """

    def __init__(self, host: str, port: str, alias: str, collection_name: str = "article_paragraph_use_mpnet"):
        """Connect to Milvus and load the working collection.

        :param host: Milvus server host.
        :param port: Milvus server port.
        :param alias: Connection alias registered with pymilvus.
        :param collection_name: Name of an existing collection to operate on.
        """
        connections.connect(
            alias=alias,
            host=host,
            port=port
        )

        # Index parameters used when (re)building the vector index.
        self.index_params = {
            "metric_type": "L2",
            "index_type": "IVF_PQ",
            "params": {"nlist": 1024}
        }
        self.collection_name = collection_name
        self.collection = Collection(self.collection_name)  # Get an existing collection.

        # Load the collection into memory so searches can run against it.
        self.collection.load()
        self.search_params = {"metric_type": "L2", "params": {"nprobe": 20}}

    async def insert_vector(self, paragraph_id: str, article_id: str, paragraph_vector: list, user_id: str) -> dict:
        """Insert one paragraph-embedding row into the collection.

        :param paragraph_id: Paragraph primary key as a numeric string.
        :param article_id: Owning article id as a numeric string.
        :param paragraph_vector: Embedding vector for the paragraph.
        :param user_id: Owning user id as a numeric string.
        :return: Dict with the raw insert result and the payload that was sent.
        :raises ValueError: If any of the id strings is not an integer.
        """
        paragraph_id = int(paragraph_id)
        article_id = int(article_id)
        user_id = int(user_id)
        # Column-oriented payload matching the schema order:
        # [paragraph_id, article_id, word_count, paragraph_vector, user_id].
        # word_count is not tracked at insert time, so it is stored as 0.
        data = [[paragraph_id], [article_id], [0], [paragraph_vector], [user_id]]
        res = self.collection.insert(data)
        # NOTE: "countent" is a historical typo in the response key, kept
        # byte-identical for backward compatibility with existing callers.
        return {"status": res, "countent": data}

    def get_top_paragraphs(self, Q_vector: list, articles: list, limit: int = 10) -> dict:
        """Search for the paragraphs most similar to a question vector,
        restricted to a list of articles (intended for more than 2 articles).

        :param Q_vector: Question embedding vector.
        :param articles: Article ids (ints or numeric strings) to search within.
        :param limit: Maximum number of hits to return.
        :return: ``{"paragraph_ids": [...], "scores": [...]}`` (values as strings).
        """
        # Build a boolean filter: "article_id == X or article_id == Y or ...".
        article_id_str = " or ".join("article_id == " + str(int(a)) for a in articles)
        results = self.collection.search(
            data=[list(Q_vector)],
            anns_field="paragraph_vector",
            param=self.search_params,
            limit=limit,
            expr=article_id_str,
            consistency_level="Bounded"
        )
        # results[0] holds the hit list for the single query vector.
        paragraph_ids = [str(hit.id) for hit in results[0]]
        scores = [str(hit.score) for hit in results[0]]
        return {"paragraph_ids": paragraph_ids[:limit], "scores": scores[:limit]}

    def get_top_paragraphs_by_user(self, Q_vector: list, user_id: str, limit: int = 10) -> dict:
        """Search for the paragraphs most similar to a question vector,
        restricted to a single user's data.

        :param Q_vector: Question embedding vector.
        :param user_id: User id as an int or numeric string.
        :param limit: Maximum number of hits to return.
        :return: ``{"paragraph_ids": [...], "scores": [...]}`` (values as strings).
        :raises ValueError: If ``user_id`` is not an integer.
        """
        search_schema = "user_id == " + str(int(user_id))
        results = self.collection.search(
            data=[list(Q_vector)],
            anns_field="paragraph_vector",
            param=self.search_params,
            limit=limit,
            expr=search_schema,
            consistency_level="Bounded"
        )
        paragraph_ids = [str(hit.id) for hit in results[0]]
        scores = [str(hit.score) for hit in results[0]]
        return {"paragraph_ids": paragraph_ids[:limit], "scores": scores[:limit]}

    def get_comparison_paragraphs(self, Q_vector: list, articles: list) -> list:
        """For each article, return the id of its single best-matching paragraph
        (intended for comparing exactly 2 articles).

        :param Q_vector: Question embedding vector.
        :param articles: Article ids to search, one query per article.
        :return: List of top paragraph ids (as strings), one per article that
            produced at least one hit; articles with no hits are skipped.
        """
        ids = []
        for article_id in articles:
            results = self.collection.search(
                data=[list(Q_vector)],
                anns_field="paragraph_vector",
                param=self.search_params,
                limit=10,
                expr="article_id == " + str(article_id),
                consistency_level="Strong"
            )
            # An article with no indexed paragraphs yields an empty hit list;
            # skip it instead of silently swallowing arbitrary exceptions.
            if results[0].ids:
                ids.append(str(results[0].ids[0]))
        return ids

    def delete_article_by_id(self, article_id: str) -> bool:
        """Delete all rows belonging to a specific article.

        :param article_id: Id of the article whose rows should be removed.
        :return: True once delete and compaction have been issued.
        """
        # Fix: original definition was missing ``self``, so ``self.collection``
        # below raised NameError when called as an instance method.
        delete_expr = f"article_id == {article_id}"
        self.collection.delete(delete_expr)

        # Compact to physically reclaim rows that delete only marked.
        self.collection.compact()
        return True

    def delete_paragraph_by_id(self, paragraph_id: int) -> bool:
        """Delete the row with a specific paragraph_id.

        :param paragraph_id: Id of the paragraph to delete.
        :return: True once delete and compaction have been issued.
        """
        delete_expr = f"paragraph_id == {paragraph_id}"
        self.collection.delete(delete_expr)

        # Compact to physically reclaim rows that delete only marked.
        self.collection.compact()
        return True

    def build(self) -> None:
        """Create the collection schema (one-time setup).

        Field order must match the column order used by ``insert_vector``:
        paragraph_id, article_id, word_count, paragraph_vector, user_id.
        """
        paragraph_id = FieldSchema(
            name="paragraph_id",
            dtype=DataType.INT64,
            is_primary=True
        )

        article_id = FieldSchema(
            name="article_id",
            dtype=DataType.INT64
        )

        word_count = FieldSchema(
            name="word_count",
            dtype=DataType.INT64
        )
        paragraph_vector = FieldSchema(
            name="paragraph_vector",
            dtype=DataType.FLOAT_VECTOR,
            dim=384
        )
        # Fix: user_id was missing from the schema even though insert_vector
        # writes it and get_top_paragraphs_by_user filters on it — a collection
        # built without it would be unusable by the rest of this class.
        user_id = FieldSchema(
            name="user_id",
            dtype=DataType.INT64
        )
        schema = CollectionSchema(
            fields=[paragraph_id, article_id, word_count, paragraph_vector, user_id],
            description="Paragraph search"
        )

        collection = Collection(
            name=self.collection_name,
            schema=schema,
            using='default',
            shards_num=2,
            consistency_level="Bounded"
        )

