import configparser
import os
from typing import List

from langchain_community.chat_models import QianfanChatEndpoint
from sentence_transformers import SentenceTransformer
import sys

class ChatAI:
    """Abstract interface for a chat-AI backend: text embedding,
    vector-database lookup, and LLM invocation.

    Concrete backends (e.g. ``QianfanChatAI``) must override
    ``embed_query``, ``embed_doc`` and ``invoke``.
    """

    def embed_query(self, text: str):
        """Embed a single query string into a vector.

        :param text: query text to embed
        :return: embedding vector
        """
        # Was a silent `pass` (returned None) — raise so a missing
        # override fails loudly instead of propagating None downstream.
        raise NotImplementedError

    def embed_doc(self, texts: List[str]):
        """Embed a list of document strings into vectors.

        :param texts: document texts to embed
        :return: list of embedding vectors, one per input text
        """
        raise NotImplementedError

    def queryDataBase(self, q_embeddings):
        """Search the Milvus vector database with the given query embeddings.

        :param q_embeddings: query embedding vector(s)
        :return: search results from Milvus
        """
        # Function-scope import keeps module import cheap and avoids a
        # hard import-time dependency on database_helper.
        from database_helper import milvusDatabase
        return milvusDatabase.searchEmbedding(q_embeddings)

    def invoke(self, prompt):
        """Send a prompt to the underlying LLM and return its response.

        :param prompt: prompt (e.g. a LangChain prompt/runnable) to execute
        :return: the LLM response
        """
        raise NotImplementedError


class QianfanChatAI(ChatAI):
    """ChatAI implementation backed by Baidu Qianfan (ERNIE) for the LLM
    and a local SentenceTransformer model for embeddings.

    Construction reads ``ai/ai-config.ini``, exports the Qianfan credential
    environment variables, and loads the embedding model from disk.
    """

    def __init__(self):
        # NOTE(review): sys.path[0] is the directory of the startup script;
        # this code treats it as the project root — confirm deployment layout.
        root = sys.path[0]
        # Qianfan credentials come from the ini file and are passed to the
        # SDK via environment variables.
        config = configparser.ConfigParser()
        config.read(os.path.join(root, "ai", "ai-config.ini"))
        os.environ["QIANFAN_ACCESS_KEY"] = config.get('qianfan', 'accessKey')
        os.environ["QIANFAN_SECRET_KEY"] = config.get('qianfan', 'secretKey')
        # Local multilingual sentence-embedding model.
        self.embeddingModel = SentenceTransformer(
            model_name_or_path=os.path.join(
                root, "sentence-transformers",
                "distiluse-base-multilingual-cased-v1"),
            tokenizer_kwargs={"clean_up_tokenization_spaces": False})
        # Qianfan-hosted chat LLM.
        self.llm = QianfanChatEndpoint(
            streaming=True,
            temperature=0.9,
            model="ERNIE-Speed-128K"
        )

    def embed_query(self, text: str):
        """Embed a single query string with the local SentenceTransformer.

        :param text: query text
        :return: embedding vector (numpy array)
        """
        return self.embeddingModel.encode(text)

    def embed_doc(self, texts: List[str]):
        """Embed a list of document strings.

        Bug fix: SentenceTransformer has no ``embed_documents`` method —
        that is the LangChain embeddings API (leftover from the removed
        QianfanEmbeddingsEndpoint). ``encode`` accepts a list of strings
        and returns one embedding per string.

        :param texts: document texts
        :return: list/array of embedding vectors
        """
        return self.embeddingModel.encode(texts)

    def invoke(self, prompt):
        """Run ``prompt | llm`` and return the LLM response.

        :param prompt: a LangChain prompt/runnable to pipe into the LLM
        :return: the chain's invocation result
        """
        chain = prompt | self.llm
        return chain.invoke(input={})


# Module-level singleton. Constructing it has heavy import-time side
# effects: it reads ai-config.ini, sets the QIANFAN_* credential env
# vars, and loads the SentenceTransformer model from disk.
qianfanLLM = QianfanChatAI()


if __name__ == '__main__':
    # Manual smoke test: constructing the client exercises config loading
    # and embedding-model initialisation.
    chat_client = QianfanChatAI()
    print("ok")
