import json
import time

import chromadb
from sentence_transformers import SentenceTransformer, CrossEncoder

from app.provider.model_provider import ModelProvider
from zhanshop.app import App
from zhanshop.env import Env
from zhanshop.json import Json


class KnowledgeProvider(ModelProvider):
    """
    Knowledge-base provider backed by a Chroma persistent vector store.

    Documents are embedded with a SentenceTransformer model; query hits
    can be re-ranked with a CrossEncoder model. The collection and the
    cross-encoder are created lazily on first use.
    """

    # Lazily-populated handles; real values are assigned in __init__
    # and the getter methods below.
    modelParam = None    # dict with "SentenceTransformer", "CrossEncoder", "chromadb" sections
    chromaClient = None  # chromadb.PersistentClient
    tableName = None     # collection name inside the database
    model = None         # SentenceTransformer embedding model
    collection = None    # lazily created chromadb collection
    crossEncoder = None  # lazily created CrossEncoder re-ranker

    def __init__(self, dbName, tableName, modelParam: dict):
        """
        Build the embedding model and open the vector database.

        :param dbName: database directory name under runtime/chroma/
        :param tableName: name of the collection inside the database
        :param modelParam: configuration dict; must contain
            "SentenceTransformer" -> {"model": ...},
            "CrossEncoder" -> kwargs for CrossEncoder(...),
            "chromadb" -> kwargs for get_or_create_collection(...)
        """
        self.modelParam = modelParam
        self.tableName = tableName
        modelPath = self.getSentenceTransformerModelPath(modelParam["SentenceTransformer"]["model"])
        self.model = SentenceTransformer(modelPath)
        dbPath = App.make(Env).rootPath + "/runtime/chroma/" + dbName
        self.chromaClient = chromadb.PersistentClient(path=dbPath)

    def getCollection(self):
        """
        Get the knowledge-base collection, creating it on first access.

        :return: chromadb collection bound to self.tableName
        """
        if self.collection is None:
            self.modelParam["chromadb"]["name"] = self.tableName
            chromadbParam = self.modelParam["chromadb"]
            self.collection = self.chromaClient.get_or_create_collection(**chromadbParam)
        return self.collection

    def getCrossEncoder(self):
        """
        Get the re-ranking model, creating it on first access.

        :return: CrossEncoder instance
        """
        if self.crossEncoder is None:
            crossEncoderParam = self.modelParam["CrossEncoder"]
            crossEncoderParam["model_name_or_path"] = self.getCrossEncoderModelPath(
                crossEncoderParam["model_name_or_path"]
            )
            self.crossEncoder = CrossEncoder(**crossEncoderParam)
        return self.crossEncoder

    def batchAdd(self, _documents: list, _metadatas: list = None, _uris: list = None):
        """
        Embed and store a batch of documents.

        IDs are generated from a microsecond timestamp plus the list
        offset, so all documents of one batch get consecutive ids.
        NOTE(review): two batches started within the same microsecond
        window could collide — confirm whether ids must be globally unique.

        :param _documents: list of raw document strings
        :param _metadatas: optional list of metadata dicts (parallel to _documents)
        :param _uris: optional list of content URIs (parallel to _documents)
        :return: None
        """
        if not _documents:
            # Nothing to embed; chromadb rejects empty add() calls.
            return
        vectorCodes = self.model.encode(_documents).tolist()

        prefix = time.time_ns() // 1000
        _ids = [str(prefix + offset) for offset in range(len(_documents))]

        # Write vectors and the original documents into the store.
        self.getCollection().add(
            documents=_documents,
            embeddings=vectorCodes,
            ids=_ids,
            metadatas=_metadatas,
            uris=_uris
        )

    def add(self, id, document, metadatas=None, uris=None):
        """
        Embed and store a single document under an explicit id.

        :param id: vector id
        :param document: raw document string
        :param metadatas: optional metadata dict for this document
        :param uris: optional content URI for this document
        :return: None
        """
        vectorCodes = self.model.encode([document]).tolist()
        # chromadb expects parallel lists, so wrap the scalars.
        if metadatas is not None:
            metadatas = [metadatas]
        if uris is not None:
            uris = [uris]
        self.getCollection().add(
            documents=[document],
            embeddings=vectorCodes,
            ids=[id],
            metadatas=metadatas,
            uris=uris
        )

    def query(self, question: str, limit: int = 10000):
        """
        Vector-search the collection for a question.

        :param question: query text
        :param limit: maximum number of results to return
        :return: raw chromadb query result (lists nested per query)
        """
        vectorCodes = self.model.encode([question]).tolist()
        results = self.getCollection().query(
            query_embeddings=vectorCodes,
            n_results=limit
        )
        return results

    def count(self) -> int:
        """
        Number of records in the current collection.

        :return: record count
        """
        return self.getCollection().count()

    def getScores(self, question, documents):
        """
        Re-rank search results with the cross-encoder.

        :param question: the query text
        :param documents: raw chromadb query result as returned by query()
        :return: list of dicts {id, document, metadata, distance, confidence}
                 sorted by confidence, highest first
        """
        docs = documents["documents"][0]
        if not docs:
            return []

        QApairs = [(question, answer) for answer in docs]
        scores = self.getCrossEncoder().predict(QApairs)

        # Sort *indices* by score instead of deleting matched documents:
        # the previous delete-while-indexing approach shifted the document
        # list out of alignment with ids/metadatas/distances, attaching
        # wrong ids and metadata to results after the first deletion. It
        # also mutated the caller's dict. Sorting indices keeps every
        # parallel list aligned and leaves the input untouched.
        order = sorted(range(len(docs)), key=lambda i: float(scores[i]), reverse=True)

        results = []
        for i in order:
            results.append({
                "id": documents["ids"][0][i],
                "document": docs[i],
                "metadata": documents["metadatas"][0][i],
                "distance": documents["distances"][0][i],
                "confidence": float(scores[i])
            })
        return results