import json
from typing import List
import uuid
from typing import Union
import chromadb
import pandas as pd
from chromadb.config import Settings
from chromadb.utils import embedding_functions
import hashlib
from langchain.document_loaders import TextLoader, PyPDFLoader, Docx2txtLoader, UnstructuredXMLLoader, UnstructuredExcelLoader, UnstructuredMarkdownLoader, JSONLoader
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain_core.documents import Document
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
from langchain_chroma import Chroma
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import os
import re
import numpy as np
from ApiBase  import apiBase
from chromadb.utils import embedding_functions

#default_ef = embedding_functions.DefaultEmbeddingFunction()
#default_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/data/huggingface/hub/models--BAAI--bge-small-zh-v1.5")

# pip uninstall hnswlib
# pip uninstall chroma-hnswlib
# pip install -U chroma-hnswlib
from chromadb import Documents, EmbeddingFunction, Embeddings

class ChromaDB_VectorStore():
    def __init__(self, config=None):
        """Initialize embeddings, the chroma client and the three default
        training collections ("sql", "ddl", "documentation").

        Args:
            config: optional dict with keys qa_path, doc_path, client
                ("persistent" | "in-memory" | a chromadb Client instance),
                collection_metadata, and n_results / n_results_sql /
                n_results_documentation / n_results_ddl.

        Raises:
            ValueError: when config["client"] is none of the supported forms.
        """
        #VannaBase.__init__(self, config=config)
        if config is None:
            config = {}

        # The configured chat model decides which embedding backend is used.
        self.model_name = apiBase.prop_read("OPENAI_MODEL")
        # QA vectors persist under `path`; per-document vectors under `cn_vecdir`.
        self.path = config.get("qa_path", "./vector_qa/")
        self.cn_vecdir = config.get("doc_path", "./vector_doc/")
        if self.model_name == "Qwen2-0.5B-Instruct":
            self.embedding = OpenAIEmbeddings(model=self.model_name)
        else:
            # Default to a local BGE model (small, Chinese-oriented).
            self.embedding = HuggingFaceBgeEmbeddings(cache_folder="./huggingface/hub",model_name="BAAI/bge-small-zh-v1.5")
            #self.embedding = HuggingFaceBgeEmbeddings(cache_folder="/home/apjai/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx",model_name="sentence-transformers/all-MiniLM-L6-v2")
        # Adapter: chromadb expects an EmbeddingFunction while langchain
        # provides an Embeddings object; wrap the latter so one model serves
        # both APIs.
        class MyEmbeddingFunction(EmbeddingFunction):
            def __init__(self,embed) -> None:
                super().__init__()
                self.embed=embed
            def __call__(self, input: Documents) -> Embeddings:
                embeddings = self.embed.embed_documents(input)
                return embeddings
        self.embedding_function = MyEmbeddingFunction(self.embedding)
        curr_client = config.get("client", "persistent")
        self.collection_metadata = config.get("collection_metadata", None)
        # Per-category result counts, each falling back to shared "n_results".
        self.n_results_sql = config.get("n_results_sql", config.get("n_results", 10))
        self.n_results_documentation = config.get("n_results_documentation", config.get("n_results", 10))
        self.n_results_ddl = config.get("n_results_ddl", config.get("n_results", 10))

        if curr_client == "persistent":
            self.chroma_client = chromadb.PersistentClient(
                path=self.path, settings=Settings(anonymized_telemetry=False)
            )
        elif curr_client == "in-memory":
            self.chroma_client = chromadb.EphemeralClient(
                settings=Settings(anonymized_telemetry=False)
            )
        elif isinstance(curr_client, chromadb.api.client.Client):
            # allow providing client directly
            self.chroma_client = curr_client
        else:
            raise ValueError(f"Unsupported client was set in config: {curr_client}")

        self.documentation_collection = self.chroma_client.get_or_create_collection(
            name="documentation",
            embedding_function=self.embedding_function,
            metadata=self.collection_metadata,
        )
        self.ddl_collection = self.chroma_client.get_or_create_collection(
            name="ddl",
            embedding_function=self.embedding_function,
            metadata=self.collection_metadata,
        )
        self.sql_collection = self.chroma_client.get_or_create_collection(
            name="sql",
            embedding_function=self.embedding_function,
            metadata=self.collection_metadata,
        )
        
    # Answer a question via the LLM, few-shot primed from the named
    # collections; question = {'q': '', 'a': '', 'score': 0}.
    def askQA(self,llm,sys_prompt,question,collect_list,max_topn,topn):
        """Build a few-shot prompt from `collect_list` and invoke `llm`."""
        messages = self.get_QAdoc(collect_list, sys_prompt, question, max_topn, topn)
        chain = ChatPromptTemplate.from_messages(messages) | llm
        reply = chain.invoke({"input": question})
        return reply.content
    
    # 对结果进行验证，出现错误进行修复，修复不了，给出一个相关的分数question={'q':'','a':'', 'score':0}
    def fixQA(self,llm,sys_prompt,quest,task,collect_list,max_topn,topn,api):
        res=self.askQA(llm,sys_prompt,quest,collect_list,max_topn,topn) 
        #print(f"res.content={res.content}")
        task["a"]=res
        ret=api(res)
        if ret['code'] == 200:
            task["data"]=ret['data']
            task["score"]=1
            return task
        
        msg=ret['message']       
        fix_prompt=f'''{quest}
## ai
{res}
## human
There are many errors in the above code.Here is the error messages:
{msg}
Think carefully and FIX these errors'''
        res=self.askQA(llm,sys_prompt,fix_prompt,collect_list,max_topn,topn)
        task["a"]=res
        ret=api(res)
        if ret['code'] == 200:
            task["data"]=ret['data']
            task["score"]=1
            return task
        # 如果修复不成功，res和task['q'] 评估相似度
        task["score"]=0
        return task
    
    # For each collection in collect_list, retrieve similar QA pairs and build
    # a few-shot chat message list; each query asks for up to max_topn hits.
    def get_QAdoc(self,collect_list,sys_prompt,question,max_topn,topn):        
        """Build a ChatPromptTemplate message list: system prompt, sampled
        (human, ai) QA examples from each collection, then the "{input}" slot
        and an optional "messages" placeholder.
        """
        ls = [("system",  sys_prompt)]
        for name in collect_list:
            collect = self.chroma_client.get_or_create_collection(
                name=name,
                embedding_function=self.embedding_function,
                metadata=self.collection_metadata,
            )
            doc=ChromaDB_VectorStore._extract_documents(
                collect.query(
                    query_texts=[question],
                    n_results=max_topn,
                )
            )
            doc_len = len(doc)
            if doc_len  == 0:
                continue
            # Clamp to the number of documents actually returned.
            # NOTE(review): this mutates the max_topn PARAMETER, so later
            # collections in this loop query with the reduced value — confirm
            # that shrinking across collections is intentional.
            if max_topn > doc_len:
                max_topn = doc_len
            orders=apiBase.get_random(max_topn,topn)            
            for index in orders:
                documentation= doc[index]
                ls.append(("human", documentation["question"]))
                ls.append(("ai",documentation["sql"]))
        ls.append(("human", "{input}"))
        ls.append(MessagesPlaceholder(variable_name="messages", optional=True))
        return ls

    def generate_embedding(self, data: str, **kwargs) -> List[float]:
        """Embed a single string and unwrap the one-element batch result."""
        vectors = self.embedding_function([data])
        # A single input yields a batch of one vector; return it unwrapped.
        return vectors[0] if len(vectors) == 1 else vectors
    
    # 训练QA对话
    def train_QA(self, collect_name,question, sql) -> str:
        question_sql_json = json.dumps(
            {
                "question": question,
                "sql": sql,
            },
            ensure_ascii=False,
        )
        collect = self.chroma_client.get_or_create_collection(
            name=collect_name,
            embedding_function=self.embedding_function,
            metadata=self.collection_metadata,
        )        
        id = self.deterministic_uuid(question_sql_json) + "-sql"
        collect.add(
            documents=question_sql_json,
            embeddings=self.generate_embedding(question_sql_json),
            ids=id,
        )
        return id
    
    def add_question_sql(self, question: str, sql: str, **kwargs) -> str:
        """Add a question/SQL pair to the default "sql" collection.

        Returns:
            str: deterministic document id ("<uuid>-sql").
        """
        payload = json.dumps(
            {"question": question, "sql": sql},
            ensure_ascii=False,
        )
        doc_id = self.deterministic_uuid(payload) + "-sql"
        self.sql_collection.add(
            documents=payload,
            embeddings=self.generate_embedding(payload),
            ids=doc_id,
        )
        return doc_id

    def add_ddl(self, ddl: str, **kwargs) -> str:
        """Add a raw DDL statement to the "ddl" collection; returns its id."""
        doc_id = self.deterministic_uuid(ddl) + "-ddl"
        self.ddl_collection.add(
            documents=ddl,
            embeddings=self.generate_embedding(ddl),
            ids=doc_id,
        )
        return doc_id

    def add_documentation(self, documentation: str, **kwargs) -> str:
        """Add free-text documentation to the "documentation" collection."""
        doc_id = self.deterministic_uuid(documentation) + "-doc"
        self.documentation_collection.add(
            documents=documentation,
            embeddings=self.generate_embedding(documentation),
            ids=doc_id,
        )
        return doc_id

    def get_training_data(self, **kwargs) -> pd.DataFrame:
        """Collect every stored training record into one DataFrame.

        Columns: id, question, content, training_data_type. SQL records are
        stored as JSON {"question", "sql"}; ddl and documentation records are
        raw text with question = None.
        """
        frames = []

        sql_data = self.sql_collection.get()
        if sql_data is not None:
            parsed = [json.loads(doc) for doc in sql_data["documents"]]
            frame = pd.DataFrame(
                {
                    "id": sql_data["ids"],
                    "question": [p["question"] for p in parsed],
                    "content": [p["sql"] for p in parsed],
                }
            )
            frame["training_data_type"] = "sql"
            frames.append(frame)

        ddl_data = self.ddl_collection.get()
        if ddl_data is not None:
            docs = list(ddl_data["documents"])
            frame = pd.DataFrame(
                {
                    "id": ddl_data["ids"],
                    "question": [None] * len(docs),
                    "content": docs,
                }
            )
            frame["training_data_type"] = "ddl"
            frames.append(frame)

        doc_data = self.documentation_collection.get()
        if doc_data is not None:
            docs = list(doc_data["documents"])
            frame = pd.DataFrame(
                {
                    "id": doc_data["ids"],
                    "question": [None] * len(docs),
                    "content": docs,
                }
            )
            frame["training_data_type"] = "documentation"
            frames.append(frame)

        if not frames:
            return pd.DataFrame()
        # Mirrors the original's concat-onto-empty-frame behavior.
        return pd.concat([pd.DataFrame()] + frames)

    def remove_training_data(self, id: str, **kwargs) -> bool:
        """Delete one training record; the id suffix selects the collection.

        Returns:
            bool: False when the id carries no recognized suffix.
        """
        by_suffix = {
            "-sql": self.sql_collection,
            "-ddl": self.ddl_collection,
            "-doc": self.documentation_collection,
        }
        for suffix, collection in by_suffix.items():
            if id.endswith(suffix):
                collection.delete(ids=id)
                return True
        return False

    def remove_collection(self, collection_name: str) -> bool:
        """Reset one of the three training collections to an empty state.

        Args:
            collection_name (str): "sql", "ddl" or "documentation".

        Returns:
            bool: True if the collection was dropped and re-created,
            False for any other name.
        """
        if collection_name not in ("sql", "ddl", "documentation"):
            return False
        self.chroma_client.delete_collection(name=collection_name)
        fresh = self.chroma_client.get_or_create_collection(
            name=collection_name, embedding_function=self.embedding_function
        )
        if collection_name == "sql":
            self.sql_collection = fresh
        elif collection_name == "ddl":
            self.ddl_collection = fresh
        else:
            self.documentation_collection = fresh
        return True

    @staticmethod
    def _extract_documents(query_results) -> list:
        """
        Static method to extract the documents from the results of a query.

        Args:
            query_results (pd.DataFrame): The dataframe to use.

        Returns:
            List[str] or None: The extracted documents, or an empty list or
            single document if an error occurred.
        """
        if query_results is None:
            return []

        if "documents" in query_results:
            documents = query_results["documents"]

            if len(documents) == 1 and isinstance(documents[0], list):
                try:
                    documents = [json.loads(doc) for doc in documents[0]]
                except Exception as e:
                    return documents[0]

            return documents

    def get_similar_question_sql(self, question: str, **kwargs) -> list:
        """Return stored QA pairs most similar to `question` ("sql" store)."""
        hits = self.sql_collection.query(
            query_texts=[question],
            n_results=self.n_results_sql,
        )
        return ChromaDB_VectorStore._extract_documents(hits)

    def get_related_ddl(self, question: str, **kwargs) -> list:
        """Return DDL statements most relevant to `question` ("ddl" store)."""
        hits = self.ddl_collection.query(
            query_texts=[question],
            n_results=self.n_results_ddl,
        )
        return ChromaDB_VectorStore._extract_documents(hits)

    def get_related_documentation(self, question: str, **kwargs) -> list:
        """Return documentation snippets most relevant to `question`."""
        hits = self.documentation_collection.query(
            query_texts=[question],
            n_results=self.n_results_documentation,
        )
        return ChromaDB_VectorStore._extract_documents(hits)
    def deterministic_uuid(self,content: Union[str, bytes]) -> str:
        """Derive a stable UUID from string or byte content.

        Args:
            content: the data to fingerprint.

        Returns:
            str: UUIDv5 (zero namespace) of the content's SHA-256 hex digest.

        Raises:
            ValueError: for any type other than str or bytes.
        """
        if isinstance(content, bytes):
            payload = content
        elif isinstance(content, str):
            payload = content.encode("utf-8")
        else:
            raise ValueError(f"Content type {type(content)} not supported !")

        digest = hashlib.sha256(payload).hexdigest()
        # uuid.UUID(int=0) is the all-zero namespace the original spelled out.
        zero_namespace = uuid.UUID(int=0)
        return str(uuid.uuid5(zero_namespace, digest))

    # Load a file or a whole directory tree into the vector store.
    def dir_upsert(self, dataDir, type='file'):
        """Strip a trailing slash, then dispatch to file- or directory-level
        ingestion depending on what the path points at."""
        if dataDir.endswith('/'):
            dataDir = dataDir[:-1]
        handler = self.dir_upsert_file if os.path.isfile(dataDir) else self.dir_upsert_dir
        handler(dataDir, type)

    # Recursively load every file under dataDir into the vector store.
    def dir_upsert_dir(self, dataDir, type='file'):
        """Walk dataDir, upserting files and recursing into subdirectories."""
        if os.path.isfile(dataDir):
            self.dir_upsert_file(dataDir, type)
            return " load file ok"
        entries = os.listdir(dataDir)
        if dataDir.endswith('/'):
            dataDir = dataDir[:-1]
        for entry in entries:
            child = dataDir + "/" + entry
            if os.path.isfile(child):
                self.dir_upsert_file(child, type)
            else:
                self.dir_upsert_dir(child, type)
        return 'load  ok'

    # Load a single file into the vector store (one collection per file).
    def dir_upsert_file(self, dataDir, type='file'):
        """Pick a loader by file extension, load the file, and split/persist
        its contents via split_document.

        The collection name is derived from the md5 of the path; types
        'system-help' and 'init' override it with fixed names.

        Returns the Chroma store from split_document, or an error string for
        unsupported extensions.
        """
        docName = "doc" + apiBase.generate_md5(dataDir)
        if type == 'system-help':
            docName = "systemdochelp12345678"
        if type == 'init':
            # NOTE(review): self.cn_vecname is never assigned in __init__
            # (only cn_vecdir is set), so this branch likely raises
            # AttributeError — confirm the intended attribute name.
            docName = self.cn_vecname
        # try:
        #     self.chroma_client.delete_collection(name=docName)
        # except:
        #     pass
        file_extension = dataDir.split(".")[-1]
        if file_extension == "pdf":
            loader = PyPDFLoader(dataDir)
        elif file_extension == "docx":
            loader = Docx2txtLoader(dataDir)
        elif file_extension == "xml":
            loader = UnstructuredXMLLoader(dataDir)
        elif file_extension == "hpl":
            loader = UnstructuredXMLLoader(dataDir)
        elif file_extension == "hwf":
            loader = UnstructuredXMLLoader(dataDir)
        elif file_extension == "xlsx":
            loader = UnstructuredExcelLoader(dataDir)
        elif file_extension == "json":
            loader = JSONLoader(dataDir, encoding="utf-8")
        # elif file_extension == "md":
        #    loader = UnstructuredMarkdownLoader(dataDir,encoding="utf-8")
        elif file_extension == "md":
            loader = TextLoader(dataDir, encoding="utf-8")
        elif file_extension == "txt":
            loader = TextLoader(dataDir, encoding="utf-8")
        elif file_extension == "csv":
            loader = TextLoader(dataDir, encoding="utf-8")
        else:
            # Unsupported file type (user-facing message kept verbatim).
            return f"不支持的文件类型:{file_extension}"
        documents = loader.load()        
        return self.split_document(docName,documents)
    
    # Split documents into chunks on a custom delimiter character class.
    def split_document(self,docName,documents,delimiters = r'[\n;.。?？！!]'):
        """Split each document's text on the delimiter set, drop empty
        pieces, and persist the chunks into a Chroma collection named
        `docName` under self.cn_vecdir.
        """
        chunks = []
        for source_doc in documents:
            for piece in re.split(delimiters, source_doc.page_content):
                piece = piece.strip()
                if not piece:
                    continue
                chunks.append(Document(page_content=piece, metadata=source_doc.metadata))
        return Chroma.from_documents(
            collection_name=docName,
            documents=chunks,
            embedding=self.embedding,
            persist_directory=self.cn_vecdir,
        )
    # Query by path: a single file, or every file under a directory.
    def dir_query(self, dataDir, sys_prompt,usr_prompt):
        """Reset the accumulated answer, then query the path's vectors."""
        if dataDir.endswith('/'):
            dataDir = dataDir[:-1]
        self.dir_query_result = None
        if os.path.isfile(dataDir):
            self.dir_query_file_vec(dataDir, sys_prompt, usr_prompt, "txt")
        else:
            self.dir_query_dir(dataDir, sys_prompt, usr_prompt)
        return self.dir_query_result

    def dir_query_dir(self, dataDir, sys_prompt,usr_prompt):
        """Recursively query every file under dataDir; the first answer wins
        (dir_query_file_vec returns early once self.dir_query_result is set).

        Bug fix: the original unconditionally reset self.dir_query_result to
        None on every call, including recursive calls, so an answer found in
        one subtree was discarded when a sibling directory was traversed.
        The accumulator is now only initialized when absent; dir_query()
        remains the entry point that clears it before a fresh query.
        """
        if getattr(self, "dir_query_result", None) is None:
            self.dir_query_result = None
        folder_names = os.listdir(dataDir)
        for name in folder_names:
            child = dataDir + "/" + name
            if os.path.isfile(child):
                self.dir_query_file_vec(child, sys_prompt, usr_prompt, "txt")
            else:
                self.dir_query_dir(child, sys_prompt, usr_prompt)
        return self.dir_query_result

    # Query a single file's vector collection and answer with RAG.
    def dir_query_file_vec(self, dataDir, sys,usr, type):
        """Run a retrieval-augmented answer over the collection for dataDir.

        Args:
            dataDir: path of a previously ingested file.
            sys: optional system prompt; empty/None falls back to the
                "aigc/rag/system" prompt.
            usr: the user question.
            type: currently unused (see the commented-out branch below).

        Sets and returns self.dir_query_result (an answer/file string), or
        leaves it unchanged when nothing relevant is retrieved.
        """
        # An earlier file in the traversal already answered; keep that result.
        if self.dir_query_result is not None:
            return self.dir_query_result
        if dataDir.endswith('/'):
            dataDir = dataDir[:-1]
        docName = "doc" + apiBase.generate_md5(dataDir)
        vectordb = Chroma(collection_name=docName, persist_directory=self.cn_vecdir, embedding_function=self.embedding)
        # self.dir_query_result = index.run(prompt)        
        retriever = vectordb.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={'score_threshold': 0.1}
        )
        if apiBase.db_connection is None:
            apiBase.getLLMConnect()
        if sys is None or len(sys) == 0:
            system_prompt = apiBase.prompt_read("aigc/rag/system")
        else:
            system_prompt=sys
        # create_stuff_documents_chain needs a {context} slot for the docs.
        if "{context}" not in system_prompt:
            system_prompt += "\r\n {context}"
        user_prompt=apiBase.prompt_read("aigc/rag/user")       
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", system_prompt),
                ("human", user_prompt),
            ]
        )

        # NOTE(review): self.llm is not assigned anywhere in this class'
        # __init__; presumably a subclass or caller sets it — confirm.
        question_answer_chain = create_stuff_documents_chain(self.llm, prompt)
        rag_chain = create_retrieval_chain(retriever, question_answer_chain)
        response = rag_chain.invoke({"input": usr})
        # No retrieved context: leave the accumulated result untouched.
        if len(response["context"]) == 0:
            return self.dir_query_result
        #if type == 'file':
        self.dir_query_result = response["answer"]
        # print(self.dir_query_result)
        self.dir_query_result = f'"answer":"{response["answer"]}","file":"{dataDir}"'
        return self.dir_query_result
        #else:
            # return the retrieved vector instead of the answer
            # self.dir_query_result = response["context"][0]
            # return self.dir_query_result
    # Build a retriever over the persisted Chroma collection derived from `dir`.
    def get_collect_retriever(self,dir,k=4):
        """Return a similarity-score-threshold retriever for the collection
        named after the md5 of `dir`.

        Args:
            dir: the path/key the collection name was derived from at load
                time (same derivation as dir_upsert_file).
            k: maximum number of documents to retrieve. Defaults to 4
               (langchain's retriever default); previously this parameter
               was required, so existing two-argument callers still work.

        Bug fix: `k` was passed as a top-level keyword to as_retriever(),
        which VectorStoreRetriever does not accept as a field — the result
        limit belongs inside search_kwargs.
        """
        docName = "doc" + apiBase.generate_md5(dir)
        vectordb = Chroma(collection_name=docName, persist_directory=self.cn_vecdir, embedding_function=self.embedding)
        return vectordb.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={'score_threshold': 0.1, 'k': k},
        )
    
    # added by bzm: RAG over a directory, driven by a simple text command.
    def call_rag(self, label, param):
        """Entry point for RAG commands.

        Args:
            label: 'load' ingests `param` as a path; any other label treats
                `param` as "<path> <prompt...>" and queries it.
            param: command payload (see label).

        Bug fix: dir_query takes (dataDir, sys_prompt, usr_prompt) but the
        original called self.dir_query(dataDir, prompt), raising TypeError.
        The prompt is now passed as the user prompt with sys_prompt=None
        (dir_query_file_vec then falls back to the default system prompt).
        The prompt may also contain spaces: everything after the path is
        joined back together instead of taking only the second token.
        """
        apiBase.log(f"call_rag:{label} {param}")
        if label == 'load':
            return self.dir_upsert(param)
        parts = param.split()
        dataDir = parts[0]
        prompt = " ".join(parts[1:])
        return self.dir_query(dataDir, None, prompt)
    

#chromadb = ChromaDB_VectorStore()
#chromadb.get_collect_retriever("sql")