import json
from typing import List
import uuid
from typing import Union
import pandas as pd
import hashlib
from langchain.document_loaders import TextLoader, PyPDFLoader, Docx2txtLoader, UnstructuredXMLLoader, UnstructuredExcelLoader, UnstructuredMarkdownLoader, JSONLoader
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain_core.documents import Document
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
from langchain_chroma import Chroma
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import os,re
import numpy as np
from ApiTools  import apiTools

class ApiSqlVector():
    """Vector-store access layer for SQL-generation training data.

    Persists and retrieves three categories of training material —
    question/SQL pairs, DDL statements and free-form documentation — in
    the vector database obtained from ``apiTools.load_vec()``.  Retrieval
    methods return documents most similar to a natural-language question;
    ``submit_prompt`` runs an assembled chat prompt through the LLM.

    NOTE(review): ``self.vectDb.create_collection(name)`` appears to also
    select the active collection used by the subsequent ``get``/``query``/
    ``delete`` calls — confirm against the ApiTools implementation.
    """

    def __init__(self, config=None):
        """Initialise the store and ensure all three collections exist.

        Args:
            config (dict | None): Optional settings.  Recognised keys:
                ``collection_metadata``, and the per-category limits
                ``n_results_sql`` / ``n_results_documentation`` /
                ``n_results_ddl``, each falling back to a shared
                ``n_results`` and finally to 10.
        """
        if config is None:
            config = {}
        self.collection_metadata = config.get("collection_metadata", None)
        # Each per-category limit falls back to the shared "n_results", then 10.
        self.n_results_sql = config.get("n_results_sql", config.get("n_results", 10))
        self.n_results_documentation = config.get("n_results_documentation", config.get("n_results", 10))
        self.n_results_ddl = config.get("n_results_ddl", config.get("n_results", 10))
        self.vectDb = apiTools.load_vec()
        self.documentation_collection = self.vectDb.create_collection("documentation")
        self.ddl_collection = self.vectDb.create_collection("ddl")
        self.sql_collection = self.vectDb.create_collection("sql")

    def add_question_sql(self, question: str, sql: str, **kwargs) -> str:
        """Store a question/SQL training pair in the "sql" collection.

        NOTE(review): declared ``-> str`` (presumably the new record id)
        but currently returns None — train_QA does not surface an id.
        """
        self.vectDb.train_QA("sql", question, sql)

    def add_ddl(self, ddl: str, **kwargs) -> str:
        """Store a DDL statement in the "ddl" collection.

        NOTE(review): declared ``-> str`` but currently returns None.
        """
        apiTools.save_mes_vec("ddl", ddl, "")

    def add_documentation(self, documentation: str, **kwargs) -> str:
        """Store a documentation snippet in the "documentation" collection.

        NOTE(review): declared ``-> str`` but currently returns None.
        """
        apiTools.save_mes_vec("documentation", documentation, "")

    def get_training_data(self, **kwargs) -> pd.DataFrame:
        """Return every stored training record as a single DataFrame.

        Returns:
            pd.DataFrame: Columns ``id``, ``question``, ``content`` and
            ``training_data_type`` ("sql" / "ddl" / "documentation").
            ``question`` is None for ddl and documentation rows.  An
            empty frame is returned when no collection yields data.
        """
        frames = []

        # SQL records are stored as JSON strings {"question": ..., "sql": ...}.
        self.vectDb.create_collection("sql")
        sql_data = self.vectDb.get()
        if sql_data is not None:
            pairs = [json.loads(doc) for doc in sql_data["documents"]]
            df_sql = pd.DataFrame(
                {
                    "id": sql_data["ids"],
                    "question": [pair["question"] for pair in pairs],
                    "content": [pair["sql"] for pair in pairs],
                }
            )
            df_sql["training_data_type"] = "sql"
            frames.append(df_sql)

        # DDL and documentation records are stored as plain strings; the
        # extraction logic is identical, so handle both in one loop
        # (previously two copy-pasted stanzas).
        for category in ("ddl", "documentation"):
            self.vectDb.create_collection(category)
            data = self.vectDb.get()
            if data is not None:
                df_cat = pd.DataFrame(
                    {
                        "id": data["ids"],
                        "question": [None] * len(data["documents"]),
                        "content": list(data["documents"]),
                    }
                )
                df_cat["training_data_type"] = category
                frames.append(df_cat)

        return pd.concat(frames) if frames else pd.DataFrame()

    def remove_training_data(self, id: str, **kwargs) -> bool:
        """Delete a training record by id.

        Returns:
            bool: True once the delete call completes.
        """
        self.vectDb.delete([id])
        # BUGFIX: previously returned None (falsy) despite the declared
        # -> bool, making success indistinguishable from failure.
        return True

    def remove_collection(self, collection_name: str) -> bool:
        """Drop an entire collection by name.

        Returns:
            bool: True once the delete call completes.
        """
        self.vectDb.delete_collection(collection_name)
        # BUGFIX: previously returned None despite the declared -> bool.
        return True

    def extract_documents(self, query_results) -> list:
        """Extract the documents from the results of a vector query.

        Args:
            query_results: Mapping returned by ``self.vectDb.query``,
                expected to hold a "documents" entry.

        Returns:
            list: Parsed documents; the raw inner list if the entries are
            not JSON; an empty list when *query_results* is None or has
            no "documents" key.
        """
        if query_results is None:
            return []
        # BUGFIX: previously fell off the end (returned None) when the
        # key was absent; now always returns a list as documented.
        if "documents" not in query_results:
            return []

        documents = query_results["documents"]
        if len(documents) == 1 and isinstance(documents[0], list):
            try:
                documents = [json.loads(doc) for doc in documents[0]]
            except Exception:
                # Entries are not JSON — return the raw inner list.
                return documents[0]
        return documents

    def get_similar_question_sql(self, question: str, **kwargs) -> list:
        """Return stored question/SQL pairs similar to *question*."""
        self.vectDb.create_collection("sql")
        data = self.vectDb.query(query_texts=[question])
        return self.extract_documents(data)

    def get_related_ddl(self, question: str, **kwargs) -> list:
        """Return DDL statements related to *question*."""
        self.vectDb.create_collection("ddl")
        data = self.vectDb.query(query_texts=[question])
        return self.extract_documents(data)

    def get_related_documentation(self, question: str, **kwargs) -> list:
        """Return documentation snippets related to *question*."""
        # BUGFIX: previously selected the "ddl" collection (copy-paste
        # error), so documentation lookups searched the wrong data.
        self.vectDb.create_collection("documentation")
        data = self.vectDb.query(query_texts=[question])
        return self.extract_documents(data)

    def submit_prompt(self, prompt, quest) -> str:
        """Run a chat prompt through the configured LLM.

        Args:
            prompt: Message list accepted by
                ``ChatPromptTemplate.from_messages``.
            quest: Value substituted for the template's "input" variable.

        Returns:
            str: The model response's ``content``.
        """
        prompt_template = ChatPromptTemplate.from_messages(prompt)
        initial_answer_chain = prompt_template | apiTools.llm
        res = initial_answer_chain.invoke({"input": quest})
        return res.content