import json
import chromadb
from langchain.document_loaders import TextLoader
from model_config import *
from readFile import *
from english2cypher import natural_language
from neo4j_driver import run_query
from streamlit_chat import message
import streamlit as st

# Chroma persistence note: when no persist directory is configured, Chroma
# keeps its data in memory only, so everything is lost when the process exits.

# Running counter used to build unique ids for the ingested lines below.
count = 0
# In-memory Chroma client and the collection that stores the question texts.
chroma_client = chromadb.Client()
collection = chroma_client.create_collection(chromadb_collection_name)
print(chroma_client.list_collections())

# Maps question text -> cypher template; filled by add_collection() during ingestion.
data = {}


def add_collection(doc, source, ids):
    """Split a ``<question>&<cypher>`` line and index the question in Chroma.

    The text before the first ``&`` becomes the vector-store document; the
    segment after it is stored in the module-level ``data`` map
    (question -> cypher template) for lookup after a similarity search.

    Args:
        doc: raw line in the form ``<question>&<cypher>``.
        source: metadata tag recorded alongside the document.
        ids: unique id for this document within the collection.
    """
    str_arr = doc.split("&")
    if len(str_arr) < 2:
        # Guard against malformed lines (e.g. a trailing blank line in the
        # input file): previously this raised IndexError and aborted the
        # whole ingestion at start-up. Skip and report instead.
        print("skip malformed line (missing '&'):", repr(doc))
        return
    document = str_arr[0]
    data[document] = str_arr[1]
    print("documents", document, "source", source, "ids-->", ids)
    collection.add(documents=[document],
                   metadatas=[{"source": source}],
                   ids=[ids]
                   )


# Ingest the local knowledge file into the vector store, one line at a time.
with open(LOCAL_DIR, encoding='utf-8') as f:
    for count, doc_line in enumerate(f, start=1):
        add_collection(doc_line, LOCAL_SOURCE, f"id{count}")
print("本地文档读取，并写入向量库完成")

# Load a single document (path is freely choosable) via LangChain's TextLoader.
loader = TextLoader('./private/test.txt', encoding='utf8')
docs = loader.load()
print("docs--->", docs)

# Call OpenAI embeddings (currently disabled):
# os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
# embeddings = OpenAIEmbeddings(openai_api_key=os.environ["OPENAI_API_KEY"])
# # # Split the documents into chunks to stay within the GPT API token limit.
# text_splitter = TokenTextSplitter(chunk_size=100, chunk_overlap=0)
# doc_texts = text_splitter.split_documents(docs)
# # # Vectorise
# # vectordb = Chroma.from_documents(doc_texts, embeddings, persist_directory=persist_directory)
# # # Persist
# # vectordb.persist()
# # After this runs you will find *.parquet files under the public directory;
# # those files are Chroma's locally persisted vector data.
# vectordb = Chroma.from_documents(doc_texts, embeddings, persist_directory=persist_directory)
# vectordb.persist()

# Search / chat UI
st.set_page_config(layout="wide")
st.title("SGM本地知识库智能应答")
# NOTE(review): USER_ID is defined but never used in this file — confirm
# whether a caller elsewhere needs it before removing.
USER_ID = "Eleven"


def generate_context(prompt, context_data='generated'):
    """Build the chat context: up to the last three exchanges plus *prompt*.

    Args:
        prompt: the latest user question.
        context_data: session-state key holding the assistant replies.

    Returns:
        A list of ``{'role', 'content'}`` message dicts.
    """
    messages = []
    history = st.session_state['generated']
    if history:
        # Include at most the three most recent user/assistant exchanges.
        start = max(len(history) - 3, 0)
        for idx in range(start, len(history)):
            messages.append({'role': 'user',
                             'content': st.session_state['user_input'][idx]})
            messages.append({'role': 'assistant',
                             'content': st.session_state[context_data][idx]})
    # Always end with the latest user prompt.
    messages.append({'role': 'user', 'content': str(prompt)})
    print("输出结果context-->", messages)
    return messages


# Create the session-state history lists on first run:
#   generated        - natural-language answers
#   database_results - raw Neo4j query results
#   user_input       - questions the user asked
#   cypher           - generated Cypher statements
for _history_key in ('generated', 'database_results', 'user_input', 'cypher'):
    if _history_key not in st.session_state:
        st.session_state[_history_key] = []


def get_text():
    """Render the question input box and return whatever the user typed."""
    typed = st.text_input("Ask away", "", key="input")
    print("用户输入内容为：-->", typed)
    return typed


def query_cypher(user_input1):
    """Return the single nearest stored document for *user_input1*."""
    return collection.query(query_texts=[user_input1], n_results=1)


def langchain_collection_query(user_input2):
    """Query the persisted 'langchain' collection for the closest match."""
    print("user_input2-->", user_input2)
    print("langchain_collection_query_list-->", chroma_client.list_collections())
    # Open the on-disk (duckdb+parquet) Chroma store persisted under ./private.
    persisted_client = chromadb.Client(
        settings=chromadb.config.Settings(chroma_db_impl="duckdb+parquet",
                                          persist_directory='private'))
    print("langchain_collection_list-->", persisted_client.list_collections())
    hits = persisted_client.get_collection("langchain").query(
        query_texts=[user_input2],
        n_results=1,
    )
    print("langchain_collection_query-->", hits)
    return hits


# Two-column layout: chat on the left (col1), Cypher preview on the right (col2).
col1, col2 = st.columns([2, 1])

with col2:
    another_placeholder = st.empty()
with col1:
    placeholder = st.empty()
user_input = get_text()

print("user_input-->", user_input)
# Find the nearest stored question for the user's input, then map it back to
# its cypher template via the in-memory `data` dict built during ingestion.
query_doc = ''
distance = 0
if len(user_input) > 0:
    result = query_cypher(user_input)
    print("向量库查询出来的最近向量-->", result)
    # result["documents"][0][0] is the closest matching question text.
    query_doc = data[result.get("documents")[0][0]]
    distances = result.get("distances")
    # Smaller distance = closer match; thresholded against 0.71 further below.
    distance = distances[0][0]
    print("distance-->", distance)
    print("query_doc-->", query_doc)


def get_cypher(query):
    """Execute *query* against Neo4j and return the raw result."""
    rows = run_query(query)
    return rows


# Replace placeholder keywords in a cypher template
def change_key(text):
    """Substitute keywords extracted from the user's question into *text*.

    Reads the module-level ``user_input`` as the keyword source.

    Returns:
        The cypher text with its placeholders replaced.

    Raises:
        Exception: propagated to the caller. Previously any error was
        swallowed (printed) and ``None`` was returned, which made the caller
        pass ``None`` to ``run_query`` and fail with a confusing secondary
        error; now the original error reaches the caller's except-handler.
    """
    print("本地知识库的文本内容-->", text)
    key_arr = get_keywords(user_input)
    print("关键字集合-->", key_arr)
    return replace_keywords(key_arr, text)


def _record_exchange(db_msg, gen_msg):
    """Append one exchange (db-result text, assistant reply) to the history."""
    st.session_state.database_results.append(db_msg)
    st.session_state.user_input.append(user_input)
    st.session_state.generated.append(gen_msg)


def _to_natural_language(result):
    """Ask the LLM to phrase the raw Neo4j *result* as natural language."""
    return natural_language([
        {'role': 'user', 'content': json.dumps(result)},
        {'role': 'assistant', 'content': 'Shrek 3'},
        {'role': 'user', 'content': json.dumps(result)},
    ])


if len(query_doc) > 0:
    # 0.71 is the distance cut-off: anything farther is treated as "no
    # relevant knowledge" — presumably tuned empirically; TODO confirm
    # against the embedding model in use.
    if distance > 0.71:
        _record_exchange(ERROR_MSG, ERROR_MSG)
    else:
        try:
            count = query_doc.count("%a%")
            if count > 0:
                # The template contains %a% placeholders: substitute the
                # user's keywords before executing it.
                query_text = change_key(query_doc)
                print("query_text-->", query_text)
                result = run_query(query_text)
                print("result1", result)
            else:
                result = run_query(query_doc)
            if not result:
                _record_exchange(NONE_MSG, NONE_MSG)
            else:
                response = _to_natural_language(result)
                _record_exchange(str(response), response)
        except Exception as e:
            # Surface any failure as a canned message instead of crashing
            # the Streamlit page.
            print(e)
            _record_exchange(EXCEPTION_MSG, EXCEPTION_MSG)

# Chat transcript: render only the three most recent exchanges.
with placeholder.container():
    chat_history = st.session_state['generated']
    if chat_history:
        first_shown = max(len(chat_history) - 3, 0)
        for idx in range(first_shown, len(chat_history)):
            message(st.session_state['user_input'][idx],
                    is_user=True, key=f"{idx}_user")
            message(chat_history[idx], key=str(idx))

# Most recent generated Cypher statement, shown in the side column.
with another_placeholder.container():
    cypher_history = st.session_state['cypher']
    if cypher_history:
        st.text_area("Latest generated Cypher statement",
                     cypher_history[-1], height=240)
