import os

import example2 as example2
import openai
import pinecone
from retry import retry

from neo4j_driver import run_query
from training import examples

# SECURITY: never commit API keys to source control. Prefer the environment
# variable; the literal fallback below is an already-exposed key that must be
# rotated and then deleted from this file.
openai.api_key = os.getenv("OPENAI_API_KEY", "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")

# System prompt constraining the model to emit only Cypher statements that can
# be inferred from the example queries imported from `training.examples`.
system = f"""
You are an assistant with an ability to generate Cypher queries based off example Cypher queries.
Example Cypher queries are: \n {examples} \n
Do not response with any explanation or any other information except the Cypher query.
You do not ever apologize and strictly generate cypher statements based of the provided Cypher examples.
You need to update the database using an appropriate Cypher statement when a user mentions their likes or dislikes, or what they watched already.
Do not provide any Cypher statements that can't be inferred from Cypher examples.
Inform the user when you can't infer the cypher statement due to the lack of context of the conversation and state what is the missing context.
"""
# pinecone.init(
#     api_key=pinecone_key,
#     environment=pinecone_environment
# )
#
# prompt="欧阳干最喜欢吃的水果是？"
#
# # 初始化索引
# active_indexes = pinecone.list_indexes()
# index = pinecone.Index(active_indexes[0])
# print("****************** 初始化索引：Done ******************")
#
# # 获取知识库内容
# file = open('data/question_bank.txt', 'r')
# content = file.read()
# file.close()
# print("****************** 知识库获取：Done ******************")
#
# # 生成知识库embedding vector
# data_embedding_res = openai.Embedding.create(
#   model="text-embedding-ada-002",
#   input=content
# )
# print("****************** 生成知识库embedding vector：Done ******************")
#
# # 更新知识库向量以及对应的元数据
# upsertRes = index.upsert([
#   ("q1", data_embedding_res['data'][0]['embedding'], { "data": content })
# ])
# print("****************** 更新知识库向量以及对应的元数据：Done ******************")
#
# # 生成问题embedding vector
# promt_embedding_res = openai.Embedding.create(
#   model="text-embedding-ada-002",
#   input=prompt
# )
# print("****************** 生成问题embedding vector：Done ******************")
#
# # 从知识库中检索相关内容
# # 返回的数据格式如下：
# # {
# #   'matches': [{
# #     'id': 'q1',
# #     'metadata': {'data': '2022年卡塔尔世界杯的冠军是卡塔尔'},
# #     'score': 0.952013373,
# #     'values': []
# #   }],
# #   'namespace': ''
# # }
# prompt_res = index.query(
#   promt_embedding_res['data'][0]['embedding'],
#   top_k=5,
#   include_metadata=True
# )
# print("****************** 从知识库中检索相关内容：Done ******************")
#
#
# # 重新构造prompts
# contexts = [item['metadata']['data'] for item in prompt_res['matches']]
# prompt_final = "\n\n"+"\n\n---\n\n".join(contexts)+"\n\n-----\n\n"+prompt
# print("****************** 重新构造prompts：Done ******************")
#
# # 与LLM交流
# completion = openai.ChatCompletion.create(
#   model="gpt-3.5-turbo",
#   messages=[
#     {"role": "user", "content": prompt}
#   ]
# )
#
# print("User: ", prompt)
# print("AI: ", completion.choices[0].message.content)


# Chinese-language system prompt, roughly: "You are an assistant able to answer
# in natural language according to the examples; the examples are: {examples}".
# Used by natural_language() below.
system2 = f"""
你是一名助手，能够根据示例回答出自然语言，示例是: \n {examples} \n
"""


@retry(tries=2, delay=5)
def natural_language(messages):
    """Answer the given chat history in natural language.

    Prepends the ``system2`` example-driven system prompt to the
    caller-supplied messages and sends the conversation to gpt-3.5-turbo
    at temperature 0 (deterministic). Returns the assistant's reply text.
    Retried up to 2 times with a 5 s delay on failure.
    """
    conversation = [{"role": "system", "content": system2}]
    conversation.extend(messages)
    # Make a request to OpenAI
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=conversation,
        temperature=0.0
    )
    answer = completion.choices[0].message.content
    print('自然语义模型调用openAi返回结果-->', answer)
    return answer
@retry(tries=2, delay=5)
def generate_cypher(messages):
    """Generate a Cypher statement from the chat history via gpt-3.5-turbo.

    Prepends the Cypher-only ``system`` prompt, calls OpenAI at temperature 0,
    then post-processes the reply: rejects replies that look like raw data
    instead of Cypher, strips a leading apology line, and unwraps fenced code
    blocks. Retried up to 2 times with a 5 s delay on failure.

    Raises:
        Exception: when the model bypassed the system prompt and returned
            data from the prior conversation instead of a Cypher statement.
    """
    print('message->', messages)
    messages = [
        {"role": "system", "content": system}
    ] + messages
    # Make a request to OpenAI
    completions = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0.0
    )
    response = completions.choices[0].message.content
    print('首次返回结果-->', response)
    # Sometimes the model bypasses the system prompt and returns data based
    # on previous dialogue history (a dict-like blob with no MATCH clause).
    if "MATCH" not in response and "{" in response:
        raise Exception(
            "GPT bypassed system message and is returning response based on previous conversation history" + response)
    # If the model apologized, remove the first (apology) line.
    # Case-insensitive so capitalised "Apologies..." is also caught.
    if "apologi" in response.lower():
        response = " ".join(response.split("\n")[1:])
    # The model sometimes wraps the Cypher in a fenced code block when it
    # wants to explain things; extract the fenced part.
    if "```" in response:
        response = response.split("```")[1].strip("`")
    elif "`" in response:
        # BUG FIX: the original tested for a single backtick but split on the
        # triple fence, raising IndexError when only single backticks appear.
        # Inline single-backtick quoting: just strip the backticks.
        response = response.strip().strip("`")
    print('处理完成以后返回结果-->', response)
    return response

import json

if __name__ == '__main__':
    # Smoke-test the natural-language path with a short fabricated dialogue
    # (user asks the model to introduce which model it is, in Chinese).
    content = "请介绍一下你自己是哪个模型"
    dialogue = [
        {'role': 'user', 'content': content},
        {'role': 'assistant', 'content': content},
        {'role': 'user', 'content': content},
    ]
    res = natural_language(dialogue)
    print(res)
    # Earlier experiment (disabled): run the generated Cypher against Neo4j
    # via run_query(), then feed the JSON-encoded rows back through
    # natural_language() to verbalise the result.

