import json
import openai
from langchain.prompts.chat import ChatPromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from fastapi import File, UploadFile, Form, Body, Query
from typing import Union, List
from scripts.server.utils import ChatHistorys,APIResponse,LLMResponse
import configs
from langchain.chat_models import ChatOpenAI
from scripts.server.kb_api import load_kb

def chat_with_openai_rq(query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                        history = Body([]),
                        temperature: float = Body(0.5, description="生成文本的随机性", ge=0, le=1)
                        ):
    """FastAPI endpoint: chat via the raw OpenAI client (non-LangChain path).

    Thin wrapper that unpacks the request body and delegates to the plain
    Python implementation.
    """
    return _chat_with_openai_rq(query=query, history=history, temperature=temperature)
def _chat_with_openai_rq(query, history, temperature):
    '''
    Non-LangChain implementation: call the OpenAI chat-completions API directly.

    Appends the user query and then the assistant reply to ``history``
    (mutated in place) and returns an LLMResponse whose ``msg`` is the
    final answer and whose ``history`` is the updated conversation.
    '''
    cfg = configs.API_keys
    history.append({'role': 'user', 'content': query})

    # v1-style client, configured from the project-level settings module.
    client = openai.OpenAI(
        base_url=cfg.openai_api_url,
        api_key=cfg.openai_api_key,
    )
    completion = client.chat.completions.create(
        model=cfg.openai_model_name,
        messages=history,
        temperature=temperature,
    )

    reply = completion.choices[0].message.content
    history.append({"role": "assistant", "content": reply})
    return LLMResponse(code=200, msg=history[-1]['content'], history=history)


def chat(query, history, temperature=0.1):
    '''
    Single-turn chat helper: send ``history`` + ``query`` to the model and
    return only the answer text.

    Note: appends the user turn to ``history`` in place but does NOT append
    the assistant reply (unlike _chat_with_openai_rq).

    :param query: user input text
    :param history: list of {'role', 'content'} messages; mutated in place
    :param temperature: sampling temperature for generation
    :return: the assistant's answer string
    '''
    api_key = configs.API_keys.openai_api_key
    base_url = configs.API_keys.openai_api_url
    model_name = configs.API_keys.openai_model_name
    history.append({'role': 'user', 'content': query})
    # FIX: use the v1 client API (openai>=1.0), consistent with the rest of
    # this module — the legacy module-level openai.ChatCompletion /
    # openai.api_base attributes were removed in openai 1.0 and would raise.
    client = openai.OpenAI(
        base_url=base_url,
        api_key=api_key,
    )
    response = client.chat.completions.create(
        model=model_name,
        messages=history,
        temperature=temperature,
    )
    answer = response.choices[0].message.content
    return answer




def chat_with_openai_lc(query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                        history = Body([]),
                        temperature: float = Body(0.5, description="生成文本的随机性", ge=0, le=1)
                        ):
    """FastAPI endpoint: chat via LangChain (ChatOpenAI + LLMChain).

    Thin wrapper that unpacks the request body and delegates to the
    LangChain-based implementation.
    """
    return _chat_with_openai_lc(query=query, history=history, temperature=temperature)
def _chat_with_openai_lc(query, history, temperature):
    '''
    LangChain implementation: build a ChatPromptTemplate from the
    conversation history and run it through an LLMChain.

    Appends the user query and then the assistant reply to ``history``
    (mutated in place) and returns an LLMResponse carrying the final
    answer plus the updated history.

    :param query: user input text
    :param history: list of {'role', 'content'} messages; mutated in place
    :param temperature: sampling temperature for generation
    :return: LLMResponse(code, msg=last answer, history=updated history)
    '''
    history.append({'role': 'user', 'content': query})

    api_key = configs.API_keys.openai_api_key
    base_url = configs.API_keys.openai_api_url
    model_name = configs.API_keys.openai_model_name

    # Translate OpenAI-style role names to the ones ChatPromptTemplate expects.
    role_map = {'user': 'human', 'assistant': 'ai'}
    history_trans = []
    for msg in history:
        # BUG FIX: the original appended unknown-role messages (e.g. 'system')
        # back onto `history` instead of `history_trans`, so they were dropped
        # from the prompt and the input list grew with a duplicate entry.
        history_trans.append({'role': role_map.get(msg['role'], msg['role']),
                              'content': msg['content']})

    # Build the full conversation prompt from the translated history.
    chatprompt = ChatPromptTemplate.from_messages(
        [(m['role'], m['content']) for m in history_trans])

    llm = ChatOpenAI(temperature=temperature, model_name=model_name,
                     openai_api_key=api_key, openai_api_base=base_url)
    chat_llm_chain = LLMChain(
        llm=llm,
        prompt=chatprompt,
        verbose=True
    )
    # The prompt has no unfilled input variables, so predict() takes no args.
    responses = chat_llm_chain.predict()
    history.append({'role': 'assistant', 'content': responses})
    return LLMResponse(code=200, msg=history[-1]['content'], history=history)


# def chat_with_kb(query: str = Body(..., description="用户输入", examples=["发动机装饰盖子的装饰罩的备件号是多少？"]),
#                  KB = Body([], description="当前使用的知识库"),)



if __name__ == '__main__':
    # Interactive smoke test: retrieval-augmented Q&A against a local KB.
    # query = "发动机装饰盖子的装饰罩的备件号是多少？"
    history = []
    kb_name = 'voyah_free_unstruct'
    embedding_model = 'bge-small-zh-v1.5'
    top_k = 5
    score_threshold = 0.1
    temperature = 0.1

    # Load the knowledge base once, outside the question loop.
    db = load_kb(kb_name=kb_name, embedding_model=embedding_model)

    while True:
        question = input('请提问：')
        # Cosine-similarity retrieval of the top-k chunks.
        hits = db.similarity_search_with_relevance_scores(query=question, k=top_k)
        # hits = db.similarity_search_with_score(question)  # l2-distance

        relevant_docs = '\n-----\n'.join(doc.page_content for doc, _score in hits)
        model_input = configs.prompts.kb_prompt.render(relevant_docs=relevant_docs, query=question)
        answer = chat_with_openai_rq(query=model_input, history=[], temperature=temperature)
        print('召回chunk', relevant_docs)
        print('\n模型回答结果：\n', answer.msg)