from flask import Blueprint, request, Response
from langchain_core.prompts import ChatPromptTemplate
from pojo.result import Result  # 所有的依赖必须从根目录开始引入
from langchain_community.chat_models import ChatOpenAI
import requests
from configuration.Template import template, intentionTemplate, template_from_history, multi_template,returnQuery_template
import json
import time
from configuration.database import db

"""sqlalchemy 操作mysql数据库"""
from sqlalchemy import create_engine, Table, MetaData
from sqlalchemy.orm import Session
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from pojo.Hit_doc_history import hit_doc_history
from langchain.chains import LLMChain
from configuration.config import llmurl, llmport, DB_URI_PYMYSQL, model, api_key, temperature, base_url
from service.generateLLM import returnllm
from web.llm import qianwen_llm
"""
知识库模块
"""
# Shared SQLAlchemy engine / session factory for the hit-doc history table.
engine = create_engine(DB_URI_PYMYSQL)
# NOTE(review): this rebinds the `Session` name imported from sqlalchemy.orm
# to a sessionmaker factory; every later `Session()` call uses this factory.
Session = sessionmaker(bind=engine)

# Blueprint for all knowledge-base chat routes, mounted at /knowledgebase.
knowledgebase = Blueprint("knowledgebase", __name__, url_prefix="/knowledgebase")


# Build the LLMChain used for answering
def return_llmchain():
    """Build the LLMChain used for streaming knowledge-base answers.

    Wires the shared answer ``template`` to a streaming ChatOpenAI model
    configured from module-level settings.

    Returns:
        LLMChain: chain expecting ``question`` and ``context`` inputs.
    """
    llm = ChatOpenAI(model=model, temperature=temperature, api_key=api_key, base_url=base_url, streaming=True,
                     max_tokens=2048)

    prompt = ChatPromptTemplate.from_template(
        template=template
    )
    # BUG FIX: the original also built `prompt | llm` and immediately
    # discarded it by rebinding the name; the dead assignment is removed.
    return LLMChain(prompt=prompt, llm=llm)


# Stream a response through an llmchain
def LLMchainchat(llm_chain, question, context):
    """Stream an LLMChain answer back to the client as plain text."""
    token_iter = llm_chain.stream({'question': question, 'context': context})

    def generate():
        # Each streamed item is stringified and sliced to its payload; the
        # [9:-1] slice strips the wrapper text around the content.
        # NOTE(review): assumes a fixed prefix length in str(chunk) — confirm
        # against the chain's streamed chunk repr.
        for chunk in token_iter:
            piece = str(chunk)[9:-1]
            print(piece)
            yield piece

    return Response(generate(), mimetype='text/plain')


def intentionJudge(question, historyMessage, hitDocHistoryMessage) -> str:
    """Ask the LLM whether `question` continues the existing conversation.

    ``hitDocHistoryMessage`` is accepted for interface compatibility but the
    current intention prompt only uses the question and chat history.

    Returns:
        str: the model's raw verdict text (e.g. containing '是' / '不是').
    """
    llm = returnllm()
    print('这是意图判断')
    prompt_text = intentionTemplate.format(question=question, history=historyMessage)
    print(prompt_text)

    verdict = llm.invoke(prompt_text)
    print(verdict.content)
    return verdict.content


## Main logic starts here

# Search the knowledge base
def search_doc(data):
    """POST the user query to the knowledge-base search endpoint.

    Returns:
        Parsed JSON hit list from /knowledge_base/search_docs.
    """
    payload = {
        "query": data['query'],
        "knowledge_base_name": data['knowledge_base_name'],
        "top_k": data['top_k'],
        "score_threshold": data['score_threshold'],
        "file_name": "",
        "metadata": {}
    }

    resp = requests.post(
        llmurl + llmport + '/knowledge_base/search_docs',
        headers={'accept': 'application/json', 'Content-Type': 'application/json'},
        json=payload,
    )
    print("这是搜索知识库返回的数据")
    print(resp.json())
    return resp.json()

def secondSearch_doc(data):
    """Search the knowledge base with the current query only.

    BUG FIX: the original wrote ``"query":  + data['query']`` — a unary plus
    applied to a string — which raised TypeError on every call. It also
    computed ``userFirstQuestion`` from the history without using it; see
    secondSearch_doc_for_intention for the variant that prepends the first
    user question to the query.

    Args:
        data: request payload with 'query', 'knowledge_base_name', 'top_k'
            and 'score_threshold'.

    Returns:
        Parsed JSON hit list from /knowledge_base/search_docs.
    """
    headers = {
        'accept': 'application/json',
        'Content-Type': 'application/json'
    }

    search_data = {
        # was: `+ data['query']` (unary plus on a str -> TypeError)
        "query": data['query'],
        "knowledge_base_name": data['knowledge_base_name'],
        "top_k": data['top_k'],
        "score_threshold": data['score_threshold'],
        "file_name": "",
        "metadata": {}
    }

    resp = requests.post(llmurl + llmport + '/knowledge_base/search_docs', headers=headers, json=search_data)
    print("这是搜索知识库返回的数据")
    print(resp.json())
    return resp.json()

# MDC: used only on the multi-turn (follow-up) path
def secondSearch_doc_for_intention(data):
    """Search the knowledge base for a follow-up turn.

    The first user question from the history is prepended to the current
    query so follow-up questions still hit the original documents
    (MDC: used only on the multi-turn path).
    """
    history = data['history']
    if history == []:
        first_question = ""
    else:
        first_question = history[0]['content']

    payload = {
        "query": first_question + "," + data['query'],
        "knowledge_base_name": data['knowledge_base_name'],
        "top_k": data['top_k'],
        "score_threshold": data['score_threshold'],
        "file_name": "",
        "metadata": {}
    }
    print('search_data')
    print(payload)

    resp = requests.post(
        llmurl + llmport + '/knowledge_base/search_docs',
        headers={'accept': 'application/json', 'Content-Type': 'application/json'},
        json=payload,
    )
    print("这是搜索知识库返回的数据")
    print(resp.json())
    return resp.json()


# Persist hit documents to the MySQL database
def storeData(chat_children_id, searchlist):
    """Persist hit documents for one chat turn into hit_doc_history.

    Args:
        chat_children_id: id of the chat message these hits belong to.
        searchlist: raw hit dicts from the knowledge-base search API, each
            with 'page_content' and 'metadata' (source / page / unique_id).
    """
    print("进入storeData*************")
    session = Session()
    try:
        for item in searchlist:
            page_content = item['page_content']

            source = item['metadata']['source']
            print(source)
            # Strip the '-#%...' uniqueness suffix inserted before the file
            # extension, restoring the displayed file name.
            # NOTE(review): if '-#%' is absent, rfind returns -1 and this
            # silently truncates the name — confirm sources always carry it.
            index_of_last1 = source.rfind('-#%')
            index_of_last2 = source.rfind('.')
            source = source[:index_of_last1] + source[index_of_last2:]
            print(source)

            page = item['metadata']['page']
            unique_id = int(item['metadata']['unique_id'])
            record = hit_doc_history(chat_child_id=chat_children_id, source=source,
                                     page_content=page_content, del_flag=0,
                                     doc_id=unique_id, page=page)
            session.add(record)
        # Commit once for the whole batch (the original committed per row).
        session.commit()
    finally:
        # Always return the connection, even when an insert fails
        # (the original leaked the session on any exception).
        session.close()


def storeAgainData(hitDocHistoryList):
    """Re-persist previously hit documents sent back by the client.

    Args:
        hitDocHistoryList: dicts with camelCase keys chatChildId / source /
            pageContent / delFlag / docId / page.
    """
    print("storeAgainData*************")
    session = Session()
    try:
        for item in hitDocHistoryList:
            session.add(hit_doc_history(
                chat_child_id=item['chatChildId'],
                source=item['source'],
                page_content=item['pageContent'],
                del_flag=item['delFlag'],
                doc_id=item['docId'],
                page=item['page'],
            ))
        # One commit for the batch (the original committed per row).
        session.commit()
    finally:
        # Always release the session, even on failure
        # (the original leaked it on any exception).
        session.close()


def returnHistory(data, newlist) -> dict:
    """Build the prompt fragments for chat history and previously hit docs.

    Args:
        data: request payload with 'history' (list of role/content dicts)
            and 'hitDocHistory' (previous hit docs with 'pageContent').
        newlist: extra page contents from the second-round search to merge
            into the hit-doc message.

    Returns:
        dict with 'historyMessage' and 'hitDocHistoryMessage' strings;
        each is "" when the corresponding input list is empty.
    """
    historyList = data['history']

    if historyList != []:
        result = []
        for dictionary in historyList:
            # Format each turn according to its role; unknown roles are
            # skipped (BUG FIX: the original read an unrelated, unused
            # historyList[0]['role'] before the emptiness check — crashing on
            # an empty history — and could append an unbound `dict_str` for
            # unknown roles).
            if dictionary["role"] == "user":
                result.append(f"user: {dictionary['content']}")
            elif dictionary["role"] == "assistant":
                result.append(f"AI: {dictionary['content']}")

        historyMessage = "这是对话记录：" + '\n' + '\n'.join(result)
    else:
        historyMessage = ""

    hitDocHistoryList = data['hitDocHistory']

    if hitDocHistoryList == []:
        # NOTE(review): `newlist` is ignored when there is no prior hit-doc
        # history; this mirrors the original behavior.
        hitDocHistoryMessage = ""
    else:
        page_contents = [item['pageContent'] for item in hitDocHistoryList
                         if item.get('pageContent') is not None]

        # Merge the second-round chunks (c4..c6 parts differing from
        # c1..c3) and drop duplicates; ordering of the joined chunks is
        # not significant for the prompt.
        page_contents = list(set(page_contents + list(newlist)))

        hitDocHistoryMessage = "这是已知信息:" + '\n' + "\n".join(page_contents)

    return {
        "historyMessage": historyMessage,
        "hitDocHistoryMessage": hitDocHistoryMessage
    }


# Plain streaming chat via the /chat/chat endpoint (no llmchain)
def normalChat(question, history, hitDocHistory, data):
    """Stream a single-turn answer through the /chat/chat endpoint.

    NOTE(review): the ``history`` argument is deliberately not formatted
    into the prompt (``history=''``); the raw conversation history is still
    forwarded in the request body.
    """
    prompt_text = template.format(question=question, history='', hitDocHistory=hitDocHistory)
    print("这是template" + prompt_text)

    payload = {
        "query": prompt_text,
        "conversation_id": "",
        "history_len": -1,
        "history": data['history'],
        "model_name": data['model_name'],
        "temperature": data['temperature'],
        "max_tokens": data['max_tokens'],
        "prompt_name": data['prompt_name'],
        "stream": 'true',
    }

    def relay():
        # Forward the upstream chunks straight through to the client.
        upstream = requests.post(
            llmurl + llmport + '/chat/chat', json=payload, stream=True,
            headers={'accept': 'application/json', 'Content-Type': 'application/json'},
        )
        for piece in upstream.iter_content(chunk_size=1024, decode_unicode=True):
            yield piece

    return Response(relay(), mimetype='text/plain')

def mutil_round_normalChat(question, history, hitDocHistory, data):
    """Stream a multi-turn answer built from the multi-turn prompt template."""
    prompt_text = multi_template.format(question=question, history=history, hitDocHistory=hitDocHistory)
    print("这是template" + prompt_text)

    payload = {
        "query": prompt_text,
        "conversation_id": "",
        "history_len": -1,
        "history": data['history'],
        "model_name": data['model_name'],
        "temperature": data['temperature'],
        "max_tokens": data['max_tokens'],
        "prompt_name": data['prompt_name'],
        "stream": 'true',
    }

    def relay():
        # Forward the upstream chunks straight through to the client.
        upstream = requests.post(
            llmurl + llmport + '/chat/chat', json=payload, stream=True,
            headers={'accept': 'application/json', 'Content-Type': 'application/json'},
        )
        for piece in upstream.iter_content(chunk_size=1024, decode_unicode=True):
            yield piece

    return Response(relay(), mimetype='text/plain')

def get_part_historyChat(question, history):
    """Ask the LLM to extract the part of `history` relevant to `question`."""
    extract_prompt = '''
    <指令>你是一个智能助手，你知道如何从一段文本中找到用户需要的部分。
    你需要分析分析用户的问题，从已知信息中找到用户需要或者询问的那部分内容。
    如果无法从中得到答案，请说 “无法搜索到相关内容”。
    </指令>
    <问题>{question}</问题>
    <已知信息>{context}</已知信息>
    '''
    llm = qianwen_llm()
    prompt_text = extract_prompt.format(question=question, context=history)
    reply = llm.invoke(prompt_text)
    return reply.content

@knowledgebase.route('/returnQuery', methods=['GET', 'POST'])
def returnquery():
    """Extract the answer for `question` from `context` via the LLM.

    The model is expected to return a JSON object with an 'output' key,
    which is wrapped in a Result envelope.
    """
    print("进入函数")
    payload = request.json
    question = payload['question']
    context = payload['context']
    llm = returnllm()
    prompt_text = returnQuery_template.format(question=question, context=context)
    print("returnQuery_template")
    print(prompt_text)

    reply = llm.invoke(prompt_text)
    raw = reply.content
    print(raw)
    print(type(raw))
    parsed = json.loads(raw)
    print(type(parsed))
    return vars(Result(200, "返回成功", parsed['output']))



# Two endpoints are defined: one streams the Q&A answer, one returns the titles of the matched documents.
# Streaming Q&A endpoint
@knowledgebase.route('/chat', methods=['POST'])
def chat():
    """Main knowledge-base chat endpoint (streaming).

    First turn (empty history): search the KB, persist the hits, answer as a
    single-turn question. Later turns: search again, ask the LLM whether the
    question is a follow-up (intention judge), then either take the
    single-turn path again or answer from the accumulated document and
    history context. Returns a streaming text/plain Response.
    """
    print("我进入了chat函数")
    # Pull the JSON payload
    data = request.json
    print("拿到的参数")
    print(data)
    question = data['query']

    # Rename knowledge_base_id -> knowledge_base_name for the search API
    knowledge_base_name = data.pop('knowledge_base_id')
    data['knowledge_base_name'] = knowledge_base_name
    chat_children_id = int(data["chat_child_id"])

    # Brand-new conversation: go straight to the knowledge-base search path
    if data['history'] == []:

        # Search the knowledge base; hits come back as a list of dicts
        searchlist = search_doc(data)
        # Persist the hit documents to MySQL
        storeData(chat_children_id, searchlist)
        document = "这是已知信息:" + '\n' + "\n".join([searchdoc["page_content"] for searchdoc in searchlist])
        # Build the chat-history string (empty list on the first turn)
        historyList = data['history']
        result = []
        for dictionary in historyList:
            # Format each turn according to its role
            if dictionary["role"] == "user":
                dict_str = f"user: {dictionary['content']}"
            elif dictionary["role"] == "assistant":
                dict_str = f"AI: {dictionary['content']}"
            result.append(dict_str)
        # Join the per-turn strings into a single context block
        context = "这是历史对话信息：" + '\n' + '\n'.join(result)
        # Plain streaming chat via /chat/chat (no llmchain)
        return normalChat(question, context, document, data)

    # Not the first turn: decide between a follow-up and a fresh question
    else:
        # Second revision of the flow:
        # 1: the first round yielded doc chunks C1,C2,C3. Before the
        #    intention judge, search again with the new question to get
        #    chunks c4,c5,c6.
        # 2: run the intention judge. For a follow-up, chunks of c4..c6 that
        #    differ from c1..c3 are merged into the history docs; only
        #    C1..C3 are stored. Then query the LLM.
        # 3: if it is NOT a follow-up, store c4..c6 directly and take the
        #    single-turn path.

        # (the first-turn question is recovered inside the search helper)

        # MDC: intention judging moved up front
        History = returnHistory(data,[])
        historyMessage = History['historyMessage']
        hitDocHistoryMessage = History['hitDocHistoryMessage']



        # MDC: on the follow-up path the first question is prepended to the
        # search query, which fixes hits landing in unrelated documents
        intention = intentionJudge(question, historyMessage,'')
        if '不是' in intention:
            secondsearchlist = search_doc(data)
        else:
            secondsearchlist = secondSearch_doc_for_intention(data)



        # Collect the c4,c5,c6 chunk texts and their (cleaned) source paths
        # secondsearchlist = secondSearch_doc(data)
        # print("这是找到的c4,c5,c6文档快")
        # print(secondsearchlist)
        secondPageContentlist = []
        secondSourcelist = []
        for item in secondsearchlist:
            secondPageContent = item['page_content']
            secondPageContentlist.append(secondPageContent)
            source = item['metadata']['source']
            # Strip the '-#%...' suffix inserted before the file extension
            index_of_last1 = source.rfind('-#%')
            index_of_last2 = source.rfind('.')
            source = source[:index_of_last1] + source[index_of_last2:]

            secondSourcelist.append(source)


        # Collect the C1,C2,C3 page contents from the client-sent history
        firstSourcelsit = []
        for item in data['hitDocHistory']:
            firstSourcelsit.append(item.get('page_content'))

        # MDC: dedupe so firstSourcelsit carries no repeated entries
        firstSourcelsit = list(set(firstSourcelsit))

        # Compare c4..c6 against c1..c3 and keep the differing chunk texts

        # MDC
        # The earlier comparison logic was wrong (it matched on unprocessed
        # file names); currently every second-round chunk is kept in newList.
        newList = []
        temp = []
        # for item in data['hitDocHistory']:
        #     newList.append(item.get('pageContent'))
        # for i in range(len(secondSourcelist)):
        #     if secondSourcelist[i] in firstSourcelsit:
        #         newList.append(secondsearchlist[i]['page_content'])
        #         temp.append(secondsearchlist[i])
        for i in range(len(secondSourcelist)):
            newList.append(secondsearchlist[i]['page_content'])


        multi_secondsearchlist = temp


        History = returnHistory(data,newList)
        historyMessage = History['historyMessage']

        second = historyMessage

        hitDocHistoryMessage = History['hitDocHistoryMessage']

        # NOTE(review): the merged message above is immediately overwritten
        # here with only the second-round chunk texts
        hitDocHistoryMessage = ''
        for i in newList:
            hitDocHistoryMessage += i
            hitDocHistoryMessage += "\n"

        # `intention` is a plain string verdict (e.g. contains '是'/'不是')
        # intention = intentionJudge(question, historyMessage, hitDocHistoryMessage)


        # intention = '是'
        # MDC: mind the exact format of `intention`
        # if  "yes" in intention['output'] :
        if '不是' in intention:
            print('没进入多轮问答')
            # # 搜索知识库返回文档数据，以列表的形式返回
            # searchlist = search_doc(data)

            # Store the second-round hit documents to MySQL
            storeData(chat_children_id, secondsearchlist)

            document = "这已知信息:" + '\n' + "\n".join([searchdoc["page_content"] for searchdoc in secondsearchlist])
            # Build the chat-history string
            historyList = data['history']
            result = []
            for dictionary in historyList:
                # Format each turn according to its role
                if dictionary["role"] == "user":
                    dict_str = f"user: {dictionary['content']}"
                elif dictionary["role"] == "assistant":
                    dict_str = f"AI: {dictionary['content']}"
                result.append(dict_str)
            # Join the per-turn strings into a single context block
            context = "这是历史对话信息：" + '\n' + '\n'.join(result)
            # Plain streaming chat via /chat/chat (no llmchain)
            return normalChat(question, context, document, data)
        else:
            # Follow-up: answer from the previous docs and chat history; the
            # second-round hits are stored as this turn's documents.
            print('进入多轮问答')
            # storeAgainData(data['hitDocHistory'])
            # storeData(chat_children_id, multi_secondsearchlist)
            storeData(chat_children_id, secondsearchlist)
            historyMessage = get_part_historyChat(question, historyMessage)
            # return normalChat(question,historyMessage,hitDocHistoryMessage,data)

            return mutil_round_normalChat(question,historyMessage,hitDocHistoryMessage,data)


        # except:
        #
        #     # # 搜索知识库返回文档数据，以列表的形式返回
        #     # searchlist = search_doc(data)
        #     # 将搜索出来的文档数据存储到mysql数据库中
        #
        #     storeData(chat_children_id, secondsearchlist)
        #     document = "这是历史文档信息:" + '\n' + "\n".join([searchdoc["page_content"] for searchdoc in secondsearchlist])
        #     # 提取历史信息
        #     historyList = data['history']
        #     result = []
        #     for dictionary in historyList:
        #         # 根据不同的角色来构建字符串
        #         if dictionary["role"] == "user":
        #             dict_str = f"user: {dictionary['content']}"
        #         elif dictionary["role"] == "assistant":
        #             dict_str = f"AI: {dictionary['content']}"
        #         result.append(dict_str)
        #     # 将所有字典的字符串合并成一个最终字符串
        #     context = "这是历史对话信息：" + '\n' + '\n'.join(result)
        #     # 不使用llmchain，使用/chat/chat接口进行普通流式对话
        #     return normalChat(question, context, document, data)


    # Alternative: build an llm_chain and stream via its built-in method
    # llm_chain = return_llmchain()
    # 使用llmchain,使用其自带的流式方法
    # return LLMchainchat(llm_chain,question,context)
