#!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import os

from flask import current_app, Response, stream_with_context
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.conversation.base import ConversationChain
from langchain.chains.llm import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.document_loaders import PyPDFLoader, UnstructuredPowerPointLoader, TextLoader, CSVLoader, \
    JSONLoader, UnstructuredMarkdownLoader, UnstructuredWordDocumentLoader, UnstructuredExcelLoader
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder

from controller.documentController import DocumentController
from controller.pptBaseController import PptBaseController
from controller.pptChatController import PptChatController
from llm.embedding import LLMFactory
from models import db
from models.ppt_base import PptBase
from models.ppt_chat import PptChat
from service.llmService import LLMService
from service.pptChatService import PptChatService
from utils.document_split import DocumentSplitter
from utils.fetch_json import fetch_json
from utils.generate_id import GenerateID
from utils.loggings import loggings
from utils.response_code import RET


class PptChatDocumentService(PptChatController):
    """Builds and iterates on a PPT outline from user-uploaded documents.

    The service splits uploaded files into chunks, summarises them with a
    LangChain map-reduce chain, indexes them into a Chroma vector store,
    and streams outline-generation chats back to the client as SSE events.
    """

    # Project root: two directory levels above this file.
    basic_dir = os.path.dirname(os.path.dirname(__file__))
    # Directory where uploaded source documents are stored.
    upload_dir = os.path.join(basic_dir, 'uploads', 'documents')
    # Directory under which per-user Chroma vector stores are persisted.
    vector_dir = os.path.join(basic_dir, 'db', 'vectors')

    # Map-step prompt for the map-reduce chain (used via
    # PromptTemplate.from_template, hence single `{{`/`}}` brace escaping).
    # Asks the model for a JSON PPT outline of a batch of document chunks.
    map_template = """以下是文档列表：
    ###
    {docs}
    ###
    根据文档列表请给出文档的大纲，最终需要提炼出包含了标题，每个章节，子标题的大纲结构，生成一个PPT的基本框架结构。结构示例如下：
    {{
        "title": "PPT标题",
        "subtitle": [
        {{
            "title": "章节1",
            "subtitle": [{{
                "title": "子标题1",
                "subtitle": [],
                "content": "子标题1下主要描述的内容"
            }}, {{
                "title": "子标题2",
                "subtitle": [],
                "content": "子标题2下主要描述的内容"
            }}],
            "content": "章节1下主要描述的内容"
        }}],
        "content": "本PPT的主题内容"
    }}
    请输出一个json格式的结果，结构包裹在```json```标签中。如果有其他的说明性文字需要放在标签之外。适当给一些说明文字。
    """

    # Reduce-step prompt: condenses the mapped partials into one abstract.
    reduce_template = """以下是一个文档的部分内容:
    {docs}
    将这些内容整合，提炼成文档主题的综合摘要，概括性地描述文档的主题，核心内容。"""

    # System prompt for the first chat turn. It is passed through
    # str.format(abstract=...) first and then ChatPromptTemplate, so JSON
    # braces are escaped twice (`{{{{`/`}}}}`).
    basic_prompt = """你是一个PPT设计师，现在用户想要获取一个PPT的基本框架结构，包含了标题，每个章节，子标题的大纲结构，用户将使用这目录结构来组织自己的PPT。现在用户提供了自己的文档，以下是对文档的概况内容：
    {abstract}
    请根据用户的需求，生成一个PPT的基本框架结构。结构示例如下：
    {{{{
        "title": "PPT标题",
        "subtitle": [
        {{{{
            "title": "章节1",
            "subtitle": [{{{{
                "title": "子标题1",
                "subtitle": [],
                "content": "子标题1下主要描述的内容"
            }}}}, {{{{
                "title": "子标题2",
                "subtitle": [],
                "content": "子标题2下主要描述的内容"
            }}}}],
            "content": "章节1下主要描述的内容"
        }}}}],
        "content": "本PPT的主题内容"
    }}}}
    请输出一个json格式的结果，结构包裹在```json```标签中。如果有其他的说明性文字需要放在标签之外。适当给一些说明文字。
    """
    # Prompt used to generate a short conversation title.
    title_prompt = "请为以下内容起一个标题来概括这段对话，标题要比较简洁明了。以下输入都是在创建一个PPT框架的背景下的。最终直接输出这个标题即可。"

    # System prompt for follow-up turns: regenerate the outline taking the
    # user's modification request plus retrieved document context into
    # account (same double-escaped braces as basic_prompt).
    modify_prompt = """你是一个PPT设计师，现在用户想要修改一个PPT的基本框架结构，包含了标题，每个章节，子标题的大纲结构，用户将使用这目录结构来组织自己的PPT。用户提出了一些修改的意见。
    
    用户文档中与修改意见相关的内容如下：
    ###
    {related}
    ###
    
    请根据用户的需求，生成一个PPT的基本框架结构。结构示例如下：
    {{{{
        "title": "PPT标题",
        "subtitle": [
        {{{{
            "title": "章节1",
            "subtitle": [{{{{
                "title": "子标题1",
                "subtitle": [],
                "content": "子标题1下主要描述的内容"
            }}}}, {{{{
                "title": "子标题2",
                "subtitle": [],
                "content": "子标题2下主要描述的内容"
            }}}}],
            "content": "章节1下主要描述的内容"
        }}}}],
        "content": "本PPT的主题内容"
    }}}}
    请输出一个json格式的结果，结构包裹在```json```标签中。如果有其他的说明性文字需要放在标签之外。适当给一些说明文字。
    """

    @classmethod
    def split_document(cls, doc_location):
        # Load Document
        suffix = doc_location.split('.')[-1]
        db_save_path = os.path.join(cls.basic_dir, doc_location)

        # Document Split
        if suffix in ['doc', 'docx']:
            loader = UnstructuredWordDocumentLoader(db_save_path)
            document = loader.load()
            split = DocumentSplitter.load_docx_splitter(document)
        elif suffix in ['ppt', 'pptx']:
            loader = UnstructuredPowerPointLoader(db_save_path)
            document = loader.load()
            split = DocumentSplitter.load_ppt_splitter(document)
        elif suffix == 'pdf':
            loader = PyPDFLoader(db_save_path, extract_images=True)
            document = loader.load()
            split = DocumentSplitter.load_pdf_splitter(document)
        elif suffix == 'text':
            loader = TextLoader(db_save_path)
            document = loader.load()
            split = DocumentSplitter.load_txt_splitter(document)
        elif suffix == 'md':
            loader = UnstructuredMarkdownLoader(db_save_path)
            document = loader.load()
            split = DocumentSplitter.load_md_splitter(document)
        elif suffix == 'csv':
            loader = CSVLoader(db_save_path)
            document = loader.load()
            split = DocumentSplitter.load_csv_splitter(document)
        elif suffix in ['xls', 'xlsx']:
            loader = UnstructuredExcelLoader(db_save_path)
            document = loader.load()
            split = DocumentSplitter.load_csv_splitter(document)
        else:
            return None, None

        return split, db_save_path

    @classmethod
    def get_whole_file_outline(cls, document):
        """Summarise *document* chunks with a map-reduce chain.

        Each chunk is first mapped with ``map_template``; the partial
        results are then collapsed into one abstract via
        ``reduce_template``. Returns the final text, or an error dict when
        the LLM cannot be initialised.
        """
        try:
            llm = LLMService.get_model()
        except Exception as err:
            return {'code': RET.DATAERR, 'message': str(err), 'data': str(err)}

        # Per-chunk outline chain (map step).
        mapper = LLMChain(llm=llm, prompt=PromptTemplate.from_template(cls.map_template))
        # Summary chain used for both combining and collapsing (reduce step).
        reducer = LLMChain(llm=llm, prompt=PromptTemplate.from_template(cls.reduce_template))

        stuff_chain = StuffDocumentsChain(
            llm_chain=reducer, document_variable_name="docs"
        )

        reduce_docs_chain = ReduceDocumentsChain(
            combine_documents_chain=stuff_chain,
            collapse_documents_chain=stuff_chain,
            token_max=4000,
        )

        pipeline = MapReduceDocumentsChain(
            llm_chain=mapper,
            reduce_documents_chain=reduce_docs_chain,
            document_variable_name="docs",
            return_intermediate_steps=False,
        )

        return pipeline.invoke(document)['output_text']

    @classmethod
    def initialize_rag_retriever(cls, split, vector_save_path):
        """Embed the given document chunks and persist them as a Chroma store.

        :param split: list of document chunks to index.
        :param vector_save_path: directory the store is persisted to.
        :return: the persist directory, so callers can reuse it for search.
        """
        cfg = current_app.config
        embedder = LLMFactory.get_embedding(cfg['EMBEDDING_MODEL'],
                                            cfg['EMBEDDING_API_KEY'])

        store = Chroma.from_documents(documents=split,
                                      embedding=embedder,
                                      persist_directory=vector_save_path)
        store.persist()

        return vector_save_path

    @classmethod
    def chat_ppt_framework_with_document(cls, **kwargs):
        """Stream a document-grounded PPT-outline chat answer as SSE.

        Expected kwargs: ``ppt_id``, ``content`` (the user's message) and
        ``files`` (list of dicts carrying ``doc_id``). Returns a Flask
        ``Response`` whose body is a ``text/event-stream`` of ``message``
        chunks followed by a ``done`` (or ``error``) event; the chat turn
        and generated outline are persisted inside the stream generator.
        """
        from flask import g
        user_id = g.user['user_id']
        content = kwargs.get('content')
        files = kwargs.get('files')

        try:
            # file is same in history
            is_same = True

            if files is not None:
                files = [f['doc_id'] for f in files]

            res_ppt = PptBaseController.get(ppt_id=kwargs.get('ppt_id'))
            if res_ppt['code'] != RET.OK:
                return Response(f"event: error\ndata: {json.dumps({'error': 'PPT not found'})}\n\n",
                                mimetype="text/event-stream",
                                headers={})

            res_ppt = res_ppt['data'][0]
            file_info = res_ppt['doc_info']
            if file_info is None:
                file_list = []
            else:
                file_list = json.loads(file_info)

            # Any file not seen in the stored doc_info forces a rebuild of
            # the vector store.  NOTE(review): if ``files`` is None this loop
            # raises TypeError, which falls into the except below — confirm
            # that is the intended handling for requests without files.
            for file in files:
                if file not in file_list:
                    is_same = False
                    break
        except Exception as e:
            # NOTE(review): Response() is handed a dict here, not a string;
            # Flask will iterate the dict (yielding its keys) as the body.
            # This probably should be an SSE-formatted json.dumps payload
            # like the 'PPT not found' branch above — confirm.
            return Response({'code': RET.DATAERR, 'message': str(e), 'data': str(e)}, mimetype="text/event-stream",
                            headers={})

        try:
            model = LLMService.get_model()
        except Exception as e:
            # NOTE(review): same dict-body Response concern as above.
            return Response({'code': RET.DATAERR, 'message': str(e), 'data': str(e)}, mimetype="text/event-stream",
                            headers={})

        chat_history = PptChatService.get_chat_history(ppt_id=kwargs.get('ppt_id'))

        if not is_same:
            # File set changed: re-split every referenced document.
            documents = []
            for doc_id in files:
                doc_info = DocumentController.get(doc_id=doc_id)
                if doc_info['code'] != RET.OK or doc_info['totalCount'] != 1:
                    continue
                doc_info = doc_info['data'][0]
                split, document_save_path = cls.split_document(doc_info['doc_location'])
                if split is None:
                    return Response({'code': RET.DATAERR, 'message': 'Unsupported file format', 'data': {}},
                                    mimetype="text/event-stream", headers={})
                documents.extend(split)

            # First chat
            if len(chat_history['data']) == 0:
                vector_db_path = os.path.join('db', 'vectors', f'{user_id}', f'{GenerateID.create_random_id()}')
                vector_save_path = os.path.join(cls.basic_dir, vector_db_path)

                # Index in the background so the first answer is not blocked
                # by embedding.
                import threading
                threading.Thread(target=cls.initialize_rag_retriever, args=(documents, vector_save_path)).start()

                # cls.initialize_rag_retriever(documents, vector_db_path)

                abstract = cls.get_whole_file_outline(documents)

                prompt = cls.get_prompt(cls.basic_prompt.format(abstract=abstract), "{user_input}")
                prompt = ChatPromptTemplate.from_messages(prompt)
                chain = prompt | model

                response = chain.stream({"user_input": content})

            # update vector db
            else:
                vector_db_path = res_ppt['vector_path']
                if vector_db_path is None:
                    vector_db_path = os.path.join('db', 'vectors', f'{user_id}', f'{GenerateID.create_random_id()}')

                vector_save_path = os.path.join(cls.basic_dir, vector_db_path)
                cls.initialize_rag_retriever(documents, vector_save_path)

                related = cls.search_documents(vector_save_path, content, limit=2)
                related = "\n".join([r.page_content for r in related])
                history = [('system', cls.basic_prompt)]
                for chat in chat_history['data']:
                    history.append(('user', chat['content']))
                    history.append(('system', chat['response']))

                prompt = ChatPromptTemplate.from_messages(
                    [
                        MessagesPlaceholder(variable_name="history"),
                        ("system", cls.modify_prompt.format(related=related)),
                        ("user", "用户输入：{user_input}"),
                    ]
                )
                chain = prompt | model
                response = chain.stream({"user_input": content, "history": history})

        else:
            # File set unchanged: reuse the existing vector store and answer
            # the modification request with retrieved context plus history.
            vector_db_path = res_ppt['vector_path']
            if vector_db_path is None:
                vector_db_path = os.path.join('db', 'vectors', f'{user_id}', f'{GenerateID.create_random_id()}')

            vector_save_path = os.path.join(cls.basic_dir, vector_db_path)

            related = cls.search_documents(vector_save_path, content, limit=2)
            related = "\n".join([r.page_content for r in related])
            history = [('system', cls.basic_prompt)]
            for chat in chat_history['data']:
                history.append(('user', chat['content']))
                history.append(('system', chat['response']))

            prompt = ChatPromptTemplate.from_messages(
                [
                    MessagesPlaceholder(variable_name="history"),
                    ("system", cls.modify_prompt.format(related=related)),
                    ("user", "用户输入：{user_input}"),
                ]
            )
            chain = prompt | model
            response = chain.stream({"user_input": content, "history": history})

        total_response = ''

        def generate():
            # Accumulate the full model output while streaming chunks to the
            # client, then persist everything once the stream ends.
            nonlocal total_response

            for trunk in response:
                c = trunk.content
                total_response += c

                yield f"event: message\ndata: {json.dumps({'content': c})}\n\n"

            # Re-enter an application context: the request context is gone
            # by the time the generator finishes streaming.
            from manage import app
            with app.app_context():

                try:
                    # Persist the chat turn / outline to the database.
                    from utils.generate_id import GenerateID

                    ppt_id = kwargs.get('ppt_id', None)
                    if ppt_id is None:
                        # Brand-new PPT: create base record plus root chat.
                        ppt_id = GenerateID.create_random_id()
                        chat_root_id = GenerateID.create_random_id()
                        chat_id = GenerateID.create_random_id()

                        title = PptChatService.get_title(content, model)
                        # Strip surrounding double quotes from the title.
                        if title.startswith('"') and title.endswith('"'):
                            title = title[1:-1]

                        db.session.begin_nested()

                        # NOTE(review): ``abstract`` is only assigned in the
                        # first-chat branch above; if this path is reached
                        # from another branch it raises NameError (caught
                        # below as an 'error' event) — confirm intended.
                        ppt_base_model = PptBase(
                            ppt_id=ppt_id,
                            chat_root_id=chat_root_id,
                            user_id=user_id,
                            title=title,
                            outline=fetch_json(total_response),
                            doc_info=json.dumps(files),
                            abstract=abstract,
                            vector_path=vector_db_path,
                            status=0,
                            gen_type=1,
                        )
                        db.session.add(ppt_base_model)

                        ppt_chat_model = PptChat(
                            chat_parent_id=chat_root_id,
                            chat_id=chat_id,
                            content=kwargs.get('content'),
                            response=total_response,
                            # doc_location=kwargs.get('doc_location'),
                        )
                        db.session.add(ppt_chat_model)
                        db.session.commit()
                        # NOTE(review): this branch yields 'done' here AND
                        # again after the if/else below — clients receive two
                        # 'done' events for a new PPT; likely unintended.
                        yield f"event: done\ndata: {json.dumps({'ppt_id': ppt_id, 'title': title, 'chat_id': chat_id})}\n\n"
                    else:
                        # Existing PPT: backfill missing fields and append
                        # the new chat turn.
                        ppt_base_model = db.session.query(PptBase).filter(PptBase.ppt_id == ppt_id)
                        ppt_base_info = ppt_base_model.first()

                        if ppt_base_info.title is None:
                            title = PptChatService.get_title(content, model)
                            # Strip surrounding double quotes from the title.
                            if title.startswith('"') and title.endswith('"'):
                                title = title[1:-1]
                            ppt_base_model.update({'title': title})

                        if ppt_base_info.chat_root_id is None:
                            chat_root_id = GenerateID.create_random_id()
                            ppt_base_model.update({'chat_root_id': chat_root_id})

                        if ppt_base_info.doc_info is None or is_same is False:
                            ppt_base_model.update({'doc_info': json.dumps(files)})

                        if ppt_base_info.vector_path is None:
                            ppt_base_model.update({'vector_path': vector_db_path})

                        ppt_base_model.update({'outline': fetch_json(total_response), 'status': 0, 'gen_type': 1})
                        chat_root_id = ppt_base_info.chat_root_id
                        chat_id = GenerateID.create_random_id()
                        ppt_chat_model = PptChat(
                            chat_parent_id=chat_root_id,
                            chat_id=chat_id,
                            content=kwargs.get('content'),
                            response=total_response,
                            # doc_location=kwargs.get('doc_location'),
                        )
                        db.session.add(ppt_chat_model)
                        db.session.commit()
                    yield f"event: done\ndata: {json.dumps({'ppt_id': ppt_id, 'chat_id': chat_id})}\n\n"
                except Exception as e:
                    db.session.rollback()
                    loggings.exception(1, e)
                    yield f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n"

        # SSE headers; X-Accel-Buffering disables proxy buffering so chunks
        # reach the client immediately.
        headers = {
            'Content-Type': 'text/event-stream',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'X-Accel-Buffering': 'no',
        }

        return Response(stream_with_context(generate()), mimetype="text/event-stream", headers=headers)

    @classmethod
    def search_documents(cls, path, query, limit=2):
        """Return the *limit* chunks most similar to *query* from the Chroma store at *path*."""
        cfg = current_app.config
        embedder = LLMFactory.get_embedding(cfg['EMBEDDING_MODEL'],
                                            cfg['EMBEDDING_API_KEY'])

        store = Chroma(persist_directory=path, embedding_function=embedder)
        return store.similarity_search(query, k=limit)

    @classmethod
    def get_prompt(cls, prompt, user_message: str) -> list:
        return [("system", prompt), ("user", user_message)]
