import os
import uuid
from dataclasses import dataclass
from operator import itemgetter
from typing import Any, Dict

from flask import request
from injector import inject
from langchain_core.memory import BaseMemory
from langchain_core.tracers import Run

from pkg import validate_error, FailedException, success, success_message

from schema.app_schema import ChatForm
from service import WeaviateService

from service.app_service import AppService

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain.memory import ConversationBufferWindowMemory
from langchain_community.chat_message_histories.file import FileChatMessageHistory
from langchain_core.runnables import RunnablePassthrough, RunnableLambda, RunnableConfig

# Module-level shared chat model. Credentials/endpoint (e.g. OPENAI_API_KEY)
# are presumably read from the environment by ChatOpenAI — confirm deployment config.
llm = ChatOpenAI(model="gpt-3.5-turbo-16k")


@inject
@dataclass
class AppHandler:
    """Flask request handlers: app CRUD endpoints and an LLM chat endpoint
    backed by a Weaviate retriever and windowed conversation memory."""

    # Services supplied by the injector container.
    app_service: AppService
    weaviate_service: WeaviateService

    def hello_world(self):
        """Trivial demo/health-check endpoint returning static HTML."""
        # NOTE(review): an unreachable `raise FailedException("出错了")`
        # followed this return in the original; removed as dead code.
        return '<h1>Hello World</h1>'

    def chat(self):
        """Handle a chat request.

        Validates the incoming form, builds a retrieval-augmented chain
        (history + retrieved context + user input -> prompt -> llm -> text),
        invokes it, and returns the model's reply wrapped in `success`.
        """
        try:
            word = request.json.get("word")
            req = ChatForm()
            if not req.validate():
                return validate_error(req.errors)

            prompt = ChatPromptTemplate.from_messages([
                ("system", "你是人工智能机器人，根据用户的输入进行回复<context>{context}</context>"),
                MessagesPlaceholder("history"),
                ("human", "{word}")
            ])

            # Keep only the last 3 exchanges; history is persisted to a
            # local JSON file shared across requests.
            memory = ConversationBufferWindowMemory(
                k=3,
                input_key="word",
                output_key="output",
                return_messages=True,
                chat_memory=FileChatMessageHistory("./storage/memory/chat_history.json")
            )

            # Retrieve documents relevant to `word` and collapse them into
            # the `{context}` slot of the system prompt.
            retriever = self.weaviate_service.get_retriever() | self.weaviate_service.combine_documents
            context = itemgetter('word') | retriever

            # The on_end listener persists the finished exchange into `memory`,
            # which travels through the run via the `configurable` dict below.
            chain = (RunnablePassthrough.assign(
                history=RunnableLambda(self._load_memory_variables) | itemgetter("history"),
                context=context
            )
                     | prompt | llm | StrOutputParser()).with_listeners(
                on_end=self._save_context)

            # Renamed from `input` to avoid shadowing the builtin.
            chain_input = {
                "word": word
            }
            content = chain.invoke(chain_input, config={
                "configurable": {
                    "memory": memory
                }
            })
            print(content)  # debug output; consider replacing with logging
            return success(content)
        except FailedException as e:
            print(e)
            # NOTE(review): returning the exception object relies on
            # FailedException being Flask-serializable — confirm upstream.
            return e

    def create_app(self):
        """Create a new app record and return it wrapped in `success`."""
        app = self.app_service.create_app()
        return success(app)

    def get_app(self, id: uuid.UUID):
        """Fetch an app by id and return its name as a success message."""
        app = self.app_service.get_app(id)
        return success_message(app.name)

    @classmethod
    def _load_memory_variables(cls, inputs: Dict[str, Any], config: RunnableConfig) -> Dict[str, Any]:
        """Load chat history from the memory carried in the run config.

        Returns the memory's variables dict (contains "history") when a
        BaseMemory is configured, otherwise an empty history.
        """
        # `configurable` may be absent; `or {}` guards against a None value
        # (the original would raise AttributeError on `.get`).
        configurable = config.get("configurable") or {}
        memory = configurable.get("memory", None)
        if isinstance(memory, BaseMemory):
            return memory.load_memory_variables(inputs)

        return {
            "history": [],
        }

    def _save_context(self, run_obj: Run, config: RunnableConfig) -> None:
        """on_end listener: persist the run's inputs/outputs into the
        memory carried in the run config, if one is present."""
        # Same None-guard as in _load_memory_variables.
        configurable = config.get("configurable") or {}
        memory = configurable.get("memory", None)
        if isinstance(memory, BaseMemory):
            memory.save_context(run_obj.inputs, run_obj.outputs)
