from typing import List
from server.adaptive_rag.llms import llms
from typing_extensions import TypedDict
from server.knowledge_base_service import knowledge_base_service
from langgraph.graph import END, StateGraph, START


class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Flows through every node of the workflow built in
    ``AdaptiveRag.create_workflow``; each node returns a partial dict that
    LangGraph merges back into this state.

    Attributes:
        question: the user's question (may be replaced by a rewritten one)
        generation: LLM generation (the current draft answer)
        documents: list of documents retrieved for the question
    """

    question: str
    generation: str
    # NOTE(review): annotated as List[str], but _grade_documents accesses
    # d.page_content on each element, which suggests these are actually
    # Document objects — TODO confirm against kb_service.search_documents.
    documents: List[str]


class AdaptiveRag:
    """Self-correcting RAG pipeline built on LangGraph.

    Flow: retrieve documents -> generate an answer -> grade the generation
    for groundedness (hallucination) and usefulness.  Ungrounded answers are
    regenerated, unhelpful ones trigger a question rewrite, and both retry
    paths are bounded by ``re_answer_threshold`` generation attempts.
    """

    def __init__(self, hallucination_threshold: float = 0.5, re_answer_threshold: int = 3,
                 collection_name: str = "eye", top_k: int = 5):
        """
        Args:
            hallucination_threshold: minimum groundedness ratio (grader score
                divided by 10) required to accept a generation as supported
                by the retrieved documents.
            re_answer_threshold: maximum number of generation attempts per
                question before the workflow gives up with a fallback answer.
            collection_name: knowledge-base collection queried during
                retrieval (previously hard-coded to "eye" in ``_retrieve``).
            top_k: number of documents to retrieve per query (previously
                hard-coded to 5).
        """
        self.llms = llms
        self.kb_service = knowledge_base_service
        self.hallucination_threshold = hallucination_threshold
        self.re_answer_threshold = re_answer_threshold
        self.collection_name = collection_name
        self.top_k = top_k
        # Generation attempts for the current question.  Reset whenever a
        # question is answered or abandoned so the next question gets a
        # fresh budget.
        self.re_answer_count = 0

    def create_workflow(self):
        """Build and compile the LangGraph state machine.

        Returns:
            A compiled workflow ready to be invoked with a
            ``GraphState``-shaped dict.
        """
        workflow = StateGraph(GraphState)
        # Define the nodes
        workflow.add_node("retrieve", self._retrieve)  # retrieve
        # workflow.add_node("grade_documents", self._grade_documents)  # grade documents
        workflow.add_node("generate", self._generate)  # generate
        workflow.add_node("rewrite_question", self._rewrite_question)  # transform_query

        # Build graph.  The document-grading step is currently disabled:
        # retrieval feeds generation directly.
        workflow.set_entry_point("retrieve")
        workflow.add_edge("retrieve", "generate")
        # workflow.add_edge("retrieve", "grade_documents")
        # workflow.add_conditional_edges(
        #     "grade_documents",
        #     self._decide_to_generate,
        #     {
        #         "rewrite question": "rewrite_question",
        #         "generate answer": "generate",
        #     },
        # )
        workflow.add_edge("rewrite_question", "retrieve")
        workflow.add_conditional_edges(
            "generate",
            self._grade_generation_v_documents_and_question,
            {
                "not supported": "generate",
                "end": END,
                "not useful": "rewrite_question",
            },
        )

        # Compile
        return workflow.compile()

    # Node #
    def _retrieve(self, state):
        """
        Retrieve documents

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): New key added to state, documents, that contains retrieved documents
        """
        print("---RETRIEVE---")
        question = state["question"]

        # Retrieval; collection and top_k are instance-configurable now
        # instead of being hard-coded here.
        documents = self.kb_service.search_documents(
            question, collection_name=self.collection_name, top_k=self.top_k
        )
        return {"documents": documents, "question": question}

    def _generate(self, state):
        """
        Generate answer

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): New key added to state, generation, that contains LLM generation
        """
        print("---GENERATE---")
        question = state["question"]
        documents = state["documents"]

        # RAG generation
        generation = self.llms.rag_generator.invoke({"context": documents, "question": question})

        # Every generation counts against the per-question retry budget
        # checked in _grade_generation_v_documents_and_question.
        self.re_answer_count += 1

        return {"documents": documents, "question": question, "generation": generation}

    def _grade_documents(self, state):
        """
        Determines whether the retrieved documents are relevant to the question.

        Currently unused: the corresponding node/edges in create_workflow are
        commented out.

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): Updates documents key with only filtered relevant documents
        """

        print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
        question = state["question"]
        documents = state["documents"]

        # Score each doc; keep only those the grader marks relevant.
        # NOTE(review): d.page_content implies documents are Document
        # objects, not the List[str] declared in GraphState — TODO confirm.
        filtered_docs = []
        for d in documents:
            score = self.llms.retrieval_grader.invoke(
                {"question": question, "document": d.page_content}
            )
            grade = score.binary_score
            if grade == "yes":
                print("---GRADE: DOCUMENT RELEVANT---")
                filtered_docs.append(d)
            else:
                print("---GRADE: DOCUMENT NOT RELEVANT---")
        return {"documents": filtered_docs, "question": question}

    def _rewrite_question(self, state):
        """
        Transform the query to produce a better question.

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): Updates question key with a re-phrased question
        """

        print("---REWRITE QUESTION---")
        question = state["question"]
        documents = state["documents"]

        # Re-write question
        better_question = self.llms.question_rewriter.invoke({"question": question})
        return {"documents": documents, "question": better_question}

    # Edge #
    def _decide_to_generate(self, state):
        """
        Determines whether to generate an answer, or re-generate a question.

        Args:
            state (dict): The current graph state

        Returns:
            str: Binary decision for next node to call
        """

        print("---ASSESS GRADED DOCUMENTS---")
        filtered_documents = state["documents"]

        if not filtered_documents:
            # All documents have been filtered check_relevance
            # We will re-generate a new query
            print(
                "---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, TRANSFORM QUERY---"
            )
            return "rewrite question"
        else:
            # We have relevant documents, so generate answer
            print("---DECISION: GENERATE---")
            return "generate answer"

    def _grade_generation_v_documents_and_question(self, state):
        """
        Determines whether the generation is grounded in the document and answers question.

        Args:
            state (dict): The current graph state

        Returns:
            str: Decision for next node to call ("end", "not useful", or
                "not supported")
        """

        print("---CHECK HALLUCINATIONS---")
        question = state["question"]
        documents = state["documents"]
        generation = state["generation"]

        score = self.llms.hallucination_grader.invoke(
            {"documents": documents, "generation": generation}
        )
        grade = score.binary_score

        # Check hallucination.  The hallucination grader's binary_score is
        # treated as numeric (apparently on a 0-10 scale, hence the /10),
        # while the answer grader below returns a "yes"/"no" string —
        # TODO confirm both contracts against the llms module.
        if grade / 10 >= self.hallucination_threshold:
            print("---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---")
            # Check question-answering
            print("---GRADE GENERATION vs QUESTION---")
            score = self.llms.answer_grader.invoke({"question": question, "generation": generation})
            grade = score.binary_score
            if grade == "yes":
                print("---DECISION: GENERATION ADDRESSES QUESTION---")
                # Bug fix: reset the attempt counter on success; it was never
                # reset here, so the next question inherited this one's count
                # and could give up prematurely.
                self.re_answer_count = 0
                return "end"
            if self.re_answer_count >= self.re_answer_threshold:
                print("---KNOWLEDGE BASE CANNOT ANSWER QUESTION---")
                self.re_answer_count = 0
                # NOTE(review): mutating the state dict inside a conditional
                # edge may not persist into the graph's final state in
                # LangGraph — consider a dedicated fallback node. TODO confirm.
                state["generation"] = ("sorry, this tool cannot find relevant knowledge about this question, please "
                                       "try other tools.")
                return "end"
            print("---DECISION: GENERATION DOES NOT ADDRESS QUESTION---")
            return "not useful"

        if self.re_answer_count >= self.re_answer_threshold:
            # Bug fix: the "not supported" branch previously looped back to
            # "generate" unconditionally, so a persistently ungrounded model
            # recursed until LangGraph's recursion limit raised an error.
            # Apply the same give-up budget as the "not useful" path.
            print("---KNOWLEDGE BASE CANNOT ANSWER QUESTION---")
            self.re_answer_count = 0
            state["generation"] = ("sorry, this tool cannot find relevant knowledge about this question, please "
                                   "try other tools.")
            return "end"
        print("---DECISION: GENERATION IS NOT GROUNDED IN DOCUMENTS, RE-TRY---")
        return "not supported"
