from pydantic import BaseModel, Field
from server.common import chat_model
from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate


def create_retrieval_grader():
    """Build a chain that grades a retrieved document's relevance to a question.

    The returned runnable takes ``{"document": ..., "question": ...}`` and
    yields a GradeDocuments instance whose ``binary_score`` is 'yes' or 'no'.
    """
    # System instructions for the grading model (content kept verbatim).
    system = """You are a grader assessing relevance of a retrieved document to a user question. \n 
                If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \n
                It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \n
                Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question."""
    messages = [
        ("system", system),
        ("human", "Retrieved document: \n\n {document} \n\n User question: {question}"),
    ]
    # Constrain the model's reply to the GradeDocuments schema so callers can
    # read `.binary_score` programmatically.
    grading_llm = chat_model.qwen.with_structured_output(GradeDocuments)
    return ChatPromptTemplate.from_messages(messages) | grading_llm


def create_rag_generator():
    """Build the RAG answer-generation chain.

    Pulls the community "rlm/rag-prompt" template from LangChain Hub, pipes it
    through the chat model, and parses the reply down to a plain string.
    NOTE(review): ``hub.pull`` performs a network fetch each time this runs.
    """
    rag_prompt = hub.pull("rlm/rag-prompt")
    generation_chain = rag_prompt | chat_model.qwen
    return generation_chain | StrOutputParser()


def create_hallucination_grader():
    """Build a chain scoring how grounded an LLM generation is in the facts.

    The returned runnable takes ``{"documents": ..., "generation": ...}`` and
    yields a GradeHallucinations instance whose ``binary_score`` field is —
    despite its name — an integer from 1 to 10 (higher = better grounded),
    as the system prompt below instructs.
    """
    # System instructions for the grading model (content kept verbatim).
    system = """You are a grader assessing whether an LLM generation is grounded in / supported by a set of retrieved 
    facts. \n Choose a score from 1 to 10.  Higher scores indicate that the answer is more grounded in / supported by 
    the set of facts."""
    chat_prompt = ChatPromptTemplate.from_messages([
        ("system", system),
        ("human", "Set of facts: \n\n {documents} \n\n LLM generation: {generation}"),
    ])
    # Force structured output so the integer score can be read programmatically.
    return chat_prompt | chat_model.qwen.with_structured_output(GradeHallucinations)


def create_answer_grader():
    """Build a chain that grades whether an answer resolves the question.

    The returned runnable takes ``{"question": ..., "generation": ...}`` and
    yields a GradeAnswer instance whose ``binary_score`` is 'yes' or 'no'.
    """
    llm = chat_model.qwen
    # Constrain the model's reply to the GradeAnswer schema ('yes'/'no' string).
    structured_llm_grader = llm.with_structured_output(GradeAnswer)
    # Fixed prompt typo: unbalanced quote "Yes'" -> "'Yes'".
    system = """You are a grader assessing whether an answer addresses / resolves a question \n 
         Give a binary score 'yes' or 'no'. 'Yes' means that the answer resolves the question."""
    answer_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system),
            ("human", "User question: \n\n {question} \n\n LLM generation: {generation}"),
        ]
    )
    return answer_prompt | structured_llm_grader


def create_question_rewriter():
    """Build a chain that rewrites a question for better vectorstore retrieval.

    The returned runnable takes ``{"question": ...}`` and yields the improved
    question as a plain string.
    """
    llm = chat_model.qwen
    # Fixed prompt typo: "You a question re-writer" -> "You are a question re-writer".
    system = """You are a question re-writer that converts an input question to a better version that is optimized \n 
         for vectorstore retrieval. Look at the input and try to reason about the underlying semantic intent / meaning."""
    re_write_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system),
            (
                "human",
                "Here is the initial question: \n\n {question} \n Formulate an improved question.",
            ),
        ]
    )
    return re_write_prompt | llm | StrOutputParser()


class LLMS:
    """Container bundling the LLM chains used by the adaptive-RAG graph.

    Each attribute is a ready-to-invoke LangChain runnable. Construction is
    relatively expensive (create_rag_generator pulls a prompt from LangChain
    Hub over the network), so an instance should be created once and reused.
    """

    def __init__(self):
        # 'yes'/'no' relevance grade for a (document, question) pair.
        self.retrieval_grader = create_retrieval_grader()
        # Generates an answer string from retrieved documents + question.
        self.rag_generator = create_rag_generator()
        # Groundedness score for (documents, generation) — a 1-10 integer
        # per the hallucination grader's prompt, despite the "binary" naming.
        self.hallucination_grader = create_hallucination_grader()
        # 'yes'/'no' grade for whether the generation answers the question.
        self.answer_grader = create_answer_grader()
        # Rewrites the user's question for better vectorstore retrieval.
        self.question_rewriter = create_question_rewriter()


class GradeDocuments(BaseModel):
    """Binary score for relevance check on retrieved documents."""

    # NOTE(review): the class docstring and Field description are sent to the
    # LLM as part of the structured-output schema — edit them with care.
    binary_score: str = Field(
        description="Documents are relevant to the question, 'yes' or 'no'"
    )


class GradeHallucinations(BaseModel):
    """Score from 1 to 10 for hallucination present in generation answer."""

    # NOTE(review): despite the name, this field is NOT binary — it holds an
    # integer from 1 to 10 (higher = more grounded). Renaming it would change
    # the structured-output schema and break callers reading `.binary_score`,
    # so the misleading name is kept for compatibility.
    binary_score: int = Field(
        description="Answer is grounded in the facts, from 1 to 10"
    )


class GradeAnswer(BaseModel):
    """Binary score to assess answer addresses question."""

    # NOTE(review): the class docstring and Field description are sent to the
    # LLM as part of the structured-output schema — edit them with care.
    binary_score: str = Field(
        description="Answer addresses the question, 'yes' or 'no'"
    )


# Module-level singleton: building the chains is expensive (and
# create_rag_generator fetches a prompt from LangChain Hub over the network),
# so it happens once at import time and importers share this instance.
llms = LLMS()

if __name__ == '__main__':
    # Smoke test for the retrieval grader. Reuses the module-level instance
    # instead of re-instantiating LLMS (the original rebuilt every chain here,
    # doubling the setup cost for no benefit). Also removed a stray "\ "
    # artifact that had been embedded in the demo document text.
    score = llms.retrieval_grader.invoke(
        {"question": "使用电脑对眼睛的危害有哪些", "document": "电脑可能导致的眼睛不适有哪些？\n眼睛疲劳\n"
                                                                "如果室内的灯光或窗外的光线造成荧光幕反光时，便需要格外费神用力才能够看得清楚荧光幕上的字，在过度使用眼部肌肉的情况下，眼睛会很容易感觉疲劳。长时间集中精神对着电脑工作，眼部肌肉由于过度维持在同一个位置，亦会令眼睛疲倦。"})
    print(score.binary_score)
