from deepeval.models import DeepEvalBaseLLM
from deepeval.test_case import LLMTestCase, LLMTestCaseParams
from deepeval.metrics import AnswerRelevancyMetric, GEval
from langchain_core.prompts import ChatPromptTemplate
from deepeval.test_case import LLMTestCase

import re,json,os
from ApiModels import VectInfo
from ApiBase import apiBase

class ApiRft(DeepEvalBaseLLM):
    """DeepEval-compatible judge model for evaluating RAG answers.

    Wraps a LangChain chat model (``llm``) and a vector-store client
    (``vector``) so that deepeval metrics (AnswerRelevancyMetric, GEval)
    can use this object as their evaluation model while retrieval context
    is pulled from the vector store.
    """

    def __init__(self, model_name=None, *args, **kwargs):
        # 'llm' and 'vector' are required keyword arguments; a KeyError
        # here surfaces a mis-configured caller immediately.
        self.llm = kwargs['llm']
        self.vector = kwargs['vector']
        super().__init__(model_name, *args, **kwargs)

    def set(self, llm, vector):
        """Replace the wrapped chat model and vector-store client."""
        self.llm = llm
        self.vector = vector

    def load_model(self, *args, **kwargs):
        """Return the underlying chat model (deepeval hook)."""
        return self.llm

    def generate(self, sys_pt, prompt: str = None) -> str:
        """Generate a completion for ``prompt`` under system prompt ``sys_pt``.

        Backward-compatible fix: deepeval metrics call ``generate(prompt)``
        with a single positional argument, which previously landed in
        ``sys_pt`` and raised TypeError for the missing ``prompt``. When
        called with one argument we treat it as the user prompt and fall
        back to the default system prompt.
        """
        if prompt is None:
            # Single-argument call: the sole argument is the user prompt.
            sys_pt, prompt = None, sys_pt
        if sys_pt is None:  # identity check, not '== None'
            sys_pt = self.get_system_prompt()
        prompt_template = ChatPromptTemplate.from_messages([
            ("system", sys_pt),
            ("user", "{input}")
        ])
        chain = prompt_template | self.llm
        resp = chain.invoke(input=prompt)
        return resp.content

    async def a_generate(self, prompt: str) -> str:
        """Async entry point required by DeepEvalBaseLLM; delegates to generate()."""
        return self.generate(self.get_system_prompt(), prompt)

    def get_model_name(self) -> str:
        """Return the model name supplied at construction."""
        return self.model_name

    def get_system_prompt(self) -> str:
        """Default system prompt used when a caller supplies none."""
        return (
            "You are FinBot, a financial advisor bot. Your task is to provide investment advice and financial planning "
            "recommendations based on the user's financial data. Always prioritize user privacy."
        )

    def _retrieve_docs(self, vcts: list[VectInfo], input) -> list:
        """Query the vector store for context documents; return their text.

        An empty query result simply yields an empty retrieval context.
        """
        jsndocs = self.vector.clts_query(vcts, input, "rft-doc")
        return [doc['txt'] for doc in jsndocs]

    # Is the question/answer pair relevant?
    def relevancy(self, vcts: list[VectInfo], input, expected_output, output):
        """Score answer relevancy for one QA pair.

        Retrieves context for ``input`` from the vector store, then runs
        deepeval's AnswerRelevancyMetric (threshold 0.5) with this object
        as the judge model.

        Returns a JSON string: {"score": float, "reason": str}.
        """
        docs = self._retrieve_docs(vcts, input)
        test_case = LLMTestCase(input=input, actual_output=output,
                                expected_output=expected_output,
                                retrieval_context=docs)
        relevancy_metric = AnswerRelevancyMetric(model=self, threshold=0.5)
        relevancy_metric.measure(test_case)
        return json.dumps({
            "score": relevancy_metric.score,
            "reason": relevancy_metric.reason,
        })

    # Judge the answer against a caller-supplied criterion.
    def geval(self, vcts: list[VectInfo], criteria, input, expected_output, output):
        """Evaluate one QA pair with GEval under the given ``criteria``.

        Retrieves context for ``input`` from the vector store, then runs a
        strict-mode GEval "Correctness" metric comparing actual vs.
        expected output, with this object as the judge model.

        Returns a JSON string: {"score": float, "reason": str}.
        """
        docs = self._retrieve_docs(vcts, input)
        test_case = LLMTestCase(input=input, actual_output=output,
                                expected_output=expected_output,
                                retrieval_context=docs)
        correctness_metric = GEval(
            model=self,
            name="Correctness",
            criteria=criteria,
            evaluation_params=[LLMTestCaseParams.ACTUAL_OUTPUT, LLMTestCaseParams.EXPECTED_OUTPUT],
            strict_mode=True
        )
        correctness_metric.measure(test_case)
        return json.dumps({
            "score": correctness_metric.score,
            "reason": correctness_metric.reason,
        })
