import uuid

from concurrent.futures import ThreadPoolExecutor
from langfuse import Langfuse
from my_base_service import *
from my_prompt_template import *

# Classifier prompt: decides whether a student's classroom message is a
# question the teacher should answer. The model is instructed to reply with
# a single "Y" (needs the teacher) or "N" (does not).
# NOTE: the prompt text is a runtime string (Chinese) and is part of the
# program's behavior — do not edit or translate it.
need_answer = PromptTemplate.from_template("""
*********
你是AIGC课程的助教，你的工作是从学员的课堂交流中选择出需要老师回答的问题，加以整理以交给老师回答。

课程内容:
{outlines}
*********
学员输入:
{user_input}
*********
如果这是一个需要老师答疑的问题，回复Y，否则回复N。
只回复Y或N，不要回复其他内容。""")

# Fixed seed for best-effort reproducibility of model output across eval runs
# (effectiveness depends on the backing model honoring the seed — confirm).
model = get_model(model_kwargs={"seed": 42})

# v1 evaluation pipeline: prompt -> model -> string output parser.
# Expects input dict with keys "user_input" and "outlines".
chain_v1 = (
        need_answer
        | model
        | strParser
)

# Example single invocation, kept for manual debugging:
# result = chain_v1.invoke({"user_input": "你好吗", "outlines": outlines})
# print(result)


# Langfuse client; presumably reads credentials/host from environment
# variables (LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY / LANGFUSE_HOST).
langfuse = Langfuse()


def simple_evaluation(output, expected_output):
    """Exact-match scorer: True iff *output* equals *expected_output*."""
    matches = output == expected_output
    return matches


def run_evaluation(chain, dataset_name, run_name):
    """Evaluate *chain* on every item of a Langfuse dataset and score accuracy.

    Args:
        chain: A LangChain runnable; invoked with each dataset item's input dict.
        dataset_name: Name of the Langfuse dataset to fetch items from.
        run_name: Label under which traces and scores are recorded in Langfuse.
    """
    dataset = langfuse.get_dataset(dataset_name)

    def process_item(item):
        # Link this invocation's trace to the dataset item under run_name.
        handler = item.get_langchain_handler(run_name=run_name)

        # Invoke the chain; the handler records the trace in Langfuse.
        output = chain.invoke(item.input, config={"callbacks": [handler]})

        # Score the trace by exact match against the item's expected output.
        handler.trace.score(
            name="accuracy",
            value=simple_evaluation(output, item.expected_output)
        )
        print('.', end='', flush=True)

    # Items are independent and I/O-bound, so evaluate them concurrently.
    # BUG FIX: the previous executor.map(...) never consumed its result
    # iterator, so exceptions raised inside process_item were silently
    # swallowed and failed items went unnoticed. Collect futures explicitly
    # and report each failure, while still letting the remaining items run.
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(process_item, item) for item in dataset.items]
    for future in futures:
        exc = future.exception()
        if exc is not None:
            print(f"\nitem failed: {exc!r}", flush=True)


# Script entry: evaluate chain_v1 against "my-dataset". The random 8-char
# uuid suffix makes each run's name unique so repeated runs are
# distinguishable in the Langfuse UI.
run_evaluation(chain_v1, "my-dataset", "v1-" + str(uuid.uuid4())[:8])
