# -*- coding: utf-8 -*-

# -------------------------------------------------------------------
# @Time    : 2024/5/21 10:44
# @Author  : xushuai
# @File    : conftest.py
# @Description: pytest测试的conftest.py文件
# -------------------------------------------------------------------
import pytest
import sys
import os

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'utils')))
from py._xmlgen import html
from time import strftime
from src.common.config_loader import ConfigLoader
from src.model.llm_model import LlmModel


# Set the title shown at the top of the pytest-html report.
def pytest_html_report_title(report):
    """Give the generated HTML report a descriptive Chinese title."""
    title = "LLM API自动化测试报告"
    report.title = title


# Customize the summary section of the pytest-html report.
@pytest.hookimpl(optionalhook=True)
def pytest_html_results_summary(prefix, summary, postfix):
    """Prepend a tester-name paragraph to the report summary."""
    tester_line = html.p("测试人: xushuai")
    prefix.append(tester_line)


# Populate the Environment section of the pytest-html report.
def pytest_configure(config):
    """Add run start time and target environment URL to the report metadata.

    Does nothing when the pytest-metadata plugin is not installed.
    """
    if not config.pluginmanager.getplugin("metadata"):
        return
    # Imported lazily so this hook is safe when pytest-metadata is absent.
    from pytest_metadata.plugin import metadata_key
    env = config.stash[metadata_key]
    # Record when this test session started.
    env['开始时间'] = strftime('%Y-%m-%d %H:%M:%S')
    # Record the address of the environment under test.
    env['测试环境地址'] = 'https://10.210.18.41/'


# Trim the pytest-html report's Environment section by removing noisy entries.
@pytest.hookimpl(optionalhook=True)
def pytest_metadata(metadata):
    """Drop the verbose "Packages" and "Plugins" entries from the report metadata.

    Uses ``pop`` with a default so the hook does not raise ``KeyError`` when a
    key is already absent (e.g. removed by another plugin or not produced by
    the installed pytest-metadata version).
    """
    metadata.pop("Packages", None)
    metadata.pop("Plugins", None)


@pytest.fixture(scope='session')
def configs():
    """Session-scoped loader for the project's YAML configuration file."""
    return ConfigLoader('config/config.yaml')


@pytest.fixture(scope='session')
def get_llmchat_url(configs):
    """Chat endpoint URL assembled from llm_service.* config entries."""
    host = configs.get('llm_service.ip')
    port = configs.get('llm_service.chat_port')
    path = configs.get('llm_service.url')
    return 'http://{}:{}{}'.format(host, port, path)


@pytest.fixture(scope='session')
def get_chat_body(configs):
    """Request payload for the plain chat endpoint, built from config values."""
    user_message = {
        "role": "user",
        "content": configs.get('llm_service.chat_content'),
    }
    # NOTE: 'llm_service.temprature' is the key as spelled in config.yaml;
    # the request field itself is the correctly spelled "temperature".
    return {
        "model": configs.get('llm_service.model_name'),
        "messages": [user_message],
        "stream": configs.get('llm_service.stream'),
        "temperature": configs.get('llm_service.temprature'),
    }


@pytest.fixture(scope='session')
def get_llmprompts_url(configs):
    """Prompt-template endpoint URL assembled from llm_service.* config entries."""
    host = configs.get('llm_service.ip')
    port = configs.get('llm_service.prompts_port')
    path = configs.get('llm_service.url')
    return 'http://{}:{}{}'.format(host, port, path)


@pytest.fixture(scope='session')
def get_prompts_body(configs):
    """Request payload for the prompt-template chat endpoint."""
    context = {
        configs.get('llm_service.context_key'): configs.get('llm_service.context_value'),
    }
    template_message = {
        "role": "user",
        "template_id": configs.get('llm_service.template_id'),
        "context": context,
    }
    # 'llm_service.temprature' mirrors the key's spelling in config.yaml.
    return {
        "model": configs.get('llm_service.model_name'),
        "messages": [template_message],
        "stream": configs.get('llm_service.stream'),
        "temperature": configs.get('llm_service.temprature'),
    }


@pytest.fixture(scope='session')
def get_knowledge_base_url(configs):
    """Base URL of the knowledge-base service (host and port only, no path)."""
    host = configs.get('llm_service.ip')
    port = configs.get('llm_service.knowledge_port')
    return 'http://{}:{}'.format(host, port)


@pytest.fixture(scope='session')
def get_knowledge_chat_body(configs):
    """Request payload for the knowledge-base chat endpoint.

    Mixes fixed demo values (query, knowledge-base name, history) with the
    model/stream/temperature settings read from the config file.
    """
    chat_history = [
        {
            "role": "user",
            "content": "我是国铁集团党委书记，帮我写一份党建工作年度总结报告"
        },
        {
            "role": "assistant",
            "content": "虎头虎脑"
        },
    ]
    return {
        "query": "你好",
        "knowledge_base_name": "党建工作报告",
        "top_k": 3,
        "score_threshold": 1,
        "history": chat_history,
        "stream": configs.get('llm_service.stream'),
        "model_name": configs.get('llm_service.model_name'),
        "temperature": configs.get('llm_service.temprature'),
        "max_tokens": 0,
        "prompt_name": "default"
    }


# Fixture for the LLM all-in-one appliance API tests.
@pytest.fixture(scope='session')
def llm_model(configs):
    """Session-scoped LlmModel client configured from api.* settings."""
    return LlmModel(
        configs.get('api.base_url'),
        configs.get('api.token'),
        configs.get('api.timeout'),
    )
