"""
大模型通信
负责读取 system/user 的提示模板, 发送给 Kimi K2 大模型并返回结果
(生成的playwright 控制指令代码)
"""

import os
import timeit
import requests
from openai import OpenAI
from config.config import load_config

# 导入统一的日志配置模块
from log_config import setup_logging
# 配置日志
setup_logging()
import logging
logger = logging.getLogger(__name__)


def query_kimi_k2(content, question):
    """Send the prompt/ templates to the Kimi K2 model and return its reply.

    Args:
        content: Text substituted for the ``{content}`` placeholder in the
            user prompt template.
        question: Text substituted for the ``{question}`` placeholder.

    Returns:
        The model's response text (expected to be generated Playwright
        control code), or an error-description string when the API call
        fails — the string return on error is kept for backward
        compatibility with existing callers.
    """
    # API credentials and model settings come from the project config file.
    config = load_config()
    moonshot_api_key = config['llm']['kimi_api_key']
    model_name = config['llm']['kimi_model']
    base_url = config['llm']['kimi_base_url']

    # Lazy %-style args: same messages as before, formatted only if emitted.
    logger.info("大模型 模型名称: %s", model_name)
    logger.info("大模型 base_url: %s", base_url)

    # Kimi exposes an OpenAI-compatible API, so the stock OpenAI client works.
    client = OpenAI(
        api_key=moonshot_api_key,
        base_url=base_url,
    )

    # The templates contain Chinese text; force UTF-8 so reading does not
    # depend on the platform's default locale encoding (fix: was implicit).
    with open("prompt/system.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()

    with open("prompt/user.txt", "r", encoding="utf-8") as f:
        user_prompt = f.read()

    user_prompt = user_prompt.replace("{content}", content)
    user_prompt = user_prompt.replace("{question}", question)

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    logger.info("发送请求到Kimi API...")
    # Log only a prefix: the full prompt can be very large.
    logger.info(str(messages)[:200] + " ...")

    try:
        # Keep the try body minimal — only the network call can usefully fail.
        completion = client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=0.6,
            max_tokens=128 * 1024,
        )
    except Exception as e:
        logger.error(f"API请求出错: {type(e).__name__}: {str(e)}")
        return f"API请求出错: {type(e).__name__}: {str(e)}"

    logger.info("成功收到响应")
    return completion.choices[0].message.content


def glm_query(content, question):
    """Query Zhipu's GLM-4-Flash model with the prompt/ templates.

    Bug fixed: the original body called ``ZhipuAI(...)``, a name that is
    never imported in this module, so every call raised ``NameError``.
    Zhipu BigModel exposes an OpenAI-compatible endpoint, so the
    already-imported ``OpenAI`` client is used instead.  The function now
    also returns the response text (it previously only printed it) and
    reports API failures the same way as the sibling query functions.

    Args:
        content: Substituted for ``{content}`` in the user prompt template.
        question: Substituted for ``{question}`` in the user prompt template.

    Returns:
        The model's response text, or an error-description string on failure.
    """
    # SECURITY: hard-coded credential kept only as a fallback for backward
    # compatibility — prefer the GLM_API_KEY environment variable, rotate
    # this key, and move it into config/ like the Kimi settings.
    api_key = os.environ.get(
        "GLM_API_KEY",
        "bb990dd728b64ce3992852c209051f29.3riXdmRY7R8EDzzq",
    )
    model_id = "glm-4-flash"

    # UTF-8 is forced because the templates contain Chinese text.
    with open("./prompt/system.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()

    with open("./prompt/user.txt", "r", encoding="utf-8") as f:
        user_prompt = f.read()

    user_prompt = user_prompt.replace("{content}", content)
    user_prompt = user_prompt.replace("{question}", question)

    # Zhipu BigModel's OpenAI-compatible entry point.
    client = OpenAI(
        api_key=api_key,
        base_url="https://open.bigmodel.cn/api/paas/v4/",
    )
    try:
        response = client.chat.completions.create(
            model=model_id,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            max_tokens=128000,
        )
    except Exception as e:
        logger.error(f"API请求出错: {type(e).__name__}: {str(e)}")
        return f"API请求出错: {type(e).__name__}: {str(e)}"

    resp = response.choices[0].message.content
    print(f"resp: {resp}")
    return resp


def doubao_query(content, question):
    """Query the Doubao model on Volcengine Ark with the prompt/ templates.

    Args:
        content: Substituted for ``{content}`` in the user prompt template.
        question: Substituted for ``{question}`` in the user prompt template.

    Returns:
        The model's response text, or an error-description string on failure
        (same error contract as the sibling query functions).
    """
    # SECURITY: the original comment already asked for the key to live in the
    # ARK_API_KEY environment variable — honor that, keeping the old
    # hard-coded value only as a backward-compatible fallback. Rotate it.
    api_key = os.environ.get('ARK_API_KEY', '70c28a91-900c-41b0-a0a1-cd57c0aee266')

    # Ark exposes an OpenAI-compatible API; this is the default cn-beijing
    # regional endpoint — adjust base_url for other regions.
    client = OpenAI(
        base_url="https://ark.cn-beijing.volces.com/api/v3",
        api_key=api_key,
    )

    # UTF-8 is forced because the templates contain Chinese text.
    with open("./prompt/system.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read()

    with open("./prompt/user.txt", "r", encoding="utf-8") as f:
        user_prompt = f.read()

    user_prompt = user_prompt.replace("{content}", content)
    user_prompt = user_prompt.replace("{question}", question)

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    logger.info("发送请求到Doubao API...")
    # Log only a prefix: the full prompt can be very large.
    logger.info(str(messages)[:200] + " ...")

    try:
        # Keep the try body minimal — only the network call can usefully fail.
        response = client.chat.completions.create(
            # Ark inference-endpoint ID for Doubao 1.5 Pro 256k.
            model="doubao-1-5-pro-256k-250115",
            messages=messages,
        )
    except Exception as e:
        logger.error(f"API请求出错: {type(e).__name__}: {str(e)}")
        return f"API请求出错: {type(e).__name__}: {str(e)}"

    code = response.choices[0].message.content
    logger.info(f"成功收到响应: {code}")
    return code

if __name__ == '__main__':
    # Benchmark: time three consecutive Doubao calls and record the result.
    # (Swap in query_kimi_k2 / glm_query below to benchmark other backends.)
    content = ""
    question = "中国最早的人类是哪代人？"

    with open("./llm_result.txt", "w") as f:
        elapsed_time = timeit.timeit(
            lambda: doubao_query(content, question), number=3
        )
        summary = f"Doubao 模型调用耗时(3次): {elapsed_time:.4f} 秒"
        logger.info(summary)
        f.write(summary + "\n")
        f.flush()
