"""
Author: xubing
Date: 2024-09-25 13:56:18
LastEditors: xubing
LastEditTime: 2024-11-21 21:55:54
Description: Unified chat entry point — dispatches to the matching LLM client
(WenXin / BaiChuan / generic) based on the model name, with optional streaming.
"""

import time
from typing import List, Dict, Any, Generator

from logger_config_db import setup_logger
from myUtils.clients import (BaiChuanClient, ChatResponse, LLMClient, Message,
                          WenXinClientV2)

# 设置日志记录器
logger = setup_logger(__name__)


def create_client(model_name: str):
    """Create the client instance that serves *model_name*.

    Dispatch is a case-insensitive prefix match:
      * "ernie..."    -> WenXinClientV2
      * "baichuan..." -> BaiChuanClient
      * anything else -> LLMClient (generic fallback)

    Raises:
        Exception: re-raises whatever the client constructor raised,
            after logging it with a full traceback.
    """
    # Hoist the lowercased name so it is computed once, not per branch.
    lowered = model_name.lower()
    try:
        if lowered.startswith("ernie"):
            return WenXinClientV2(model_name=model_name)
        if lowered.startswith("baichuan"):
            return BaiChuanClient(model_name=model_name)
        return LLMClient(model_name=model_name)
    except Exception:
        # logger.exception records the traceback (logger.error would not),
        # and lazy %-args avoid building the message when logging is off.
        logger.exception("Failed to create client for %s", model_name)
        raise


def chat_with_model(model_name: str,
                    messages: List[Dict[str, Any]],
                    stream: bool = False):
    """Run one chat turn against *model_name*.

    Args:
        model_name: Name used to pick the client (see ``create_client``).
        messages: OpenAI-style message dicts (``role``/``content`` keys).
        stream: When True, return the client's streaming generator
            unconsumed so the caller can iterate it.

    Returns:
        A generator of chunks when ``stream`` is True, the reply text on
        success, or ``None`` on any failure (errors are logged, not raised).
    """
    try:
        client = create_client(model_name)
        if stream:
            # Hand the generator back untouched — consuming it here
            # would defeat the purpose of streaming.
            return client.chat_stream(messages)

        response = client.chat(messages)
        if not isinstance(response, ChatResponse):
            # Some clients return the reply text directly.
            return response
        if response.status:
            return response.content
        logger.error("Chat failed: %s", response.error_msg)
        return None
    except Exception as e:
        # Swallow-and-return-None is this module's contract; log the full
        # traceback so the failure is still diagnosable.
        logger.exception("Error in chat_with_model: %s", e)
        return None


if __name__ == "__main__":
    messages = [
        {
            "role": "user",
            "content": "You are a helpful assistant."
        },
        {
            "role": "assistant",
            "content": "我是您的小助手，我可以怎么帮您？"
        },
        {
            "role": "user",
            "content": "你好，你是谁开发出来的基座模型"
        },
    ]
    
    keep_model = [
        # "moonshot-v1-vision-preview",
        # "glm-4v-plus-0111",
        # "qvq-72b-preview",
        "qwen-max",
        # "qwen-vl-max",
        # "qwen2-vl-72b",
        # "qwen2-7b-instruct",
        # "baichuan2-7b-chat-v1",
        # "qwen-coder-turbo",
        # "glm-4-flash",
        # "ERNIE-4.0-8K",
        # "baichuan2-13b-chat-v1",
        # "qwen2.5-coder:14b"
        # "glm4",
        # "Qwen2.5-14B-Instruct"
        # "glm4-9b-local",
        # "codegeex4-9b-local"
    ]
    
    stream = True
    for model in keep_model:
        start_time = time.time()
        try:
            if stream:
                print(f"\n{model} 流式输出:")
                # 获取生成器并在这里消费它
                stream_gen = chat_with_model(model_name=model,
                                             messages=messages,
                                             stream=stream)
                if stream_gen:  # 检查是否为None
                    for chunk in stream_gen:
                        print(chunk, end="")
                    print()
                else:
                    print(f"无法获取 {model} 的流式输出")
            else:
                response = chat_with_model(model_name=model,
                                           messages=messages,
                                           stream=stream)
                end_time = time.time()
                print(f"Time taken for {model}:", end_time - start_time,
                      "seconds")
                print(response)
        except Exception as e:
            print(f"处理 {model} 时出错: {e}")
        finally:
            end_time = time.time()
            print(f"总耗时 {model}:", end_time - start_time, "seconds")
            print("-" * 50)

# curl http://localhost:11434/api/chat -d '{
#   "model": "codegeex4:9b-all-fp16",
#   "messages": [
#     { "role": "user", "content": "why is the sky blue?" }
#   ]
# }'
