"""
配置文件
包含基本URL、默认模型名称等配置
"""
import os
from dotenv import load_dotenv
from openai import OpenAI
import logging

# Load environment variables from the project-root .env file.
load_dotenv(dotenv_path=".env")

# API key (required by every client created in this module).
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    # The message must name the same file load_dotenv() actually reads
    # (".env"); the old text pointed users at "secret_files/.env", which
    # is not the path loaded above.
    raise ValueError("环境变量中未找到GOOGLE_API_KEY，请在.env文件中设置此变量")

# Base-URL configuration.
FASTAPI_BASE_URL = "/api/googleOpenaiCompat"  # local FastAPI proxy route
GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta"
# Gemma models are served from the same v1beta endpoint as Gemini.
GEMMA_BASE_URL = GEMINI_BASE_URL

# Default OpenAI-compatible client pointed at the Gemini endpoint.
client_gemini = OpenAI(
    api_key=GOOGLE_API_KEY,
    base_url=GEMINI_BASE_URL
)

# OpenAI-compatible model registry: maps each API base URL to the model
# names that should be routed through it (used by create_client below).
openai_compat = {
    GEMINI_BASE_URL: [
        "gemini-2.0-flash-exp",
        "gemini-1.0-pro-vision-latest",
        "gemini-pro-vision",
        "gemini-1.5-pro-001",
        "gemini-1.5-pro-002",
        "gemini-1.5-pro",
        "gemini-1.5-flash-001",
        "gemini-1.5-flash-001-tuning",
        "gemini-1.5-flash",
        "gemini-1.5-flash-002",
        "gemini-1.5-flash-8b",
        "gemini-1.5-flash-8b-001",
        "gemini-2.0-flash",
        "gemini-2.0-flash-001",
        "gemini-2.0-flash-lite-001",
        "gemini-2.0-flash-lite",
    ],
    GEMMA_BASE_URL: [
        "gemma-2b",
        "gemma-2b-it",
        "gemma-3b",
        "gemma-3b-it",
        "gemma-7b",
        "gemma-7b-it",
        "gemma-3-27b",
        "gemma-3-27b-it"
    ]
}

# Default model configuration
DEFAULT_MODEL = "gemma-3-27b-it"  # model used when callers do not specify one
DEFAULT_TEMPERATURE = 0.7  # default sampling temperature

# Currently selected default model (fallback used by select_model_instance)
select_model = "gemma-3-27b-it"

# Flat list of every known Google model name (chat + embedding + Gemma);
# the get_models_by_type / filter_gemma_by_size helpers filter this list.
google_model = [
    # Gemini models
    "gemini-2.0-flash-exp",
    "gemini-1.0-pro-vision-latest",
    "gemini-pro-vision",
    "gemini-1.5-pro-001",
    "gemini-1.5-pro-002",
    "gemini-1.5-pro",
    "gemini-1.5-flash-001",
    "gemini-1.5-flash-001-tuning",
    "gemini-1.5-flash",
    "gemini-1.5-flash-002",
    "gemini-1.5-flash-8b",
    "gemini-1.5-flash-8b-001",
    "gemini-2.0-flash",
    "gemini-2.0-flash-001",
    "gemini-2.0-flash-lite-001",
    "gemini-2.0-flash-lite",
    "embedding-001",
    "text-embedding-004",

    # Gemma models
    "gemma-2b",
    "gemma-2b-it",
    "gemma-3b",
    "gemma-3b-it",
    "gemma-7b",
    "gemma-7b-it",
    "gemma-3-27b",
    "gemma-3-27b-it"
]

# Accessor for the full model catalogue.
def get_all_models():
    """Return the complete list of Google AI model names."""
    return google_model

# Filter the model catalogue by model family.
def get_models_by_type(model_type, models=None):
    """
    Return the models belonging to a given family.

    Args:
        model_type: one of 'gemini', 'gemma', 'embedding' or
            'text-embedding' (case-insensitive).
        models: optional list of model names to filter; defaults to the
            module-level ``google_model`` list (backward-compatible).

    Returns:
        List of model names starting with the requested prefix.

    Raises:
        ValueError: if ``model_type`` is not one of the supported families.
    """
    pool = google_model if models is None else models
    # Normalize once instead of calling .lower() per branch.
    prefix = model_type.lower()
    if prefix not in ('gemini', 'gemma', 'embedding', 'text-embedding'):
        # The old message claimed only 'gemini'/'gemma' were valid even
        # though the embedding families were accepted; list all of them.
        raise ValueError(f"不支持的模型类型: {model_type}，请使用'gemini'、'gemma'、'embedding'或'text-embedding'")
    # Each supported family is selected by simple name prefix; checking
    # the exact normalized type first keeps 'embedding' from ever being
    # confused with 'text-embedding'.
    return [model for model in pool if model.startswith(prefix)]

# Filter Gemma models by parameter size.
def filter_gemma_by_size(size, models=None):
    """
    Filter Gemma models by parameter size.

    Args:
        size: size label such as '2B', '3b', '7B', '27B' (case-insensitive).
        models: optional list of model names to filter; defaults to the
            module-level ``google_model`` list (backward-compatible).

    Returns:
        Gemma model names whose hyphen-delimited size segment equals
        ``size`` (e.g. '2B' matches 'gemma-2b' and 'gemma-2b-it').
    """
    pool = google_model if models is None else models
    # Bug fix: model names are all lowercase ("gemma-2b-it") but the old
    # code uppercased the size ("2B") and did a raw substring test, so it
    # never matched; a substring test would also wrongly let "7b" match
    # "27b". Compare the lowercased size against hyphen-split segments.
    size_token = str(size).lower()
    return [
        model for model in pool
        if model.startswith("gemma") and size_token in model.split("-")
    ]

# File-path configuration
INPUT_DIR = "input_files"  # input file directory
OUTPUT_DIR = "output_files"  # output file directory
SECRET_DIR = "secret_files"  # secret/key file directory

# Build a client whose base URL matches the requested model.
def create_client(model_name=None):
    """
    Create an OpenAI-compatible client suited to the given model.

    Args:
        model_name: target model name; falls back to DEFAULT_MODEL
            when omitted.

    Returns:
        An OpenAI client configured with the base URL serving that model.

    Raises:
        ValueError: if the model cannot be mapped to any known base URL.
    """
    target = model_name or DEFAULT_MODEL

    # First consult the explicit registry of URL -> model lists.
    resolved_url = next(
        (url for url, names in openai_compat.items() if target in names),
        None,
    )

    # Unknown models fall back to a name-prefix guess.
    if not resolved_url:
        if target.startswith("gemini"):
            resolved_url = GEMINI_BASE_URL
        elif target.startswith("gemma"):
            resolved_url = GEMMA_BASE_URL
        else:
            raise ValueError(f"不支持的模型: {target}")

    # Build and return the client.
    return OpenAI(api_key=GOOGLE_API_KEY, base_url=resolved_url)

# Factory: build a logging-instrumented model instance for a model name.
def select_model_instance(model_name=None):
    """
    Select and build a model instance for the given model name.

    Args:
        model_name: model name; falls back to the module-level
            ``select_model`` default when omitted.

    Returns:
        A ModelInstance object wrapping an OpenAI-compatible client for
        that model, exposing ``generate_chat_response``.
    """
    # Configure logging.
    # NOTE(review): basicConfig() here mutates the root logger the first
    # time any instance is created — probably belongs at app startup.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("model_instance")
    
    model = model_name or select_model
    logger.info(f"使用模型: {model}")
    
    # Build a client routed to the correct base URL for this model.
    client = create_client(model)
    logger.info(f"已创建客户端，使用API Key: {GOOGLE_API_KEY[:4]}***")
    
    # Lightweight wrapper pairing the client with the chosen model name.
    class ModelInstance:
        def __init__(self, client, model_name):
            self.client = client
            self.model_name = model_name
            self.logger = logging.getLogger(f"model_instance.{model_name}")
        
        async def generate_chat_response(self, messages, temperature=0.7, max_tokens=2000, stream=False):
            """
            Generate a chat completion for the given messages.

            NOTE(review): declared ``async`` for caller compatibility, but
            the client call below is synchronous and will block the event
            loop while it runs — confirm callers expect this.

            Args:
                messages: list of {"role": ..., "content": ...} dicts;
                    entries missing either key are silently dropped.
                temperature: sampling temperature.
                max_tokens: maximum number of tokens to generate.
                stream: whether to request a streaming response.

            Returns:
                Non-stream: dict with "content", "prompt_tokens",
                "completion_tokens", "total_tokens".
                Stream: {"stream": <raw streaming response>}.
                Failure: {"content": <apology text>, "error": <message>}
                — errors are reported in-band, never raised to the caller.
            """
            try:
                self.logger.info(f"正在调用模型生成回复...")
                self.logger.info(f"消息数量: {len(messages)}, 第一条消息: {messages[0] if messages else 'None'}")
                
                # Keep only well-formed messages: must be dicts carrying
                # both a 'role' and a 'content' key.
                formatted_messages = []
                for msg in messages:
                    if isinstance(msg, dict) and 'role' in msg and 'content' in msg:
                        formatted_messages.append({
                            "role": msg["role"],
                            "content": msg["content"]
                        })
                
                self.logger.info(f"调用OpenAI客户端，模型: {self.model_name}")
                response = self.client.chat.completions.create(
                    model=self.model_name,
                    messages=formatted_messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    stream=stream
                )
                
                # Handle the response.
                if stream:
                    # Streaming: hand the raw iterator back to the caller.
                    self.logger.info("返回流式响应")
                    return {"stream": response}
                else:
                    # Non-streaming: extract content and token usage.
                    if not hasattr(response, 'choices') or not response.choices:
                        self.logger.warning("响应中没有choices字段或为空")
                        return {"content": "", "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
                    
                    content = response.choices[0].message.content if response.choices and hasattr(response.choices[0], 'message') else ""
                    self.logger.info(f"收到响应内容: {content[:50]}... (截断)")
                    
                    # Usage block may be absent; default all counters to 0.
                    prompt_tokens = getattr(response.usage, 'prompt_tokens', 0) if hasattr(response, 'usage') else 0
                    completion_tokens = getattr(response.usage, 'completion_tokens', 0) if hasattr(response, 'usage') else 0
                    total_tokens = getattr(response.usage, 'total_tokens', 0) if hasattr(response, 'usage') else 0
                    
                    return {
                        "content": content,
                        "prompt_tokens": prompt_tokens,
                        "completion_tokens": completion_tokens,
                        "total_tokens": total_tokens
                    }
            except Exception as e:
                self.logger.error(f"生成回复时发生错误: {str(e)}", exc_info=True)
                # Report the failure in-band as the response content.
                return {"content": f"抱歉，模型生成回复时出错: {str(e)}", "error": str(e)}
    
    return ModelInstance(client, model) 