#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
多模型对话Web应用

该应用提供了一个简单的Web界面，用于与多个大模型进行对话。
支持动态切换不同的模型进行对话。
"""

import json
import logging
import os
from logging.handlers import RotatingFileHandler
from types import SimpleNamespace

from flask import Flask, render_template, request, jsonify, Response
from openai import OpenAI

# Logging configuration
def setup_logger():
    """
    Configure the root logger with a console handler and a rotating file
    handler writing to logs/app.log (1 MB per file, 5 backups, UTF-8).

    Idempotent: repeated calls return the already-configured root logger.
    The original re-added both handlers on every call, so a second call
    (e.g. a module re-import under the Flask reloader) duplicated every
    log line.

    Returns:
        logging.Logger: the configured root logger.
    """
    logger = logging.getLogger()

    # Guard against double configuration.
    if getattr(setup_logger, "_configured", False):
        return logger

    # Create the logs directory if it does not exist yet.
    os.makedirs('logs', exist_ok=True)

    logger.setLevel(logging.INFO)

    # Shared formatter for both handlers.
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # Console handler
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # Rotating file handler
    file_handler = RotatingFileHandler(
        'logs/app.log',
        maxBytes=1024 * 1024,  # 1MB
        backupCount=5,
        encoding='utf-8'
    )
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    setup_logger._configured = True
    return logger

# Initialize logging first so startup events are captured.
logger = setup_logger()
logging.info("应用启动，日志系统初始化完成")

app = Flask(__name__)

class ModelChat:
    """
    通用大模型对话接口
    支持动态切换不同的模型
    """
    
    def __init__(self):
        """
        初始化聊天接口管理器
        """
        self.models = {}
        self.current_model = None
        
    def add_model(self, model_name, base_url, api_key, model_id):
        """
        添加新的模型配置
        
        Args:
            model_name (str): 模型名称
            base_url (str): API基础URL
            api_key (str): API密钥
            model_id (str): 模型ID
        """
        try:
            client = OpenAI(
                api_key=api_key,
                base_url=base_url
            )
            
            self.models[model_name] = {
                "client": client,
                "model_id": model_id,
                "config": {
                    "base_url": base_url,
                    "api_key": api_key,
                    "model_id": model_id
                }
            }
            
            # 如果是第一个添加的模型，设置为当前模型
            if not self.current_model:
                self.current_model = model_name
                
            logging.info(f"成功添加模型: {model_name}")
            return True
            
        except Exception as e:
            logging.error(f"添加模型 {model_name} 失败: {str(e)}")
            return False
            
    def switch_model(self, model_name):
        """
        切换当前使用的模型
        
        Args:
            model_name (str): 要切换到的模型名称
            
        Returns:
            bool: 切换是否成功
        """
        if model_name in self.models:
            self.current_model = model_name
            logging.info(f"已切换到模型: {model_name}")
            return True
        else:
            logging.error(f"模型 {model_name} 不存在")
            return False
            
    def get_current_model(self):
        """
        获取当前使用的模型信息
        
        Returns:
            dict: 当前模型的配置信息
        """
        if not self.current_model:
            return None
        return {
            "name": self.current_model,
            **self.models[self.current_model]["config"]
        }
        
    def list_models(self):
        """
        列出所有可用的模型
        
        Returns:
            list: 模型名称列表
        """
        return list(self.models.keys())
    
    def chat(self, messages, stream=True):
        """
        与当前选择的大模型进行对话
        
        Args:
            messages (list): 对话消息列表
            stream (bool): 是否使用流式响应
            
        Returns:
            dict or generator: 模型响应
        """
        if not self.current_model:
            raise ValueError("未选择任何模型")
            
        try:
            model = self.models[self.current_model]
            completion = model["client"].chat.completions.create(
                model=model["model_id"],
                messages=messages,
                stream=stream
            )
            return completion
            
        except Exception as e:
            logging.error(f"聊天请求失败: {str(e)}")
            if stream:
                def error_generator():
                    yield {"error": str(e)}
                return error_generator()
            else:
                return {"error": str(e)}

# Create the shared chat manager instance
chat_manager = ModelChat()

# Predefined model registrations: (display name, env var, base URL, model id).
# NOTE(review): "蓝源模型" and "百炼模型" register the exact same backend,
# credentials (DASHSCOPE_API_KEY) and model id ("qwq-plus") under two names —
# this looks like a copy-paste duplicate; confirm it is intentional.
_MODEL_SPECS = [
    ("蓝源模型", "DASHSCOPE_API_KEY",
     "https://dashscope.aliyuncs.com/compatible-mode/v1", "qwq-plus"),
    ("百炼模型", "DASHSCOPE_API_KEY",
     "https://dashscope.aliyuncs.com/compatible-mode/v1", "qwq-plus"),
    ("豆包模型", "DOUBAO_API_KEY",
     "https://ark.cn-beijing.volces.com/api/v3", "ep-20250305101734-7k2mw"),
    ("Kimi模型", "KIMI_API_KEY",
     "https://api.moonshot.cn/v1", "moonshot-v1-8k"),
]

for _name, _env_var, _base_url, _model_id in _MODEL_SPECS:
    _api_key = os.environ.get(_env_var)
    if not _api_key:
        logging.warning(f"未找到{_name}API密钥({_env_var})")
        continue
    logging.info(f"找到{_name}API密钥，尝试添加{_name}")
    if chat_manager.add_model(_name, _base_url, _api_key, _model_id):
        logging.info(f"{_name}添加成功")
    else:
        logging.error(f"{_name}添加失败")

# Report which models actually loaded.
available_models = chat_manager.list_models()
logging.info(f"当前可用的模型: {available_models}")
if not available_models:
    logging.warning("警告：没有任何可用的模型！")

@app.route('/')
def index():
    """Render the chat home page with the model list and the active model."""
    return render_template(
        'index.html',
        models=chat_manager.list_models(),
        current_model=chat_manager.get_current_model())

@app.route('/models', methods=['GET'])
def list_models():
    """Return all registered model names and the currently active model."""
    payload = {
        "models": chat_manager.list_models(),
        "current_model": chat_manager.get_current_model(),
    }
    return jsonify(payload)

@app.route('/models/switch', methods=['POST'])
def switch_model():
    """
    Switch the active model.

    Expects a JSON body {"model_name": <name>}.  Returns the new current
    model on success, or a 400 error for a missing/invalid body, a missing
    model_name, or an unknown model.
    """
    # silent=True returns None instead of raising on a missing or malformed
    # JSON body (request.json would abort with an opaque 400), so we can
    # answer with a clean JSON error like the /chat route does.
    data = request.get_json(silent=True)
    if not data:
        return jsonify({"error": "无效的JSON数据"}), 400

    model_name = data.get('model_name')
    if not model_name:
        return jsonify({"error": "未提供模型名称"}), 400

    if chat_manager.switch_model(model_name):
        return jsonify({
            "success": True,
            "current_model": chat_manager.get_current_model()
        })
    return jsonify({
        "error": f"切换到模型 {model_name} 失败"
    }), 400

@app.route('/models/add', methods=['POST'])
def add_model():
    """
    Register a new model from a JSON body.

    Required fields: name, base_url, api_key, model_id.  Returns the updated
    model list on success, or 400 on a missing/invalid body, a missing field,
    or a registration failure.
    """
    # silent=True returns None instead of raising on a missing or malformed
    # JSON body, so the missing-field loop below never hits a None `data`.
    data = request.get_json(silent=True)
    if not data:
        return jsonify({"error": "无效的JSON数据"}), 400

    required_fields = ['name', 'base_url', 'api_key', 'model_id']

    # Validate required fields before touching the manager.
    for field in required_fields:
        if field not in data:
            return jsonify({
                "error": f"缺少必需字段: {field}"
            }), 400

    success = chat_manager.add_model(
        data['name'],
        data['base_url'],
        data['api_key'],
        data['model_id']
    )

    if success:
        return jsonify({
            "success": True,
            "models": chat_manager.list_models()
        })
    return jsonify({
        "error": "添加模型失败"
    }), 400

@app.route('/chat', methods=['POST'])
def chat():
    """Handle a chat request and stream the model's reply back as SSE.

    Expects JSON {"messages": [...]}; the last message may carry an
    "options" dict whose deep_thinking flag (default True) toggles the
    forwarding of reasoning chunks.  Responds with a text/event-stream of
    JSON events typed start / reasoning / switch_to_answer / answer /
    usage / error, or a JSON error (400/500) before streaming begins.
    """
    if not chat_manager.get_current_model():
        return jsonify({"error": "未选择任何模型"}), 500
    
    try:
        data = request.json
        if not data:
            return jsonify({"error": "无效的JSON数据"}), 400
            
        messages = data.get('messages', [])
        
        if not messages:
            return jsonify({"error": "消息不能为空"}), 400
        
        # Per-request options ride on the last message.
        last_message = messages[-1]
        options = last_message.get('options', {})
        deep_thinking = options.get('deep_thinking', True)
        
        logging.info(f"接收到聊天请求，使用模型: {chat_manager.current_model}")
        logging.info(f"最后一条消息: {messages[-1].get('content', '')[:50]}...")
        logging.info(f"深度思考模式: {deep_thinking}")
        
        # Adapt the message format to the selected model.
        if chat_manager.current_model == "豆包模型":
            # Doubao expects user content as a list of typed parts
            # ({"type": "text", "text": ...}); other roles pass through.
            formatted_messages = []
            for msg in messages:
                if msg["role"] == "user":
                    formatted_content = [{"type": "text", "text": msg["content"]}]
                    formatted_messages.append({
                        "role": msg["role"],
                        "content": formatted_content
                    })
                else:
                    formatted_messages.append(msg)
            messages = formatted_messages
        elif chat_manager.current_model == "Kimi模型":
            # Kimi uses the standard OpenAI message format; no conversion.
            pass
        
        # Kick off the streaming chat completion.
        completion = chat_manager.chat(messages)
        
        # Stream the response back to the client as server-sent events.
        def generate():
            reasoning_content = ""  # accumulated reasoning text
            answer_content = ""     # accumulated answer text
            is_answering = False    # True once reasoning ends and the answer starts
            
            try:
                # SSE comment line to keep the connection alive.
                yield ":\n\n".encode('utf-8')
                
                # Announce which model is answering.
                start_data = json.dumps({
                    "type": "start",
                    "model": chat_manager.current_model
                })
                yield f"data: {start_data}\n\n".encode('utf-8')
                
                for chunk in completion:
                    if hasattr(chunk, 'choices') and not chunk.choices:
                        # A choices-less chunk carries the final usage stats.
                        if hasattr(chunk, 'usage'):
                            usage_data = json.dumps({
                                "type": "usage", 
                                "content": str(chunk.usage)
                            })
                            yield f"data: {usage_data}\n\n".encode('utf-8')
                    elif hasattr(chunk, 'error'):
                        # Error chunk (e.g. from ModelChat.chat's failure path):
                        # forward it and stop streaming.
                        error_str = str(getattr(chunk, 'error', '未知错误'))
                        error_data = json.dumps({
                            "type": "error", 
                            "content": error_str
                        })
                        yield f"data: {error_data}\n\n".encode('utf-8')
                        break
                    else:
                        delta = chunk.choices[0].delta
                        
                        # Per-model handling of the streamed delta.
                        if chat_manager.current_model == "豆包模型":
                            # Doubao streams plain content only.
                            if hasattr(delta, 'content') and delta.content is not None:
                                answer_content += delta.content
                                answer_data = json.dumps({
                                    "type": "answer",
                                    "content": delta.content
                                })
                                yield f"data: {answer_data}\n\n".encode('utf-8')
                        elif chat_manager.current_model == "Kimi模型":
                            # Kimi uses the standard OpenAI streaming format.
                            if hasattr(delta, 'content') and delta.content is not None:
                                answer_content += delta.content
                                answer_data = json.dumps({
                                    "type": "answer",
                                    "content": delta.content
                                })
                                yield f"data: {answer_data}\n\n".encode('utf-8')
                        else:
                            # Default (Bailian/DashScope-style) path: separate
                            # reasoning stream and answer stream.
                            # NOTE(review): assumes the delta may carry a
                            # `reasoning_content` attribute — confirm against
                            # the DashScope compatible-mode API.
                            if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None and deep_thinking:
                                reasoning_content += delta.reasoning_content
                                reasoning_data = json.dumps({
                                    "type": "reasoning",
                                    "content": delta.reasoning_content
                                })
                                yield f"data: {reasoning_data}\n\n".encode('utf-8')
                            elif hasattr(delta, 'content') and delta.content is not None:
                                # First non-empty content marks the switch from
                                # reasoning to the actual answer.
                                if not is_answering and delta.content != "":
                                    is_answering = True
                                    switch_data = json.dumps({"type": "switch_to_answer"})
                                    yield f"data: {switch_data}\n\n".encode('utf-8')
                                
                                answer_content += delta.content
                                answer_data = json.dumps({
                                    "type": "answer",
                                    "content": delta.content
                                })
                                yield f"data: {answer_data}\n\n".encode('utf-8')
                            
            except Exception as e:
                # Surface streaming failures to the client as an error event.
                error_data = json.dumps({
                    "type": "error",
                    "content": str(e)
                })
                yield f"data: {error_data}\n\n".encode('utf-8')
                logging.error(f"生成响应时出错: {str(e)}")
                
        return Response(generate(), mimetype='text/event-stream')
        
    except Exception as e:
        logging.error(f"处理聊天请求时出错: {str(e)}")
        return jsonify({"error": str(e)}), 500

if __name__ == '__main__':
    # Run the development server.
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug interactive debugger to the whole network — disable debug
    # (or bind to localhost) before deploying.
    app.run(debug=True, host='0.0.0.0', port=5000) 