import os
import json
import uuid
import yaml
import base64
from flask import Flask, request, jsonify, Response, render_template, abort, send_from_directory
from flask_socketio import SocketIO, send, emit
import google.generativeai as genai
from mistralai import Mistral
from dotenv import load_dotenv
from datetime import datetime
import PIL.Image

load_dotenv()

# --- Load Configuration from YAML ---
# Optional config.yml supplies API keys, the model list, and file paths;
# a missing or malformed file degrades gracefully to an empty config.
try:
    with open('config.yml', 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
except (IOError, yaml.YAMLError) as e:
    print(f"Error loading config.yml: {e}")
    config = {}
# Bug fix: yaml.safe_load returns None for an empty file (and can return
# a non-dict for scalar/list YAML), which would crash every config.get()
# below. Normalize to an empty dict.
if not isinstance(config, dict):
    config = {}

# --- Google Gemini Configuration ---
# The environment variable takes precedence over the YAML value.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", config.get('google_api_key'))
if not GOOGLE_API_KEY:
    print("Warning: Google API Key not found.")
genai.configure(api_key=GOOGLE_API_KEY)

# --- Mistral AI Configuration ---
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY", config.get('mistral_api_key'))
MISTRAL_AGENT_ID = os.getenv("MISTRAL_AGENT_ID", config.get('mistral_agent_id'))
mistral_client = None  # stays None when Mistral is not configured
if MISTRAL_API_KEY and MISTRAL_AGENT_ID:
    try:
        mistral_client = Mistral(api_key=MISTRAL_API_KEY)
        print(f"Mistral client initialized successfully with Agent ID: {MISTRAL_AGENT_ID}")
    except Exception as e:
        print(f"Failed to initialize Mistral client: {e}")
else:
    print("Warning: Mistral API Key or Agent ID not found. Mistral model will not be available.")

# --- Mistral Vision Models ---
# Mistral model names that accept image input (matched by substring).
MISTRAL_VISION_MODELS = [
    "pixtral-12b-2409",
    "pixtral-large-latest",
    "mistral-large-latest",
    "mistral-small-2506"
]

# --- General Configuration ---
AVAILABLE_MODELS = config.get('available_models', ["gemini-2.5-flash"])
DEFAULT_MODEL = AVAILABLE_MODELS[0] if AVAILABLE_MODELS else "gemini-2.5-flash"
COMMANDS_FILE = config.get('commands_file', 'commands.json')  # saved-commands store

# --- Command Persistence ---
def load_commands():
    """Load saved commands from COMMANDS_FILE.

    Returns a list of command dicts. Entries written before ids existed
    are backfilled with a fresh UUID. Returns [] when the file is
    missing, unreadable, malformed, or does not contain a list.
    """
    if not os.path.exists(COMMANDS_FILE):
        return []
    try:
        with open(COMMANDS_FILE, 'r', encoding='utf-8') as f:
            commands = json.load(f)
    except (json.JSONDecodeError, IOError) as e:
        print(f"Error loading commands: {e}. Starting with empty list.")
        return []
    # Robustness fix: a valid JSON file whose root isn't a list would
    # previously be iterated nonsensically (or crash); treat it as empty.
    if not isinstance(commands, list):
        print(f"Error loading commands: expected a list in {COMMANDS_FILE}. Starting with empty list.")
        return []
    # Backfill ids for legacy entries; bug fix: only rewrite the file
    # when something actually changed (it used to be rewritten on every
    # startup).
    changed = False
    for cmd in commands:
        if 'id' not in cmd:
            cmd['id'] = str(uuid.uuid4())
            changed = True
    if changed:
        save_commands(commands)
    return commands

def save_commands(commands):
    """Persist the given command list to COMMANDS_FILE as UTF-8 JSON.

    Best-effort: an IOError is reported on stdout rather than raised.
    """
    try:
        with open(COMMANDS_FILE, 'w', encoding='utf-8') as outfile:
            json.dump(commands, outfile, ensure_ascii=False, indent=4)
    except IOError as e:
        print(f"Error saving commands: {e}")

# --- Flask App Initialization ---
app = Flask(__name__)
# NOTE(review): a random per-process secret invalidates sessions on every
# restart — fine for this app's usage, but worth confirming.
app.config['SECRET_KEY'] = os.urandom(24)
UPLOAD_FOLDER = 'uploads'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
socketio = SocketIO(app)
app.json.ensure_ascii = False  # emit raw UTF-8 (not \u-escaped) JSON responses
saved_commands = load_commands()  # in-memory command list, persisted via save_commands()
group_chat_messages = []  # in-memory group-chat history; lost on restart

os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# --- Helper Functions ---
def format_history_for_gemini(history_json):
    """Convert a JSON-encoded chat history to Gemini's message format.

    Keeps only entries whose role is 'user' or 'model' and whose parts
    are a non-empty str or list; a bare string is wrapped in a
    one-element list. Returns [] for malformed or non-list input.
    """
    try:
        entries = json.loads(history_json)
        if not isinstance(entries, list):
            return []
        cleaned = []
        for entry in entries:
            role = entry.get('role')
            parts = entry.get('parts')
            if role not in ('user', 'model') or not parts:
                continue
            if isinstance(parts, list):
                cleaned.append({'role': role, 'parts': parts})
            elif isinstance(parts, str):
                cleaned.append({'role': role, 'parts': [parts]})
        return cleaned
    except (json.JSONDecodeError, TypeError):
        return []

def format_history_for_mistral(history_json):
    """Convert a JSON-encoded chat history to Mistral's message format.

    The 'model' role is renamed to 'assistant'; list parts are joined
    into one string. Entries with other roles or empty content are
    dropped. Returns [] for malformed or non-list input.
    """
    try:
        entries = json.loads(history_json)
        if not isinstance(entries, list):
            return []
        messages = []
        for entry in entries:
            role = entry.get('role')
            if role == 'model':
                role = 'assistant'
            parts = entry.get('parts')
            if isinstance(parts, list):
                content = ''.join(parts)
            elif isinstance(parts, str):
                content = parts
            else:
                content = ''
            if content and role in ('user', 'assistant'):
                messages.append({"role": role, "content": content})
        return messages
    except (json.JSONDecodeError, TypeError):
        return []

def is_mistral_vision_model(model_name):
    """Return True if the Mistral model name supports image input.

    Matched by substring against MISTRAL_VISION_MODELS so versioned or
    aliased names still qualify.
    """
    for vision_name in MISTRAL_VISION_MODELS:
        if vision_name in model_name:
            return True
    return False

# --- Routes ---
@app.route('/')
def index():
    """Render the main chat page with the model selection list."""
    return render_template(
        'index.html',
        models=AVAILABLE_MODELS,
        default_model=DEFAULT_MODEL,
    )

@app.route('/chat', methods=['POST'])
def chat():
    """Stream a chat completion as Server-Sent Events (SSE).

    Form fields:
        user_input: the user's text message (optional when images given).
        history: JSON-encoded prior conversation (defaults to '[]').
        model_name: one of AVAILABLE_MODELS (defaults to DEFAULT_MODEL).
        images: zero or more uploaded image files.

    Routes to a Mistral agent, a plain Mistral chat model, or Google
    Gemini based on the model name. Each SSE 'data:' line carries a JSON
    object with one of 'text' (content chunk), 'end' (stream done), or
    'error'.
    """
    user_input = request.form.get('user_input')
    history_json = request.form.get('history', '[]')
    model_name = request.form.get('model_name', DEFAULT_MODEL)
    
    # Multiple image uploads are supported
    image_files = request.files.getlist('images')  # every file sent under 'images'

    if not user_input and not image_files:
        return jsonify({"error": "No user input or images provided"}), 400

    if model_name not in AVAILABLE_MODELS:
         return jsonify({"error": f"Invalid model name: {model_name}"}), 400

    # --- Route to Mistral Model ---
    if model_name.startswith('mistral'):
        if not mistral_client:
            return jsonify({"error": "Mistral client is not configured on the server."}), 500
        
        # Reject image input for Mistral models without vision support
        if image_files and not is_mistral_vision_model(model_name):
            return jsonify({"error": f"The selected Mistral model '{model_name}' does not support image input. Please use a vision-capable model like pixtral-large-latest or mistral-large-latest."}), 400
        
        # Agent streaming mode (model name contains "agent")
        if "agent" in model_name.lower():
            conversation_history = format_history_for_mistral(history_json)
            
            # Build the user message as a list of typed content parts
            user_message_content = []
            if user_input:
                user_message_content.append({"type": "text", "text": user_input})
            
            # Attach images as base64 data URLs.
            # NOTE(review): the MIME type is hard-coded as image/jpeg even
            # for PNG/other uploads — consider using image_file.mimetype.
            if image_files and is_mistral_vision_model(model_name):
                for image_file in image_files:
                    try:
                        image_data = base64.b64encode(image_file.read()).decode('utf-8')
                        user_message_content.append({
                            "type": "image_url",
                            "image_url": f"data:image/jpeg;base64,{image_data}"
                        })
                    except Exception as e:
                        return jsonify({"error": f"Error processing image: {str(e)}"}), 400
            
            conversation_history.append({"role": "user", "content": user_message_content})
            
            def generate_mistral_agent():
                # Generator yielding SSE frames from the Mistral agent stream.
                try:
                    response_stream = mistral_client.beta.conversations.start_stream(
                        inputs=conversation_history,
                        agent_id=MISTRAL_AGENT_ID,
                    )
                    for chunk in response_stream:
                        if chunk.event == 'message.output.delta' and chunk.data.content:
                            yield f"data: {json.dumps({'text': chunk.data.content})}\n\n"
                    yield f"data: {json.dumps({'end': True})}\n\n"
                except Exception as e:
                    error_message = f"无法生成内容: {str(e)}"
                    app.logger.error(error_message)
                    yield f"data: {json.dumps({'error': error_message})}\n\n"

            return Response(generate_mistral_agent(), mimetype='text/event-stream')
        
        # Plain Mistral chat model (non-agent mode)
        else:
            conversation_history = format_history_for_mistral(history_json)
            
            # Build the outgoing user message
            if image_files and is_mistral_vision_model(model_name):
                user_message_content = []
                if user_input:
                    user_message_content.append({"type": "text", "text": user_input})
                
                # NOTE(review): MIME type hard-coded as image/jpeg here too.
                for image_file in image_files:
                    try:
                        image_data = base64.b64encode(image_file.read()).decode('utf-8')
                        user_message_content.append({
                            "type": "image_url",
                            "image_url": f"data:image/jpeg;base64,{image_data}"
                        })
                    except Exception as e:
                        return jsonify({"error": f"Error processing image: {str(e)}"}), 400
                
                conversation_history.append({"role": "user", "content": user_message_content})
            else:
                conversation_history.append({"role": "user", "content": user_input})
            
            def generate_mistral():
                # Generator yielding SSE frames from the Mistral chat stream.
                try:
                    response = mistral_client.chat.stream(
                        model=model_name,
                        messages=conversation_history
                    )
                    for chunk in response:
                        if chunk.data.choices[0].delta.content:
                            yield f"data: {json.dumps({'text': chunk.data.choices[0].delta.content})}\n\n"
                    yield f"data: {json.dumps({'end': True})}\n\n"
                except Exception as e:
                    error_message = f"无法生成内容: {str(e)}"
                    app.logger.error(error_message)
                    yield f"data: {json.dumps({'error': error_message})}\n\n"

            return Response(generate_mistral(), mimetype='text/event-stream')

    # --- Route to Google Gemini Model (Default) ---
    else:
        if not GOOGLE_API_KEY:
            return jsonify({"error": "Google API Key is not configured on the server."}), 500

        formatted_history = format_history_for_gemini(history_json)
        prompt_parts = []
        
        if user_input:
            prompt_parts.append(user_input)
        
        # Attach each uploaded image as a PIL image; copy() so the image
        # survives the closed file handle.
        if image_files:
            for image_file in image_files:
                try:
                    with PIL.Image.open(image_file.stream) as img:
                        prompt_parts.append(img.copy())
                except Exception as e:
                    return jsonify({"error": f"Error processing image: {str(e)}"}), 400
        
        try:
            model = genai.GenerativeModel(model_name)
            
            def generate_gemini():
                # Generator yielding SSE frames from the Gemini stream.
                try:
                    full_history = formatted_history + [{'role': 'user', 'parts': prompt_parts}]
                    stream = model.generate_content(full_history, stream=True)
                    
                    for chunk in stream:
                        if chunk.parts:
                            text_part = ''.join(part.text for part in chunk.parts if hasattr(part, 'text'))
                            if text_part:
                               yield f"data: {json.dumps({'text': text_part})}\n\n"
                    yield f"data: {json.dumps({'end': True})}\n\n"
                except Exception as e:
                     error_message = f"无法生成内容: {str(e)}"
                     app.logger.error(error_message)
                     yield f"data: {json.dumps({'error': error_message})}\n\n"

            return Response(generate_gemini(), mimetype='text/event-stream')

        except Exception as e:
            error_message = f"Failed to initialize model or start chat: {str(e)}"
            app.logger.error(error_message)
            return jsonify({"error": error_message}), 500

# --- Command API Routes ---
@app.route('/api/commands', methods=['GET'])
def get_commands():
    """Return the full list of saved commands as a JSON array."""
    commands = saved_commands
    return jsonify(commands)

@app.route('/api/commands', methods=['POST'])
def add_command():
    """Create a new saved command from a JSON body {name, command}.

    Returns 201 with the created command, or 400 when the body is not a
    JSON object containing both non-empty fields.
    """
    # Robustness fix: request.json errors out on a missing/non-JSON body;
    # get_json(silent=True) lets us answer with a clean 400 instead.
    data = request.get_json(silent=True)
    if not isinstance(data, dict):
        data = {}
    name = data.get('name')
    command_text = data.get('command')

    if not name or not command_text:
        return jsonify({"error": "Name and command are required"}), 400

    new_command = {"id": str(uuid.uuid4()), "name": name, "command": command_text}
    saved_commands.append(new_command)
    save_commands(saved_commands)
    return jsonify({"message": "Command added", "command": new_command}), 201

@app.route('/api/commands/<string:command_id>', methods=['PUT'])
def update_command(command_id):
    """Update the name and text of an existing saved command.

    Returns 400 when the body is not a JSON object with both fields,
    404 when no command has the given id.
    """
    # Robustness fix: request.json errors out on a missing/non-JSON body;
    # get_json(silent=True) lets us answer with a clean 400 instead.
    data = request.get_json(silent=True)
    if not isinstance(data, dict):
        data = {}
    name = data.get('name')
    command_text = data.get('command')

    if not name or not command_text:
        return jsonify({"error": "Name and command are required"}), 400

    command_index = next((index for (index, cmd) in enumerate(saved_commands) if cmd.get("id") == command_id), None)

    if command_index is None:
        return jsonify({"error": "Command not found"}), 404

    saved_commands[command_index]['name'] = name
    saved_commands[command_index]['command'] = command_text
    save_commands(saved_commands)
    return jsonify({"message": "Command updated", "command": saved_commands[command_index]})

@app.route('/api/commands/<string:command_id>', methods=['DELETE'])
def delete_command(command_id):
    """Delete the saved command with the given id; 404 when absent."""
    global saved_commands
    remaining = [cmd for cmd in saved_commands if cmd.get("id") != command_id]

    # Nothing was removed -> no command had that id.
    if len(remaining) == len(saved_commands):
        return jsonify({"error": "Command not found"}), 404

    saved_commands = remaining
    save_commands(saved_commands)
    return jsonify({"message": "Command deleted"})

# --- Group Chat Routes ---
@app.route('/group-chat')
def group_chat():
    """Render the group chat page."""
    page = 'group_chat.html'
    return render_template(page)

@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file from the uploads directory.

    send_from_directory refuses paths that escape the directory.
    """
    upload_dir = app.config['UPLOAD_FOLDER']
    return send_from_directory(upload_dir, filename)

@app.route('/api/upload', methods=['POST'])
def upload_file():
    """Accept a multipart file upload for the group chat.

    Saves the file under UPLOAD_FOLDER with a UUID prefix, records a
    'file' message in the in-memory history, broadcasts it over
    Socket.IO, and returns the file's public URL.
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file part"}), 400
    file = request.files['file']
    if file.filename == '':
        return jsonify({"error": "No selected file"}), 400
    # Security fix: strip any client-supplied directory components so a
    # name like '../../x' cannot escape the uploads directory; prefix
    # with a UUID to avoid collisions. (file is always truthy here, so
    # the old 'if file:' guard was unreachable and is dropped.)
    safe_name = os.path.basename(file.filename)
    filename = str(uuid.uuid4()) + "_" + safe_name
    file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))

    # Bug fix: the URL previously never contained the stored filename,
    # so the returned/broadcast link pointed nowhere.
    file_url = f"/uploads/{filename}"
    message = {
        'id': str(uuid.uuid4()),
        'sid': request.form.get('sid'),
        'type': 'file',
        'filename': file.filename,
        'url': file_url,
        'timestamp': datetime.utcnow().isoformat() + "Z"
    }
    group_chat_messages.append(message)
    socketio.emit('message', message)
    return jsonify({"success": True, "url": file_url})

# --- Socket.IO Event Handlers for Group Chat ---
@socketio.on('connect')
def handle_connect():
    """Send the full group-chat history to a newly connected client."""
    history = group_chat_messages
    emit('init', history)

@socketio.on('message')
def handle_message(data):
    """Append an incoming text message to history and broadcast it."""
    message = dict(
        id=str(uuid.uuid4()),
        sid=request.sid,
        type='text',
        content=data,
        timestamp=datetime.utcnow().isoformat() + "Z",
    )
    group_chat_messages.append(message)
    emit('message', message, broadcast=True)

@socketio.on('delete_message')
def handle_delete(message_id):
    """Delete a chat message; allowed only for its original sender."""
    global group_chat_messages
    target = None
    for msg in group_chat_messages:
        if msg.get('id') == message_id:
            target = msg
            break
    if target is None or target.get('sid') != request.sid:
        return
    group_chat_messages = [m for m in group_chat_messages if m.get('id') != message_id]
    emit('message_deleted', {'id': message_id}, broadcast=True)

if __name__ == '__main__':
    # Development entry point: Socket.IO server on all interfaces, port 5055.
    # allow_unsafe_werkzeug lets the Werkzeug dev server run under
    # flask-socketio; not for production use.
    socketio.run(app, host='0.0.0.0', port=5055, allow_unsafe_werkzeug=True)
