# app.py
from flask import Flask, request, jsonify, abort, Response, render_template, send_from_directory
import requests
import os
import json
import uuid
import time
import base64
from werkzeug.utils import secure_filename
from datetime import datetime
import tempfile
import shutil
import mimetypes

app = Flask(__name__)

# Base URL of the upstream Gemini API that all requests are proxied to.
GEMINI_API_BASE_URL = "https://generativelanguage.googleapis.com"

# File-upload configuration
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp'}  # image formats only
MAX_FILE_SIZE = 20 * 1024 * 1024  # 20MB

# Ensure the upload directory exists (runs as an import-time side effect).
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS

def get_file_size(file_path):
    """Return the size in bytes of the file at *file_path*."""
    return os.stat(file_path).st_size

def get_mime_type(file_path):
    """Guess the MIME type from the file extension.

    Falls back to 'application/octet-stream' when the extension is unknown.
    """
    guessed, _encoding = mimetypes.guess_type(file_path)
    if guessed is None:
        return 'application/octet-stream'
    return guessed

def encode_file_to_base64(file_path):
    """Read *file_path* as binary and return its base64 encoding as text.

    Any error is logged and re-raised to the caller.
    """
    try:
        with open(file_path, 'rb') as handle:
            encoded = base64.b64encode(handle.read())
        return encoded.decode('utf-8')
    except Exception as e:
        app.logger.error(f"Error encoding file {file_path}: {e}")
        raise

# Route that serves the chat front-end page.
@app.route('/web')
def chat_page():
    """Render the chat UI from templates/index.html."""
    page = render_template('index.html')
    return page

# File-upload route
@app.route('/v1/upload', methods=['POST'])
def upload_file():
    """Accept a single image upload and store it under UPLOAD_FOLDER.

    Validates presence, extension, MIME type, and size (<= MAX_FILE_SIZE),
    then saves the file under a timestamp+uuid-prefixed name and returns
    its metadata as JSON (file_id, filename, size, url, path).
    """
    if 'file' not in request.files:
        abort(400, description="No file part")

    file = request.files['file']
    if file.filename == '':
        abort(400, description="No selected file")

    # Extension whitelist check.
    if not allowed_file(file.filename):
        abort(400, description="Only image files (PNG, JPG, JPEG, GIF, WebP) are allowed")

    # MIME-type check (client-supplied, so extension check above still matters).
    if not file.content_type or not file.content_type.startswith('image/'):
        abort(400, description="Invalid file type. Only image files are allowed")

    # Determine size by seeking, without reading the payload into memory.
    file.seek(0, 2)  # seek to end
    file_size = file.tell()
    file.seek(0)  # rewind for save()

    if file_size > MAX_FILE_SIZE:
        abort(400, description=f"File too large. Maximum size is {MAX_FILE_SIZE // (1024*1024)}MB")

    # Sanitize the client-supplied name before it touches the filesystem.
    filename = secure_filename(file.filename)
    if not filename:
        # secure_filename() can strip a hostile name down to an empty string.
        abort(400, description="Invalid filename")
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # BUG FIX: the stored name previously ended in a literal "_(unknown)",
    # discarding the secured original filename and its extension — which
    # later breaks the extension/MIME detection when the file is attached
    # to a chat message. Keep the sanitized name (and extension) instead.
    unique_filename = f"{timestamp}_{uuid.uuid4().hex}_{filename}"
    file_path = os.path.join(UPLOAD_FOLDER, unique_filename)

    try:
        file.save(file_path)

        # Return the stored file's metadata to the client.
        return jsonify({
            'file_id': unique_filename,
            'filename': filename,
            'size': file_size,
            'url': f'/uploads/{unique_filename}',
            'path': file_path
        })
    except Exception as e:
        app.logger.error(f"Error saving file: {e}")
        abort(500, description="Error saving file")

# File-retrieval route
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file from UPLOAD_FOLDER, or 404."""
    # SECURITY: reject traversal-style names before probing the filesystem.
    # send_from_directory() guards its own path, but the existence probe
    # below should never be reachable with an arbitrary joined path.
    safe_name = secure_filename(filename)
    if safe_name != filename:
        abort(404, description="File not found")
    file_path = os.path.join(UPLOAD_FOLDER, safe_name)
    if not os.path.isfile(file_path):
        abort(404, description="File not found")
    return send_from_directory(UPLOAD_FOLDER, safe_name)

def _multimodal_parts(content):
    """Convert an OpenAI multimodal content list into Gemini 'parts'.

    Text fragments are concatenated into a single leading text part;
    image_url fragments must reference files served under /uploads/ and
    are embedded as base64 inline_data. Unsupported or failing files are
    reported inside the text part instead.
    """
    parts = []
    text_content = ""

    for part in content:
        if part.get("type") == "text":
            text_content += part.get("text", "")
        elif part.get("type") == "image_url":
            image_url = part.get("image_url", {})
            if not isinstance(image_url, dict):
                continue
            file_path = image_url.get("url", "").replace("/uploads/", "")
            if not file_path:
                continue
            full_path = os.path.join(UPLOAD_FOLDER, file_path)
            if not os.path.exists(full_path):
                continue
            try:
                file_ext = os.path.splitext(full_path)[1].lower()
                mime_type = get_mime_type(full_path)
                if file_ext in ('.png', '.jpg', '.jpeg', '.gif', '.webp'):
                    # Supported image: embed as base64 inline_data.
                    parts.append({
                        "inline_data": {
                            "mime_type": mime_type,
                            "data": encode_file_to_base64(full_path)
                        }
                    })
                    app.logger.info(f"处理图片文件: {file_path}, 类型: {mime_type}")
                else:
                    # Unsupported format: surface it in the text content.
                    text_content += f"\n[附件: {os.path.basename(full_path)} - 格式不支持]"
                    app.logger.warning(f"不支持的文件类型: {file_ext}, 文件: {file_path}")
            except Exception as e:
                app.logger.error(f"Error processing file: {e}")
                text_content += f"\n[附件处理失败: {os.path.basename(full_path)}]"

    # Text goes first so the model sees the prompt before the images.
    if text_content.strip():
        parts.insert(0, {"text": text_content.strip()})
    return parts


def _openai_messages_to_gemini_contents(messages):
    """Map OpenAI chat messages onto Gemini 'contents' entries.

    Roles: user -> user, assistant -> model. Gemini has no system role, so
    system messages are folded into a user turn prefixed with
    'System message:'. List-valued content is treated as multimodal.
    """
    contents = []
    for message in messages:
        role = message.get("role")
        content = message.get("content")

        if isinstance(content, list):
            parts = _multimodal_parts(content)
            if parts:
                contents.append({"role": "user" if role == "user" else "model", "parts": parts})
        elif role == "user":
            contents.append({"role": "user", "parts": [{"text": content}]})
        elif role == "assistant":
            contents.append({"role": "model", "parts": [{"text": content}]})
        elif role == "system":
            contents.append({"role": "user", "parts": [{"text": f"System message: {content}"}]})
    return contents


@app.route('/v1/chat/completions', methods=['POST'])
def openai_chat_completions_proxy():
    """OpenAI-compatible chat-completions endpoint proxied to Gemini.

    The client supplies its Gemini API key via the 'X-Gemini-Api-Key'
    header or a standard 'Authorization: Bearer <key>' header; the model
    name from the request body is used verbatim as the Gemini model.
    Supports both blocking and SSE streaming responses.
    """
    gemini_api_key = request.headers.get('X-Gemini-Api-Key') or request.headers.get('Authorization', '').replace('Bearer ', '')

    if not gemini_api_key:
        abort(401, description="Unauthorized: X-Gemini-Api-Key or Authorization header is missing.")

    try:
        openai_request_data = request.get_json(force=True)
    except Exception as e:
        app.logger.error(f"Error parsing OpenAI request JSON: {e}")
        abort(400, description="Invalid JSON in request body.")

    # Use the client-supplied model name directly.
    gemini_model = openai_request_data.get("model")
    if not gemini_model:
        abort(400, description="Model name is missing in the request.")

    # 1. Translate the OpenAI request into Gemini's schema.
    contents = _openai_messages_to_gemini_contents(openai_request_data.get("messages", []))
    if not contents:
        abort(400, description="No valid messages found in request.")

    gemini_request_data = {"contents": contents}

    # BUG FIX: sampling parameters belong inside "generationConfig" — the
    # Gemini API does not accept them at the top level of the payload, so
    # temperature/max_tokens/top_p/stop were previously ignored/rejected.
    generation_config = {}
    if "temperature" in openai_request_data:
        generation_config["temperature"] = openai_request_data["temperature"]
    if "max_tokens" in openai_request_data:
        generation_config["maxOutputTokens"] = openai_request_data["max_tokens"]
    if "top_p" in openai_request_data:
        generation_config["topP"] = openai_request_data["top_p"]
    if "stop" in openai_request_data:
        stop = openai_request_data["stop"]
        # OpenAI allows a bare string here; Gemini expects a list.
        generation_config["stopSequences"] = [stop] if isinstance(stop, str) else stop
    if generation_config:
        gemini_request_data["generationConfig"] = generation_config

    stream = openai_request_data.get("stream", False)

    headers = {
        "Content-Type": "application/json",
        "x-goog-api-key": gemini_api_key
    }

    # 2. Call the Gemini API.
    try:
        if stream:
            # Streaming uses a different Gemini method (:streamGenerateContent).
            full_gemini_url = f"{GEMINI_API_BASE_URL}/v1beta/models/{gemini_model}:streamGenerateContent"
            return Response(
                generate_stream_response(full_gemini_url, headers, gemini_request_data, gemini_model),
                mimetype='text/event-stream'
            )
        else:
            full_gemini_url = f"{GEMINI_API_BASE_URL}/v1beta/models/{gemini_model}:generateContent"
            gemini_response = requests.post(full_gemini_url, headers=headers, json=gemini_request_data)
            gemini_response.raise_for_status()  # surface HTTP errors

            # 3. Translate the Gemini response back to OpenAI shape.
            openai_response_data = convert_gemini_response_to_openai(gemini_response.json(), openai_model=gemini_model)
            return jsonify(openai_response_data)

    except requests.exceptions.RequestException as e:
        app.logger.error(f"Error forwarding request to Gemini API: {e}")
        error_detail = f"Proxy error communicating with Gemini API: {e}"
        if hasattr(e, 'response') and e.response is not None:
            try:
                error_detail += f" - Gemini response: {e.response.text}"
                app.logger.error(f"Gemini API Error Response: {e.response.text}")
            except Exception:
                pass
        abort(500, description=error_detail)
    except Exception as e:
        app.logger.error(f"Unexpected error in proxy: {e}")
        abort(500, description=f"An unexpected error occurred: {e}")

def convert_gemini_response_to_openai(gemini_json, openai_model):
    """Convert a non-streaming Gemini generateContent response into an
    OpenAI chat.completion payload.

    `openai_model` is echoed back verbatim as the response's "model" field.
    An empty/candidate-less Gemini response yields an empty choices list
    with zeroed usage.
    """
    if not gemini_json or not gemini_json.get("candidates"):
        return {
            "id": f"chatcmpl-{str(uuid.uuid4())}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": openai_model,
            "choices": [],
            "usage": {
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0
            }
        }

    first_candidate = gemini_json["candidates"][0]
    generated_text = ""
    if first_candidate.get("content") and first_candidate["content"].get("parts"):
        for part in first_candidate["content"]["parts"]:
            if "text" in part:
                generated_text += part["text"]

    # CONSISTENCY FIX: map Gemini's finishReason the same way the streaming
    # converter does, instead of hard-coding "stop". Unknown/absent reasons
    # still default to "stop" for backward compatibility.
    finish_reason = {
        "STOP": "stop",
        "MAX_TOKENS": "length",
        "SAFETY": "content_filter",
    }.get(first_candidate.get("finishReason"), "stop")

    usage = gemini_json.get("usageMetadata", {})
    prompt_tokens = usage.get("promptTokenCount", 0)
    completion_tokens = usage.get("candidatesTokenCount", 0)

    return {
        "id": f"chatcmpl-{str(uuid.uuid4())}",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": openai_model,
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": generated_text,
                },
                "logprobs": None,
                "finish_reason": finish_reason
            }
        ],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens
        }
    }

def generate_stream_response(url, headers, data, openai_model_name):
    """Proxy Gemini's SSE stream, re-emitting OpenAI-style SSE chunks.

    `openai_model_name` is the model name echoed in every emitted chunk.
    On upstream failure a final error chunk is emitted instead of raising,
    so the client connection terminates cleanly.
    """
    url_with_stream_param = f"{url}?alt=sse"

    try:
        with requests.post(url_with_stream_param, headers=headers, json=data, stream=True) as r:
            r.raise_for_status()
            # BUG FIX: iter_lines() reassembles SSE lines across chunk
            # boundaries; the previous iter_content()+splitlines() approach
            # could split a JSON payload mid-line and silently drop events.
            for raw_line in r.iter_lines(decode_unicode=True):
                if not raw_line:
                    continue
                line = raw_line.strip()
                if line.startswith("data:"):
                    line = line[5:].strip()
                if not (line.startswith("{") and line.endswith("}")):
                    continue
                try:
                    gemini_chunk_json = json.loads(line)
                except json.JSONDecodeError:
                    app.logger.warning(f"Could not decode JSON from stream chunk: {line}")
                    continue
                try:
                    openai_chunk_response = convert_gemini_stream_chunk_to_openai(gemini_chunk_json, openai_model_name)
                    yield f"data: {json.dumps(openai_chunk_response)}\n\n"
                    # Stop early when Gemini blocks the prompt.
                    if gemini_chunk_json.get("promptFeedback") and gemini_chunk_json["promptFeedback"].get("blockReason"):
                        yield "data: [DONE]\n\n"
                        return
                except Exception as e:
                    app.logger.error(f"Error processing stream chunk: {e}")
        # BUG FIX: OpenAI-compatible clients wait for a terminal [DONE]
        # sentinel; previously it was only emitted on block/error paths,
        # leaving clients hanging after a normal end of stream.
        yield "data: [DONE]\n\n"
    except requests.exceptions.RequestException as e:
        app.logger.error(f"Stream request failed: {e}")
        if hasattr(e, 'response') and e.response is not None:
            app.logger.error(f"Gemini API Error Response: {e.response.text}")
        # Report the failure to the client as a final content chunk.
        error_response = {
            "id": f"chatcmpl-{str(uuid.uuid4())}",
            "object": "chat.completion.chunk",
            "created": int(time.time()),
            "model": openai_model_name,
            "choices": [
                {
                    "index": 0,
                    "delta": {"content": f"\n\n❌ 请求失败: {str(e)}"},
                    "logprobs": None,
                    "finish_reason": "stop"
                }
            ]
        }
        yield f"data: {json.dumps(error_response)}\n\n"
        yield "data: [DONE]\n\n"

def convert_gemini_stream_chunk_to_openai(gemini_stream_chunk, openai_model_name):
    """Translate one Gemini streaming chunk into an OpenAI
    chat.completion.chunk dict.

    `openai_model_name` is echoed as the chunk's "model" field. The delta
    text is the concatenation of the first candidate's text parts;
    finish_reason is mapped from Gemini's finishReason, or None while the
    stream is still in progress.
    """
    finish_reason_map = {
        "STOP": "stop",
        "MAX_TOKENS": "length",
        "SAFETY": "content_filter",
        # Additional mappings can be added per the Gemini documentation.
    }

    text_delta = ""
    stop_reason = None

    candidates = gemini_stream_chunk.get("candidates")
    if candidates:
        candidate = candidates[0]
        content = candidate.get("content")
        if content and content.get("parts"):
            text_delta = "".join(p["text"] for p in content["parts"] if "text" in p)
        stop_reason = finish_reason_map.get(candidate.get("finishReason"))

    return {
        "id": f"chatcmpl-{str(uuid.uuid4())}",
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": openai_model_name,
        "choices": [
            {
                "index": 0,
                "delta": {"content": text_delta},
                "logprobs": None,
                "finish_reason": stop_reason
            }
        ]
    }

if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader —
    # development only; never run with debug enabled in production.
    app.run(debug=True)