# 🔹 标准库导入
import os
import json
import asyncio
import time
from collections import defaultdict
from typing import Dict, Optional

# 🔹 第三方库导入
import httpx
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

# 🔹 Create the FastAPI application
app = FastAPI()

# NOTE(review): a wildcard origin combined with allow_credentials=True is
# rejected by browsers for credentialed requests — confirm whether
# credentials support is actually required here.
origins = [
    "*"
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# 🔹 Global state for API-key rotation
API_KEYS = []  # populated at startup from API_KEYS.txt
current_key_index = 0  # index of the next key to hand out (round-robin)
key_lock = asyncio.Lock()  # serializes index updates across concurrent requests

# 🔹 In-memory rate-limit bookkeeping: {client_ip: [request timestamps]}
rate_limit_data: Dict[str, list] = defaultdict(list)
RATE_LIMIT_TIMES = 50  # max requests allowed per window
RATE_LIMIT_SECONDS = 60  # sliding-window length in seconds

# =========================
# In-memory rate limiter
# =========================
async def rate_limiter(request: Request) -> Optional[JSONResponse]:
    """Sliding-window rate limit keyed by client IP.

    Returns a 429 JSONResponse when the caller has exceeded
    RATE_LIMIT_TIMES requests within the last RATE_LIMIT_SECONDS,
    otherwise records the request and returns None.
    """
    # request.client can be None (some ASGI servers / test clients);
    # fall back to a shared bucket instead of crashing with AttributeError.
    ip = request.client.host if request.client is not None else "unknown"
    now = time.time()

    # Drop timestamps that have aged out of the sliding window.
    # NOTE: entries for idle IPs are never fully purged from the dict;
    # acceptable for small deployments, but unbounded over time.
    rate_limit_data[ip] = [t for t in rate_limit_data[ip] if now - t < RATE_LIMIT_SECONDS]

    if len(rate_limit_data[ip]) >= RATE_LIMIT_TIMES:
        return JSONResponse(
            status_code=429,
            content={"error": "Too Many Requests. Please slow down."}
        )

    rate_limit_data[ip].append(now)
    return None

# 🔹 Load API keys from API_KEYS.txt
def load_api_keys(file_path: Optional[str] = None) -> list:
    """Load API keys (one non-blank line each) into the global API_KEYS.

    Args:
        file_path: Optional explicit path to the key file. Defaults to
            API_KEYS.txt next to this module (original behavior).

    Returns:
        The list of loaded keys (also stored in the global API_KEYS).

    Raises:
        RuntimeError: if the file is missing, unreadable, or empty.
    """
    global API_KEYS
    if file_path is None:
        file_path = os.path.join(os.path.dirname(__file__), "API_KEYS.txt")
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            keys = [key.strip() for key in f if key.strip()]
    except FileNotFoundError:
        raise RuntimeError("❌ API_KEYS.txt 文件未找到，请确认文件存在于 main.py 同级目录。")
    except Exception as e:
        raise RuntimeError(f"❌ 读取 API_KEYS.txt 出错: {e}") from e
    # Checked OUTSIDE the try block: previously this RuntimeError was raised
    # inside it, caught by the generic `except Exception`, and re-wrapped
    # with a misleading "read error" message.
    if not keys:
        raise RuntimeError("❌ API_KEYS.txt 文件中未找到任何API Key。")
    API_KEYS = keys
    print(f"✅ 成功加载 {len(API_KEYS)} 个 API Key。")
    return API_KEYS

# 🔹 One-time initialization when the application boots
@app.on_event("startup")
async def startup():
    """Load the API key pool from disk before serving any traffic."""
    load_api_keys()

# 🔹 Request body model for the text-only endpoints
class GeminiRequest(BaseModel):
    """Prompt text plus optional Gemini generation parameters."""

    text: str
    # Optional generation parameters; None means "omit from the upstream request".
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    max_output_tokens: Optional[int] = None

# 🔹 Request body model for the multimodal (vision) endpoint
class MultimodalRequest(BaseModel):
    """Prompt text plus one inline image and optional generation parameters."""

    text: str
    image_data: str  # Base64-encoded image data (presumably without a data: URI prefix — TODO confirm)
    image_mime_type: str = "image/jpeg"  # MIME type of the image; defaults to JPEG
    # Optional generation parameters; None means "omit from the upstream request".
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    max_output_tokens: Optional[int] = None


# 🔹 Hand out API keys round-robin, serialized by an asyncio lock
async def get_next_api_key():
    """Return the next key in rotation and advance the shared index."""
    global current_key_index
    async with key_lock:
        idx = current_key_index
        current_key_index = (idx + 1) % len(API_KEYS)
        return API_KEYS[idx]

# 🔹 Build generationConfig from whichever optional parameters were set
# Optional generation parameters shared by the request models; add new
# tunables here instead of adding another copy-pasted if-block.
_GENERATION_FIELDS = ("temperature", "top_p", "top_k", "max_output_tokens")

def get_generation_config(body) -> Dict:
    """Collect the generation parameters explicitly set on *body*.

    Duck-typed via getattr, so it works with any object exposing (some of)
    the attributes in _GENERATION_FIELDS — no pydantic dependency needed.

    Returns:
        A dict mapping field name -> value for every non-None parameter;
        empty when the caller supplied none of them.
    """
    return {
        name: value
        for name in _GENERATION_FIELDS
        if (value := getattr(body, name, None)) is not None
    }


# 🔹 Route: POST /gemini (text-only generation)
@app.post("/gemini")
async def post_gemini(request: Request, body: GeminiRequest):
    """Proxy a text prompt to the Gemini generateContent endpoint.

    Applies the per-IP rate limit, rotates API keys, forwards the prompt
    plus any optional generation parameters, and returns {"answer": ...}.
    """
    # Per-IP rate limit.
    limit_response = await rate_limiter(request)
    if limit_response:
        return limit_response

    api_key = await get_next_api_key()
    BASE_URL = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={api_key}"

    # Only attach generationConfig when the caller set at least one parameter.
    generation_config = get_generation_config(body)

    payload = {
        "contents": [
            {
                "parts": [
                    {"text": body.text}
                ]
            }
        ]
    }

    if generation_config:
        payload["generationConfig"] = generation_config

    async with httpx.AsyncClient(timeout=999) as client:
        response = await client.post(
            BASE_URL,
            json=payload,
            headers={"Content-Type": "application/json"}
        )

    if response.status_code != 200:
        print(f"❌ Gemini API 错误: 状态码 {response.status_code}, 响应内容: {response.text}")
        # Upstream error bodies are not guaranteed to be JSON (proxies can
        # return HTML); fall back to the raw text instead of crashing with
        # an unhandled decode error.
        try:
            error_content = response.json()
        except ValueError:
            error_content = {"error": response.text}
        return JSONResponse(status_code=response.status_code, content=error_content)

    data = response.json()
    try:
        answer_text = data["candidates"][0]["content"]["parts"][0]["text"]
    except (KeyError, IndexError, TypeError) as e:
        # Narrowed from a bare `except Exception` so unrelated programming
        # errors are not masked as a model-response failure.
        print(f"❌ 解析 Gemini 响应失败: {e}, 原始数据: {data}")
        return JSONResponse(status_code=500, content={"error": "No valid answer from model."})

    return {"answer": answer_text}

# 🔹 Route: POST /gemini/vision (multimodal: text + one inline image)
@app.post("/gemini/vision")
async def post_gemini_vision(request: Request, body: MultimodalRequest):
    """Proxy a text prompt plus a Base64 inline image to Gemini.

    Same flow as post_gemini, with the image attached as an inline_data
    part alongside the text part.
    """
    # Per-IP rate limit.
    limit_response = await rate_limiter(request)
    if limit_response:
        return limit_response

    api_key = await get_next_api_key()
    BASE_URL = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={api_key}"

    # Only attach generationConfig when the caller set at least one parameter.
    generation_config = get_generation_config(body)

    # Multimodal payload: one text part plus one inline image part.
    payload = {
        "contents": [
            {
                "parts": [
                    {"text": body.text},
                    {
                        "inline_data": {
                            "mime_type": body.image_mime_type,
                            "data": body.image_data,
                        }
                    }
                ]
            }
        ]
    }

    if generation_config:
        payload["generationConfig"] = generation_config

    async with httpx.AsyncClient(timeout=999) as client:
        response = await client.post(
            BASE_URL,
            json=payload,
            headers={"Content-Type": "application/json"}
        )

    if response.status_code != 200:
        print(f"❌ Gemini API 错误: 状态码 {response.status_code}, 响应内容: {response.text}")
        # Upstream error bodies are not guaranteed to be JSON; fall back to
        # raw text instead of crashing with an unhandled decode error.
        try:
            error_content = response.json()
        except ValueError:
            error_content = {"error": response.text}
        return JSONResponse(status_code=response.status_code, content=error_content)

    data = response.json()
    try:
        answer_text = data["candidates"][0]["content"]["parts"][0]["text"]
    except (KeyError, IndexError, TypeError) as e:
        # Narrowed from a bare `except Exception` so unrelated programming
        # errors are not masked as a model-response failure.
        print(f"❌ 解析 Gemini 响应失败: {e}, 原始数据: {data}")
        return JSONResponse(status_code=500, content={"error": "No valid answer from model."})

    return {"answer": answer_text}

# 🔹 Route: POST /gemini/text_moderation (harmful-content scoring)
@app.post("/gemini/text_moderation")
async def text_moderation(request: Request, body: GeminiRequest):
    """Score the harmfulness of a text (0-100) via Gemini structured output.

    Forces a JSON response schema with 'score' and 'reason' fields and
    returns {"score", "reason", "status"} on success.
    """
    # Per-IP rate limit.
    limit_response = await rate_limiter(request)
    if limit_response:
        return limit_response

    api_key = await get_next_api_key()
    BASE_URL = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={api_key}"

    # Moderation prompt (Chinese): ask for a 0-100 harm score plus a reason,
    # returned strictly as JSON with only 'score' and 'reason' fields.
    prompt_text = (
        f"请评估以下文本的有害程度，并给出一个0到100之间的整数分数，"
        f"以及给出这个分数的原因，且原因用中文回答。"
        f"请只返回包含 'score' 和 'reason' 字段的JSON对象，不要附带任何其他解释。"
        f"以下是文本：\n\n{body.text}"
    )

    payload = {
        "contents": [
            {
                "parts": [
                    {"text": prompt_text}
                ]
            }
        ],
        # Structured output: constrain the model to the score/reason schema.
        "generationConfig": {
            "responseMimeType": "application/json",
            "responseSchema": {
                "type": "OBJECT",
                "properties": {
                    "score": {"type": "INTEGER"},
                    "reason": {"type": "STRING"}
                },
                "propertyOrdering": ["score", "reason"]
            }
        }
    }

    try:
        async with httpx.AsyncClient(timeout=999) as client:
            response = await client.post(
                BASE_URL,
                json=payload,
                headers={"Content-Type": "application/json"}
            )
        response.raise_for_status()
    except httpx.HTTPStatusError as e:
        print(f"❌ Gemini API HTTP 错误: {e.response.status_code}, 响应内容: {e.response.text}")
        # Error bodies are not guaranteed to be JSON; fall back to raw text
        # so the decode error doesn't escape this handler.
        try:
            error_detail = e.response.json()
        except ValueError:
            error_detail = e.response.text
        return JSONResponse(status_code=e.response.status_code, content={"error": error_detail})
    except Exception as e:
        print(f"❌ 发送请求或接收响应时出错: {e}")
        return JSONResponse(status_code=500, content={"error": "An unexpected error occurred."})

    try:
        data = response.json()
        json_string = data["candidates"][0]["content"]["parts"][0]["text"]
        parsed_data = json.loads(json_string)
        score = parsed_data.get("score")
        reason = parsed_data.get("reason")

        if score is None or reason is None:
            raise ValueError("解析后的JSON中未找到 'score' 或 'reason' 字段")

        return JSONResponse(status_code=200, content={"score": score, "reason": reason, "status": "success"})
    except (json.JSONDecodeError, ValueError) as e:
        print(f"❌ 解析 Gemini 响应失败: {e}, 原始数据: {response.text}")
        return JSONResponse(status_code=500, content={"error": "Failed to parse model response."})
    except (KeyError, IndexError, TypeError):
        # KeyError/TypeError were previously unhandled and surfaced as raw
        # 500 tracebacks; only IndexError was caught.
        print(f"❌ Gemini 响应格式异常，candidates或parts字段缺失。原始数据: {response.text}")
        return JSONResponse(status_code=500, content={"error": "Invalid model response format."})