import os
import json
import uuid
from typing import Dict, Any, List, Optional
from datetime import datetime, timezone

from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Body, Path as FastAPIPath
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_openai import ChatOpenAI
# from langchain_community.chat_models import ChatTongyi # No longer needed
from langchain_community.chat_message_histories import ChatMessageHistory # Added for get_session_history

# 从 app.chains 导入链的获取函数
from app.chains import get_analyzer_questioner_chain, get_optimizer_chain

# 加载环境变量
load_dotenv()

# 模型配置文件路径
# 确保这个路径相对于项目根目录下的 backend/app/main.py 是正确的
# 如果 main.py 在 backend/app/ 目录下，则 model_configs.json 在 ../
MODEL_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "model_configs.json")


# Pydantic 模型定义
class ModelConfigBase(BaseModel):
    """Shared fields of a model configuration (excludes the API key)."""
    display_name: str = Field(..., description="用户为模型设置的易记名称")
    api_base_url: str = Field(..., description="API 地址，例如 OpenAI 的 openai_api_base")  # mandatory: every model is reached via an OpenAI-compatible endpoint
    model_name_internal: str = Field(..., description="实际的模型名称，例如 gpt-3.5-turbo")
    is_enabled: bool = Field(True, description="模型是否启用")

class ModelConfigCreate(ModelConfigBase):
    """Creation payload; the API key is required when adding a model."""
    api_key: str = Field(..., description="API 密钥")  # required at creation time

class ModelConfigUpdate(BaseModel):
    """Partial-update payload; only explicitly provided fields are applied."""
    display_name: Optional[str] = None
    api_base_url: Optional[str] = None  # optional for updates, though the frontend treats it as required
    model_name_internal: Optional[str] = None
    api_key: Optional[str] = None  # optional on update; replaces the stored key when provided
    is_enabled: Optional[bool] = None

class ModelConfigInDB(ModelConfigBase):
    """Full stored record, including the raw API key (never returned to clients)."""
    id: str
    api_key: str  # NOTE(review): stored in plain text on disk — consider encrypting at rest
    created_at: datetime
    updated_at: datetime

class ModelConfigResponse(ModelConfigBase):  # API response shape — deliberately excludes api_key
    """Model configuration as exposed over the API (API key omitted)."""
    id: str
    created_at: datetime
    updated_at: datetime

class TestConnectionResponse(BaseModel):
    """Outcome of a connectivity test against a configured model."""
    success: bool
    message: str

# Optimizer API request bodies
class OptimizerInitiateRequest(BaseModel):
    """Start a new prompt-optimization conversation."""
    prompt: str
    selected_model_id: str

class OptimizerRefineRequest(BaseModel):
    """Submit the user's answer to the clarifying questions."""
    conversation_id: str
    answer: str
    # selected_model_id: str  # refine does not call the LLM directly, so no model id is needed

class OptimizerFinalizeRequest(BaseModel):
    """Request generation of the final optimized prompt."""
    conversation_id: str
    selected_model_id: str

# In-memory store of per-conversation chat histories (lost on process restart).
session_history: Dict[str, BaseChatMessageHistory] = {}

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the chat history for *session_id*, creating an empty one on first use."""
    history = session_history.get(session_id)
    if history is None:
        history = ChatMessageHistory()
        session_history[session_id] = history
    return history

# Model-config persistence helpers
def load_model_configs() -> List[ModelConfigInDB]:
    """Load all model configurations from MODEL_CONFIG_FILE.

    Creates an empty config file when it does not exist yet. Returns an
    empty list when the file is missing or its contents are invalid.
    """
    if not os.path.exists(MODEL_CONFIG_FILE):
        # Saving an empty list creates the file so later writes succeed.
        save_model_configs([])
        return []
    try:
        with open(MODEL_CONFIG_FILE, "r", encoding="utf-8") as f:
            configs_data = json.load(f)
        return [ModelConfigInDB(**config) for config in configs_data]
    except (TypeError, ValueError) as e:
        # ValueError covers both json.JSONDecodeError and pydantic's
        # ValidationError (raised when a record does not match the
        # ModelConfigInDB schema) — the original clause missed the latter.
        print(f"Error loading model configs from {MODEL_CONFIG_FILE}: {e}")
        # TODO: back up the corrupt file instead of silently dropping all configs.
        return []


def save_model_configs(configs: List[ModelConfigInDB]):
    """Serialize the given configs (API keys included) to MODEL_CONFIG_FILE as JSON."""
    serializable = [cfg.model_dump(mode="json") for cfg in configs]
    try:
        with open(MODEL_CONFIG_FILE, "w", encoding="utf-8") as fh:
            json.dump(serializable, fh, ensure_ascii=False, indent=2)
    except IOError as err:
        # Write failures (e.g. permission problems) are logged, not raised.
        print(f"Error saving model configs to {MODEL_CONFIG_FILE}: {err}")

def get_model_config_by_id(model_id: str, configs: Optional[List[ModelConfigInDB]] = None) -> Optional[ModelConfigInDB]:
    """Find a model config by id; loads from disk when *configs* is not supplied."""
    candidates = load_model_configs() if configs is None else configs
    return next((cfg for cfg in candidates if cfg.id == model_id), None)

def get_llm_instance(config: ModelConfigInDB) -> BaseChatModel:
    """Build a ChatOpenAI client from a stored model configuration.

    Raises:
        ValueError: when the config lacks an API key or a base URL.
    """
    if not config.api_key:
        raise ValueError(f"API key is missing for model {config.display_name} ({config.id})")
    if not config.api_base_url:
        raise ValueError(f"API Base URL is missing for model {config.display_name} ({config.id})")

    # All providers are accessed through the OpenAI-compatible API surface,
    # so ChatOpenAI is always used regardless of the actual vendor.
    return ChatOpenAI(
        model_name=config.model_name_internal,
        temperature=0.7,  # default sampling temperature
        openai_api_key=config.api_key,
        openai_api_base=config.api_base_url,
    )

# Create the FastAPI application
app = FastAPI(
    title="AI Prompt Optimizer API",
    version="1.0",
    description="一个用于优化 AI 提示词的后端 API，支持多轮问答和模型管理。",
)

# CORS configuration
# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive; tighten allow_origins before deploying to production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.get("/")
async def root():
    """Landing endpoint; returns a welcome message."""
    welcome = {"message": "欢迎来到 AI 提示词优化器 API!"}
    return welcome

# --------------------------------------------------------------------
# Model management API endpoints
# --------------------------------------------------------------------
@app.get("/api/v1/models", response_model=List[ModelConfigResponse], summary="获取所有模型配置")
async def list_models_api():
    """Return every stored model configuration, with API keys stripped."""
    results = []
    for cfg in load_model_configs():
        # Re-validate through the response model so api_key never leaks out.
        results.append(ModelConfigResponse.model_validate(cfg.model_dump()))
    return results

@app.post("/api/v1/models", response_model=ModelConfigResponse, status_code=201, summary="新增模型配置")
async def create_model_api(model_create: ModelConfigCreate):
    """Create a new model configuration.

    Rejects duplicate display names with HTTP 400; on success returns the
    stored record without its API key.
    """
    configs = load_model_configs()

    if any(c.display_name == model_create.display_name for c in configs):
        raise HTTPException(status_code=400, detail="Model display name already exists.")

    now = datetime.now(timezone.utc)
    # Construct the DB record directly; uuid4().hex is already a str, so the
    # original str() wrapper was redundant.
    new_model = ModelConfigInDB(
        **model_create.model_dump(),
        id=uuid.uuid4().hex,
        created_at=now,
        updated_at=now,
    )
    configs.append(new_model)
    save_model_configs(configs)

    return ModelConfigResponse.model_validate(new_model.model_dump())

@app.put("/api/v1/models/{model_id}", response_model=ModelConfigResponse, summary="更新模型配置")
async def update_model_api(model_update: ModelConfigUpdate, model_id: str = FastAPIPath(..., description="要更新的模型的ID")):
    """Partially update an existing model configuration (404 if unknown)."""
    configs = load_model_configs()

    # Locate the target record's position in the list.
    model_idx = next((i for i, cfg in enumerate(configs) if cfg.id == model_id), -1)
    if model_idx < 0:
        raise HTTPException(status_code=404, detail="Model not found")
    model_to_update = configs[model_idx]

    # Only fields the client actually sent are applied.
    update_data = model_update.model_dump(exclude_unset=True)

    # A changed display_name must not collide with any *other* model's name.
    new_name = update_data.get('display_name')
    if new_name is not None and any(c.display_name == new_name and c.id != model_id for c in configs):
        raise HTTPException(status_code=400, detail="Another model with this display name already exists.")

    updated_model = model_to_update.model_copy(update=update_data)
    updated_model.updated_at = datetime.now(timezone.utc)

    configs[model_idx] = updated_model
    save_model_configs(configs)

    return ModelConfigResponse.model_validate(updated_model.model_dump())

@app.delete("/api/v1/models/{model_id}", status_code=204, summary="删除模型配置")
async def delete_model_api(model_id: str = FastAPIPath(..., description="要删除的模型的ID")):
    """Delete a model configuration; 404 when the id is unknown."""
    configs = load_model_configs()
    remaining = [cfg for cfg in configs if cfg.id != model_id]

    # Nothing was filtered out -> the id did not exist.
    if len(remaining) == len(configs):
        raise HTTPException(status_code=404, detail="Model not found")

    save_model_configs(remaining)
    return None

@app.post("/api/v1/models/{model_id}/test", response_model=TestConnectionResponse, summary="测试模型连接")
async def test_model_connection_api(model_id: str = FastAPIPath(..., description="要测试的模型的ID")):
    """Send a trivial message through the configured model to verify connectivity."""
    config = get_model_config_by_id(model_id)
    if not config:
        raise HTTPException(status_code=404, detail="Model not found")
    if not config.is_enabled:
        return TestConnectionResponse(success=False, message="模型已被禁用，无法测试。")

    try:
        llm = get_llm_instance(config)
        # A minimal one-word probe keeps the test cheap.
        reply = await llm.ainvoke([HumanMessage(content="Hello")])
    except ValueError as ve:
        # Configuration problems such as a missing API key / base URL.
        return TestConnectionResponse(success=False, message=f"配置错误: {str(ve)}")
    except Exception as e:
        return TestConnectionResponse(success=False, message=f"连接测试失败: {type(e).__name__} - {str(e)}")

    if reply and isinstance(reply, AIMessage) and reply.content:
        return TestConnectionResponse(success=True, message=f"连接成功。模型响应: {reply.content[:100]}...")
    return TestConnectionResponse(success=False, message="连接成功，但模型未返回有效响应。")

# --------------------------------------------------------------------
# Optimizer API endpoints
# --------------------------------------------------------------------
@app.post("/api/v1/optimizer/initiate", response_model=Dict[str, Any])
async def initiate_optimization(request_data: OptimizerInitiateRequest):
    """Start an optimization conversation.

    Validates the prompt and the selected model, runs the analyzer chain on
    the original prompt, records both sides in a new session history, and
    returns the chain output plus the new conversation_id.
    """
    original_prompt = request_data.prompt
    selected_model_id = request_data.selected_model_id

    if not original_prompt:
        raise HTTPException(status_code=400, detail="缺少原始提示词")
    if not selected_model_id:
        raise HTTPException(status_code=400, detail="缺少选定的模型ID")

    model_config = get_model_config_by_id(selected_model_id)
    if not model_config:
        raise HTTPException(status_code=404, detail=f"选定模型ID '{selected_model_id}' 未找到")
    if not model_config.is_enabled:
        raise HTTPException(status_code=400, detail=f"选定模型 '{model_config.display_name}' 已被禁用")

    try:
        llm = get_llm_instance(model_config)
    except ValueError as e:
        # Configuration errors (missing key/URL) are the client's problem.
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"初始化LLM失败: {str(e)}")

    analyzer_chain = get_analyzer_questioner_chain(llm)

    # uuid4().hex is already a str; the original str() wrapper was redundant.
    conversation_id = uuid.uuid4().hex
    history = get_session_history(conversation_id)
    history.add_user_message(original_prompt)

    response_content = await analyzer_chain.ainvoke(
        {"input": original_prompt, "history": history.messages}
    )

    # Clarifying questions are a structured payload; serialize them to JSON
    # text so the history remains a flat list of plain messages.
    if response_content.get('type') == 'questions':
        history.add_ai_message(json.dumps(response_content['content'], ensure_ascii=False))
    else:
        history.add_ai_message(response_content.get('content', ''))

    return {**response_content, "conversation_id": conversation_id}

@app.post("/api/v1/optimizer/refine", response_model=Dict[str, Any])
async def refine_optimization(request_data: OptimizerRefineRequest):
    """Append the user's answer to an existing conversation.

    Does not call the LLM; it only records the answer so /finalize can use
    the full history later.
    """
    conversation_id = request_data.conversation_id
    answer = request_data.answer

    # BUGFIX: the original `not answer` check rejected an empty-string answer
    # with HTTP 400, contradicting the stated intent that "" is acceptable
    # and only None is invalid.
    if not conversation_id or answer is None:
        raise HTTPException(status_code=400, detail="缺少 conversation_id 或回答")

    if conversation_id not in session_history:
        raise HTTPException(status_code=404, detail="无效的 conversation_id，请先调用 initiate")
    history = get_session_history(conversation_id)
    history.add_user_message(answer)

    return {"type": "ready_to_optimize", "content": "您的回答已收到，现在可以生成优化后的提示词了。", "conversation_id": conversation_id}

@app.post("/api/v1/optimizer/finalize", response_model=Dict[str, Any])
async def finalize_optimization(request_data: OptimizerFinalizeRequest):
    """Produce the final optimized prompt from the accumulated conversation."""
    conversation_id = request_data.conversation_id
    selected_model_id = request_data.selected_model_id

    if not conversation_id:
        raise HTTPException(status_code=400, detail="缺少 conversation_id")
    if not selected_model_id:
        raise HTTPException(status_code=400, detail="缺少选定的模型ID")

    if conversation_id not in session_history:
        raise HTTPException(status_code=404, detail="无效的 conversation_id，请先调用 initiate")
    history = get_session_history(conversation_id)
    if not history.messages:
        raise HTTPException(status_code=400, detail="对话历史为空，无法生成优化提示词")

    model_config = get_model_config_by_id(selected_model_id)
    if model_config is None:
        raise HTTPException(status_code=404, detail=f"选定模型ID '{selected_model_id}' 未找到")
    if not model_config.is_enabled:
        raise HTTPException(status_code=400, detail=f"选定模型 '{model_config.display_name}' 已被禁用")

    try:
        llm = get_llm_instance(model_config)
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"初始化LLM失败: {str(exc)}")

    # Run the optimizer chain over the full message history and return its
    # output directly.
    optimized = await get_optimizer_chain(llm).ainvoke({"history": history.messages})

    # NOTE(review): the session history is intentionally kept so the client
    # can retry finalize; consider expiring entries to bound memory growth.
    return optimized

if __name__ == "__main__":
    import uvicorn
    # Ensure model_configs.json exists and is writable; load_model_configs
    # creates the file when it is missing.
    load_model_configs() 
    uvicorn.run(app, host="0.0.0.0", port=8000)
