from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
import uvicorn
import importlib
import os
import glob
import argparse
from dotenv import load_dotenv
from py_files.openai_proxy import router as openai_proxy_router
from py_files.get_dir_prompt import image_ocr_router
from py_files.request_url_llm import router as llm_request_router
from py_files.get_file_type_prompt import image_agent_router
from py_files.file_upload2url import file_folder_upload_router
from py_files.pdf2imageself import router as pdf2image_router
from py_files.image2pdfself import router as image2pdf_router
# Load environment variables from a local .env file, if one exists.
load_dotenv()

# Default configuration — each value can be overridden via the environment
# (HOST / PORT / SERVER_URL) or, later, via command-line flags in __main__.
DEFAULT_HOST = os.getenv("HOST", "0.0.0.0")
DEFAULT_PORT = int(os.getenv("PORT", "8007"))
# Base URL other modules use to build absolute links to served resources.
DEFAULT_SERVER_URL = os.getenv("SERVER_URL", f"http://localhost:{DEFAULT_PORT}")

# Create the FastAPI application.
app = FastAPI(
    title="LiteLLM API",
    description="OpenAI兼容的API接口，支持加载多种大模型进行对话",
    version="1.0.0",
)

# Wide-open CORS policy (any origin/method/header, credentials allowed).
# NOTE(review): acceptable for an internal tool; tighten allow_origins
# before exposing this service publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Ensure the upload directory exists. exist_ok=True is atomic with respect
# to concurrent creation, unlike the check-then-create pattern
# (`if not os.path.exists(...)`) which can raise FileExistsError in a race.
UPLOAD_FOLDER = 'uploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# Serve uploaded files as static assets under the /uploads path.
app.mount("/uploads", StaticFiles(directory=UPLOAD_FOLDER), name="uploads")

# Global server URL (consumed by other modules to build resource links).
# Rebound after command-line parsing in the __main__ block below.
SERVER_URL = DEFAULT_SERVER_URL

# Root route
@app.get("/")
async def root():
    """Root endpoint: directs callers to the interactive API docs."""
    payload = {"message": "see api in /docs"}
    return payload

# Register every feature router on the application, in the original order.
for _router in (
    openai_proxy_router,
    image_ocr_router,            # image OCR endpoints
    llm_request_router,          # LLM request endpoints
    image_agent_router,          # image-processing endpoints
    file_folder_upload_router,   # file/folder upload endpoints
    pdf2image_router,
    image2pdf_router,
):
    app.include_router(_router)

# Entry point: parse command-line options, optionally test model
# connectivity, then launch the uvicorn development server.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="LiteLLM API服务")
    parser.add_argument("--test-models", action="store_true", help="启动前测试所有模型连通性")
    parser.add_argument("--host", default=DEFAULT_HOST, help=f"绑定主机地址 (默认: {DEFAULT_HOST})")
    parser.add_argument("--port", type=int, default=DEFAULT_PORT, help=f"绑定端口 (默认: {DEFAULT_PORT})")
    parser.add_argument("--server-url", default=DEFAULT_SERVER_URL, 
                        help=f"服务器URL，用于生成资源访问地址 (默认: {DEFAULT_SERVER_URL})")
    args = parser.parse_args()
    
    # Update the global server URL for this process AND export the parsed
    # values into the environment: with reload=True, uvicorn re-imports the
    # module in a worker subprocess, so rebinding the global here alone is
    # lost — the worker re-reads HOST/PORT/SERVER_URL from os.environ at
    # import time (see the os.getenv defaults at the top of the file).
    SERVER_URL = args.server_url
    os.environ["SERVER_URL"] = args.server_url
    os.environ["HOST"] = args.host
    os.environ["PORT"] = str(args.port)
    
    # Optional pre-flight model connectivity check (--test-models).
    if args.test_models:
        print("测试模型连通性...")
        try:
            from py_files.config import test_all_models
            available, unavailable, _ = test_all_models(verbose=True)
            print(f"可用模型: {', '.join(available)}")
            if unavailable:
                print(f"不可用模型: {', '.join(unavailable)}")
        except Exception as e:
            # Best-effort check: a failure here should not block startup.
            print(f"测试模型时发生错误: {e}")
    else:
        print("已跳过模型连通性测试。如需测试，请使用 --test-models 参数")
        
    # Print server configuration before launching.
    print(f"服务器将在 {args.host}:{args.port} 上启动")
    print(f"资源访问URL前缀: {SERVER_URL}")
    
    # reload=True enables auto-reload (development mode); the import-string
    # form "main:app" is required by uvicorn when reload is on.
    uvicorn.run("main:app", host=args.host, port=args.port, reload=True) 
