import os
import ray
from ray import serve
import requests
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.responses import JSONResponse
import uvicorn
import time

from config import get_config
from logger import setup_logger
from model import sam_model

# Load project configuration and a module-scoped logger once at import time.
config = get_config()
logger = setup_logger('ray_serve')

# Create the FastAPI application; serve.ingress below mounts it on the deployment.
app = FastAPI(
    title="SAM Embedding Service (Ray Serve)",
    description="基于Ray Serve的Segment Anything Model (SAM) 图像嵌入服务，支持GPU加速和自动扩展",
    version="1.0.0"
)

# Define the Ray Serve deployment.
@serve.deployment(
    name="sam-embedding-service",
    num_replicas=config.SERVE_NUM_REPLICAS,
    ray_actor_options={
        # Reserve GPU capacity per replica only when the configured device is CUDA.
        "num_gpus": config.SERVE_GPUS_PER_REPLICA if config.DEVICE.startswith('cuda') else 0,
        "num_cpus": 1
    },
    health_check_period_s=30,
    health_check_timeout_s=10
)
@serve.ingress(app)
class SAMEmbeddingDeployment:
    """SAM embedding service deployment.

    Each Ray Serve replica loads the SAM model on construction and exposes
    the FastAPI routes defined below (info, health, embedding generation,
    cache management, stats and cluster introspection).
    """
    def __init__(self):
        """Initialize the replica: record its resource shape and load the SAM model."""
        # Mirror of the per-replica resources requested in ray_actor_options above,
        # exposed via the "/" and health endpoints.
        self.resource_info = {
            "num_gpus": config.SERVE_GPUS_PER_REPLICA if config.DEVICE.startswith('cuda') else 0,
            "num_cpus": 1
        }
        
        logger.info(f"初始化SAM嵌入服务部署，资源配置: {self.resource_info}")
        
        # Load the SAM model, timing how long it takes.
        start_time = time.time()
        success = sam_model.load_model()
        load_time = time.time() - start_time
        
        if success:
            logger.info(f"SAM模型加载成功，耗时: {load_time:.2f}秒")
        else:
            logger.error(f"SAM模型加载失败，耗时: {load_time:.2f}秒")
            # Model failed to load: the replica still starts, but requests will
            # be rejected by the /health check and model-dependent endpoints.
    
    @app.get("/")
    def root(self):
        """Root path: return basic service/deployment metadata."""
        return {
            "service": "SAM Embedding Service (Ray Serve)",
            "version": "1.0.0",
            "status": "running",
            "device": config.DEVICE,
            "model_type": config.MODEL_TYPE,
            "ray_serve_replicas": config.SERVE_NUM_REPLICAS,
            "resources": self.resource_info
        }
    
    @app.get("/health")
    def health_check(self):
        """Health check endpoint: 200 with cluster info when the model is
        loaded, 503 otherwise (or when Ray introspection fails)."""
        try:
            # Report unhealthy if the model never loaded in __init__.
            if not sam_model.model_loaded:
                return JSONResponse(
                    status_code=503,
                    content={"status": "error", "message": "模型加载失败"}
                )
            
            # Snapshot of Ray cluster capacity for observability.
            ray_cluster_info = {
                "available_nodes": len(ray.nodes()),
                "total_gpus": int(ray.cluster_resources().get("GPU", 0)),
                "available_gpus": int(ray.available_resources().get("GPU", 0))
            }
            
            return {
                "status": "ok", 
                "device": config.DEVICE, 
                "model_type": config.MODEL_TYPE,
                "ray_cluster": ray_cluster_info
            }
        except Exception as e:
            logger.error(f"健康检查失败: {e}")
            return JSONResponse(
                status_code=503,
                content={"status": "error", "message": str(e)}
            )
    
    @app.post("/embedding")
    async def generate_embedding(self, file: UploadFile = File(...)):
        """Accept an uploaded image file and return its SAM embedding."""
        from api import generate_embedding
        # Delegates to the shared implementation in api.py.
        return await generate_embedding(file)
    
    @app.post("/embedding/base64")
    def generate_embedding_base64(self, request):
        """Accept base64-encoded image data and return its SAM embedding.

        NOTE(review): `request` has no type annotation — FastAPI will likely
        treat it as a query parameter rather than a parsed JSON body; verify
        against what api.generate_embedding_base64 expects.
        """
        from api import generate_embedding_base64
        # Delegates to the shared implementation in api.py.
        return generate_embedding_base64(request)
    
    @app.delete("/cache")
    def clear_cache(self):
        """Clear the embedding cache."""
        from api import clear_cache
        # Delegates to the shared implementation in api.py.
        return clear_cache()
    
    @app.get("/stats")
    def get_stats(self):
        """Return service statistics, augmented with Ray Serve deployment info."""
        from api import get_stats
        # Reuse the base stats from api.py ...
        stats = get_stats()
        # ... then add Ray Serve-specific fields.
        stats.update({
            "ray_serve_replicas": config.SERVE_NUM_REPLICAS,
            "ray_serve_gpus_per_replica": config.SERVE_GPUS_PER_REPLICA
        })
        return stats
    
    @app.get("/ray/cluster")
    def get_ray_cluster_info(self):
        """Return raw Ray cluster information; 500 on introspection failure.

        NOTE(review): `ray.state.jobs()` / `ray.state.actors()` are deprecated
        or removed in newer Ray releases, and `ray.nodes()` payloads may not be
        JSON-serializable as-is — confirm against the pinned Ray version.
        """
        try:
            cluster_info = {
                "nodes": ray.nodes(),
                "cluster_resources": dict(ray.cluster_resources()),
                "available_resources": dict(ray.available_resources()),
                "jobs": ray.state.jobs(),
                "actors": ray.state.actors()
            }
            return cluster_info
        except Exception as e:
            logger.error(f"获取Ray集群信息失败: {e}")
            raise HTTPException(status_code=500, detail=str(e))

# Start the Ray Serve service.
def _is_valid_gpu_index(value: str) -> bool:
    """Return True if *value* is a usable CUDA_VISIBLE_DEVICES spec.

    Accepts a single GPU index ("0") or a comma-separated list ("0,1").
    The previous check (`value.isdigit()`) silently rejected multi-GPU
    specs, so CUDA_VISIBLE_DEVICES was never set for them.
    """
    parts = [part.strip() for part in value.split(",")]
    return bool(parts) and all(part.isdigit() for part in parts)

def start_ray_serve():
    """Initialize Ray, deploy SAMEmbeddingDeployment via Ray Serve, and block.

    Raises:
        Exception: re-raised after logging if initialization or deployment
            fails; the local Ray instance is shut down first so it is not
            leaked.
    """
    try:
        logger.info(f"初始化Ray，目标设备: {config.DEVICE}")
        
        # ignore_reinit_error lets this coexist with an already-running Ray.
        ray_init_args = {
            "ignore_reinit_error": True
        }
        
        # Pin the visible GPUs BEFORE ray.init() so Ray only schedules on them.
        if config.DEVICE.startswith('cuda'):
            # Accept "0" as well as comma-separated lists such as "0,1".
            if config.GPU_INDEX and _is_valid_gpu_index(config.GPU_INDEX):
                os.environ["CUDA_VISIBLE_DEVICES"] = config.GPU_INDEX
                logger.info(f"设置CUDA_VISIBLE_DEVICES: {config.GPU_INDEX}")
        
        # Initialize Ray.
        ray.init(**ray_init_args)
        logger.info("Ray初始化成功")
        
        # Log the cluster capacity for observability.
        cluster_resources = ray.cluster_resources()
        logger.info(f"Ray集群资源: {cluster_resources}")
        
        # Deploy the application.
        # NOTE(review): host/port kwargs on serve.run are version-dependent in
        # Ray; newer releases configure HTTP via serve.start(http_options=...).
        logger.info(f"启动Ray Serve服务，监听地址: {config.HOST}:{config.PORT}")
        serve.run(
            SAMEmbeddingDeployment.bind(),
            host=config.HOST,
            port=config.PORT,
            name="sam-embedding-service"
        )
        
        logger.info("Ray Serve服务启动成功")
        
        # Keep the driver process alive; Serve itself runs in background actors.
        try:
            while True:
                time.sleep(60)
        except KeyboardInterrupt:
            logger.info("接收到中断信号，正在停止服务...")
            serve.shutdown()
            ray.shutdown()
            logger.info("服务已停止")
    except Exception as e:
        logger.error(f"启动Ray Serve服务失败: {e}")
        # Tear down the local Ray instance before re-raising.
        if ray.is_initialized():
            ray.shutdown()
        raise

# Entry point: start the Ray Serve service when this file is run directly.
if __name__ == "__main__":
    start_ray_serve()