# Standard library
import asyncio
import json
import logging
import secrets
import time
import uuid
from threading import Thread, Lock
from typing import Dict, Any, AsyncGenerator, Literal

# Third-party
import GPUtil
import redis
import uvicorn
from fastapi import FastAPI, HTTPException, Header, Depends, Query
from fastapi.responses import StreamingResponse
from modelscope import AutoModelForCausalLM, AutoTokenizer
from pydantic import BaseModel, Field

# Shared Redis connection used for queues, counters, pub/sub and results.
# SECURITY NOTE(review): host/port/password are committed in source — move
# these credentials to environment variables or a secrets manager.
redis_client = redis.Redis(
    host="117.72.102.95",
    port=19736,
    password="Aresenyang@1217",
    db=1,
    socket_timeout=30,  # generous timeout for slow network round-trips
    health_check_interval=30  # ping idle connections every 30s to detect drops
)

# Logging configuration: mirror every record to a file and to stdout.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[
        logging.FileHandler("model_service.log"),
        logging.StreamHandler()
    ]
)

# OpenAI-compatible request schema.
class OpenAIRequest(BaseModel):
    """Request body for POST /v1/completions (OpenAI text-completion style)."""
    model: str = Field(..., description="模型名称（固定为deepseek-r1-1.5b）")
    prompt: str = Field(..., description="输入提示")
    temperature: float = Field(1.0, ge=0.0, le=2.0, description="采样温度")
    max_tokens: int = Field(16, ge=1, le=4096, description="最大生成令牌数")
    n: int = Field(1, ge=1, le=5, description="生成结果数量")
    user: str = Field(..., description="用户标识符")
    # FIX: `Field(..., enum=[...])` performs no validation in pydantic — it
    # only annotates the JSON schema. Literal actually enforces the values.
    priority: Literal["high", "low"] = Field("low", description="优先级")
    stream: bool = Field(False, description="是否流式返回结果")

class OpenAIResponseChunk(BaseModel):
    """One streamed chunk of an OpenAI-style text-completion response.

    FIX: ``created`` was ``int(time.time())`` evaluated once at class
    definition, so every chunk carried the service start time; the mutable
    ``choices``/``usage`` defaults are likewise moved to ``default_factory``
    so each instance gets a fresh, per-request value.
    """
    id: str = "chatcmpl-12345"
    object: str = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str = "deepseek-r1-1.5b"
    choices: list[Dict[str, Any]] = Field(default_factory=list)
    usage: Dict[str, int] = Field(
        default_factory=lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
    )

class OpenAIResponse(BaseModel):
    """Final (non-streaming) OpenAI-style text-completion response.

    FIX: ``created`` was ``int(time.time())`` evaluated once at class
    definition, so every response carried the service start time; the mutable
    ``choices``/``usage`` defaults are likewise moved to ``default_factory``
    so each instance gets a fresh, per-request value.
    """
    id: str = "chatcmpl-12345"
    object: str = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str = "deepseek-r1-1.5b"
    choices: list[Dict[str, Any]] = Field(default_factory=list)
    usage: Dict[str, int] = Field(
        default_factory=lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
    )

# GPU resource thresholds and queue concurrency limits.
GPU_UTIL_THRESHOLD = 0.95    # skip a GPU whose compute load is at/above 95%
GPU_MEMORY_THRESHOLD = 0.95  # skip a GPU whose memory usage is at/above 95%
MAX_HIGH_CONCURRENT = 3      # max high-priority tasks queued or in flight
MAX_LOW_CONCURRENT = 2       # max low-priority tasks queued or in flight

model_path = "/home/llm/deepseek-r1-1.5b"  # local cache dir with model files
model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"  # ModelScope model id

# Lazy, lock-protected model cache (singleton).
class ModelLoader:
    """Singleton that loads one model/tokenizer pair per GPU on demand."""

    _instance = None
    models = {}       # gpu_id -> loaded model (moved to that CUDA device)
    tokenizers = {}   # gpu_id -> tokenizer
    lock = Lock()     # serialises first-time loading per GPU

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(self, gpu_id):
        """Return ``(model, tokenizer)`` for *gpu_id*, loading on first use."""
        with self.lock:
            cached = self.models.get(gpu_id)
            if cached is None:
                tokenizer = AutoTokenizer.from_pretrained(
                    model_name, cache_dir=model_path, local_files_only=True
                )
                cached = AutoModelForCausalLM.from_pretrained(
                    model_name, cache_dir=model_path, local_files_only=True
                ).to(f'cuda:{gpu_id}')
                self.tokenizers[gpu_id] = tokenizer
                self.models[gpu_id] = cached
            return cached, self.tokenizers[gpu_id]

model_loader = ModelLoader()

# Task queue management backed by Redis lists and counters.
class TaskQueue:
    """Priority task queue.

    Tasks live in two Redis lists ("high_tasks"/"low_tasks"); the companion
    counters "high_count"/"low_count" track tasks queued or in flight — they
    are incremented on enqueue and decremented when processing ends.
    """

    def __init__(self):
        self.high_queue = "high_tasks"
        self.low_queue = "low_tasks"
        # Reset counters on startup. NOTE(review): assumes a single service
        # instance — a second instance starting up would zero live counters.
        redis_client.set("high_count", 0)
        redis_client.set("low_count", 0)

    def add_task(self, task, task_id):
        """Atomically enqueue *task* and bump its priority counter.

        FIX: the two priority branches were copy-pasted; select the queue and
        counter by priority and share one pipeline (atomic push + incr).
        """
        if task.priority == "high":
            queue, counter = self.high_queue, "high_count"
        else:
            queue, counter = self.low_queue, "low_count"
        payload = json.dumps({"task": task.dict(), "id": task_id})
        with redis_client.pipeline() as pipe:
            pipe.lpush(queue, payload)
            pipe.incr(counter)
            pipe.execute()

    def cleanup_low_priority(self):
        """Pop up to MAX_LOW_CONCURRENT queued low-priority tasks.

        Returns the number removed. Called when a high-priority task cannot
        find a free GPU. (FIX: the old comment claimed this used ``ltrim``;
        it actually pops entries one by one with ``rpop``.)
        NOTE(review): evicted tasks are discarded silently — their clients
        never receive a result.
        """
        removed = 0
        while removed < MAX_LOW_CONCURRENT:
            if redis_client.rpop(self.low_queue) is None:
                break
            redis_client.decr("low_count")
            removed += 1
        return removed

task_queue = TaskQueue()

# Worker thread: pops queued tasks from Redis and runs model inference.
class TaskProcessor(Thread):
    """Background worker draining the Redis task queues, high priority first.

    Streaming results are published on the "task_stream_<task_id>" pub/sub
    channel; non-streaming results are stored under "task_result_<task_id>".
    """

    def __init__(self):
        super().__init__()
        # FIX: non-daemon worker threads kept the process alive forever on
        # shutdown; daemon threads let the interpreter exit.
        self.daemon = True
        self.running = True  # clear to stop the run() loop

    def run(self):
        while self.running:
            try:
                # Always serve high-priority tasks before low-priority ones.
                task_data = redis_client.rpop(task_queue.high_queue)
                if task_data is None:
                    task_data = redis_client.rpop(task_queue.low_queue)
                if task_data:
                    task_info = json.loads(task_data)
                    task = OpenAIRequest.parse_obj(task_info["task"])
                    self.process_task(task, task_info["id"])
            except Exception as e:
                logging.error(f"任务处理出错: {str(e)}")
            time.sleep(0.01)  # brief idle so an empty queue doesn't hammer Redis

    def process_task(self, task, task_id):
        """Run inference for one dequeued task and deliver its result.

        Decrements the task's Redis priority counter when done (success or
        failure); re-queues the task when no GPU is currently available.
        """
        priority = task.priority
        try:
            # Re-check concurrency limits against the live Redis counters.
            current_high = int(redis_client.get("high_count") or 0)
            current_low = int(redis_client.get("low_count") or 0)
            if (priority == "high" and current_high >= MAX_HIGH_CONCURRENT or
                    priority == "low" and current_low >= MAX_LOW_CONCURRENT):
                # FIX: the original raised HTTPException(429) here, but this
                # runs in a worker thread with no HTTP context — the exception
                # was only ever logged. Drop the task explicitly and release
                # its counter slot (same net effect, without the bogus raise).
                logging.error(f"Task {task_id} dropped: concurrency limit exceeded")
                redis_client.decr(f"{priority}_count")
                return

            # Find a free GPU; high-priority work may evict queued
            # low-priority tasks to make room.
            available_gpu = self.get_available_gpu()
            if available_gpu is None and priority == "high":
                task_queue.cleanup_low_priority()
                available_gpu = self.get_available_gpu()
            if available_gpu is None:
                # Re-queue: decr followed by add_task (which incr's) leaves
                # the counter unchanged overall.
                redis_client.decr(f"{priority}_count")
                task_queue.add_task(task, task_id)
                return

            # Load (or fetch the cached) model and tokenize the prompt.
            model, tokenizer = model_loader.get_model(available_gpu.id)
            inputs = tokenizer(task.prompt, return_tensors="pt").to(f'cuda:{available_gpu.id}')
            total_prompt_tokens = len(inputs.input_ids[0])

            response = OpenAIResponse()
            for i in range(task.n):
                generation_kwargs = {
                    "input_ids": inputs.input_ids,
                    "max_length": task.max_tokens + total_prompt_tokens,
                    "temperature": task.temperature,
                    "num_return_sequences": 1,
                    "do_sample": True,
                    "top_k": 50,
                    "top_p": 0.95,
                    "return_dict_in_generate": True,
                    "output_scores": True
                }

                outputs = model.generate(**generation_kwargs)

                for sequence in outputs.sequences:
                    # Decode only the newly generated ids after the prompt.
                    new_text = tokenizer.decode(sequence[total_prompt_tokens:], skip_special_tokens=True)
                    # FIX: completion tokens are the generated ids beyond the
                    # prompt. The original re-encoded the accumulated text and
                    # then subtracted the prompt length, producing wrong
                    # (often negative) counts.
                    completion_tokens = len(sequence) - total_prompt_tokens

                    chunk_response = {
                        "id": response.id,
                        "object": response.object,
                        "created": response.created,
                        "model": response.model,
                        "choices": [
                            {
                                "text": new_text,
                                "index": i
                            }
                        ],
                        "usage": {
                            "prompt_tokens": total_prompt_tokens,
                            "completion_tokens": completion_tokens,
                            "total_tokens": total_prompt_tokens + completion_tokens
                        }
                    }

                    if task.stream:
                        # Publish each chunk as JSON on the task's channel.
                        logging.info(f"Publishing to Redis: {json.dumps(chunk_response)}")
                        redis_client.publish(f"task_stream_{task_id}", json.dumps(chunk_response))
                    else:
                        # Aggregate chunks into the final response object.
                        response.choices.append(chunk_response["choices"][0])
                        response.usage["prompt_tokens"] = chunk_response["usage"]["prompt_tokens"]
                        response.usage["completion_tokens"] += chunk_response["usage"]["completion_tokens"]
                        response.usage["total_tokens"] += chunk_response["usage"]["total_tokens"]

            if not task.stream:
                # Persist the final response for one hour for GET /v1/tasks.
                redis_client.setex(f"task_result_{task_id}", 3600, json.dumps(response.dict()))

            logging.info(f"用户 {task.user} 的任务处理成功")
            redis_client.decr(f"{priority}_count")
        except Exception as e:
            # FIX: the original re-raised HTTPException(500) from this worker
            # thread, which no HTTP client could ever observe; log with the
            # traceback and release the counter instead.
            logging.exception(f"任务处理失败: {str(e)}")
            redis_client.decr(f"{priority}_count")

    def get_available_gpu(self):
        """Return the first allowed GPU below both the load and memory
        thresholds, or None when every candidate is busy."""
        # GPUs this service may use (the rest are assumed reserved, e.g. by
        # a VLLM deployment).
        allowed_gpus = [0]
        for gpu in GPUtil.getGPUs():
            if (gpu.id in allowed_gpus and
                    gpu.memoryUtil < GPU_MEMORY_THRESHOLD and
                    gpu.load < GPU_UTIL_THRESHOLD):
                return gpu
        return None

# FastAPI application.
app = FastAPI(title="DeepSeek 兼容 API", version="1.0.0")

# OpenAI-compatible authentication dependency (reads the `api-key` header).
async def api_key_auth(api_key: str = Header(..., description="API 密钥")):
    """Reject requests whose API key does not match.

    FIX: use secrets.compare_digest so the comparison runs in constant time
    (plain `!=` leaks timing information about the key). NOTE(review): the
    key is still hard-coded — replace with real credential storage.
    """
    if not secrets.compare_digest(api_key, "YOUR_API_KEY"):
        raise HTTPException(status_code=401, detail="无效的 API 密钥")

# API endpoint: validate limits and queue a completion request.
@app.post("/v1/completions", dependencies=[Depends(api_key_auth)])
async def create_completion(request: OpenAIRequest):
    """Enqueue a completion task and return its id.

    Results are later fetched via GET /v1/tasks (non-streaming) or
    GET /v1/tasks/stream (streaming).
    """
    # Read the live counters from Redis. NOTE(review): this check-then-enqueue
    # is not atomic — concurrent requests can race past the limit; the worker
    # re-checks before running.
    high_count = int(redis_client.get("high_count") or 0)
    low_count = int(redis_client.get("low_count") or 0)

    if (request.priority == "high" and high_count >= MAX_HIGH_CONCURRENT or
            request.priority == "low" and low_count >= MAX_LOW_CONCURRENT):
        raise HTTPException(
            status_code=429,
            detail="超过并发限制"
        )

    # FIX: ids built from int(time.time()) collide when one user submits
    # several tasks within the same second, silently overwriting each other's
    # stream channel and stored result; uuid4 is collision-free.
    task_id = f"task_{request.user}_{uuid.uuid4().hex}"
    task_queue.add_task(request, task_id)
    return {"状态": "任务已排队", "task_id": task_id}

@app.get("/v1/tasks/stream", dependencies=[Depends(api_key_auth)], response_class=StreamingResponse)
async def stream_task_result(id: str = Query(..., description="任务ID")):
    """Relay task chunks from Redis pub/sub to the client as SSE events.

    NOTE(review): the worker never publishes an end-of-stream marker, so the
    stream only terminates when the client disconnects.
    """
    async def event_generator():
        pubsub = redis_client.pubsub()
        try:
            pubsub.subscribe(f"task_stream_{id}")
            logging.info(f"订阅频道: task_stream_{id}")
            while True:
                # FIX: get_message(timeout=1) is a *blocking* synchronous call
                # and froze the event loop for up to a second per poll inside
                # this async generator; poll non-blockingly and yield control
                # with asyncio.sleep instead.
                message = pubsub.get_message(timeout=0)
                if message is not None and message['type'] == 'message':
                    data = message['data']
                    if isinstance(data, bytes):
                        data = data.decode('utf-8')
                    logging.info(f"从Redis接收: {data}")
                    yield f"data: {data}\n\n"  # SSE frame format
                else:
                    await asyncio.sleep(0.1)  # idle politely between polls
        finally:
            # FIX: close the pub/sub connection when the client disconnects;
            # previously every stream leaked one Redis connection.
            pubsub.close()

    return StreamingResponse(event_generator(), media_type="text/event-stream")

@app.get("/v1/tasks", response_model=OpenAIResponse, dependencies=[Depends(api_key_auth)])
async def get_task_result(id: str = Query(..., description="任务ID")):
    """Fetch the stored (non-streaming) result of a finished task, or 404."""
    stored = redis_client.get(f"task_result_{id}")
    if stored is not None:
        return OpenAIResponse.parse_raw(stored)
    raise HTTPException(status_code=404, detail="任务结果未找到")

# Start the worker pool: each TaskProcessor thread drains the Redis queues.
num_processors = 4  # tune to the available GPU/CPU capacity
for _ in range(num_processors):
    task_processor = TaskProcessor()
    task_processor.start()

# Background thread that periodically logs utilisation of the allowed GPUs.
class GPUMonitor(Thread):
    """Logs load and memory usage of the allowed GPUs every 5 seconds."""

    def __init__(self):
        super().__init__()
        # FIX: as a non-daemon thread the monitor kept the interpreter alive
        # forever after shutdown; daemon=True lets the process exit.
        self.daemon = True
        self.running = True  # clear to stop the monitoring loop

    def run(self):
        # Loop-invariant: keep in sync with TaskProcessor.get_available_gpu.
        allowed_gpus = [0]
        while self.running:
            for gpu in GPUtil.getGPUs():
                if gpu.id in allowed_gpus:
                    logging.info(
                        f"GPU {gpu.id}: "
                        f"利用率 {gpu.load:.2f}, "
                        f"内存使用率 {gpu.memoryUtil * 100:.2f}% ({gpu.memoryUsed}/{gpu.memoryTotal}MB)"
                    )
            time.sleep(5)

# Launch the GPU monitor and, when run directly, the API server.
gpu_monitor = GPUMonitor()
gpu_monitor.start()

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)



