import json
import logging
import os
import secrets
import time
from threading import Thread, Lock
from typing import Any, Dict, Literal

import GPUtil
import redis
import uvicorn
from fastapi import FastAPI, HTTPException, Header, Depends
from modelscope import AutoModelForCausalLM, AutoTokenizer
from pydantic import BaseModel, Field

# Redis connection. Parameters are read from the environment so credentials
# are not committed to source control; the fallbacks preserve the previous
# hard-coded deployment values for backward compatibility.
# SECURITY NOTE(review): rotate this password and remove the literal defaults
# once REDIS_* variables are configured in every environment.
redis_client = redis.Redis(
    host=os.getenv("REDIS_HOST", "117.72.102.95"),
    port=int(os.getenv("REDIS_PORT", "19736")),
    password=os.getenv("REDIS_PASSWORD", "Aresenyang@1217"),
    db=int(os.getenv("REDIS_DB", "1")),
    socket_timeout=10,
    health_check_interval=30
)

# Logging setup: INFO level, timestamped records, mirrored to both
# model_service.log and the console.
_log_handlers = [
    logging.FileHandler("model_service.log"),
    logging.StreamHandler(),
]
logging.basicConfig(
    format='%(asctime)s [%(levelname)s] %(message)s',
    level=logging.INFO,
    handlers=_log_handlers,
)

# OpenAI-compatible request schema for /v1/completions.
class OpenAIRequest(BaseModel):
    model: str = Field(..., description="模型名称（固定为deepseek-r1-1.5b）")
    prompt: str = Field(..., description="输入提示")
    temperature: float = Field(1.0, ge=0.0, le=2.0, description="采样温度")
    max_tokens: int = Field(16, ge=1, le=4096, description="最大生成令牌数")
    n: int = Field(1, ge=1, le=5, description="生成结果数量")
    user: str = Field(..., description="用户标识符")
    # Literal enforces the allowed values at validation time; the previous
    # `enum=` keyword was only JSON-schema metadata and rejected nothing,
    # so any string would have been accepted as a priority.
    priority: Literal["high", "low"] = Field("low", description="优先级")

# OpenAI-compatible response schema, also used as the queue-status reply.
class OpenAIResponse(BaseModel):
    id: str = "chatcmpl-12345"
    object: str = "text_completion"
    # default_factory is evaluated per instance. The previous
    # `created: int = int(time.time())` default was computed once at import,
    # stamping every response with the process start time.
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str = "deepseek-r1-1.5b"
    # Mutable defaults go through default_factory so instances never share state.
    choices: list[Dict[str, Any]] = Field(default_factory=list)
    usage: Dict[str, int] = Field(
        default_factory=lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
    )
    taskId: str = "userid-1234567890"

# GPU availability thresholds: a GPU is eligible only while both its compute
# load and memory utilisation are below these fractions (see get_available_gpu).
GPU_UTIL_THRESHOLD = 0.95
GPU_MEMORY_THRESHOLD = 0.95
# Per-priority concurrency caps, enforced against the Redis queue counters.
MAX_HIGH_CONCURRENT = 3
MAX_LOW_CONCURRENT = 2

# Local cache directory and ModelScope model id used by ModelLoader.
model_path = "/home/llm/deepseek-r1-1.5b"
model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"

# Singleton loader: caches one (model, tokenizer) pair per GPU so each device
# is initialised at most once.
class ModelLoader:
    _instance = None
    models = {}
    tokenizers = {}
    lock = Lock()

    def __new__(cls):
        # Reuse the cached instance when one already exists.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(self, gpu_id):
        """Return (model, tokenizer) for gpu_id, loading them on first use."""
        with self.lock:
            if gpu_id in self.models:
                return self.models[gpu_id], self.tokenizers[gpu_id]
            tok = AutoTokenizer.from_pretrained(
                model_name, cache_dir=model_path, local_files_only=True)
            mdl = AutoModelForCausalLM.from_pretrained(
                model_name, cache_dir=model_path, local_files_only=True
            ).to(f'cuda:{gpu_id}')
            self.tokenizers[gpu_id] = tok
            self.models[gpu_id] = mdl
            return mdl, tok

model_loader = ModelLoader()

# Redis-backed task queues, one list per priority, with matching depth counters.
class TaskQueue:
    def __init__(self):
        self.high_queue = "high_tasks"
        self.low_queue = "low_tasks"
        # Reset the depth counters at startup.
        redis_client.set("high_count", 0)
        redis_client.set("low_count", 0)

    def add_task(self, task, task_id):
        """Push a task onto its priority queue and bump the counter atomically."""
        # Select the queue/counter pair for this priority, then push + count
        # in one pipelined transaction.
        if task.priority == "high":
            queue_key, counter_key = self.high_queue, "high_count"
        else:
            queue_key, counter_key = self.low_queue, "low_count"
        payload = json.dumps({"task": task.dict(), "id": task_id})
        with redis_client.pipeline() as pipe:
            pipe.lpush(queue_key, payload)
            pipe.incr(counter_key)
            pipe.execute()

    def cleanup_low_priority(self):
        """Drop up to MAX_LOW_CONCURRENT queued low-priority tasks; return the count removed."""
        removed = 0
        while removed < MAX_LOW_CONCURRENT:
            popped = redis_client.rpop(self.low_queue)
            if not popped:
                break
            # Keep the counter in step with the queue.
            redis_client.decr("low_count")
            removed += 1
        return removed

task_queue = TaskQueue()

# Worker thread that drains the Redis queues and runs model inference.
class TaskProcessor(Thread):
    def __init__(self):
        super().__init__()
        # Loop flag; set to False externally to stop the worker.
        self.running = True

    def run(self):
        # Poll loop: the high-priority queue is always tried first; a single
        # low-priority task is taken only when no high-priority work is queued.
        # Sleeps 100ms between passes.
        while self.running:
            try:
                # Serve high-priority tasks first
                task_data = redis_client.rpop(task_queue.high_queue)
                if task_data:
                    task_info = json.loads(task_data)
                    task = OpenAIRequest.parse_obj(task_info["task"])
                    task_id = task_info["id"]
                    self.process_task(task, task_id)
                else:
                    # Fall back to a low-priority task
                    task_data = redis_client.rpop(task_queue.low_queue)
                    if task_data:
                        task_info = json.loads(task_data)
                        task = OpenAIRequest.parse_obj(task_info["task"])
                        task_id = task_info["id"]
                        self.process_task(task, task_id)
            except Exception as e:
                # NOTE(review): exceptions re-raised by process_task (including
                # HTTPException) land here and are only logged — the task is
                # dropped without any result ever being stored for the client.
                logging.error(f"Error in task processing: {str(e)}")
            time.sleep(0.1)

    def process_task(self, task, task_id):
        # Run one task end-to-end: admission check, GPU selection, inference,
        # then store the JSON result in Redis under task_result_<task_id>.
        priority = task.priority
        try:
            # Read the live concurrency counters from Redis
            current_high = int(redis_client.get("high_count") or 0)
            current_low = int(redis_client.get("low_count") or 0)
            # NOTE(review): raising HTTPException from a background thread never
            # reaches an HTTP client; run() just logs it (see the except below,
            # which also decrements the counter before re-raising).
            if (task.priority == "high" and current_high >= MAX_HIGH_CONCURRENT or
                    task.priority == "low" and current_low >= MAX_LOW_CONCURRENT):
                raise HTTPException(status_code=429, detail="Exceed concurrent limit")
            # Find a GPU with headroom; high-priority work may evict queued
            # low-priority tasks to free capacity before retrying.
            available_gpu = self.get_available_gpu()
            if not available_gpu:
                if priority == "high":
                    task_queue.cleanup_low_priority()
                    available_gpu = self.get_available_gpu()
            if not available_gpu:
                # No GPU available: decrement the counter, then re-enqueue
                # (add_task increments it again, so the net change is zero).
                redis_client.decr(f"{task.priority}_count")
                task_queue.add_task(task, task_id)
                return

            # Load (or reuse) the model on the chosen GPU and run inference
            model, tokenizer = model_loader.get_model(available_gpu.id)
            inputs = tokenizer(task.prompt, return_tensors="pt").to(f'cuda:{available_gpu.id}')
            # NOTE(review): max_length caps prompt + completion tokens combined;
            # max_new_tokens is probably what max_tokens was meant to map to —
            # confirm against the OpenAI semantics before changing.
            outputs = model.generate(
                inputs.input_ids,
                max_length=task.max_tokens,
                temperature=task.temperature,
                num_return_sequences=task.n
            )

            # Build an OpenAI-style response from the generated sequences
            response = OpenAIResponse()
            response.choices = [
                {
                    "text": tokenizer.decode(output, skip_special_tokens=True),
                    "index": i
                }
                for i, output in enumerate(outputs)
            ]
            # NOTE(review): generate() outputs typically include the prompt
            # tokens, so completion_tokens here likely counts prompt+completion
            # — verify against the model's generate behaviour.
            response.usage = {
                "prompt_tokens": len(inputs.input_ids[0]),
                "completion_tokens": len(outputs[0]),
                "total_tokens": len(inputs.input_ids[0]) + len(outputs[0])
            }

            # Persist the result for 1 hour so clients can poll /v1/tasks/<id>
            redis_client.setex(f"task_result_{task_id}", 3600, json.dumps(response.dict()))

            logging.info(f"Task for user {task.user} processed successfully")

            # Release this task's slot in the concurrency counter
            redis_client.decr(f"{task.priority}_count")
        except Exception as e:
            logging.error(f"Task processing failed: {str(e)}")
            # Release the slot on failure too, then re-raise (run() only logs it)
            redis_client.decr(f"{task.priority}_count")  # decrement on failure as well
            raise HTTPException(status_code=500, detail=str(e))

    def get_available_gpu(self):
        # Only GPUs in this allow-list are eligible (the rest are assumed
        # reserved, e.g. by vLLM). Returns the first GPU under both the load
        # and memory thresholds, or None when none qualifies.
        allowed_gpus = [0]

        gpus = GPUtil.getGPUs()
        for gpu in gpus:
            if (gpu.id in allowed_gpus and
                    gpu.memoryUtil < GPU_MEMORY_THRESHOLD and
                    gpu.load < GPU_UTIL_THRESHOLD):
                return gpu
        return None

# FastAPI application exposing the OpenAI-compatible endpoints below.
app = FastAPI(title="DeepSeek Compatible API", version="1.0.0")

# OpenAI-style authentication dependency: validates the `api-key` request header.
async def api_key_auth(api_key: str = Header(..., description="API密钥")):
    """Raise 401 unless the supplied API key matches the expected key.

    Uses secrets.compare_digest for a constant-time comparison, so key
    contents cannot be probed via response-timing differences.
    """
    # TODO(review): replace the hard-coded key with a lookup against a
    # secrets store / environment variable.
    if not secrets.compare_digest(api_key, "YOUR_API_KEY"):
        raise HTTPException(status_code=401, detail="Invalid API key")

# Completion endpoint: admission-check, enqueue, and return the task id.
@app.post("/v1/completions", response_model=OpenAIResponse, dependencies=[Depends(api_key_auth)])
async def create_completion(request: OpenAIRequest):
    """Queue a completion request and reply with a response carrying its taskId."""
    # Live queue-depth counters, read straight from Redis.
    high_count = int(redis_client.get("high_count") or 0)
    low_count = int(redis_client.get("low_count") or 0)

    # Refuse when the requested priority class is already at capacity.
    at_capacity = (
        (request.priority == "high" and high_count >= MAX_HIGH_CONCURRENT)
        or (request.priority == "low" and low_count >= MAX_LOW_CONCURRENT)
    )
    if at_capacity:
        raise HTTPException(
            status_code=429,
            detail="Exceed concurrent limit"
        )

    # Enqueue under a user-scoped, timestamped id and echo it to the caller.
    task_id = f"{request.user}-{int(time.time())}"
    task_queue.add_task(request, task_id)

    response = OpenAIResponse()
    response.taskId = task_id
    return response

# Poll endpoint: fetch the stored result of a previously queued task.
@app.get("/v1/tasks/{task_id}", response_model=OpenAIResponse, dependencies=[Depends(api_key_auth)])
async def get_task_result(task_id: str):
    """Return the cached completion for task_id, or 404 when absent/expired."""
    raw = redis_client.get(f"task_result_{task_id}")
    if raw is None:
        raise HTTPException(status_code=404, detail="Task result not found")
    return OpenAIResponse.parse_raw(raw)

# Start the background worker that consumes queued tasks.
# NOTE(review): the thread is non-daemon and `running` is never set to False,
# so the process will not exit cleanly on shutdown — confirm intended lifecycle.
task_processor = TaskProcessor()
task_processor.start()

# Background thread that logs load and memory usage for the allowed GPUs
# every 5 seconds.
class GPUMonitor(Thread):
    def __init__(self):
        super().__init__()
        # Cleared externally to stop the monitoring loop.
        self.running = True

    def run(self):
        while self.running:
            # Keep this allow-list in sync with TaskProcessor.get_available_gpu.
            allowed_gpus = [0]

            for gpu in GPUtil.getGPUs():
                if gpu.id not in allowed_gpus:
                    continue
                logging.info(
                    f"GPU {gpu.id}: "
                    f"Utilization {gpu.load:.2f}, "
                    f"Memory {gpu.memoryUsed}/{gpu.memoryTotal}MB"
                )
            time.sleep(5)

# Start the background GPU statistics logger.
gpu_monitor = GPUMonitor()
gpu_monitor.start()

# Run the API server when executed directly.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)



