from fastapi import FastAPI, Request
from transformers import AutoTokenizer, AutoModelForCausalLM, Qwen2_5_VLForConditionalGeneration, AutoProcessor
import uvicorn
import json
import datetime
import torch
from pydantic import BaseModel
from qwen_vl_utils import process_vision_info

import os
import uuid
from fastapi import File, UploadFile
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
import shutil
from fastapi.middleware.cors import CORSMiddleware

# FastAPI application with permissive CORS so browser frontends on any
# origin can reach the upload/inference endpoints.
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow every origin
    # NOTE(review): browsers reject wildcard origins combined with
    # credentials; Starlette echoes the Origin header to work around it —
    # confirm this is the intended security posture.
    allow_credentials=True,
    allow_methods=["*"],   # allow every HTTP method
    allow_headers=["*"]    # allow every request header
)
# Working directory at startup; uploaded files are stored under <rootpath>/data.
rootpath = os.getcwd()

# Serve saved uploads back at /data/<filename>.
app.mount("/data", StaticFiles(directory="data"), name="data")

# File-upload endpoint: stores each uploaded file under ./data with a
# UUID-based name and returns file:// URLs pointing at the saved copies.
@app.post("/upload")
async def upload_file(files: list[UploadFile] = File(...)):
    """Save uploaded files into the local ``data`` directory.

    Each file is renamed to a random UUID (original extension preserved),
    so a hostile client-supplied filename cannot collide with or escape
    the data directory.

    Returns:
        JSONResponse with code 2000 and the list of file:// URLs on
        success, or code 4001 with HTTP 400 on failure.
    """
    try:
        # Bug fix: the data directory may not exist on a fresh deployment;
        # create it before writing instead of failing on open().
        data_dir = os.path.join(rootpath, "data")
        os.makedirs(data_dir, exist_ok=True)

        # file:// URLs of the stored files, in upload order.
        resp = []

        for file in files:
            # Keep only the extension from the client-supplied name.
            suffix = os.path.splitext(file.filename)[1]
            file_id = str(uuid.uuid4())

            # Single source of truth for the on-disk name; the URL mirrors it.
            filepath = os.path.join(data_dir, f"{file_id}{suffix}")
            file_url = f"file://{rootpath}/data/{file_id}{suffix}"

            # Stream the upload to disk in chunks, then release the
            # spooled temporary file backing the UploadFile (leak fix).
            try:
                with open(filepath, "wb") as buffer:
                    shutil.copyfileobj(file.file, buffer)
            finally:
                file.file.close()

            resp.append(file_url)

        return JSONResponse(content={
            "code": 2000,
            "data": "文件上传成功",
            "path": resp
        })

    except Exception as e:
        # App-level error code 4001; surface the reason to the caller.
        return JSONResponse(content={
            "code": 4001,
            "data": f"文件上传失败: {str(e)}"
        }, status_code=400)

# Device configuration for inference.
DEVICE = "cuda"   # accelerator backend
DEVICE_ID = "0"   # CUDA device index; empty string means "no index"

# Fully-qualified device string, e.g. "cuda:0". When DEVICE_ID is empty
# the bare backend name is used instead.
CUDA_DEVICE = DEVICE if not DEVICE_ID else f"{DEVICE}:{DEVICE_ID}"

# Reclaim cached GPU memory so long-running inference does not keep
# allocations alive between requests.
def torch_gc():
    """Release unused CUDA cache on CUDA_DEVICE; a no-op without CUDA."""
    if not torch.cuda.is_available():
        return
    with torch.cuda.device(CUDA_DEVICE):
        torch.cuda.empty_cache()   # return cached blocks to the driver
        torch.cuda.ipc_collect()   # reap CUDA IPC memory from dead processes


class DetectAbnormalAction(BaseModel):
    """Request body shared by the /question/video and /question/image endpoints."""

    # Natural-language question to ask the model about the media.
    question: str
    # Path/URL of the image or video — e.g. a file:// URL returned by /upload.
    path: str

# Video Q&A endpoint: asks the vision-language model a question about a video.
@app.post("/question/video")
async def detectionVideo(request: DetectAbnormalAction):
    """Run the VLM over the video referenced by ``request.path``.

    Args:
        request: question text plus a video path/URL (e.g. a file:// URL
            returned by /upload).

    Returns:
        dict with code 2000 and the decoded answer list in "data".
    """
    global model, tokenizer  # loaded once in the __main__ block

    # Chat-style prompt pairing the video with the user's question.
    messages = [{
        "role": "user",
        "content": [
            {
                "type": "video",
                "video": request.path,
            },
            {
                "type": "text",
                "text": request.question,
            }
        ]
    }]

    # NOTE: "tokenizer" is actually an AutoProcessor, so it provides both
    # the chat template and the multimodal tensor preparation below.
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

    # Decode the video into frames plus sampling kwargs (fps, etc.).
    image_inputs, video_inputs, video_kwargs = process_vision_info(
        messages, return_video_kwargs=True
    )
    inputs = tokenizer(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
        **video_kwargs,
    )
    # Consistency fix: use the configured CUDA_DEVICE instead of a
    # hard-coded "cuda" string, matching torch_gc() and the device config.
    inputs = inputs.to(CUDA_DEVICE)

    # Generate, then strip the prompt tokens so only the answer is decoded.
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = tokenizer.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    resp = {
        "code": 2000,
        "data": output_text,
    }
    torch_gc()  # release cached GPU memory between requests
    return resp

# Image Q&A endpoint: asks the vision-language model a question about an image.
@app.post("/question/image")
async def detectionImage(request: DetectAbnormalAction):
    """Run the VLM over the image referenced by ``request.path``.

    Args:
        request: question text plus an image path/URL (e.g. a file:// URL
            returned by /upload).

    Returns:
        dict with code 2000 and the decoded answer list in "data".
    """
    global model, tokenizer  # loaded once in the __main__ block

    # Chat-style prompt pairing the image with the user's question.
    messages = [{
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": request.path,
            },
            {
                "type": "text",
                "text": request.question,
            }
        ]
    }]

    # NOTE: "tokenizer" is actually an AutoProcessor, so it provides both
    # the chat template and the multimodal tensor preparation below.
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

    # Load the image referenced in the messages.
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = tokenizer(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    # Consistency fix: use the configured CUDA_DEVICE instead of a
    # hard-coded "cuda" string, matching torch_gc() and the device config.
    inputs = inputs.to(CUDA_DEVICE)

    # Generate, then strip the prompt tokens so only the answer is decoded.
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = tokenizer.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    resp = {
        "code": 2000,
        "data": output_text,
    }
    torch_gc()  # release cached GPU memory between requests
    return resp

# Script entry point: load the model, then serve the API.
if __name__ == '__main__':
    # Path to the fine-tuned (LoRA) Qwen2.5-VL checkpoint, relative to the CWD.
    model_name_or_path = '../LLaMA-Factory/saves/Qwen2.5-VL-7B-Instruct/lora/new-53'
    # torch_dtype="auto" keeps the checkpoint's native precision;
    # device_map="auto" lets accelerate place the weights across devices.
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_name_or_path, torch_dtype="auto", device_map="auto"
    )
    # NOTE: despite the name, this is an AutoProcessor (tokenizer + image/
    # video preprocessing). The endpoints above read `model`/`tokenizer` as
    # module-level globals set here, so this file must be run directly —
    # launching via `uvicorn file:app` would leave them undefined.
    tokenizer = AutoProcessor.from_pretrained(model_name_or_path)
    # tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=False)
    # model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto", torch_dtype=torch.bfloat16)

    # Start the FastAPI app on port 6006 — the port AutoDL can map to a
    # local machine, so the API is reachable from outside the instance.
    uvicorn.run(app, host='0.0.0.0', port=6006, workers=1)  # bind all interfaces
