import gc
import os
import time
import uuid
from typing import Optional

import psutil  # process-level memory introspection
from fastapi import FastAPI, HTTPException, BackgroundTasks, Request
from fastapi.responses import JSONResponse, FileResponse
from pydantic import BaseModel

from constant import port
from scripts.my_sample import load_pipe, infer

# Initialize the model pipeline once at import time so the first request
# does not pay the model-loading cost.
load_pipe()

app = FastAPI()

# In-memory task registry: task_id -> state dict.
# NOTE(review): not persisted and not shared across worker processes —
# run a single worker or move this to an external store.
tasks = {}

# Directory where generated images are written.
output_base_dir = 'output_images'
# exist_ok avoids the check-then-create race of the explicit exists() test.
os.makedirs(output_base_dir, exist_ok=True)

# Handle to the current process, used for memory-usage reporting.
process = psutil.Process()

class InferenceRequest(BaseModel):
    """Request body for POST /start_inference."""
    prompt: str                        # text prompt (required, must be non-empty)
    width: int = 1024                  # output image width in pixels
    height: int = 1024                 # output image height in pixels
    num_inference_steps: int = 32      # diffusion denoising steps
    guidance_scale: float = 5.0        # classifier-free guidance strength
    num_images_per_prompt: int = 1     # images to generate per request
    seed: int = 90                     # RNG seed for reproducibility
    # Optional client-supplied task id; a UUID4 is generated when omitted.
    # (`str = None` is an invalid annotation — None is not a str — and is
    # rejected by pydantic v2; Optional[str] is the correct spelling.)
    task_id: Optional[str] = None

def run_inference(task_id, prompt, width, height, num_inference_steps, guidance_scale, num_images_per_prompt, seed):
    """Run one inference job and record its results in the shared `tasks` dict.

    Executed as a FastAPI background task. On success the task entry gets
    status 'completed', the saved image paths, and timing info; on failure
    it is marked 'failed' so pollers are not stuck on a task that would
    otherwise stay 'running' forever.
    """
    # Memory footprint before inference (MB).
    memory_before = process.memory_info().rss / 1024 / 1024
    tasks[task_id]['start_time'] = time.time()
    try:
        # NOTE(review): infer() returns an output_dir we deliberately ignore;
        # images are saved under output_base_dir to match the /get_image route.
        output_dir, images = infer(
            prompt,
            width,
            height,
            num_inference_steps,
            guidance_scale,
            num_images_per_prompt,
            seed,
        )
    except Exception as exc:
        # Surface the failure to pollers instead of leaving status 'running'.
        tasks[task_id]['status'] = 'failed'
        tasks[task_id]['error'] = str(exc)
        tasks[task_id]['end_time'] = time.time()
        raise
    # Memory footprint right after inference, before GC (MB).
    memory_after_infer = process.memory_info().rss / 1024 / 1024
    tasks[task_id]['output_dir'] = output_base_dir
    # Save images and record the paths actually written — len(images) may
    # differ from num_images_per_prompt if the pipeline returned fewer.
    output_paths = []
    for i, img in enumerate(images):
        path = os.path.join(output_base_dir, f'{task_id}_{i}.jpg')
        img.save(path)
        output_paths.append(path)
    tasks[task_id]['output_path'] = output_paths
    tasks[task_id]['status'] = 'completed'
    tasks[task_id]['end_time'] = time.time()
    tasks[task_id]['duration'] = round(tasks[task_id]['end_time'] - tasks[task_id]['start_time'], 2)
    gc.collect()
    # Memory footprint after explicit GC (MB).
    memory_after_gc = process.memory_info().rss / 1024 / 1024

    print(f'推理前内存占用: {memory_before:.2f} MB')
    print(f'推理后内存占用: {memory_after_infer:.2f} MB')
    # Fixed duplicated label: this line reports the post-GC figure.
    print(f'GC后内存占用: {memory_after_gc:.2f} MB')
    print(f'内存增加: {memory_after_gc - memory_before:.2f} MB')

@app.post("/start_inference")
async def start_inference(request: InferenceRequest, background_tasks: BackgroundTasks):
    """Start an asynchronous inference job and return its task id.

    The caller may supply `task_id`; otherwise a fresh UUID4 is generated.
    The actual work runs in a background task — poll /get_image_info/{task_id}
    for status and /get_image/{task_id}/{i} for results.

    Raises:
        HTTPException 400: when `prompt` is empty.
    """
    if not request.prompt:
        # Pydantic guarantees `prompt` is a str, but an empty one is useless.
        raise HTTPException(status_code=400, detail="Prompt is required")

    # Honor a client-provided id; generate a UUID only when actually needed
    # (the original unconditionally built one and then discarded it).
    task_id = request.task_id or str(uuid.uuid4())
    tasks[task_id] = {'status': 'running'}

    background_tasks.add_task(
        run_inference,
        task_id,
        request.prompt,
        request.width,
        request.height,
        request.num_inference_steps,
        request.guidance_scale,
        request.num_images_per_prompt,
        request.seed,
    )

    return {"task_id": task_id}

@app.get("/get_image_info/{task_id}")
async def get_image_info(task_id: str):
    """Return the full state dict for a task, or 404 if the id is unknown."""
    info = tasks.get(task_id)
    if not info:
        raise HTTPException(status_code=404, detail="Task ID not found")
    return info

@app.get("/get_image/{task_id}/{image_i}")
async def get_image(task_id: str, image_i: str):
    """Serve the i-th generated image for a completed task.

    Returns:
        202 with the current status while the task is still running,
        404 when the task, index, or image file does not exist,
        otherwise the JPEG file.
    """
    task = tasks.get(task_id)
    if not task:
        raise HTTPException(status_code=404, detail="Task ID not found")

    if task['status'] != 'completed':
        # Not ready yet — tell the client to keep polling.
        return JSONResponse(content={"status": task['status']}, status_code=202)

    # `image_i` comes straight from the URL and is interpolated into a
    # filesystem path; accept only plain non-negative integer strings so no
    # crafted value can alter the constructed filename.
    if not image_i.isdigit():
        raise HTTPException(status_code=404, detail="No images found")

    image_path = os.path.join(task['output_dir'], f'{task_id}_{image_i}.jpg')
    if not os.path.exists(image_path):
        raise HTTPException(status_code=404, detail="No images found")

    return FileResponse(image_path)

if __name__ == '__main__':
    # Run the ASGI app directly when this module is invoked as a script.
    import uvicorn

    uvicorn.run(app, host='0.0.0.0', port=port)