import base64
import json
import math
import os
import time
import urllib.parse
from dataclasses import dataclass
from datetime import datetime
from http.server import BaseHTTPRequestHandler, HTTPServer
from io import BytesIO
from pathlib import Path
from typing import List, Optional

import torch
from modelscope import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, snapshot_download

# Route Hugging Face downloads through the hf-mirror endpoint to speed up
# model fetches from within China.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

# Reduce CUDA memory fragmentation.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Globally disable gradient tracking to cut memory use during inference.
# BUGFIX: the original `torch.inference_mode()` statement was a no-op -- it
# constructs a context-manager object and immediately discards it; it only
# takes effect when used as `with torch.inference_mode():` or as a decorator.
torch.set_grad_enabled(False)

# Request payload for a single image-generation call.
@dataclass
class ImageGenerationRequest:
    """Parameters for one image generation request.

    Field order and defaults match the original ``__init__`` signature, so
    both positional and keyword construction remain backward compatible.
    The dataclass conversion additionally provides ``__repr__``/``__eq__``.
    """

    prompt: str
    negative_prompt: str = ""
    width: int = 1024
    height: int = 1024
    num_inference_steps: int = 8
    seed: int = 0

# Response payload: base64-encoded images plus the originating prompt.
@dataclass
class ImageGenerationResponse:
    """Result of an image generation call.

    ``images`` holds base64-encoded PNG strings; ``prompt`` echoes the
    request prompt. Constructor signature is unchanged from the original.
    """

    images: List[str]
    prompt: str

# Build and load the diffusion pipeline.
def initialize_model():
    """Load the Qwen-Image pipeline with a Lightning-style scheduler.

    Configures a FlowMatchEulerDiscreteScheduler for few-step sampling,
    then loads the pipeline in bfloat16. With multiple GPUs the weights
    are sharded across devices under a per-GPU memory budget; with a
    single GPU the whole pipeline is moved to ``cuda``.

    Returns:
        The loaded ``DiffusionPipeline`` instance.
    """
    print("=" * 50)
    print("开始初始化模型...")
    print("=" * 50)

    # Scheduler settings for few-step (Lightning-distilled) inference.
    scheduler_config = {
        'base_image_seq_len': 256,
        'base_shift': math.log(3),
        'invert_sigmas': False,
        'max_image_seq_len': 8192,
        'max_shift': math.log(3),
        'num_train_timesteps': 1000,
        'shift': 1.0,
        'shift_terminal': None,
        'stochastic_sampling': False,
        'time_shift_type': 'exponential',
        'use_beta_sigmas': False,
        'use_dynamic_shifting': True,
        'use_exponential_sigmas': False,
        'use_karras_sigmas': False,
    }

    print("正在配置调度器...")
    scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
    print("调度器配置完成")

    # Detect available GPUs.
    num_gpus = torch.cuda.device_count()
    print(f"检测到 {num_gpus} 个GPU设备")

    if num_gpus > 1:
        print("检测到多个GPU，正在进行显存分配...")
        # Compute a per-GPU memory budget from the currently free memory.
        max_memory = {}
        for i in range(num_gpus):
            free_mem, total_mem = torch.cuda.mem_get_info(i)
            # Use 70% of free memory, capped at 22 GB (and never above free).
            allocated_mem = min(int(free_mem * 0.7), 22 * 1024**3, free_mem)
            max_memory[i] = allocated_mem
            print(f"GPU {i}: 分配 {(allocated_mem / 1024**3):.2f} GB 显存")

        print("正在加载模型到多个GPU...")
        # BUGFIX: `max_memory` is only honored when a `device_map` strategy
        # is supplied; the original call passed `max_memory` alone, so the
        # budget was silently ignored and the pipeline was never moved off
        # the CPU (this branch has no `.to("cuda")`).
        pipe = DiffusionPipeline.from_pretrained(
            'Qwen/Qwen-Image',
            scheduler=scheduler,
            torch_dtype=torch.bfloat16,
            device_map="balanced",
            max_memory=max_memory,  # per-GPU byte budget
        )
        print("多GPU模型加载完成")
    else:
        # Single-GPU path: load on CPU, then move the whole pipeline over.
        print("检测到单个GPU，正在加载模型...")
        pipe = DiffusionPipeline.from_pretrained(
            'Qwen/Qwen-Image',
            scheduler=scheduler,
            torch_dtype=torch.bfloat16,
        )
        pipe = pipe.to("cuda")
        print("单GPU模型加载完成")

    print(f"模型已分配到{num_gpus}个GPU设备上")
    print("=" * 50)
    print("模型初始化完成")
    print("=" * 50)
    return pipe

# Pre-fetch the LoRA weights before serving requests.
def download_lora_weights():
    """Download the Lightning LoRA snapshot and return the weight file path.

    Prefers ``.safetensors`` files over ``.pt`` files; raises
    ``FileNotFoundError`` when the snapshot contains neither.
    """
    divider = "=" * 50
    print(divider)
    print("开始下载LoRA权重...")
    print(divider)
    # ModelScope caches the snapshot locally after the first download.
    model_dir = snapshot_download('lightx2v/Qwen-Image-Lightning')
    print(f"LoRA权重已下载到: {model_dir}")

    # Collect candidates: .safetensors first, then .pt.
    snapshot_root = Path(model_dir)
    candidates = [*snapshot_root.glob("*.safetensors"), *snapshot_root.glob("*.pt")]
    if not candidates:
        raise FileNotFoundError("在下载的LoRA权重目录中未找到.safetensors或.pt文件")

    # Use the first match.
    chosen = candidates[0]
    print(f"使用LoRA文件: {chosen}")
    print(divider)
    print("LoRA权重下载完成")
    print(divider)
    return str(chosen)

# Run one generation pass through the pipeline.
def generate_image(request: "ImageGenerationRequest", pipe):
    """Generate one image for *request* and return it as base64-encoded PNG.

    Args:
        request: Object carrying prompt, negative_prompt, width, height,
            num_inference_steps and seed attributes.
        pipe: Loaded diffusion pipeline; called with the request parameters
            and expected to return an object with an ``images`` list.

    Returns:
        str: Base64 encoding of the first generated image, saved as PNG.
    """
    print("=" * 50)
    print("开始生成图像...")
    print("=" * 50)

    print(f"输入参数:")
    print(f"  - 提示词: {request.prompt}")
    print(f"  - 负面提示词: {request.negative_prompt}")
    print(f"  - 图像尺寸: {request.width}x{request.height}")
    print(f"  - 推理步数: {request.num_inference_steps}")
    print(f"  - 随机种子: {request.seed}")

    start_time = time.time()
    print("开始图像生成过程...")

    # Per-step progress logging.
    # NOTE(review): `callback`/`callback_steps` is the legacy diffusers
    # callback API; newer pipelines use `callback_on_step_end` -- confirm
    # the installed pipeline still accepts these keywords.
    def progress_callback(step: int, timestep: int, latents: torch.Tensor):
        progress = (step + 1) / request.num_inference_steps * 100
        print(f"生成进度: {progress:.1f}% (步骤 {step + 1}/{request.num_inference_steps})")

    print("正在调用模型生成图像...")
    # BUGFIX: use a dedicated Generator instead of torch.manual_seed(),
    # which reseeded the *global* RNG as a side effect on every request.
    generator = torch.Generator().manual_seed(request.seed)
    image = pipe(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        num_inference_steps=request.num_inference_steps,
        true_cfg_scale=1.0,
        generator=generator,
        callback=progress_callback,
        callback_steps=1
    ).images[0]

    generation_time = time.time() - start_time
    print(f"图像生成完成，耗时: {generation_time:.2f} 秒")

    print("正在将图像转换为base64编码...")
    # Serialize the image to PNG in memory, then base64 for the JSON response.
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    print("图像转换完成")

    print("=" * 50)
    print("图像生成流程结束")
    print("=" * 50)

    return img_str

# HTTP request handler.
class ImageGenerationHandler(BaseHTTPRequestHandler):
    """Serves the OpenAI-style endpoint ``POST /v1/images/generations``.

    Expects a JSON body with prompt/size/steps/seed fields and responds with
    ``{"images": [<base64 PNG>], "prompt": ...}``. Any failure during
    parsing or generation is reported as a 500 with a JSON error body.
    """

    # Shared diffusion pipeline; assigned once by run_server() before serving.
    pipe = None

    def do_POST(self):
        if self.path == "/v1/images/generations":
            request_start_time = time.time()
            print("=" * 60)
            print("收到新的图像生成请求")
            print("=" * 60)

            try:
                # Read the JSON request body (Content-Length is required;
                # a missing header falls through to the error handler).
                content_length = int(self.headers['Content-Length'])
                print(f"请求内容长度: {content_length} 字节")

                post_data = self.rfile.read(content_length)
                print("请求数据接收完成")

                data = json.loads(post_data.decode('utf-8'))
                print("请求数据解析完成")

                print("请求参数:")
                for key, value in data.items():
                    print(f"  - {key}: {value}")

                # Build the request object, defaulting any missing fields.
                request = ImageGenerationRequest(
                    prompt=data.get('prompt', ''),
                    negative_prompt=data.get('negative_prompt', ''),
                    width=data.get('width', 1024),
                    height=data.get('height', 1024),
                    num_inference_steps=data.get('num_inference_steps', 8),
                    seed=data.get('seed', 0)
                )

                print("开始处理图像生成请求...")
                # Run the (blocking) generation pass.
                image_data = generate_image(request, self.pipe)
                print("图像生成完成")

                response_data = {
                    "images": [image_data],
                    "prompt": request.prompt
                }

                request_end_time = time.time()
                total_time = request_end_time - request_start_time
                print(f"请求处理完成，总耗时: {total_time:.2f} 秒")
                print("响应数据已准备就绪")
                print("=" * 60)
                print("请求处理流程结束")
                print("=" * 60)

                # Send the success response.
                self.send_response(200)
                self.send_header('Content-type', 'application/json')
                self.send_header('Access-Control-Allow-Origin', '*')
                self.end_headers()
                self.wfile.write(json.dumps(response_data).encode('utf-8'))

            except Exception as e:
                error_end_time = time.time()
                total_error_time = error_end_time - request_start_time
                print(f"请求处理出错，总耗时: {total_error_time:.2f} 秒")
                print(f"错误信息: {str(e)}")
                print("=" * 60)
                print("请求处理流程结束（出错）")
                print("=" * 60)

                # Send the error response.
                self.send_response(500)
                self.send_header('Content-type', 'application/json')
                # BUGFIX: the error path previously omitted the CORS header,
                # so cross-origin browser clients could not read error bodies
                # even though successful responses allowed them.
                self.send_header('Access-Control-Allow-Origin', '*')
                self.end_headers()
                error_response = {"error": str(e)}
                self.wfile.write(json.dumps(error_response).encode('utf-8'))
        else:
            # Unknown path.
            self.send_response(404)
            self.end_headers()

    def do_OPTIONS(self):
        # Answer CORS preflight requests.
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'POST, OPTIONS')
        self.send_header('Access-Control-Allow-Headers', 'Content-Type')
        self.end_headers()

# Launch the HTTP API.
def run_server(pipe, port=8800):
    """Serve the image-generation API on localhost until interrupted.

    Args:
        pipe: Loaded diffusion pipeline, shared with every request handler.
        port: TCP port to bind on localhost (default 8800).
    """
    # Hand the pipeline to the handler class before accepting requests.
    ImageGenerationHandler.pipe = pipe

    server = HTTPServer(('localhost', port), ImageGenerationHandler)
    print(f"Starting server on port {port}...")
    print(f"OpenAI API endpoint: http://localhost:{port}/v1/images/generations")
    print("Press Ctrl+C to stop the server")

    try:
        # Blocks until Ctrl+C.
        server.serve_forever()
    except KeyboardInterrupt:
        print("\nServer stopped")
        server.server_close()

if __name__ == "__main__":
    # Load the diffusion pipeline (downloads model weights on first run).
    pipe = initialize_model()
    
    # Fetch the Lightning LoRA weights and attach them to the pipeline
    # for few-step inference.
    lora_file_path = download_lora_weights()
    print("正在加载LoRA权重...")
    pipe.load_lora_weights(lora_file_path)
    print("LoRA权重加载完成")
    
    # Start the blocking HTTP API server on port 8800.
    run_server(pipe, port=8800)