import os
import time
from datetime import datetime

import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import numpy as np
from PIL import Image

def log(message):
    """Print *message* to stdout prefixed with a ``[YYYY-mm-dd HH:MM:SS]`` timestamp.

    Args:
        message: Text to log; rendered with ``str``-style f-string formatting.
    """
    # datetime is imported once at module level rather than on every call.
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print(f"[{timestamp}] {message}")

def main(
    prompt="A beautiful girl in the sunshine, high quality, detailed, 8k, professional photography",
    negative_prompt="low quality, blurry, worst quality, deformed, ugly, bad anatomy",
    output_path="result11.png",
    num_inference_steps=30,
    guidance_scale=7.5,
):
    """Generate a single image with Stable Diffusion v1.5 and save it to disk.

    Loads the base SD v1.5 model (no LoRA), runs one text-to-image inference
    pass, writes the result as a PNG, and releases GPU memory afterwards.
    All errors are caught at this top-level boundary, logged, and swallowed.

    Args:
        prompt: Positive text prompt for the image.
        negative_prompt: Concepts to steer the generation away from.
        output_path: Destination file for the generated PNG.
        num_inference_steps: Denoising steps; more steps = slower but finer.
        guidance_scale: Classifier-free guidance strength (typ. 5-10).
    """
    # Record wall-clock start so the total runtime can be reported.
    start_time = time.time()
    log("开始处理...")

    try:
        # Model root directory; overridable via SD_MODEL_BASE_PATH so the
        # script is not tied to one machine's directory layout.
        model_base_path = os.environ.get(
            'SD_MODEL_BASE_PATH', 'E:/workspace/llm/text2image/models'
        )

        # Use the plain base model directly — no LoRA weights are loaded.
        log("加载基础模型...")
        base_model_path = os.path.join(
            model_base_path, 'AI-ModelScope/stable-diffusion-v1-5'
        )

        # Prefer GPU when available; everything below adapts to this choice.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        log(f"使用设备: {device}")

        # fp16 halves VRAM usage on CUDA; CPU inference needs fp32. The
        # safety checker is disabled to skip its extra model load and pass.
        pipe = StableDiffusionPipeline.from_pretrained(
            base_model_path,
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            safety_checker=None,
            requires_safety_checker=False,
        )

        # DPM-Solver++ multistep converges in fewer steps than the default
        # scheduler, so the same step budget yields better quality.
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(
            pipe.scheduler.config
        )
        pipe = pipe.to(device)

        # Attention/VAE slicing trade a little speed for a much lower peak
        # VRAM footprint — only relevant on CUDA.
        if device == "cuda":
            log("启用内存优化...")
            pipe.enable_attention_slicing()
            pipe.enable_vae_slicing()

        log("生成图像中...")

        # inference_mode() disables autograd bookkeeping entirely, saving
        # memory versus running the pipeline with gradients enabled.
        with torch.inference_mode():
            result = pipe(
                prompt=prompt,
                negative_prompt=negative_prompt,
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
            )

        # Save the PIL image directly — no OpenCV/color-space conversion.
        result.images[0].save(output_path)
        log(f"图像已保存到: {output_path}")

        # Drop the pipeline and flush the CUDA allocator cache so the GPU
        # memory is available to whatever runs next.
        del pipe
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        elapsed_time = time.time() - start_time
        log(f"处理完成! 总耗时: {elapsed_time:.2f}秒")

    except Exception as e:
        # Top-level boundary: report the failure with a full traceback
        # instead of letting the script die with an unformatted stack dump.
        log(f"发生错误: {str(e)}")
        import traceback
        traceback.print_exc()
# Run the generation pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()