import torch
from diffusers import StableDiffusionPipeline
import logging
import os
import re
from datetime import datetime
from config import OUTPUT_DIR, MODEL_CACHE_DIR

# Configure module-level logging (INFO level; handlers inherited from root).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class LocalModel:
    """Thin wrapper around a locally-run Stable Diffusion pipeline.

    The diffusers pipeline is loaded lazily on first generation. Generated
    images (plus a companion text file recording the prompts used) are
    written to ``OUTPUT_DIR``.
    """

    def __init__(self, model_name="runwayml/stable-diffusion-v1-5", device=None):
        """Initialize the local model (does NOT load weights yet).

        Args:
            model_name: HuggingFace model identifier.
            device: Device to use ("cuda"/"cpu"); auto-detected when None.
        """
        self.model_name = model_name
        # Prefer the GPU when available; dtype selection in load_model()
        # depends on this choice (fp16 on cuda, fp32 on cpu).
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
        self.pipeline = None
        self.model_loaded = False

        logger.info(f"使用设备: {self.device}")
        logger.info(f"加载模型: {model_name}")

    def load_model(self):
        """Load the Stable Diffusion pipeline (idempotent).

        Returns:
            bool: True on success or if already loaded, False on failure.
        """
        if self.model_loaded:
            return True

        try:
            # fp16 halves VRAM use on GPU; CPU inference requires fp32.
            self.pipeline = StableDiffusionPipeline.from_pretrained(
                self.model_name,
                torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
                cache_dir=MODEL_CACHE_DIR,
                safety_checker=None,
                requires_safety_checker=False
            )

            self.pipeline = self.pipeline.to(self.device)

            # GPU-only memory/performance optimizations.
            if self.device == "cuda":
                self.pipeline.enable_attention_slicing()
                # BUGFIX: hasattr() alone is not enough -- diffusers defines
                # this method unconditionally and raises at call time when the
                # xformers package is missing. Guard the call so a missing
                # optional dependency doesn't fail the whole model load.
                if hasattr(self.pipeline, 'enable_xformers_memory_efficient_attention'):
                    try:
                        self.pipeline.enable_xformers_memory_efficient_attention()
                    except Exception as e:
                        logger.warning(f"xformers不可用, 跳过: {str(e)}")

            self.model_loaded = True
            logger.info("Stable Diffusion模型加载成功")
            return True

        except Exception as e:
            logger.error(f"Stable Diffusion模型加载失败: {str(e)}")
            return False

    def is_english(self, text):
        """Heuristically check whether *text* is (mostly) English.

        Args:
            text: Input text.

        Returns:
            bool: True when more than 70% of the characters are ASCII.
                  Empty text is treated as non-English (False).
        """
        # Simple heuristic: ratio of ASCII characters to total length.
        english_ratio = sum(1 for c in text if ord(c) < 128) / len(text) if text else 0
        return english_ratio > 0.7

    def optimize_prompt(self, prompt):
        """Optimize a prompt; English prompts get a few quality tags prepended.

        Args:
            prompt: Original prompt text.

        Returns:
            str: The optimized prompt (unchanged for non-English input, or
            when quality tags are already present).
        """
        if self.is_english(prompt):
            # English prompt: prepend only a few quality tags so the
            # user's subject isn't drowned out.
            quality_tags = ["high quality", "detailed"]

            # Skip if the prompt already carries a quality tag.
            existing_tags = ["high quality", "detailed", "4k", "8k", "masterpiece", "best quality"]
            has_quality_tag = any(tag in prompt.lower() for tag in existing_tags)

            if not has_quality_tag:
                optimized_prompt = f"{', '.join(quality_tags)}, {prompt}"
            else:
                optimized_prompt = prompt

            logger.info(f"使用英文提示词: {optimized_prompt}")
            return optimized_prompt
        else:
            # Non-English (e.g. Chinese) prompt: pass through unchanged
            # (translation could be added here if needed).
            logger.info(f"使用中文提示词: {prompt}")
            return prompt

    def generate_image(self, prompt, negative_prompt=None, num_inference_steps=25,
                       guidance_scale=7.5, width=512, height=512):
        """Generate one image and save it (with prompt info) to OUTPUT_DIR.

        Args:
            prompt: Prompt text (Chinese or English).
            negative_prompt: Negative prompt; a sensible default is used
                when None.
            num_inference_steps: Number of denoising steps.
            guidance_scale: Classifier-free guidance scale.
            width: Output image width in pixels (default 512).
            height: Output image height in pixels (default 512).

        Returns:
            str | None: Path of the saved image, or None on failure.
        """
        # Lazy-load the pipeline on first use.
        if not self.model_loaded:
            if not self.load_model():
                return None

        try:
            final_prompt = self.optimize_prompt(prompt)

            logger.info(f"最终提示词: {final_prompt}")

            # Default negative prompt targeting common SD failure modes.
            if negative_prompt is None:
                negative_prompt = (
                    "low quality, worst quality, bad anatomy, bad hands, text, error, "
                    "missing fingers, extra digit, fewer digits, cropped, worst quality, "
                    "low quality, normal quality, jpeg artifacts, signature, watermark, "
                    "username, blurry, bad feet"
                )

            result = self.pipeline(
                prompt=final_prompt,
                negative_prompt=negative_prompt,
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
                width=width,
                height=height
            )

            # Save the image; ensure the output directory exists first.
            image = result.images[0]
            os.makedirs(OUTPUT_DIR, exist_ok=True)
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"generated_image_{timestamp}.png"
            image_path = os.path.join(OUTPUT_DIR, filename)

            image.save(image_path)
            logger.info(f"图像已保存: {image_path}")

            # Save a companion file recording the prompts used.
            info_file = os.path.join(OUTPUT_DIR, f"prompt_info_{timestamp}.txt")
            with open(info_file, 'w', encoding='utf-8') as f:
                f.write(f"原始提示词: {prompt}\n")
                f.write(f"最终提示词: {final_prompt}\n")
                f.write(f"负面提示词: {negative_prompt}\n")
                f.write(f"是否为英文: {self.is_english(prompt)}\n")

            return image_path

        except Exception as e:
            logger.error(f"图像生成失败: {str(e)}")
            return None

    def generate_variations(self, prompt, num_variations=4, **kwargs):
        """Generate several variations for the same prompt.

        Args:
            prompt: Prompt text.
            num_variations: Number of images to generate.
            **kwargs: Forwarded to :meth:`generate_image` (e.g. width,
                height, num_inference_steps).

        Returns:
            list[str]: Paths of successfully generated images (failures
            are skipped, so the list may be shorter than requested).
        """
        image_paths = []

        for i in range(num_variations):
            logger.info(f"生成变体 {i + 1}/{num_variations}")
            image_path = self.generate_image(prompt, **kwargs)
            if image_path:
                image_paths.append(image_path)

        return image_paths