import os
import torch
from diffusers import StableDiffusion3Pipeline
import gradio as gr
from transformers import MarianMTModel, MarianTokenizer, CLIPTokenizer
from io import BytesIO
from PIL import Image
import tempfile
import logging
import math

# Configure module-level logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Configure the PyTorch CUDA caching allocator to use expandable segments
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

# Custom CSS hiding the "Use via API" / "Built with Gradio" footer links.
# NOTE(review): the "svelte-1rjryqp" class suffix is auto-generated by a
# specific Gradio build — confirm it still matches after Gradio upgrades.
custom_css = """
/* 隐藏 "通过API使用" 按钮 */
button.show-api.svelte-1rjryqp {
    display: none !important;
}
footer > div.svelte-1rjryqp {
    display: none !important;
}
/* 隐藏 "使用 Gradio 构建" 链接 */
a.built-with.svelte-1rjryqp {
    display: none !important;
}
"""

# Load the pretrained Stable Diffusion 3 pipeline.
# NOTE(review): weights are loaded as float16 even when falling back to
# CPU — confirm fp16 inference is actually supported/fast on the CPU path.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusion3Pipeline.from_pretrained(
    "/data_new/model/stable-diffusion-3-medium-diffusers",   # local SD3 model path (download separately)
    torch_dtype=torch.float16
)

pipe = pipe.to(device)
# Enable attention slicing to lower peak memory during generation
pipe.enable_attention_slicing(slice_size=2)

# Load the zh->en translation model and its tokenizer
translation_model_name = '/data_new/model/opus-mt-zh-en'  # local translation model path (download separately)
translator = MarianMTModel.from_pretrained(translation_model_name)
tokenizer = MarianTokenizer.from_pretrained(translation_model_name)

# Load the CLIP tokenizer (used only to measure/truncate prompt length)
clip_tokenizer = CLIPTokenizer.from_pretrained("/data_new/model/stable-diffusion-3-medium-diffusers", subfolder="tokenizer")

# In-memory prompt -> translation cache (unbounded; grows for the process lifetime)
translation_cache = {}

def translate_to_english(text):
    """Translate Chinese *text* to English with the MarianMT model.

    Results are memoized in the module-level ``translation_cache``.
    Returns the translated string, or "" when translation fails.
    """
    cached = translation_cache.get(text)
    if cached is not None:
        return cached

    try:
        encoded = tokenizer(text, return_tensors="pt")
        generated = translator.generate(**encoded)
        # generate() returns a batch of one sequence; decode the first.
        result = tokenizer.decode(generated[0], skip_special_tokens=True)
    except Exception as e:
        logger.error(f"Translation failed: {e}")
        return ""

    translation_cache[text] = result
    logger.info(f"Translated: {text} -> {result}")
    return result

def check_prompt_length(prompt):
    """Clamp *prompt* to CLIP's 77-token limit and return the decoded text.

    NOTE(review): the 77-id cap does not reserve room for BOS/EOS special
    tokens, and decode/re-encode is not guaranteed to round-trip to <= 77
    tokens — confirm against the pipeline's own truncation behavior.
    """
    ids = clip_tokenizer.convert_tokens_to_ids(clip_tokenizer.tokenize(prompt))
    if len(ids) > 77:
        logger.warning(f"Prompt length exceeds the maximum allowed length (77 tokens). Truncating: {prompt}")
        ids = ids[:77]
    return clip_tokenizer.decode(ids)

def adjust_to_multiple_of_eight(dimension, multiple=16):
    """Round *dimension* up to the nearest multiple of *multiple*.

    NOTE: despite the legacy name ("eight"), the default alignment is 16,
    matching both the original implementation and the 16-step dropdown
    options built below. The name is kept for caller compatibility.

    Args:
        dimension: requested pixel dimension (non-negative int).
        multiple: alignment to round up to (default 16).

    Returns:
        The smallest multiple of *multiple* that is >= dimension.
    """
    return (dimension + multiple - 1) // multiple * multiple

# Dropdown choices for width/height: every value from 16 through 2992 in
# steps of 16, so any selectable size is already 16-aligned.
width_options = [str(size) for size in range(16, 2993, 16)]
height_options = list(width_options)

def generate_image(prompt, style, width, height, num_inference_steps=20, guidance_scale=5.0):
    """Translate the prompt, run the SD3 pipeline, and return a PIL image.

    Args:
        prompt: Chinese text prompt (translated to English internally).
        style: style keyword prepended to the translated prompt.
        width, height: requested output size in pixels (ints).
        num_inference_steps: number of diffusion denoising steps.
        guidance_scale: classifier-free guidance strength.

    Returns:
        A PIL image on success, or None if translation or generation fails.
    """
    prompt_en = translate_to_english(prompt)
    if not prompt_en:
        return None

    full_prompt = f"{style} {prompt_en}"

    # Clamp the combined prompt to the CLIP token limit
    full_prompt = check_prompt_length(full_prompt)

    # The pipeline needs 16-aligned dimensions; keep the user's requested
    # size so we can resize back afterwards.
    adjusted_width = adjust_to_multiple_of_eight(width)
    adjusted_height = adjust_to_multiple_of_eight(height)

    logger.info(f"Generating image with dimensions: {adjusted_width}x{adjusted_height}")
    logger.info(f"Full prompt: {full_prompt}")

    try:
        # Mixed precision is only valid on CUDA: autocast with a float32
        # dtype on CPU is rejected, so disable autocast there instead of
        # passing an unsupported dtype.
        with torch.autocast(device_type=device, dtype=torch.float16,
                            enabled=(device == "cuda")), torch.no_grad():
            # Attention slicing is already enabled once at load time
            # (pipe.enable_attention_slicing above); re-enabling per call
            # was redundant and has been removed.
            output = pipe(
                prompt=full_prompt,
                negative_prompt="",
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
                width=int(adjusted_width),
                height=int(adjusted_height)
            )
            image = output.images[0]

        # Resize back to the exact user-requested dimensions if the
        # 16-alignment changed them
        if (adjusted_width, adjusted_height) != (width, height):
            logger.info(f"Resizing image from {adjusted_width}x{adjusted_height} to {width}x{height}")
            image = image.resize((width, height), Image.Resampling.LANCZOS)

        # Release cached CUDA memory; guarded so CPU-only runs never
        # touch the CUDA allocator
        if device == "cuda":
            torch.cuda.empty_cache()
        logger.info(f"Generated image for prompt: {full_prompt}")
        return image
    except Exception as e:
        logger.error(f"Image generation failed: {e}", exc_info=True)
        return None

def save_image(image, format, temp_dir):
    """Save *image* to a temporary file in *temp_dir* and return its path.

    Args:
        image: PIL image to persist; None yields None.
        format: target format name ("JPEG", "PNG", "WEBP", "BMP", "TIFF",
            "GIF", case-insensitive). None or any unrecognized value
            falls back to JPEG.
        temp_dir: directory in which the temporary file is created.

    Returns:
        Path of the written file, or None when image is None.

    NOTE(review): delete=False means these files are never removed here —
    confirm something else cleans temp_dir up over long sessions.
    """
    if image is None:
        return None

    # Default format is JPEG
    if format is None:
        format = "JPEG"
    fmt = format.lower()

    # Formats that can be written directly without any conversion
    direct_formats = {'png': 'PNG', 'webp': 'WEBP', 'bmp': 'BMP', 'tiff': 'TIFF'}

    with tempfile.NamedTemporaryFile(delete=False, dir=temp_dir, suffix=f'.{fmt}') as temp_file:
        if fmt in direct_formats:
            image.save(temp_file, format=direct_formats[fmt])
        elif fmt == 'gif':
            # save_all/loop preserve animation for multi-frame sources;
            # harmless for single-frame images
            image.save(temp_file, format='GIF', save_all=True, loop=0)
        else:  # fallback: JPEG (no alpha channel support, so force RGB)
            image = image.convert("RGB")
            image.save(temp_file, format='JPEG')

    logger.info(f"Saved image to: {temp_file.name}")
    return temp_file.name

def on_generate_click(prompt, style, width, height, num_inference_steps, guidance_scale):
    """Gradio handler for the generate button: coerce sizes, then delegate."""
    # Dropdown values arrive as strings; the pipeline expects ints.
    return generate_image(prompt, style, int(width), int(height),
                          num_inference_steps, guidance_scale)

def on_download_click(image, format, temp_dir):
    """Gradio handler for the download button: save the image, return its path.

    Returns None when there is no image yet or saving failed.
    """
    if image is None:
        return None
    file_path = save_image(image, format, temp_dir)
    return file_path if file_path else None

# Build the Gradio interface
with gr.Blocks(title="文生图小工具", css=custom_css) as demo:
    with gr.Row():
        prompt_input = gr.Textbox(label="提示词", placeholder="输入一个文本提示...")
        style_dropdown = gr.Dropdown(
            ["写实", "卡通", "油画", "素描", "水彩", "抽象", "印象派", "立体派", "超现实主义", "像素艺术", "赛博朋克",
             "水墨画", "波普艺术", "哥特式", "极简主义", "新古典主义", "巴洛克", "洛可可", "表现主义", "未来主义",
             "达达主义",
             "后印象派", "野兽派", "点彩派", "浪漫主义", "立体现实主义", "涂鸦艺术", "复古", "极繁主义", "浮雕艺术",
             "浮世绘"],
            label="风格"
        )
        width_dropdown = gr.Dropdown(width_options, value="512", label="宽度")
        height_dropdown = gr.Dropdown(height_options, value="512", label="高度")
        inference_steps_slider = gr.Slider(1, 100, value=28, step=1, label="推理步数")
        guidance_scale_slider = gr.Slider(0, 10, value=5.0, step=0.5, label="引导系数")

    with gr.Row():
        generate_button = gr.Button("生成图像")
        output_image = gr.Image(type="pil", interactive=False)
        download_format_dropdown = gr.Dropdown(
            ["JPEG", "PNG", "WEBP", "BMP", "TIFF", "GIF"],  # additional download formats
            label="下载格式",
            value="JPEG"  # default selection
        )
        download_button = gr.Button("下载图像")

    # Example prompts shown below the inputs
    examples = [
        ["一只猫举着一块写着'你好世界'的牌子", "写实", "512", "512", 20, 5.0],
        ["美丽的海上日落", "油画", "768", "768", 28, 6.0],
        ["未来城市的夜景天际线", "卡通", "1024", "1024", 28, 6.0],
        ["抽象的艺术作品", "抽象", "512", "512", 28, 7.0],
        ["赛博朋克风格的城市", "赛博朋克", "768", "768", 20, 5.0],
        ["水墨画风格的山水", "水墨画", "1024", "1024", 28, 9.0]
    ]

    gr.Examples(examples, [prompt_input, style_dropdown, width_dropdown, height_dropdown, inference_steps_slider,
                           guidance_scale_slider])

    # Wire the generate button to the image-generation handler
    generate_button.click(on_generate_click,
                          [prompt_input, style_dropdown, width_dropdown, height_dropdown, inference_steps_slider,
                           guidance_scale_slider],
                          output_image)

    # Wire the download button: saves the current image and returns the file.
    # NOTE(review): on_download_click needs a temp_dir argument but only two
    # inputs are wired here — confirm Gradio supplies the third or whether
    # this call fails at runtime.
    download_button.click(on_download_click, [output_image, download_format_dropdown], outputs=[gr.File()])

    # Custom footer
    with gr.Row():
        gr.HTML('<div style="text-align: center; padding: 10px;">版权所有 © 2024 文生图小工具公司 | 版本 1.0</div>')

# Launch the Gradio app, exposed on all network interfaces
demo.launch(server_name='0.0.0.0', server_port=7861)  # 0.0.0.0 listens on all interfaces