import gradio as gr
from Core.comfy_client import ComfyUIClient
from Core.baidu_translate import baidu_translate
import json
import os
import time
import shutil


class TextToImageInterface:
    """Backend helper for the FLUX1 text-to-image tab.

    Loads the FLUX1 workflow JSON, patches its nodes with user-selected
    parameters, submits the job to ComfyUI via :class:`ComfyUIClient`, and
    polls the history endpoint until the generated image can be downloaded.
    """

    # Preferred default model names; used when present in the scanned dirs.
    _FALLBACK_UNET = "flux1-dev-fp8.safetensors"
    _FALLBACK_CLIP1 = "t5xxl_fp8_e4m3fn.safetensors"
    # File extensions recognised as model checkpoints.
    _MODEL_EXTS = (".safetensors", ".ckpt")

    def __init__(self):
        self.client = ComfyUIClient()
        base_dir = os.path.dirname(__file__)
        # Target workflow: FLUX1_text_to_image.json
        self.workflow_path = os.path.join(
            base_dir, "..", "Comfyui", "work_flow", "FLUX1_text_to_image.json"
        )

        # Model directories and the checkpoint files found in each.
        from config.config import get_unet_dir_path, get_clip_dir_path
        self.unet_dir = get_unet_dir_path()
        self.clip_dir = get_clip_dir_path()
        self.unet_files = self._list_model_files(self.unet_dir)
        self.clip_files = self._list_model_files(self.clip_dir)

        # Default model values; fall back to the first scanned file when the
        # preferred default is not available.
        self.default_unet = self._pick_default(self._FALLBACK_UNET, self.unet_files)
        self.default_clip1 = self._pick_default(self._FALLBACK_CLIP1, self.clip_files)

        # Temp directory where downloaded result images are written.
        self.temp_dir = os.path.join(base_dir, "..", "temp")
        os.makedirs(self.temp_dir, exist_ok=True)

    @classmethod
    def _list_model_files(cls, directory):
        """Return checkpoint file names in *directory* ([] if it is missing)."""
        if not os.path.isdir(directory):
            return []
        return [f for f in os.listdir(directory) if f.lower().endswith(cls._MODEL_EXTS)]

    @staticmethod
    def _pick_default(preferred, available):
        """Return *preferred* if usable, else the first *available* entry.

        Mirrors the original behavior: *preferred* is kept when *available*
        is empty (the UI then shows it as a non-selectable placeholder).
        """
        if available and preferred not in available:
            return available[0]
        return preferred

    def load_workflow(self):
        """Load the workflow JSON; return the parsed dict or None on error."""
        try:
            with open(self.workflow_path, "r", encoding="utf-8") as f:
                return json.load(f)
        except Exception as e:
            print(f"Error loading workflow: {e}")
            return None

    @staticmethod
    def _set_inputs(workflow, node_id, values):
        """Merge *values* into the inputs of workflow node *node_id*, if present."""
        node = workflow.get(node_id)
        if node and "inputs" in node:
            node["inputs"].update(values)

    def _copy_to_output_images(self, result_file):
        """Best-effort copy of *result_file* into the project's output_images dir."""
        try:
            from config.config import get_local_output_images_path
            out_dir = get_local_output_images_path()
            os.makedirs(out_dir, exist_ok=True)
            shutil.copy2(result_file, os.path.join(out_dir, os.path.basename(result_file)))
        except Exception as copy_err:
            # Deliberate best-effort: a failed copy must not fail generation.
            print(f"拷贝到output_images失败: {copy_err}")

    def _wait_for_image(self, prompt_id, max_retries=40, retry_interval=2):
        """Poll ComfyUI history until an image appears, then save and return it.

        Returns the absolute path of the saved PNG, or None on timeout.
        """
        for _ in range(max_retries):
            history = self.client.get_history(prompt_id)
            entry = history.get(prompt_id, {})
            for node_output in entry.get("outputs", {}).values():
                for image in node_output.get("images", []):
                    image_bytes = self.client.get_image(
                        image["filename"], image.get("subfolder"), image.get("type")
                    )
                    result_file = os.path.join(self.temp_dir, f"flux_t2i_{prompt_id}.png")
                    with open(result_file, "wb") as f:
                        f.write(image_bytes)
                    # Also keep a copy in the project-local output directory.
                    self._copy_to_output_images(result_file)
                    return os.path.abspath(result_file)
            time.sleep(retry_interval)
        return None

    def generate_image(self, prompt_text, unet_name, clip_name1, width, height, steps, denoise):
        """Run the FLUX1 workflow and return ``(image_path, status_message)``.

        Returns ``(None, message)`` on any failure; never raises.
        """
        try:
            workflow = self.load_workflow()
            if not workflow:
                return None, "生成失败：无法加载工作流"

            # Patch the workflow nodes (ids fixed by FLUX1_text_to_image.json):
            # 20 = UNET loader, 17 = dual-CLIP loader, 24 = empty latent,
            # 19 = prompt text, 41 = KSampler.
            self._set_inputs(workflow, "20", {"unet_name": unet_name})
            self._set_inputs(workflow, "17", {"clip_name1": clip_name1})
            self._set_inputs(workflow, "24", {"width": int(width), "height": int(height)})
            self._set_inputs(workflow, "19", {"text": prompt_text or ""})
            self._set_inputs(workflow, "41", {
                "steps": int(steps),
                "denoise": float(denoise),
                # Fresh random seed per run so identical settings still vary.
                "seed": int.from_bytes(os.urandom(8), "big"),
            })

            # Submit the job, then poll for the result.
            response = self.client.post_prompt(workflow)
            prompt_id = response.get("prompt_id")
            if not prompt_id:
                return None, "生成失败：未返回prompt_id"

            result_path = self._wait_for_image(prompt_id)
            if result_path is None:
                return None, "生成失败：超时或无输出"
            return result_path, "生成成功"
        except Exception as e:
            print(f"Error generating image: {str(e)}")
            return None, f"生成失败：{str(e)}"


def Tab_text_to_image():
    """Build the Gradio UI for the FLUX1 text-to-image tab.

    Wires the prompt/model/size controls to
    :meth:`TextToImageInterface.generate_image`, translating the prompt to
    English first, and adds a refreshable gallery of the output folder.
    """
    interface = TextToImageInterface()

    # Dropdown choices with hard-coded fallbacks when no model files exist.
    unet_choices = interface.unet_files
    default_unet = (
        interface.default_unet
        if interface.default_unet in unet_choices
        else (unet_choices[0] if unet_choices else "flux1-dev-fp8.safetensors")
    )

    clip_choices = interface.clip_files
    default_clip1 = (
        interface.default_clip1
        if interface.default_clip1 in clip_choices
        else (clip_choices[0] if clip_choices else "t5xxl_fp8_e4m3fn.safetensors")
    )

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 文本生成图像（FLUX1）")

            prompt_input = gr.Textbox(label="提示词(Text)", lines=3, placeholder="输入生成提示词")

            unet_dropdown = gr.Dropdown(
                label="UNET模型(unet_name)",
                choices=unet_choices,
                value=default_unet,
                interactive=bool(unet_choices)
            )

            clip1_dropdown = gr.Dropdown(
                label="CLIP文本编码器(clip_name1)",
                choices=clip_choices,
                value=default_clip1,
                interactive=bool(clip_choices)
            )

            width_slider = gr.Slider(label="宽度(width)", minimum=0, maximum=2048, step=1, value=1024)
            height_slider = gr.Slider(label="高度(height)", minimum=0, maximum=2048, step=1, value=1536)

            steps_input = gr.Number(label="步数(steps)", value=20, minimum=0, maximum=60)
            denoise_slider = gr.Slider(label="降噪(denoise)", minimum=0, maximum=1, value=1, step=0.01)

        with gr.Column(scale=2):
            generate_btn = gr.Button("生成", variant="primary")
            output_image = gr.Image(label="生成结果")
            status_text = gr.Textbox(label="状态信息", interactive=False)
            translated_text = gr.Textbox(label="英文翻译(用于生成)", lines=3, interactive=False)

    def translate_and_generate(prompt_text, unet_name, clip_name1, width, height, steps, denoise):
        """Translate the prompt to English via Baidu, then generate.

        Falls back to the raw prompt when translation fails, and merges the
        translation status into the generation status message.
        """
        try:
            english = baidu_translate(prompt_text or "")
            translate_info = "翻译成功"
        except Exception as e:
            english = prompt_text or ""
            translate_info = f"翻译失败，改用原文本: {str(e)}"
        img_path, status = interface.generate_image(english, unet_name, clip_name1, width, height, steps, denoise)
        status_out = f"{status}\n{translate_info}" if translate_info else status
        return img_path, status_out, english

    generate_btn.click(
        fn=translate_and_generate,
        inputs=[prompt_input, unet_dropdown, clip1_dropdown, width_slider, height_slider, steps_input, denoise_slider],
        outputs=[output_image, status_text, translated_text]
    )

    # Preview gallery of the ComfyUI output folder.
    from config.config import get_output_path
    output_dir = get_output_path()

    def get_output_images():
        """Return image paths from the output folder, newest first."""
        if not os.path.exists(output_dir):
            return []
        images = [
            os.path.join(output_dir, name)
            for name in os.listdir(output_dir)
            if name.lower().endswith((".png", ".jpg", ".jpeg"))
        ]
        images.sort(key=os.path.getmtime, reverse=True)
        return images

    with gr.Row():
        refresh_btn = gr.Button("刷新输出预览")
    with gr.Row():
        output_gallery = gr.Gallery(label="输出文件夹图片", columns=4, show_label=True, elem_id="output_gallery_t2i")

    refresh_btn.click(fn=get_output_images, outputs=[output_gallery])