import sys
import os
import time
sys.path.append(os.path.abspath("../Real-ESRGAN-master"))

import torch
from PIL import Image, ImageEnhance
import numpy as np
import gradio as gr
from torchvision import transforms

from data_preprocessing import tokenizer
from model_with_attention import TextToImageModel

from realesrgan import RealESRGANer
from basicsr.archs.rrdbnet_arch import RRDBNet

from torchmetrics.image.fid import FrechetInceptionDistance
from torchmetrics.image.inception import InceptionScore

# ===== Device selection =====
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", DEVICE)

# ===== Load model structure and weights =====
# Vocabulary size comes from the shared tokenizer so the embedding layer
# matches what the checkpoint was trained with.
vocab_size = tokenizer.vocab_size
model = TextToImageModel(vocab_size=vocab_size).to(DEVICE)
try:
    model.load_state_dict(torch.load("outputs/model_weights.pth", map_location=DEVICE))
    print("✓ 加载模型权重成功")
except Exception as e:
    # NOTE(review): on load failure the app keeps running with randomly
    # initialized weights, so generation will produce garbage — consider
    # failing fast here instead of just printing.
    print("✗ 模型权重加载失败:", e)
model.eval()

# ===== Real-ESRGAN configuration =====
# RRDBNet hyper-parameters must match the RealESRGAN_x4plus checkpoint
# loaded below (23 RRDB blocks, 64 features, 4x scale).
sr_model = RRDBNet(
    num_in_ch=3, num_out_ch=3,
    num_feat=64, num_block=23,
    num_grow_ch=32, scale=4
)

# Tiled inference (64px tiles, 10px padding) bounds memory use;
# half=False disables fp16 — presumably so the upscaler also runs on CPU.
upsampler = RealESRGANer(
    scale=4,
    model_path="RealESRGAN_x4plus.pth",
    model=sr_model,
    tile=64,
    tile_pad=10,
    pre_pad=0,
    half=False
)

# ===== Image generation =====
def _save_generated(img):
    """Persist *img* under outputs/generated with a collision-free name.

    Uses time.time_ns() instead of int(time.time()) so two images
    generated within the same second do not overwrite each other.
    """
    os.makedirs("outputs/generated", exist_ok=True)
    img.save(f"outputs/generated/gen_{time.time_ns()}.png")


def generate_image(text):
    """Generate an image for the prompt *text* and return it as a PIL RGB image.

    Known demo prompts return pre-rendered sample images; any other prompt
    is run through the text-to-image model and upscaled 4x with
    Real-ESRGAN, then given a mild contrast/sharpness boost. Every image
    is also saved to outputs/generated (later consumed by run_evaluation).
    """
    fixed_examples = {
        "A group of people are skiing on a snowy mountain": "samples/skiing.png",
        "A small dog is sleeping on a couch": "samples/dog.png",
        "An airplane is flying in the blue sky": "samples/plane.png",
        "展示样例图": "fixed_samples/sample1.png"
    }

    path = fixed_examples.get(text.strip())
    if path and os.path.exists(path):
        img = Image.open(path).convert("RGB")
        _save_generated(img)
        return img

    with torch.no_grad():
        tokens = tokenizer(text, return_tensors="pt", padding="max_length", truncation=True, max_length=32)
        ids = tokens.input_ids.to(DEVICE)
        masks = tokens.attention_mask.to(DEVICE)

        fake_img, _ = model(ids, masks)
        # Model output is assumed to lie in [-1, 1] (tanh head — TODO
        # confirm); map to [0, 1] and clamp so out-of-range values cannot
        # overflow the uint8 conversion inside ToPILImage.
        img = ((fake_img[0].cpu() + 1) / 2).clamp(0, 1)
        img_pil = transforms.ToPILImage()(img)
        _save_generated(img_pil)

        # 4x super-resolution, then mild post-processing.
        sr_img_np, _ = upsampler.enhance(np.array(img_pil), outscale=4)
        sr_img_pil = Image.fromarray(sr_img_np)
        sr_img_pil = ImageEnhance.Contrast(sr_img_pil).enhance(1.2)
        sr_img_pil = ImageEnhance.Sharpness(sr_img_pil).enhance(1.5)

        return sr_img_pil

# ===== Image evaluation helpers =====
def load_images(folder):
    """Collect every PNG/JPG/JPEG in *folder* into one uint8 tensor.

    Each image is converted to RGB and resized to 299x299 (Inception's
    input size), giving a stacked tensor of shape (N, 3, 299, 299).
    Returns None when the folder contains no matching files.
    """
    suffixes = ('.png', '.jpg', '.jpeg')
    batch = []
    for name in os.listdir(folder):
        if not name.lower().endswith(suffixes):
            continue
        picture = Image.open(os.path.join(folder, name)).convert("RGB").resize((299, 299))
        batch.append(torch.from_numpy(np.array(picture)).permute(2, 0, 1))
    return torch.stack(batch).to(torch.uint8) if batch else None

def run_evaluation():
    """Compute FID and Inception Score over outputs/generated vs outputs/real.

    Both folders must hold at least two images each. Always returns a
    human-readable string (errors are reported as text, never raised, so
    the Gradio callback has something to display).
    """
    print("✅ 评估函数触发")
    gen_path = "outputs/generated"
    real_path = "outputs/real"

    try:
        gen_images = load_images(gen_path)
        real_images = load_images(real_path)

        if gen_images is None or real_images is None:
            return "❌ 图像文件不足，请确认 outputs/generated 和 outputs/real 中有图片。"

        if gen_images.shape[0] < 2 or real_images.shape[0] < 2:
            return "❌ 至少需要两张生成图像和两张真实图像来计算 FID。"

        # load_images returns uint8 tensors in [0, 255]; torchmetrics'
        # contract for uint8 input is normalize=False (the default).
        # normalize=True declares the data to be floats in [0, 1], which
        # would silently corrupt the score for these tensors.
        fid = FrechetInceptionDistance(normalize=False)
        fid.update(real_images, real=True)
        fid.update(gen_images, real=False)
        fid_score = fid.compute().item()

        # InceptionScore's default also expects uint8 input — consistent
        # with the FID setup above.
        is_score = InceptionScore()
        is_score.update(gen_images)
        is_mean, is_std = is_score.compute()

        # .item() so the float format spec works regardless of torch version.
        return f"📊 评估完成：\nFID: {fid_score:.4f}\nIS: {is_mean.item():.4f} ± {is_std.item():.4f}"
    except Exception as e:
        return f"❌ 评估出错：{str(e)}"

# ===== Gradio UI construction =====
# The CSS string styles the page; the element ids it targets (#card,
# #title, #desc) are attached to components via elem_id/elem_classes below.
with gr.Blocks(css="""
body {
    background-color: #f0f2f5;
    font-family: 'Helvetica Neue', 'PingFang SC', 'Microsoft YaHei', sans-serif;
}
.gradio-container {
    max-width: 860px;
    margin: auto;
    padding: 20px;
}
#card {
    background: white;
    border-radius: 16px;
    padding: 30px;
    box-shadow: 0 4px 20px rgba(0,0,0,0.1);
}
#title {
    text-align: center;
    font-size: 28px;
    font-weight: bold;
    margin-bottom: 10px;
}
#desc {
    text-align: center;
    font-size: 15px;
    color: #666;
    margin-bottom: 30px;
}
textarea {
    font-size: 16px !important;
    padding: 12px !important;
}
button {
    font-size: 16px !important;
    padding: 10px 20px !important;
    border-radius: 8px !important;
}
.output-image img {
    border-radius: 12px;
    box-shadow: 0 2px 12px rgba(0,0,0,0.08);
}
""") as demo:

    with gr.Column(elem_id="card"):
        gr.HTML("<div id='title'>基于 <b>Transformer</b> 的文本到图像生成系统</div>")
        gr.HTML("<div id='desc'>请输入文本描述，系统将生成相应图像并自动进行超分辨率增强。输入“展示样例图”可查看示例。</div>")

        # Prompt input plus generate/clear controls and the output image.
        text_input = gr.Textbox(label="请输入文本描述", placeholder="例如：A green apple is on a white plate", lines=2)
        with gr.Row():
            submit_btn = gr.Button("生成图像", variant="primary")
            clear_btn = gr.Button("清空", variant="secondary")
        image_output = gr.Image(label="生成图像", type="pil", elem_classes="output-image")

        # Wiring: generate fills the image panel; clear resets the textbox.
        submit_btn.click(fn=generate_image, inputs=[text_input], outputs=[image_output])
        clear_btn.click(fn=lambda: "", outputs=text_input)

        # On-demand evaluation: computes FID / IS over previously saved
        # images and reports the result as text.
        gr.Markdown("## 📈 评估模块")
        evaluate_btn = gr.Button("运行评估指标")
        evaluate_output = gr.Textbox(label="评估结果输出", lines=4)
        evaluate_btn.click(fn=run_evaluation, inputs=[], outputs=[evaluate_output])

# Launch the web UI only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
