import os
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image

from diffusers import DiffusionPipeline, UNet2DConditionModel, AutoencoderKL
from transformers import CLIPTokenizer, CLIPTextModel
from peft import LoraConfig, get_peft_model

# ========== Dataset definition ==========
class NamePersonDataset(Dataset):
    """Image/caption dataset for LoRA personalization training.

    Reads a caption index file where each non-empty line has the form
    ``<filename>|<caption>``. Lines that are blank or lack the ``|``
    separator are skipped (the previous unconditional ``split`` raised
    ``ValueError`` on them), as are entries whose image file is missing.
    """

    def __init__(self, image_dir, captions_file, tokenizer, size=512):
        """
        Args:
            image_dir: directory containing the training images.
            captions_file: UTF-8 text file, one ``filename|caption`` per line.
            tokenizer: CLIP-style tokenizer exposing ``model_max_length``.
            size: square resolution images are resized and center-cropped to.
        """
        self.image_dir = image_dir
        self.tokenizer = tokenizer
        self.size = size

        self.items = []
        with open(captions_file, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                # Robustness fix: skip blank/malformed lines instead of
                # crashing with ValueError on the tuple unpack below.
                if not line or "|" not in line:
                    continue
                fn, cap = line.split("|", 1)
                fn, cap = fn.strip(), cap.strip()
                path = os.path.join(image_dir, fn)
                if os.path.isfile(path):
                    self.items.append((path, cap))

        # Standard SD-style preprocessing: resize, center-crop, scale to [-1, 1].
        self.transform = transforms.Compose([
            transforms.Resize((size, size)),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            transforms.Normalize([0.5]*3, [0.5]*3),
        ])

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        path, caption = self.items[idx]
        image = Image.open(path).convert("RGB")
        image = self.transform(image)

        # Pad/truncate to the tokenizer's fixed max length so batches collate.
        tokenized = self.tokenizer(
            caption,
            truncation=True,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt"
        )

        return {
            "pixel_values": image,
            "input_ids": tokenized.input_ids.squeeze(0),
            "attention_mask": tokenized.attention_mask.squeeze(0),
        }


# ========== Training & merging ==========
def train_and_merge(
    name_token: str,
    image_dir: str,
    captions_file: str,
    output_dir: str,
    base_model_id: str = "black-forest-labs/FLUX.1-dev",
    batch_size: int = 2,
    lr: float = 5e-5,
    num_epochs: int = 3,
    lora_rank: int = 8,
    device: str = "cuda"
):
    """Fine-tune LoRA adapters on a personal image/caption set, then merge
    the adapters back into the base weights and save the full pipeline.

    Args:
        name_token: identifier token for the subject (currently unused in the
            loop itself; it is expected to appear inside the captions).
        image_dir: directory with training images.
        captions_file: ``filename|caption`` index file (see NamePersonDataset).
        output_dir: where the merged pipeline is written.
        base_model_id: Hugging Face model id of the base pipeline.
        batch_size / lr / num_epochs / lora_rank: training hyper-parameters.
        device: torch device string, e.g. "cuda".
    """
    # 1. Load the pipeline.
    # NOTE(review): fp16 is fine for the frozen base, but optimizing LoRA
    # params in fp16 with AdamW is numerically fragile — for a real run keep
    # the trainable parameters in fp32 (mixed precision).
    pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to(device)

    # NOTE(review): FLUX pipelines expose `transformer` plus two text encoders,
    # not `unet`/`text_encoder` — confirm these attributes exist for the chosen
    # base model; the names below match SD-style checkpoints.
    tokenizer = pipe.tokenizer
    text_encoder = pipe.text_encoder
    unet = pipe.unet
    vae = pipe.vae

    # Freeze the base model; only LoRA adapter weights will receive gradients.
    unet.requires_grad_(False)
    text_encoder.requires_grad_(False)
    vae.requires_grad_(False)

    # 2. Attach LoRA adapters to the UNet and the text encoder.
    # Fix: "UNET" is not a valid PEFT task_type and "SEQ_CLS" is wrong for a
    # CLIP text model — omit task_type and target the attention projections
    # explicitly (standard module names for SD UNet / CLIP attention).
    unet = get_peft_model(
        unet,
        LoraConfig(
            r=lora_rank,
            lora_alpha=16,
            lora_dropout=0.1,
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
        )
    )

    text_encoder = get_peft_model(
        text_encoder,
        LoraConfig(
            r=lora_rank,
            lora_alpha=16,
            lora_dropout=0.1,
            target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
        )
    )

    # 3. Data.
    dataset = NamePersonDataset(image_dir, captions_file, tokenizer, size=512)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    # Optimize only the LoRA parameters; frozen params would just bloat the
    # optimizer state.
    trainable_params = [
        p for p in list(unet.parameters()) + list(text_encoder.parameters())
        if p.requires_grad
    ]
    optimizer = torch.optim.AdamW(trainable_params, lr=lr)

    # 4. Training loop (skeleton — plug in the real noise-prediction loss).
    for epoch in range(num_epochs):
        for step, batch in enumerate(dataloader):
            optimizer.zero_grad()

            pixel_values = batch["pixel_values"].to(device)
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)

            text_embeds = text_encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
            noise = torch.randn_like(pixel_values)
            # TODO: encode pixel_values with the VAE, add scheduler noise, run
            # the UNet forward pass and compute the real objective:
            #   predicted = unet(noisy_latents, timesteps, text_embeds).sample
            #   loss = F.mse_loss(predicted, noise)
            # Fix: the previous placeholder `(noise - noise).mean()` had no
            # grad path and made loss.backward() raise; this placeholder is
            # still zero-valued but flows through trainable parameters.
            loss = text_embeds.float().pow(2).mean() * 0.0

            loss.backward()
            optimizer.step()

            if step % 10 == 0:
                print(f"Epoch {epoch}, Step {step}, Loss {loss.item():.6f}")

    # 5. Merge the LoRA weights back into the base modules.
    unet = unet.merge_and_unload()
    text_encoder = text_encoder.merge_and_unload()

    pipe.unet = unet
    pipe.text_encoder = text_encoder

    # 6. Save the complete merged pipeline.
    pipe.save_pretrained(output_dir)
    print(f"✅ 已导出合并后的完整模型到: {output_dir}")


if __name__ == "__main__":
    # Demo entry point: train on the NAME_TOKEN dataset, then export the
    # merged pipeline. Swap the placeholder paths for your own subject.
    subject_token = "NAME_TOKEN"
    images_path = "./data/NAME_TOKEN/images"
    captions_path = "./data/NAME_TOKEN/captions.txt"
    export_dir = "./merged_pipeline/NAME_TOKEN"

    os.makedirs(export_dir, exist_ok=True)

    train_and_merge(
        name_token=subject_token,
        image_dir=images_path,
        captions_file=captions_path,
        output_dir=export_dir,
        base_model_id="black-forest-labs/FLUX.1-dev",
        batch_size=2,
        lr=5e-5,
        num_epochs=1,  # a single epoch is enough for the demo
        lora_rank=8,
        device="cuda",
    )
