# train_lora.py
import argparse
import os
from PIL import Image
from peft import get_peft_model, LoraConfig
from tqdm import tqdm
from pathlib import Path

import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

from diffusers import StableDiffusionPipeline, DDPMScheduler
from diffusers.models.attention_processor import LoRAAttnProcessor
from transformers import CLIPTokenizer


class ImageDataset(Dataset):
    """Dataset pairing each image in a directory with one fixed concept prompt.

    Each item is a dict with:
      - "pixel_values": (3, 512, 512) float tensor normalized to [-1, 1]
        (the input range Stable Diffusion's VAE expects)
      - "input_ids": token ids of the concept prompt, padded/truncated to 77
    """

    def __init__(self, image_dir, tokenizer, concept):
        self.tokenizer = tokenizer
        self.concept = concept
        self.transform = transforms.Compose([
            transforms.Resize((512, 512)),
            transforms.ToTensor(),
            # Map [0, 1] -> [-1, 1] per channel.
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        self.images = [
            os.path.join(image_dir, file)
            for file in os.listdir(image_dir)
            if file.lower().endswith((".jpg", ".png", ".jpeg"))
        ]
        # The prompt is identical for every sample, so tokenize it once here
        # instead of on every __getitem__ call. truncation=True guards against
        # prompts longer than CLIP's 77-token context window, which would
        # otherwise produce an oversized sequence and crash the text encoder.
        self.input_ids = self.tokenizer(
            self.concept,
            padding="max_length",
            max_length=77,
            truncation=True,
            return_tensors="pt",
        ).input_ids[0]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        # Force RGB: source files may be grayscale or RGBA.
        image = Image.open(self.images[idx]).convert("RGB")
        return {
            "pixel_values": self.transform(image),
            "input_ids": self.input_ids,
        }


def inject_lora_into_unet(unet, rank=4):
    """Replace every attention processor in ``unet`` with a LoRA processor.

    NOTE(review): currently unused in this script (train_lora injects LoRA via
    peft instead); kept as the pure-diffusers alternative injection path.

    Args:
        unet: a diffusers UNet2DConditionModel (or any module tree whose
            attention layers expose ``set_processor``).
        rank: LoRA rank passed to each processor.
    """
    for name, module in unet.named_modules():
        # Attention layers in diffusers UNets expose set_processor().
        if not hasattr(module, "set_processor"):
            continue
        # to_q exists on attention blocks; its in_features is the hidden size.
        if not hasattr(module, "to_q"):
            continue
        hidden_size = module.to_q.in_features
        # Cross-attention layers take keys/values from the text encoder, so
        # to_k's input width differs from hidden_size there; self-attention
        # layers use cross_attention_dim=None.
        cross_attention_dim = getattr(module, "to_k", module.to_q).in_features
        if cross_attention_dim == hidden_size:
            cross_attention_dim = None
        # Bug fix: the original constructed LoRAAttnProcessor() with all
        # defaults, silently ignoring both hidden_size and the rank argument.
        module.set_processor(
            LoRAAttnProcessor(
                hidden_size=hidden_size,
                cross_attention_dim=cross_attention_dim,
                rank=rank,
            )
        )


def train_lora(concept_name, image_dir, output_dir):
    """Fine-tune LoRA adapters on Stable Diffusion v1.5's UNet for one concept.

    Only the injected LoRA weights are trained; the VAE, text encoder, and
    base UNet weights stay frozen.

    Args:
        concept_name: text prompt used for every training image.
        image_dir: directory of .jpg/.jpeg/.png training images.
        output_dir: directory where the trained LoRA weights are written.
    """
    # Load the pretrained pipeline components.
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32
    )
    vae = pipe.vae
    unet = pipe.unet
    tokenizer = pipe.tokenizer
    text_encoder = pipe.text_encoder
    noise_scheduler = DDPMScheduler.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
    )

    # Inject LoRA adapters via peft; peft marks only the LoRA weights as
    # trainable (requires_grad=True), everything else stays frozen.
    lora_config = LoraConfig(
        r=4,
        lora_alpha=16,
        target_modules=["to_q", "to_k", "to_v", "to_out.0"],
        bias="none",
    )
    unet = get_peft_model(unet, lora_config)

    # Prepare data.
    dataset = ImageDataset(image_dir, tokenizer, concept_name)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=True)

    device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
    vae.to(device)
    unet.to(device)
    text_encoder.to(device)

    # Optimize only the trainable (LoRA) parameters.
    optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, unet.parameters()), lr=1e-4
    )

    # Training loop: standard DDPM noise-prediction objective.
    unet.train()
    for epoch in range(5):
        for batch in tqdm(dataloader, desc=f"Epoch {epoch + 1}"):
            pixel_values = batch["pixel_values"].to(device)
            input_ids = batch["input_ids"].to(device)

            # VAE and text encoder are frozen, so no gradients are needed here.
            with torch.no_grad():
                # Use the VAE's configured latent scaling factor instead of
                # the hard-coded 0.18215 magic number.
                latents = vae.encode(pixel_values).latent_dist.sample()
                latents = latents * vae.config.scaling_factor
                encoder_hidden_states = text_encoder(input_ids)[0]

            noise = torch.randn_like(latents)
            # Bug fix: sample one timestep per batch element instead of the
            # hard-coded shape (1,), so batch_size > 1 also works.
            timesteps = torch.randint(
                0,
                noise_scheduler.config.num_train_timesteps,
                (latents.shape[0],),
                device=device,
            ).long()
            noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

            noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
            loss = torch.nn.functional.mse_loss(noise_pred, noise)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    # Save the LoRA weights.
    # Bug fix: after get_peft_model the UNet is a PeftModel, which has no
    # save_attn_procs(); peft's save_pretrained writes only the adapter
    # weights (plus adapter_config.json) to output_dir.
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    unet.save_pretrained(output_dir)
    print(f"✅ 训练完成，LoRA 权重已保存至: {output_dir}")


def _parse_args():
    """Build the CLI and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--instance_data_dir", type=str, required=True)
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument("--concept_name", type=str, default="concept")
    return parser.parse_args()


if __name__ == "__main__":
    cli_args = _parse_args()
    train_lora(cli_args.concept_name, cli_args.instance_data_dir, cli_args.output_dir)
