import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from more_itertools import chunked
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from tqdm import tqdm

from model import UNet

# Preprocessing pipeline applied to every numpy sample in __getitem__.
# NOTE: Normalize(mean=[0.0], std=[1.0]) is an identity transform — it is
# kept so that real normalization statistics can be dropped in later.
transform = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.0], std=[1.0]),
    ]
)

class ImagePairDataset(Dataset):
    """Dataset of paired input/output images stored as ``.npy`` files.

    The two directories must contain identically named ``.npy`` files.
    All arrays are loaded eagerly and concatenated along axis 0, so one
    row of the concatenated array is one sample.
    """

    def __init__(self, input_dir, output_dir, group_size=8000):
        """
        Initialize the dataset.

        Args:
            input_dir (str): directory containing input ``.npy`` files.
            output_dir (str): directory containing output ``.npy`` files.
            group_size (int): samples per group used by
                :meth:`get_groups_images` (default 8000).

        Raises:
            ValueError: if the directories contain a different number of
                ``.npy`` files, or the sorted file names do not match.
        """
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.group_size = group_size

        # Collect and sort the .npy paths so inputs/outputs pair by name.
        self.input_files = sorted(
            os.path.join(input_dir, f)
            for f in os.listdir(input_dir)
            if f.endswith(".npy")
        )
        self.output_files = sorted(
            os.path.join(output_dir, f)
            for f in os.listdir(output_dir)
            if f.endswith(".npy")
        )

        # Validate pairing with real exceptions: `assert` is stripped
        # under `python -O`, so it must not guard input validation.
        if len(self.input_files) != len(self.output_files):
            raise ValueError("Input and output file counts do not match")
        for in_file, out_file in zip(self.input_files, self.output_files):
            if os.path.basename(in_file) != os.path.basename(out_file):
                raise ValueError("Input and output file names do not match")

        # Load everything up front and concatenate along the sample axis;
        # keeps __getitem__ cheap at the cost of holding all data in RAM.
        input_arrays = [np.load(f) for f in self.input_files]
        output_arrays = [np.load(f) for f in self.output_files]
        self.input_data = np.concatenate(input_arrays, axis=0)
        self.output_data = np.concatenate(output_arrays, axis=0)

    def __len__(self):
        """Return the total number of samples across all loaded files."""
        return self.input_data.shape[0]

    def __getitem__(self, idx):
        """Return one (input, output) tensor pair for sample ``idx``.

        Both arrays pass through the module-level ``transform``
        (ToTensor + identity Normalize) and are then transposed on
        dims 0 and 1.

        NOTE(review): for a square (108, 108) sample, ToTensor yields a
        (1, 108, 108) tensor and ``transpose(0, 1)`` turns it into
        (108, 1, 108) — confirm this layout is what the model expects.
        """
        sample_in = transform(self.input_data[idx])
        sample_out = transform(self.output_data[idx])
        return sample_in.transpose(0, 1), sample_out.transpose(0, 1)

    def get_groups_images(self):
        """Yield the first (input, output) image of each consecutive
        group of ``group_size`` samples.

        Yields:
            tuple(np.ndarray, np.ndarray): one representative input and
            output image per group.
        """
        for input_group, output_group in zip(
            chunked(self.input_data, self.group_size),
            chunked(self.output_data, self.group_size),
        ):
            yield input_group[0], output_group[0]


# Load the dataset only when run as a script: reading every .npy file is
# expensive I/O that must not happen as a side effect of a mere import.
if __name__ == "__main__":
    dataset = ImagePairDataset(
        input_dir="dataset/input", output_dir="dataset/output"
    )


# One training epoch
def train(model, dataloader, criterion, optimizer, device):
    """Run a single training epoch and return the mean per-batch loss."""
    model.train()
    total_loss = 0.0
    for batch_in, batch_target in tqdm(dataloader):
        batch_in = batch_in.to(device)
        batch_target = batch_target.to(device)

        # Standard step: reset grads, forward, backward, update.
        optimizer.zero_grad()
        prediction = model(batch_in)
        batch_loss = criterion(prediction, batch_target)
        batch_loss.backward()
        optimizer.step()

        total_loss += batch_loss.item()

    return total_loss / len(dataloader)


# One validation pass
def validate(model, dataloader, criterion, device):
    """Evaluate the model and return the mean per-batch loss (no grads)."""
    model.eval()
    total_loss = 0.0

    # Gradients are not needed for evaluation; skip autograd bookkeeping.
    with torch.no_grad():
        for batch_in, batch_target in tqdm(dataloader):
            batch_in = batch_in.to(device)
            batch_target = batch_target.to(device)

            prediction = model(batch_in)
            total_loss += criterion(prediction, batch_target).item()

    return total_loss / len(dataloader)


if __name__ == "__main__":
    # Hyperparameters defined exactly once; wandb.config mirrors this
    # dict so the logged run and the actual training can never diverge
    # (previously batch_size/lr/epochs were duplicated as literals).
    config = {
        "architecture": "UNet",
        "dataset": "QuickDraw",
        "epochs": 5,
        "batch_size": 8,
        "learning_rate": 0.001,
        "optimizer": "Adam",
    }
    wandb.init(project="quickdraw-unet", config=config)

    # Train/validation split: 95% / 5%.
    train_size = int(0.95 * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = torch.utils.data.random_split(
        dataset, [train_size, val_size]
    )
    train_loader = DataLoader(
        train_dataset, batch_size=config["batch_size"], shuffle=True
    )
    val_loader = DataLoader(
        val_dataset, batch_size=config["batch_size"], shuffle=False
    )

    # Model, loss, and optimizer.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)
    criterion = nn.L1Loss()
    optimizer = optim.Adam(model.parameters(), lr=config["learning_rate"])

    # Let wandb track gradients and parameters.
    wandb.watch(model)

    # Training loop with best-checkpoint tracking.
    num_epochs = config["epochs"]
    best_val_loss = float("inf")
    for epoch in range(num_epochs):
        train_loss = train(model, train_loader, criterion, optimizer, device)
        val_loss = validate(model, val_loader, criterion, device)

        # Log metrics to wandb.
        wandb.log({"epoch": epoch + 1, "train_loss": train_loss, "val_loss": val_loss})

        print(
            f"Epoch [{epoch + 1}/{num_epochs}], "
            f"Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}"
        )

        # Keep the checkpoint with the lowest validation loss so far.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(
                model.state_dict(), os.path.join(wandb.run.dir, "best_model.pth")
            )
            wandb.save("best_model.pth")

    # Save the final (last-epoch) model regardless of its val loss.
    torch.save(model.state_dict(), os.path.join(wandb.run.dir, "final_model.pth"))
    wandb.save("final_model.pth")

    # Close wandb run.
    wandb.finish()
