import argparse
import os
import random
import time

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import yaml
from einops import repeat
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms
from tqdm import tqdm

# Adapter that lets a plain callable participate in nn.Sequential pipelines.
class Lambda(nn.Module):
    """Wrap an arbitrary function as an ``nn.Module``."""

    def __init__(self, func):
        super().__init__()
        self.func = func  # the wrapped callable (no learnable parameters)

    def forward(self, x):
        """Apply the wrapped callable to ``x`` and return its result."""
        return self.func(x)

# Command-line configuration for the training run.
# Each spec is (flag, type, default, help text).
_ARG_SPECS = [
    ('--batch-size', int, 32, '训练批次大小'),
    ('--epochs', int, 200, '训练轮数'),
    ('--lr', float, 5e-5, '初始学习率'),
    ('--patch-size', int, 4, '图像分块大小（适用于32x32图像）'),
    ('--dim', int, 256, 'Transformer隐藏层维度'),
    ('--depth', int, 12, 'Transformer层数'),
    ('--heads', int, 8, '多头注意力头数'),
    ('--mlp-dim', int, 512, 'MLP层维度'),
    ('--dropout', float, 0.1, 'dropout率'),
    ('--seed', int, 42, '随机种子'),
]

parser = argparse.ArgumentParser(description='ViT for CIFAR10 Classification')
for _flag, _type, _default, _help in _ARG_SPECS:
    parser.add_argument(_flag, type=_type, default=_default, help=_help)
args = parser.parse_args()

# Run on the GPU when one is available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Preprocessing pipelines.
# NOTE(review): normalization uses mean/std 0.5 per channel, not the CIFAR-10
# per-channel statistics; kept as-is to preserve behavior.
_normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

# Training: geometric + color augmentation before tensor conversion.
train_transform = torchvision.transforms.Compose([
    transforms.RandomCrop(size=(32, 32), padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(15),  # random rotation up to ±15 degrees
    transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),  # color jitter
    transforms.ToTensor(),
    _normalize,
])

# Validation: tensor conversion and normalization only (no augmentation).
val_transform = torchvision.transforms.Compose([
    transforms.ToTensor(),
    _normalize,
])

# Load the CIFAR-10 datasets (downloaded to ./data on first run).
train_dataset = datasets.CIFAR10(
    root='./data', train=True, download=True, transform=train_transform
)
val_dataset = datasets.CIFAR10(
    root='./data', train=False, download=True, transform=val_transform
)

# Randomly select 20% of the test set (seeded for reproducibility).
# Fix: use a dedicated Random instance instead of seeding the global RNG —
# random.Random(seed).sample(...) yields exactly the same indices as the
# original seed-then-sample, without clobbering global `random` state for
# any other consumer in the process.
_subset_rng = random.Random(args.seed)
test_indices = _subset_rng.sample(range(len(val_dataset)), int(0.2 * len(val_dataset)))
test_subset = Subset(val_dataset, test_indices)

# Data loaders (num_workers=2 to avoid compatibility problems on Windows).
train_loader = DataLoader(
    train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=2, pin_memory=True
)
# Fix: evaluation does not benefit from shuffling — the aggregated metrics
# are order-independent — so keep the test loader deterministic.
test_loader = DataLoader(
    test_subset, batch_size=args.batch_size, shuffle=False, num_workers=2, pin_memory=True
)

# Multi-head self-attention block with pre-LayerNorm.
class Attention(nn.Module):
    """Pre-norm multi-head self-attention.

    Args:
        dim: input/output embedding dimension.
        heads: number of attention heads.
        dim_head: per-head dimension; the internal width is heads * dim_head.
        dropout: dropout rate for attention weights and the output projection.
    """

    def __init__(self, dim, heads=8, dim_head=32, dropout=0.):
        super().__init__()
        self.dim_head = dim_head
        self.dim = dim
        inner_dim = dim_head * heads
        # Bug fix: remember the merged-head width. The original forward()
        # reshaped the head outputs to `dim`, which is wrong whenever
        # heads * dim_head != dim (e.g. with the class defaults).
        self.inner_dim = inner_dim
        self.heads = heads
        self.scale = dim_head ** -0.5  # 1/sqrt(d_k) attention scaling

        self.norm = nn.LayerNorm(dim)
        self.qkv = nn.Linear(dim, inner_dim * 3, bias=False)
        self.attend = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

        # Projects the concatenated heads (inner_dim) back to the model dim.
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        x = self.norm(x)
        qkv = self.qkv(x).chunk(3, dim=-1)
        # (b, n, inner_dim) -> (b, heads, n, dim_head)
        q, k, v = map(lambda t: t.reshape(x.shape[0], -1, self.heads, self.dim_head).transpose(1, 2), qkv)

        dots = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attend(dots)
        attn = self.dropout(attn)

        # Merge heads: (b, heads, n, dim_head) -> (b, n, inner_dim).
        # Fixed from `self.dim`, which silently mis-shaped (or crashed) when
        # inner_dim != dim.
        out = (attn @ v).transpose(1, 2).reshape(x.shape[0], -1, self.inner_dim)
        return self.to_out(out)

# Transformer encoder: a stack of pre-norm (attention, MLP) blocks,
# each wrapped in a residual connection.
class Transformer(nn.Module):
    """``depth`` encoder blocks of self-attention followed by a feed-forward MLP."""

    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.):
        super().__init__()

        def make_ffn():
            # Pre-norm feed-forward: LN -> Linear -> ReLU -> Linear (+ dropout).
            return nn.Sequential(
                nn.LayerNorm(dim),
                nn.Linear(dim, mlp_dim),
                nn.ReLU(),
                nn.Dropout(dropout),
                nn.Linear(mlp_dim, dim),
                nn.Dropout(dropout),
            )

        blocks = []
        for _ in range(depth):
            blocks.append(nn.ModuleList([
                Attention(dim, heads, dim_head, dropout),
                make_ffn(),
            ]))
        # Same nested ModuleList layout as before, so state_dict keys match.
        self.layers = nn.ModuleList(blocks)

    def forward(self, x):
        for attention, feed_forward in self.layers:
            x = x + attention(x)     # residual around attention
            x = x + feed_forward(x)  # residual around the MLP
        return x

# Vision Transformer classifier for fixed 32x32 inputs (CIFAR-10 sized).
class ViT(nn.Module):
    """Patchify -> linear embedding -> Transformer encoder -> CLS-token head."""

    def __init__(self, num_classes=10, patch_size=4, dim=256, depth=12, heads=8, mlp_dim=512, dropout=0.1):
        super().__init__()
        image_size = 32  # fixed input resolution
        self.patch_size = patch_size
        patch_dim = 3 * patch_size ** 2                # flattened RGB patch length
        num_patches = (image_size // patch_size) ** 2  # patches per image

        # Unfold yields (b, patch_dim, n); the Lambda swaps to (b, n, patch_dim)
        # before the linear projection into the model dimension.
        self.patch_embedding = nn.Sequential(
            nn.Unfold(kernel_size=patch_size, stride=patch_size),
            Lambda(lambda x: x.permute(0, 2, 1)),
            nn.Linear(patch_dim, dim)
        )

        self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
        self.dropout = nn.Dropout(dropout)

        self.transformer = Transformer(dim, depth, heads, dim_head=dim // heads, mlp_dim=mlp_dim, dropout=dropout)
        self.norm = nn.LayerNorm(dim)
        self.classifier = nn.Linear(dim, num_classes)

    def forward(self, x):
        batch = x.shape[0]
        tokens = self.patch_embedding(x)

        # Prepend one learned CLS token per sample (expand is a broadcast
        # view; torch.cat copies it, matching the original einops repeat).
        cls = self.cls_token.expand(batch, -1, -1)
        tokens = torch.cat([cls, tokens], dim=1)
        tokens = tokens + self.pos_embedding[:, :tokens.shape[1]]
        tokens = self.dropout(tokens)

        tokens = self.transformer(tokens)
        # Classify from the normalized CLS token only.
        return self.classifier(self.norm(tokens[:, 0]))

# Build the model from the CLI configuration and move it to the target device.
model = ViT(patch_size=args.patch_size, dim=args.dim, depth=args.depth,
            heads=args.heads, mlp_dim=args.mlp_dim, dropout=args.dropout).to(device)

# Cross-entropy with label smoothing (0.1) as the classification loss.
criterion = nn.CrossEntropyLoss(label_smoothing=0.1)
# AdamW with weight decay; cosine annealing decays the LR toward eta_min
# over the full args.epochs horizon.
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=0.01)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=1e-6)

best_acc = 0.0
start_time = time.time()
history = {}           # per-epoch metrics, persisted to YAML after each epoch
patience = 100         # early-stopping patience (epochs without improvement)
no_improve_epochs = 0  # epochs since the best test accuracy last improved

# Resume from the newest checkpoint (and the YAML history) when available.
checkpoint_dir = 'checkpoints'
yaml_file = 'training_history.yaml'
start_epoch = 1
if os.path.exists(checkpoint_dir) and os.listdir(checkpoint_dir):
    # Pick the checkpoint with the highest epoch number.
    checkpoint_files = [f for f in os.listdir(checkpoint_dir) if f.endswith('.pth')]
    checkpoint_files.sort(key=lambda x: int(x.split('_')[1].split('.')[0]))
    # Fix: guard against a directory that exists but holds no .pth files —
    # the original indexed checkpoint_files[-1] unconditionally (IndexError).
    if checkpoint_files:
        last_checkpoint = os.path.join(checkpoint_dir, checkpoint_files[-1])

        # Fix: map_location makes a GPU-saved checkpoint loadable on a
        # CPU-only host (and vice versa).
        checkpoint = torch.load(last_checkpoint, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch'] + 1

        # Restore training history. Fix: an empty YAML file parses to None,
        # which would break history[epoch] assignments later — fall back to {}.
        if os.path.exists(yaml_file):
            with open(yaml_file, 'r') as f:
                history = yaml.safe_load(f) or {}
            if history:
                # Rebuild the early-stopping baseline so a resumed run does
                # not treat its first epoch as a fresh best.
                best_acc = max(entry['test_acc'] for entry in history.values())

# Run one training epoch with a live progress bar.
def train(epoch):
    """Train for one epoch; return (mean batch loss, accuracy in percent)."""
    model.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0

    progress = tqdm(train_loader, desc=f'Epoch {epoch}/{args.epochs}', unit='batch')
    for inputs, targets in progress:
        inputs = inputs.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        n_seen += targets.size(0)
        n_correct += (outputs.argmax(dim=1) == targets).sum().item()

        # Show running statistics and the current learning rate.
        progress.set_postfix({
            'Loss': f'{loss.item():.4f}',
            'Train Acc': f'{100 * n_correct / n_seen:.2f}%',
            'LR': f'{optimizer.param_groups[0]["lr"]:.6f}'
        })

    return running_loss / len(train_loader), 100 * n_correct / n_seen

# Evaluate on the held-out test subset (no gradients).
def test(epoch):
    """Evaluate the model; return (mean batch loss, accuracy in percent)."""
    model.eval()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0

    with torch.no_grad():
        progress = tqdm(test_loader, desc=f'Test Epoch {epoch}', unit='batch')
        for inputs, targets in progress:
            inputs = inputs.to(device)
            targets = targets.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, targets)

            running_loss += loss.item()
            n_seen += targets.size(0)
            n_correct += (outputs.argmax(dim=1) == targets).sum().item()

            progress.set_postfix({
                'Test Loss': f'{loss.item():.4f}',
                'Test Acc': f'{100 * n_correct / n_seen:.2f}%'
            })

    return running_loss / len(test_loader), 100 * n_correct / n_seen

# Training entry point.
if __name__ == '__main__':
    # Bug fix: torch.save below assumed `checkpoints/` already existed and
    # crashed with FileNotFoundError on a fresh run; create it up front.
    os.makedirs(checkpoint_dir, exist_ok=True)

    for epoch in range(start_epoch, args.epochs + 1):
        epoch_start = time.time()
        train_loss, train_acc = train(epoch)
        test_loss, test_acc = test(epoch)
        epoch_time = time.time() - epoch_start

        # Save a per-epoch checkpoint. Scheduler state is included so a
        # resumed run can also restore the LR schedule (extra keys are
        # harmless to older loading code).
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'train_acc': train_acc,
            'test_acc': test_acc
        }, f'{checkpoint_dir}/epoch_{epoch}.pth')

        # Record this epoch's metrics and persist the full history to YAML
        # (uses the yaml_file variable instead of repeating the literal path).
        history[epoch] = {
            'train_loss': train_loss,
            'train_acc': train_acc,
            'test_loss': test_loss,
            'test_acc': test_acc,
            'duration': f'{epoch_time:.2f}s'
        }
        with open(yaml_file, 'w') as f:
            yaml.dump(history, f)

        scheduler.step()  # advance the cosine LR schedule

        print(f"Epoch {epoch} completed | Time: {epoch_time:.2f}s | Test Acc: {test_acc:.2f}%")

        # Early stopping: quit after `patience` epochs without a new best.
        if test_acc > best_acc:
            best_acc = test_acc
            no_improve_epochs = 0
        else:
            no_improve_epochs += 1
            if no_improve_epochs >= patience:
                print(f"Early stopping at epoch {epoch} due to no improvement in {patience} epochs.")
                break

    print(f"\nTraining finished! Best Test Accuracy: {best_acc:.2f}%")
    print(f"Total training time: {time.time() - start_time:.2f} seconds")