import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import _cfg
import math
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim as optim
import matplotlib.pyplot as plt

# MLP module
class MLP(nn.Module):
    """Convolutional feed-forward block.

    Expands channels by ``mlp_ratio`` with a 1x1 conv, adds a GELU-gated
    depthwise 3x3 "positional" residual, then projects back to ``dim``.
    Operates on (N, C, H, W) feature maps.
    """

    def __init__(self, dim, mlp_ratio=4, dropout_rate=0.3):
        super().__init__()
        hidden = int(dim * mlp_ratio)
        self.norm = LayerNorm(dim, eps=1e-6, data_format="channels_first")
        self.fc1 = nn.Conv2d(dim, hidden, kernel_size=1)
        # Depthwise conv: one 3x3 filter per channel (groups == channels).
        self.pos = nn.Conv2d(hidden, hidden, kernel_size=3, padding=1, groups=hidden)
        self.fc2 = nn.Conv2d(hidden, dim, kernel_size=1)
        self.act = nn.GELU()
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        out = self.dropout(self.act(self.fc1(self.norm(x))))
        # Residual depthwise branch adds local positional information.
        out = out + self.act(self.pos(out))
        return self.dropout(self.fc2(out))

# ConvMod module
class ConvMod(nn.Module):
    """Convolutional modulation block.

    An attention-like gate: a large-kernel depthwise branch ``a`` modulates
    (element-wise multiplies) a pointwise "value" branch ``v``, followed by a
    1x1 projection. Operates on (N, C, H, W) feature maps.
    """

    def __init__(self, dim, dropout_rate=0.3):
        super().__init__()
        self.norm = LayerNorm(dim, eps=1e-6, data_format="channels_first")
        # Gating branch: pointwise mix -> GELU -> dropout -> 11x11 depthwise.
        self.a = nn.Sequential(
            nn.Conv2d(dim, dim, kernel_size=1),
            nn.GELU(),
            nn.Dropout(dropout_rate),
            nn.Conv2d(dim, dim, kernel_size=11, padding=5, groups=dim),
        )
        self.v = nn.Conv2d(dim, dim, kernel_size=1)
        self.proj = nn.Conv2d(dim, dim, kernel_size=1)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        normed = self.norm(x)
        modulated = self.a(normed) * self.v(normed)
        return self.dropout(self.proj(modulated))

# Block module
class Block(nn.Module):
    """Conv2Former residual block: ConvMod "attention" then MLP.

    Each sub-block output is scaled by a learnable per-channel layer-scale
    (initialized to 1e-6) and passed through stochastic depth (DropPath)
    before the residual addition.
    """

    def __init__(self, dim, mlp_ratio=4., drop_path=0., dropout_rate=0.3):
        super().__init__()
        init_scale = 1e-6
        self.attn = ConvMod(dim, dropout_rate)
        self.mlp = MLP(dim, mlp_ratio, dropout_rate)
        self.layer_scale_1 = nn.Parameter(init_scale * torch.ones(dim), requires_grad=True)
        self.layer_scale_2 = nn.Parameter(init_scale * torch.ones(dim), requires_grad=True)
        # Identity when drop_path is disabled so eval/training cost is zero.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        # Broadcast the (C,) scales over the spatial dims as (C, 1, 1).
        scale_attn = self.layer_scale_1[:, None, None]
        x = x + self.drop_path(scale_attn * self.attn(x))
        scale_mlp = self.layer_scale_2[:, None, None]
        x = x + self.drop_path(scale_mlp * self.mlp(x))
        return x

# LayerNorm module
class LayerNorm(nn.Module):
    """LayerNorm over the channel dimension for either memory layout.

    ``channels_last`` expects (..., C) and delegates to ``F.layer_norm``;
    ``channels_first`` expects (N, C, H, W) and normalizes across dim 1
    manually, then applies the per-channel affine parameters.
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ("channels_last", "channels_first"):
            raise NotImplementedError
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        if self.data_format == "channels_first":
            mean = x.mean(1, keepdim=True)
            # Biased variance (divides by C), matching F.layer_norm.
            var = (x - mean).pow(2).mean(1, keepdim=True)
            normed = (x - mean) / torch.sqrt(var + self.eps)
            return self.weight[:, None, None] * normed + self.bias[:, None, None]
        return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)

# PatchEmbed module
class PatchEmbed(nn.Module):
    """Embed non-overlapping patches via a strided convolution.

    kernel_size == stride == patch_size, so each output location corresponds
    to exactly one input patch; spatial size shrinks by ``patch_size``.
    """

    def __init__(self, in_channels=3, embed_dim=768, patch_size=4):
        super().__init__()
        self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        return self.proj(x)

# Conv2Former model
class Conv2Former(nn.Module):
    """Four-stage hierarchical Conv2Former classifier.

    Each stage downsamples with a strided patch embedding (stem: /4, later
    stages: /2) and applies a stack of ``Block`` modules at a fixed width.
    Features are flattened to tokens, layer-normalized, globally average
    pooled, and classified by a linear head.
    """

    def __init__(self, num_classes=100, embed_dims=[96, 192, 384, 768], depths=[4, 4, 34, 4], mlp_ratio=4., drop_path_rate=0., dropout_rate=0.3):
        super().__init__()
        # Stem reduces resolution by 4; every later stage halves it again.
        self.patch_embed1 = PatchEmbed(in_channels=3, embed_dim=embed_dims[0], patch_size=4)
        self.patch_embed2 = PatchEmbed(in_channels=embed_dims[0], embed_dim=embed_dims[1], patch_size=2)
        self.patch_embed3 = PatchEmbed(in_channels=embed_dims[1], embed_dim=embed_dims[2], patch_size=2)
        self.patch_embed4 = PatchEmbed(in_channels=embed_dims[2], embed_dim=embed_dims[3], patch_size=2)
        self.stage1 = self._make_stage(embed_dims[0], depths[0], mlp_ratio, drop_path_rate, dropout_rate)
        self.stage2 = self._make_stage(embed_dims[1], depths[1], mlp_ratio, drop_path_rate, dropout_rate)
        self.stage3 = self._make_stage(embed_dims[2], depths[2], mlp_ratio, drop_path_rate, dropout_rate)
        self.stage4 = self._make_stage(embed_dims[3], depths[3], mlp_ratio, drop_path_rate, dropout_rate)
        self.norm = nn.LayerNorm(embed_dims[-1], eps=1e-6)
        self.head = nn.Linear(embed_dims[-1], num_classes)

    def _make_stage(self, dim, depth, mlp_ratio, drop_path_rate, dropout_rate):
        # One stage = `depth` identical blocks at a fixed channel width.
        return nn.Sequential(*(Block(dim, mlp_ratio, drop_path_rate, dropout_rate) for _ in range(depth)))

    def forward(self, x):
        for embed, stage in ((self.patch_embed1, self.stage1),
                             (self.patch_embed2, self.stage2),
                             (self.patch_embed3, self.stage3),
                             (self.patch_embed4, self.stage4)):
            x = stage(embed(x))
        # (N, C, H, W) -> (N, H*W, C) token sequence for the final LayerNorm.
        tokens = self.norm(x.flatten(2).transpose(1, 2))
        # Global average pool over tokens, then classify.
        return self.head(tokens.mean(dim=1))

# Training function: one epoch
def train(model, device, train_loader, optimizer, criterion, epoch, log_interval, train_acc_list, train_loss_list):
    """Train ``model`` for one epoch and append epoch-level metrics.

    Args:
        model: module to train (set to ``.train()`` mode here).
        device: device to move each batch to.
        train_loader: iterable of (data, target) batches.
        optimizer: optimizer stepped once per batch.
        criterion: loss function (e.g. CrossEntropyLoss).
        epoch: epoch number, used only for log messages.
        log_interval: print rolling-window stats every this many batches.
        train_acc_list / train_loss_list: per-epoch accuracy (%) and mean
            batch loss are appended here.

    Bug fix vs. the previous version: the log-window counters were the same
    variables used for the epoch-end statistics, so the values appended to
    the lists only covered the batches since the last log reset — and the
    final division crashed with ZeroDivisionError whenever the last batch
    index landed on the log interval. Window and epoch accumulators are now
    kept separately, and the printed window loss is averaged over the
    window's own batch count instead of ``batch_idx + 1``.
    """
    model.train()
    # Rolling window stats (reset after every log line).
    window_loss, window_correct, window_total, window_batches = 0.0, 0, 0, 0
    # Epoch-level totals (never reset).
    epoch_loss, epoch_correct, epoch_total = 0.0, 0, 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        batch_loss = loss.item()
        _, predicted = torch.max(output.data, 1)
        batch_correct = (predicted == target).sum().item()
        batch_size = target.size(0)

        window_loss += batch_loss
        window_correct += batch_correct
        window_total += batch_size
        window_batches += 1
        epoch_loss += batch_loss
        epoch_correct += batch_correct
        epoch_total += batch_size

        if batch_idx % log_interval == 0:
            accuracy = 100. * window_correct / window_total
            print(f'Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} ({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {window_loss / window_batches:.6f}\tAccuracy: {accuracy:.2f}%')
            window_loss, window_correct, window_total, window_batches = 0.0, 0, 0, 0
    # Append whole-epoch metrics; guard against an empty loader.
    if epoch_total:
        train_acc_list.append(100. * epoch_correct / epoch_total)
        train_loss_list.append(epoch_loss / len(train_loader))

# 定义测试函数
def test(model, device, test_loader, criterion, test_acc_list, test_loss_list):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += criterion(output, target).item() * data.size(0)  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    print(f'\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.0f}%)\n')
    test_acc_list.append(accuracy)
    test_loss_list.append(test_loss)
    return test_loss, accuracy

# Main entry point
def main():
    """Train Conv2Former on CIFAR-100 and plot accuracy/loss curves.

    Downloads the dataset, trains for a fixed number of epochs with AdamW +
    cosine annealing, checkpoints the best-accuracy weights, and finally
    shows the training/testing curves.
    """
    # Hyperparameters
    batch_size = 128
    epochs = 100
    lr = 0.001            # initial learning rate
    momentum = 0.9        # NOTE: unused by AdamW below; kept for reference
    weight_decay = 1e-4
    seed = 1
    log_interval = 100

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    torch.manual_seed(seed)

    # ImageNet normalization statistics (stateless, safe to share).
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    # Heavy augmentation for training; plain normalization for evaluation.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
        transforms.ToTensor(),
        normalize,
        transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    data_dir = '/kaggle/input/mydata'  # dataset root on the cloud server

    # Datasets and loaders
    train_dataset = datasets.CIFAR100(root=data_dir, train=True, download=True, transform=transform_train)
    test_dataset = datasets.CIFAR100(root=data_dir, train=False, download=True, transform=transform_test)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    model = Conv2Former(num_classes=100).to(device)
    optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    # Cosine annealing over the whole run (one step per epoch).
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
    criterion = nn.CrossEntropyLoss()

    best_acc = 0.0
    train_acc_list, train_loss_list = [], []
    test_acc_list, test_loss_list = [], []

    for epoch in range(1, epochs + 1):
        train(model, device, train_loader, optimizer, criterion, epoch, log_interval, train_acc_list, train_loss_list)
        _, test_acc = test(model, device, test_loader, criterion, test_acc_list, test_loss_list)
        # Checkpoint whenever test accuracy improves.
        if test_acc > best_acc:
            best_acc = test_acc
            torch.save(model.state_dict(), 'best_model.pth')
            print(f'Saved best model with accuracy: {best_acc:.2f}%')
        scheduler.step()  # advance the learning-rate schedule

    # Plot training/testing accuracy and loss side by side.
    epochs_range = range(1, epochs + 1)
    plt.figure(figsize=(12, 4))

    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, train_acc_list, label='Training Accuracy')
    plt.plot(epochs_range, test_acc_list, label='Testing Accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Testing Accuracy')

    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, train_loss_list, label='Training Loss')
    plt.plot(epochs_range, test_loss_list, label='Testing Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Testing Loss')

    plt.show()


if __name__ == '__main__':
    main()
