import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import random
import math

# Reproducibility helpers
def set_seed(seed=42):
    """Seed every RNG this script touches (Python, NumPy, torch CPU/CUDA).

    Also forces cuDNN into deterministic mode and disables its benchmark
    autotuner, trading some speed for run-to-run reproducibility. The CUDA
    seeding calls are silently ignored when no GPU is present.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Seed all RNGs up front, then select GPU 0 if available, else CPU.
set_seed(42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Data augmentation - Mixup and CutMix
class MixupCutmix:
    """Mixup / CutMix batch augmentation.

    Both mixup() and cutmix() take a (inputs, targets) pair and return
    (mixed_inputs, targets_a, targets_b, lam) so the caller can blend the
    two label sets with weight lam. __call__ applies one of the two at
    random with probability `prob`, otherwise returns the batch untouched.
    """

    def __init__(self, mixup_alpha=1.0, cutmix_alpha=1.0, prob=0.5):
        self.mixup_alpha = mixup_alpha
        self.cutmix_alpha = cutmix_alpha
        self.prob = prob

    def __call__(self, batch):
        # First draw decides whether to augment at all; second draw picks
        # the flavor (same draw order as always).
        if np.random.rand() >= self.prob:
            return batch
        use_mixup = np.random.rand() < 0.5
        return self.mixup(batch) if use_mixup else self.cutmix(batch)

    def mixup(self, batch):
        """Blend each image with a random partner: lam*x + (1-lam)*x[perm]."""
        images, labels = batch
        perm = torch.randperm(images.size(0))
        lam = np.random.beta(self.mixup_alpha, self.mixup_alpha)

        blended = lam * images + (1 - lam) * images[perm]
        return blended, labels, labels[perm], lam

    def cutmix(self, batch):
        """Paste a random rectangle from a permuted partner into each image.

        NOTE: patches `images` in place; callers keep a reference to the
        mutated batch.
        """
        images, labels = batch
        perm = torch.randperm(images.size(0))
        lam = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)

        x1, y1, x2, y2 = self.rand_bbox(images.size(), lam)
        images[:, :, x1:x2, y1:y2] = images[perm, :, x1:x2, y1:y2]

        # Recompute lam as the exact fraction of pixels kept after clipping
        lam = 1 - ((x2 - x1) * (y2 - y1) / (images.size()[-1] * images.size()[-2]))
        return images, labels, labels[perm], lam

    def rand_bbox(self, size, lam):
        """Sample a clipped box whose area is roughly (1-lam) of the image.

        NOTE(review): follows the reference CutMix naming — size[2] is called
        W and size[3] H, while the slice in cutmix() applies the "x" bounds to
        dim 2. Harmless for square images (CIFAR), but verify before using on
        non-square inputs.
        """
        width, height = size[2], size[3]
        cut_rat = np.sqrt(1. - lam)
        cut_w = int(width * cut_rat)
        cut_h = int(height * cut_rat)

        # Box center, then clip the corners to the image
        cx = np.random.randint(width)
        cy = np.random.randint(height)

        x1 = np.clip(cx - cut_w // 2, 0, width)
        y1 = np.clip(cy - cut_h // 2, 0, height)
        x2 = np.clip(cx + cut_w // 2, 0, width)
        y2 = np.clip(cy + cut_h // 2, 0, height)

        return x1, y1, x2, y2

# Data transform pipelines. The Normalize constants appear to be the commonly
# published CIFAR-100 per-channel mean/std — TODO confirm against the dataset.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    # placed after ToTensor()/Normalize, so erasing acts on the normalized tensor
    transforms.RandomErasing(p=0.25, scale=(0.02, 0.33), ratio=(0.3, 3.3))
])

# Test-time transform: no augmentation, same normalization as training
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
])

# Label smoothing
class LabelSmoothingCrossEntropy(nn.Module):
    """Cross-entropy against a smoothed target distribution.

    The true class gets probability (1 - smoothing); the remaining smoothing
    mass is spread uniformly over the other (num_classes - 1) classes.
    With smoothing=0 this reduces to standard cross-entropy.
    """

    def __init__(self, smoothing=0.1):
        super(LabelSmoothingCrossEntropy, self).__init__()
        self.smoothing = smoothing

    def forward(self, input, target):
        num_classes = input.size(-1)
        log_prob = F.log_softmax(input, dim=-1)
        # Build the smoothed target distribution on input's device/dtype
        target_dist = input.new_ones(input.size()) * self.smoothing / (num_classes - 1.)
        target_dist.scatter_(-1, target.unsqueeze(-1), 1. - self.smoothing)
        # Expected negative log-likelihood under the smoothed distribution
        return (-target_dist * log_prob).sum(dim=-1).mean()

# Basic ResNet block
class ResBlock(nn.Module):
    """Two 3x3 conv-BN residual block with Dropout2d after each BN.

    When the stride or channel count changes, the identity path is replaced
    by a 1x1 conv + BN projection so the shapes match at the addition.
    """

    def __init__(self, in_channels, out_channels, stride=1, dropout_rate=0.1):
        super(ResBlock, self).__init__()
        # Main path: conv-BN-ReLU-drop, conv-BN-drop (same creation order as
        # the rest of the file so seeded initialization is reproducible)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.dropout1 = nn.Dropout2d(dropout_rate)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.dropout2 = nn.Dropout2d(dropout_rate)
        self.relu = nn.ReLU(inplace=True)

        # Shortcut path: identity unless shape changes
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.downsample = nn.Sequential()

    def forward(self, x):
        shortcut = self.downsample(x)

        h = self.relu(self.bn1(self.conv1(x)))
        h = self.dropout1(h)
        h = self.bn2(self.conv2(h))
        h = self.dropout2(h)

        h += shortcut
        return self.relu(h)

# Prototype memory module
class PrototypeMemory(nn.Module):
    """Per-class, L2-normalized prototype vectors with running updates.

    Buffers (move with .to(device), saved in state_dict):
      prototypes:   (num_classes, feature_dim) unit-norm class centers
      initialized:  per-class flag, set the first time a class is updated
      class_counts: running count of features folded into each prototype

    NOTE(review): `momentum` is stored but never used — updates rely on the
    adaptive `alpha` computed in update_prototypes instead.
    """
    def __init__(self, num_classes, feature_dim, momentum=0.99, temperature=0.1):
        super(PrototypeMemory, self).__init__()
        self.num_classes = num_classes
        self.feature_dim = feature_dim
        self.momentum = momentum  # unused; kept for interface compatibility
        self.temperature = temperature
        
        # Xavier initialization. These values are placeholders: each class's
        # prototype is overwritten on its first update, but classes that have
        # never been updated still contribute to the softmax denominator in
        # compute_prototype_loss.
        self.register_buffer('prototypes', torch.zeros(num_classes, feature_dim))
        nn.init.xavier_normal_(self.prototypes)
        # Assigning a tensor to a registered-buffer name replaces the buffer
        self.prototypes = F.normalize(self.prototypes, dim=1)
        
        self.register_buffer('initialized', torch.zeros(num_classes, dtype=torch.bool))
        self.register_buffer('class_counts', torch.zeros(num_classes))
        
    def update_prototypes(self, features, labels):
        """Fold this batch's class centers into the stored prototypes.

        Callers in this file pass detached features; all buffer writes happen
        under no_grad.
        """
        features = F.normalize(features, dim=1)
        
        with torch.no_grad():
            for class_id in torch.unique(labels):
                class_id = class_id.item()
                class_mask = (labels == class_id)
                class_features = features[class_mask]
                
                if len(class_features) > 0:
                    # Unit-norm mean of this class's features in the batch
                    current_center = class_features.mean(dim=0)
                    current_center = F.normalize(current_center, dim=0)
                    
                    if not self.initialized[class_id]:
                        # First sighting of the class: adopt the batch center
                        self.prototypes[class_id] = current_center
                        self.initialized[class_id] = True
                        self.class_counts[class_id] = len(class_features)
                    else:
                        # Adaptive momentum update: step size shrinks as the
                        # class accumulates samples, capped at 0.1
                        alpha = min(0.1, 1.0 / (self.class_counts[class_id] + 1))
                        self.prototypes[class_id] = (
                            (1 - alpha) * self.prototypes[class_id] + 
                            alpha * current_center
                        )
                        # Re-normalize so the prototype stays on the unit sphere
                        self.prototypes[class_id] = F.normalize(self.prototypes[class_id], dim=0)
                        self.class_counts[class_id] += len(class_features)
    
    def compute_prototype_loss(self, features, labels):
        """Cross-entropy over temperature-scaled feature/prototype cosine
        similarities.

        Returns a zero tensor until at least one prototype is initialized;
        samples whose class prototype is still uninitialized are masked out.
        """
        if not self.initialized.any():
            return torch.tensor(0.0, device=features.device)
            
        features = F.normalize(features, dim=1)
        prototypes = F.normalize(self.prototypes, dim=1)
        
        # Cosine similarities, sharpened by the temperature
        similarities = torch.mm(features, prototypes.t()) / self.temperature
        
        # Only score samples whose class prototype exists
        mask = self.initialized[labels]
        if mask.sum() == 0:
            return torch.tensor(0.0, device=features.device)
        
        valid_similarities = similarities[mask]
        valid_labels = labels[mask]
        
        # Plain NLL over softmaxed similarities (the original comment cited
        # "focal loss", but no focusing term is applied here)
        log_probs = F.log_softmax(valid_similarities, dim=1)
        ce_loss = F.nll_loss(log_probs, valid_labels)
        
        return ce_loss

# ResNet model with projection head and prototype memory
class OptimizedResNet(nn.Module):
    """ResNet-style CIFAR classifier.

    forward(x, labels=None, update_prototype=False) returns a 3-tuple
    (logits, features, prototype_loss); prototype_loss is None unless the
    module is in training mode and labels are provided.
    """
    def __init__(self, block, layers, num_classes=100, feature_dim=512, dropout_rate=0.1):
        """
        Args:
            block: residual block class, called as
                block(in_channels, out_channels, stride, dropout_rate).
            layers: blocks per stage, e.g. [2, 2, 2, 2] for a ResNet-18 layout.
            num_classes: classifier output size.
            feature_dim: dimension of the projected feature space.
            dropout_rate: dropout used in the blocks and projection head.
        """
        super(OptimizedResNet, self).__init__()
        self.in_channels = 64
        self.feature_dim = feature_dim
        
        # Stem: 3x3 stride-1 conv (keeps full resolution for small inputs)
        self.conv = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )
        
        # Residual stages; stages 2-4 halve the spatial resolution
        self.layer1 = self.make_layer(block, 64, layers[0], stride=1, dropout_rate=dropout_rate)
        self.layer2 = self.make_layer(block, 128, layers[1], stride=2, dropout_rate=dropout_rate)
        self.layer3 = self.make_layer(block, 256, layers[2], stride=2, dropout_rate=dropout_rate)
        self.layer4 = self.make_layer(block, 512, layers[3], stride=2, dropout_rate=dropout_rate)
        
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        
        # Projection head mapping pooled backbone features to feature_dim
        self.feature_proj = nn.Sequential(
            nn.Linear(512, feature_dim),
            nn.BatchNorm1d(feature_dim),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout_rate)
        )
        
        # Classifier over the projected features
        self.classifier = nn.Linear(feature_dim, num_classes)
        
        # Per-class prototype buffers, updated during training
        self.prototype_memory = PrototypeMemory(num_classes, feature_dim)
        
        self._initialize_weights()
        
    def _initialize_weights(self):
        """Kaiming init for convs, constants for BN2d, Xavier for linears.

        NOTE(review): the BatchNorm1d inside feature_proj is not matched here
        and keeps PyTorch's default initialization.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def make_layer(self, block, out_channels, num_blocks, stride, dropout_rate):
        """Build one stage: the first block may stride, the rest use stride 1."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:  # note: rebinds the `stride` parameter
            layers.append(block(self.in_channels, out_channels, stride, dropout_rate))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x, labels=None, update_prototype=False):
        """Backbone forward pass, optionally updating/scoring prototypes.

        Returns:
            (logits, features, prototype_loss); prototype_loss is None in
            eval mode or when labels is None.
        """
        out = self.conv(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avg_pool(out)
        out = torch.flatten(out, 1)
        
        features = self.feature_proj(out)
        logits = self.classifier(features)
        
        if self.training and labels is not None:
            if update_prototype:
                # detach: prototype updates must not backprop into the backbone
                self.prototype_memory.update_prototypes(features.detach(), labels)
            
            prototype_loss = self.prototype_memory.compute_prototype_loss(features, labels)
            return logits, features, prototype_loss
        else:
            return logits, features, None

# Cosine-annealing LR scheduler with linear warmup and warm restarts
class CosineAnnealingWarmRestarts:
    """Epoch-level LR schedule: linear warmup, then cosine warm restarts.

    For the first `warmup_epochs` step() calls the LR ramps linearly from
    base_lr / warmup_epochs up to base_lr. Afterwards it follows a cosine
    curve from base_lr down to eta_min over a cycle of T_0 epochs, and each
    subsequent cycle is T_mult times longer than the previous one
    (T_mult must be >= 1).

    BUGFIX over the previous version:
      * T_mult was accepted and stored but never used, so every cycle stayed
        T_0 epochs long regardless of the T_mult=2 default.
      * all param groups were driven by base_lrs[0]; each group now anneals
        from its own initial LR.

    Args:
        optimizer: the wrapped torch optimizer.
        T_0: length (in epochs) of the first cosine cycle.
        T_mult: cycle-length multiplier applied after each restart.
        eta_min: floor LR of each cosine cycle.
        warmup_epochs: duration of the initial linear warmup.
    """

    def __init__(self, optimizer, T_0=10, T_mult=2, eta_min=0, warmup_epochs=5):
        self.optimizer = optimizer
        self.T_0 = T_0
        self.T_mult = T_mult
        self.eta_min = eta_min
        self.warmup_epochs = warmup_epochs
        self.base_lrs = [group['lr'] for group in optimizer.param_groups]
        self.epoch = 0  # number of step() calls performed so far

    def step(self):
        """Advance one epoch: write new LRs into the optimizer's param groups.

        Returns:
            The LR applied to the first param group (for logging).
        """
        if self.epoch < self.warmup_epochs:
            # Linear warmup toward each group's base LR
            scale = (self.epoch + 1) / self.warmup_epochs
            new_lrs = [base_lr * scale for base_lr in self.base_lrs]
        else:
            # Locate the current cycle: peel off completed cycles, growing
            # the period by T_mult after each restart.
            t = self.epoch - self.warmup_epochs
            period = self.T_0
            while t >= period:
                t -= period
                period *= self.T_mult
            cos_factor = (1 + math.cos(math.pi * t / period)) / 2
            new_lrs = [
                self.eta_min + (base_lr - self.eta_min) * cos_factor
                for base_lr in self.base_lrs
            ]

        for param_group, lr in zip(self.optimizer.param_groups, new_lrs):
            param_group['lr'] = lr

        self.epoch += 1
        return new_lrs[0]

# Combined loss: smoothed cross-entropy plus a warmup-gated prototype term
class AdaptiveLoss(nn.Module):
    """Label-smoothed CE plus an epoch-ramped prototype loss.

    The prototype term contributes nothing before `warmup_epochs`; its
    weight then ramps linearly from 0 to `proto_weight` over the next 10
    epochs. When lam/targets_a/targets_b are all given, the CE term is the
    usual Mixup/CutMix convex combination of the two label sets and the
    `labels` argument is ignored.
    """

    def __init__(self, proto_weight=0.001, warmup_epochs=10, label_smoothing=0.1):
        super(AdaptiveLoss, self).__init__()
        self.proto_weight = proto_weight
        self.warmup_epochs = warmup_epochs
        self.ce_loss = LabelSmoothingCrossEntropy(smoothing=label_smoothing)

    def forward(self, logits, labels, prototype_loss, epoch=0, lam=None, targets_a=None, targets_b=None):
        mixed = lam is not None and targets_a is not None and targets_b is not None
        if mixed:
            # Mixup/CutMix: blend the losses against both label sets
            ce = lam * self.ce_loss(logits, targets_a) + (1 - lam) * self.ce_loss(logits, targets_b)
        else:
            ce = self.ce_loss(logits, labels)

        # Prototype-loss weight: zero during warmup, then a 10-epoch ramp
        if epoch < self.warmup_epochs:
            weight = 0
        else:
            weight = self.proto_weight * min(1.0, (epoch - self.warmup_epochs) / 10)

        total = ce + weight * prototype_loss
        return total, ce, prototype_loss

if __name__ == '__main__':
    # Hyperparameters
    batch_size = 128
    num_workers = 4
    num_epochs = 200
    initial_lr = 0.001
    weight_decay = 1e-4
    
    print(f"使用设备: {device}")
    
    # Data: CIFAR-100 train/test, downloaded to ./data on first run.
    # drop_last=True keeps every training batch full.
    trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
                                            download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                              shuffle=True, pin_memory=True, 
                                              num_workers=num_workers, drop_last=True)

    testset = torchvision.datasets.CIFAR100(root='./data', train=False,
                                           download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                             shuffle=False, pin_memory=True, 
                                             num_workers=num_workers)

    # Mixup/CutMix augmenter. NOTE(review): the loop below calls .mixup() /
    # .cutmix() directly and rolls its own coin flips, so the prob passed here
    # (used only by __call__) never takes effect.
    mixup_cutmix = MixupCutmix(mixup_alpha=1.0, cutmix_alpha=1.0, prob=0.5)

    # Model: ResNet-18-style configuration ([2, 2, 2, 2] basic blocks)
    net = OptimizedResNet(ResBlock, [2, 2, 2, 2], 
                         num_classes=100, feature_dim=512, 
                         dropout_rate=0.1).to(device)
    
    # Loss function and optimizer
    criterion = AdaptiveLoss(proto_weight=0.01, warmup_epochs=20, label_smoothing=0.1)
    optimizer = optim.AdamW(net.parameters(), lr=initial_lr, weight_decay=weight_decay)
    
    # Learning-rate scheduler (stepped once per epoch, before its batches)
    scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=20, eta_min=1e-6, warmup_epochs=5)
    
    # Training history
    train_losses, train_accs = [], []
    val_losses, val_accs = [], []
    ce_losses, proto_losses = [], []
    learning_rates = []

    print("开始训练")
    print(f"总参数数量: {sum(p.numel() for p in net.parameters()):,}")
    
    best_val_acc = 0.0
    
    for epoch in range(num_epochs):
        # Set this epoch's learning rate up front
        current_lr = scheduler.step()
        learning_rates.append(current_lr)
        
        # --- Training phase ---
        net.train()
        running_loss = 0.0
        running_ce_loss = 0.0
        running_proto_loss = 0.0
        correct_train = 0
        total_train = 0
        
        for i, (images, labels) in enumerate(trainloader):
            images, labels = images.to(device), labels.to(device)
            
            # Apply Mixup/CutMix only after epoch 10, on ~50% of batches
            lam, targets_a, targets_b = None, None, None
            if epoch > 10 and np.random.rand() < 0.5:
                if np.random.rand() < 0.5:
                    # Mixup
                    mixed_data = mixup_cutmix.mixup((images, labels))
                    images, targets_a, targets_b, lam = mixed_data
                else:
                    # CutMix (patches `images` in place)
                    mixed_data = mixup_cutmix.cutmix((images, labels))
                    images, targets_a, targets_b, lam = mixed_data
                
            optimizer.zero_grad()
            
            # Delay prototype updates to epoch 16+ and throttle to every 10th step
            update_proto = (epoch > 15 and i % 10 == 0)
            
            # NOTE(review): under Mixup/CutMix only targets_a is forwarded as
            # the label set for the prototype update/loss — confirm intended.
            if lam is not None:
                logits, features, prototype_loss = net(images, targets_a, update_proto)
            else:
                logits, features, prototype_loss = net(images, labels, update_proto)
            
            # Combined loss; `labels` is ignored inside criterion when lam is set
            total_loss, ce_loss, proto_loss = criterion(
                logits, labels, prototype_loss, epoch, lam, targets_a, targets_b
            )
            
            # Backward pass
            total_loss.backward()
            
            # Gradient clipping
            torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=0.5)
            
            optimizer.step()
            
            # Running statistics
            running_loss += total_loss.item()
            running_ce_loss += ce_loss.item()
            running_proto_loss += proto_loss.item()
            
            if lam is None:  # accuracy is only meaningful on un-mixed batches
                _, predicted = torch.max(logits, 1)
                total_train += labels.size(0)
                correct_train += (predicted == labels).sum().item()
            
            if (i + 1) % 100 == 0:
                print(f'Epoch[{epoch+1}/{num_epochs}], Step[{i+1}/{len(trainloader)}], '
                      f'Loss:{total_loss.item():.4f}, CE:{ce_loss.item():.4f}, Proto:{proto_loss.item():.4f}, LR:{current_lr:.6f}')

        # Per-epoch averages
        epoch_loss = running_loss / len(trainloader)
        epoch_ce_loss = running_ce_loss / len(trainloader)
        epoch_proto_loss = running_proto_loss / len(trainloader)
        epoch_acc = 100.0 * correct_train / total_train if total_train > 0 else 0
        
        train_losses.append(epoch_loss)
        train_accs.append(epoch_acc)
        ce_losses.append(epoch_ce_loss)
        proto_losses.append(epoch_proto_loss)

        # --- Validation phase ---
        net.eval()
        correct_val = 0
        total_val = 0
        val_loss = 0.0

        with torch.no_grad():
            for images, labels in testloader:
                images, labels = images.to(device), labels.to(device)
                logits, _, _ = net(images)
                
                # NOTE(review): validation uses plain cross-entropy (no label
                # smoothing / prototype term), so it is not directly comparable
                # to the training loss.
                loss = F.cross_entropy(logits, labels)
                val_loss += loss.item()

                _, predicted = torch.max(logits, 1)
                total_val += labels.size(0)
                correct_val += (predicted == labels).sum().item()

        epoch_val_loss = val_loss / len(testloader)
        epoch_val_acc = 100.0 * correct_val / total_val
        
        val_losses.append(epoch_val_loss)
        val_accs.append(epoch_val_acc)
        
        # Checkpoint the best model by validation accuracy
        if epoch_val_acc > best_val_acc:
            best_val_acc = epoch_val_acc
            torch.save(net.state_dict(), 'best_Innocent_model.pth')
        
        print(f'Epoch {epoch+1}/{num_epochs}:')
        print(f'  训练 - Loss: {epoch_loss:.4f}, Accuracy: {epoch_acc:.2f}%')
        print(f'  验证 - Loss: {epoch_val_loss:.4f}, Accuracy: {epoch_val_acc:.2f}%')
        print(f'  CE Loss: {epoch_ce_loss:.4f}, Proto Loss: {epoch_proto_loss:.4f}')
        print(f'  学习率: {current_lr:.6f}')
        print('-' * 60)

    print('训练完成!')
    print(f'最佳验证准确率: {best_val_acc:.2f}%')

# NOTE(review): dead code — the module-level string below holds the disabled
# plotting section (it is evaluated as a bare string literal and discarded).
# It references variables created inside the __main__ block above, so it
# could not run at import time anyway; consider deleting it or putting it
# behind a command-line flag instead of a string literal.
'''
    # 绘制训练曲线
    plt.figure(figsize=(20, 12))

    plt.subplot(2, 4, 1)
    plt.plot(train_losses, label="训练损失", linewidth=2)
    plt.plot(val_losses, label="验证损失", linewidth=2)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('损失曲线')
    plt.legend()
    plt.grid(True, alpha=0.3)

    plt.subplot(2, 4, 2)
    plt.plot(train_accs, label="训练准确率", linewidth=2)
    plt.plot(val_accs, label="验证准确率", linewidth=2)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('准确率曲线')
    plt.legend()
    plt.grid(True, alpha=0.3)

    plt.subplot(2, 4, 3)
    plt.plot(learning_rates, label="学习率", color='green', linewidth=2)
    plt.xlabel('Epoch')
    plt.ylabel('Learning Rate')
    plt.title('学习率调度')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.yscale('log')

    plt.subplot(2, 4, 4)
    plt.plot(ce_losses, label="交叉熵损失", linewidth=2)
    plt.plot(proto_losses, label="原型损失", linewidth=2)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('损失组件')
    plt.legend()
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()
'''