import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import time
import os
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report, confusion_matrix
from tqdm import tqdm

from models.ResNet import ResNet18 # 这里使用ResNet18
from data.CIFAR10Dataset import CIFAR10Dataset, unpickle

# ---- Command-line arguments ----
parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--epochs', default=200, type=int, help='number of epochs')
parser.add_argument('--batch-size', default=128, type=int, help='batch size')
parser.add_argument('--checkpoint-dir', default='./checkpoints', type=str, help='checkpoint directory')
parser.add_argument('--data-dir', default='./cifar-10-batches-py', type=str, help='data directory')
parser.add_argument('--result-dir', default='./results', type=str, help='result directory for plots and logs')
parser.add_argument('--test-only', action='store_true', help='test only mode')
args = parser.parse_args()

# Select compute device; tensors and the model are moved with .to(device) below.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'Using device: {device}')

# Ensure output directories exist before anything is written to them.
os.makedirs(args.result_dir, exist_ok=True)
os.makedirs(args.checkpoint_dir, exist_ok=True)

# Augmented training pre-processing.
# ToPILImage comes first: the custom CIFAR10Dataset presumably yields raw
# arrays/tensors rather than PIL images -- TODO confirm against data/CIFAR10Dataset.py.
print('==> Preparing data..')
transform_train = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    # Per-channel mean/std values commonly used for CIFAR-10 normalization.
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

# Test-time pre-processing: normalization only, no augmentation.
transform_test = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

# Datasets and loaders (train set shuffled, test set in fixed order).
trainset = CIFAR10Dataset(args.data_dir, train=True, transform=transform_train)
testset = CIFAR10Dataset(args.data_dir, train=False, transform=transform_test)

trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)
testloader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)

# Class names from the CIFAR-10 metadata file (stored as bytes, hence decode).
meta_dict = unpickle(os.path.join(args.data_dir, "batches.meta"))
class_names = [label.decode('utf-8') for label in meta_dict[b'label_names']]
print('Classes: ', class_names)

# Build the model and move it to the selected device.
print('==> Building model..')
net = ResNet18()
net = net.to(device)

# Wrap in DataParallel when multiple GPUs are available; cudnn.benchmark
# is only enabled on this multi-GPU path.
if device == 'cuda' and torch.cuda.device_count() > 1:
    net = torch.nn.DataParallel(net)
    torch.backends.cudnn.benchmark = True

# Loss, SGD optimizer, and a cosine-annealed learning-rate schedule over all epochs.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)

# Visualization / logging helper
class TrainingVisualizer:
    """Accumulate per-epoch metrics, append them to a CSV log, and render
    training-history, confusion-matrix and per-class-accuracy plots."""

    def __init__(self, result_dir):
        self.result_dir = result_dir
        # One list per tracked metric, all kept in lock-step with self.epochs.
        self.train_losses = []
        self.train_accs = []
        self.test_losses = []
        self.test_accs = []
        self.precisions = []
        self.recalls = []
        self.f1_scores = []
        self.epochs = []

        # Start a fresh CSV log (overwrites any log from a previous run).
        self.log_file = os.path.join(result_dir, 'training_log.csv')
        with open(self.log_file, 'w') as f:
            f.write("Epoch,Train Loss,Train Acc,Test Loss,Test Acc,Precision,Recall,F1 Score,Time\n")

    def update(self, epoch, train_loss, train_acc, test_loss, test_acc, precision, recall, f1, epoch_time):
        """Record one epoch's metrics in memory and append one CSV row."""
        series = (self.epochs, self.train_losses, self.train_accs,
                  self.test_losses, self.test_accs, self.precisions,
                  self.recalls, self.f1_scores)
        values = (epoch, train_loss, train_acc, test_loss, test_acc,
                  precision, recall, f1)
        for history, value in zip(series, values):
            history.append(value)

        with open(self.log_file, 'a') as f:
            f.write(f"{epoch},{train_loss:.4f},{train_acc:.2f},{test_loss:.4f},{test_acc:.2f},"
                    f"{precision:.4f},{recall:.4f},{f1:.4f},{epoch_time:.2f}\n")

    def plot_training_history(self):
        """Save a 2x2 panel of loss / accuracy / metric curves to training_history.png."""
        plt.figure(figsize=(15, 10))

        # (subplot position, curves to draw, y-label, title) per panel.
        panels = [
            (1, [(self.train_losses, 'b-', 'Train Loss'),
                 (self.test_losses, 'r-', 'Test Loss')],
             'Loss', 'Training and Test Loss'),
            (2, [(self.train_accs, 'b-', 'Train Accuracy'),
                 (self.test_accs, 'r-', 'Test Accuracy')],
             'Accuracy (%)', 'Training and Test Accuracy'),
            (3, [(self.precisions, 'g-', 'Precision'),
                 (self.recalls, 'm-', 'Recall'),
                 (self.f1_scores, 'c-', 'F1 Score')],
             'Score', 'Precision, Recall and F1 Score'),
        ]
        for pos, curves, ylabel, title in panels:
            plt.subplot(2, 2, pos)
            for data, fmt, label in curves:
                plt.plot(self.epochs, data, fmt, label=label)
            plt.xlabel('Epoch')
            plt.ylabel(ylabel)
            plt.title(title)
            plt.legend()
            plt.grid(True)

        # Fourth panel intentionally left empty (reserved, e.g. for an LR curve).
        plt.subplot(2, 2, 4)

        plt.tight_layout()
        plot_path = os.path.join(self.result_dir, 'training_history.png')
        plt.savefig(plot_path)
        plt.close()
        print(f"Training history plot saved to {plot_path}")

    def plot_confusion_matrix(self, net, device, testloader, class_names):
        """Run the model over testloader, save a confusion-matrix heatmap,
        and return the raw confusion matrix."""
        net.eval()
        predictions = []
        ground_truth = []

        with torch.no_grad():
            for inputs, labels in tqdm(testloader, desc="Generating confusion matrix"):
                inputs, labels = inputs.to(device), labels.to(device)
                batch_preds = net(inputs).argmax(dim=1)
                predictions.extend(batch_preds.cpu().numpy())
                ground_truth.extend(labels.cpu().numpy())

        cm = confusion_matrix(ground_truth, predictions)

        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                   xticklabels=class_names, yticklabels=class_names)
        plt.xlabel('Predicted')
        plt.ylabel('True')
        plt.title('Confusion Matrix')
        plt.xticks(rotation=45)
        plt.yticks(rotation=0)
        plt.tight_layout()

        plot_path = os.path.join(self.result_dir, 'confusion_matrix.png')
        plt.savefig(plot_path)
        plt.close()
        print(f"Confusion matrix plot saved to {plot_path}")

        return cm

    def plot_class_accuracy(self, cm, class_names):
        """Save a bar chart of per-class accuracy (%) and return the values."""
        # Diagonal = correct predictions per class; row sum = true count per class.
        class_acc = cm.diagonal() / cm.sum(axis=1) * 100

        plt.figure(figsize=(12, 6))
        bars = plt.bar(range(len(class_names)), class_acc)
        plt.xlabel('Class')
        plt.ylabel('Accuracy (%)')
        plt.title('Accuracy per Class')
        plt.xticks(range(len(class_names)), class_names, rotation=45)

        # Annotate each bar with its percentage.
        for bar, acc in zip(bars, class_acc):
            plt.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.5,
                    f'{acc:.1f}%', ha='center', va='bottom')

        plt.tight_layout()
        plot_path = os.path.join(self.result_dir, 'class_accuracy.png')
        plt.savefig(plot_path)
        plt.close()
        print(f"Class accuracy plot saved to {plot_path}")

        return class_acc

# Module-level visualizer shared by main() and test_only(); constructing it
# also (re)creates the CSV training log inside args.result_dir.
visualizer = TrainingVisualizer(args.result_dir)

# Training loop for a single epoch
def train(epoch):
    """Run one optimization pass over trainloader.

    Returns:
        (avg_loss, train_acc): mean batch loss and accuracy (%) for the epoch.
    """
    print(f'\nEpoch: {epoch}')
    net.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0

    progress = tqdm(trainloader, desc=f'Epoch {epoch}')

    for step, (images, labels) in enumerate(progress):
        images, labels = images.to(device), labels.to(device)

        # Standard step: clear grads, forward, backward, update.
        optimizer.zero_grad()
        logits = net(images)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()

        running_loss += batch_loss.item()
        n_seen += labels.size(0)
        n_correct += logits.argmax(dim=1).eq(labels).sum().item()

        # Refresh the progress-bar postfix every 10 batches.
        if step % 10 == 0:
            progress.set_postfix({
                'Loss': f'{batch_loss.item():.3f}',
                'Acc': f'{100.*n_correct/n_seen:.2f}%'
            })

    train_acc = 100. * n_correct / n_seen
    avg_loss = running_loss / len(trainloader)
    print(f'Train Loss: {avg_loss:.3f} | Train Acc: {train_acc:.3f}%')

    return avg_loss, train_acc

# Evaluation loop
def test(epoch, best_acc=0):
    """Evaluate the model on testloader.

    Args:
        epoch: current epoch index (a detailed report prints every 10th epoch).
        best_acc: best accuracy so far; beating it also triggers the report.

    Returns:
        (avg_loss, test_acc, precision, recall, f1) with weighted-average metrics.
    """
    net.eval()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    preds_all = []
    targets_all = []

    with torch.no_grad():
        progress = tqdm(testloader, desc='Testing')
        for step, (images, labels) in enumerate(progress):
            images, labels = images.to(device), labels.to(device)
            logits = net(images)
            running_loss += criterion(logits, labels).item()

            predicted = logits.argmax(dim=1)
            n_seen += labels.size(0)
            n_correct += predicted.eq(labels).sum().item()

            # Keep per-sample predictions/targets for the sklearn metrics below.
            preds_all.extend(predicted.cpu().numpy())
            targets_all.extend(labels.cpu().numpy())

            # Refresh the progress-bar postfix every 10 batches.
            if step % 10 == 0:
                progress.set_postfix({
                    'Acc': f'{100.*n_correct/n_seen:.2f}%'
                })

    test_acc = 100. * n_correct / n_seen
    avg_loss = running_loss / len(testloader)

    # Weighted averages over all classes; zero_division=0 avoids warnings
    # for classes with no predicted samples.
    precision = precision_score(targets_all, preds_all, average='weighted', zero_division=0)
    recall = recall_score(targets_all, preds_all, average='weighted', zero_division=0)
    f1 = f1_score(targets_all, preds_all, average='weighted', zero_division=0)

    print(f'Test Loss: {avg_loss:.3f} | Test Acc: {test_acc:.3f}%')
    print(f'Precision: {precision:.3f} | Recall: {recall:.3f} | F1 Score: {f1:.3f}')

    # Full per-class report every 10 epochs or on a new best accuracy.
    if epoch % 10 == 0 or test_acc > best_acc:
        print("\nDetailed Classification Report:")
        print(classification_report(targets_all, preds_all, target_names=class_names, zero_division=0))

    return avg_loss, test_acc, precision, recall, f1

# Checkpoint writer
def save_checkpoint(epoch, acc, is_best=False):
    """Persist model/optimizer/scheduler state for this epoch.

    Writes ckpt_epoch_<epoch>.pth always, and additionally best.pth
    when is_best is True.
    """
    state = dict(
        net=net.state_dict(),
        acc=acc,
        epoch=epoch,
        optimizer=optimizer.state_dict(),
        scheduler=scheduler.state_dict(),
    )

    # Regular per-epoch checkpoint.
    checkpoint_path = os.path.join(args.checkpoint_dir, f'ckpt_epoch_{epoch}.pth')
    torch.save(state, checkpoint_path)
    print(f'Checkpoint saved to {checkpoint_path}')

    # Extra copy for the best model so resume/test-only can find it.
    if is_best:
        best_path = os.path.join(args.checkpoint_dir, 'best.pth')
        torch.save(state, best_path)
        print(f'Best model saved to {best_path}')

# Checkpoint loader
def load_checkpoint():
    """Resume training state from '<checkpoint_dir>/best.pth'.

    Returns:
        (start_epoch, best_acc): the epoch to resume from and the best test
        accuracy recorded so far; (0, 0) when no checkpoint exists.
    """
    checkpoint_path = os.path.join(args.checkpoint_dir, 'best.pth')
    if os.path.isfile(checkpoint_path):
        print(f'==> Loading checkpoint from {checkpoint_path}')
        # map_location remaps saved tensors to the current device, so a
        # checkpoint written on a GPU machine still loads on a CPU-only host
        # (without it torch.load raises when CUDA is unavailable).
        checkpoint = torch.load(checkpoint_path, map_location=device)
        net.load_state_dict(checkpoint['net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        start_epoch = checkpoint['epoch'] + 1
        best_acc = checkpoint['acc']
        print(f'==> Loaded checkpoint (epoch {checkpoint["epoch"]}, acc {checkpoint["acc"]:.2f}%)')
        return start_epoch, best_acc
    else:
        print('==> No checkpoint found, starting from scratch')
        return 0, 0

# Evaluation-only entry point (no training)
def test_only():
    """Load the best checkpoint and evaluate it.

    Prints overall metrics, draws confusion-matrix and per-class-accuracy
    plots, and writes a summary to '<result_dir>/test_results.txt'.
    Returns early if no best checkpoint exists.
    """
    print('==> Testing only mode..')

    # Load the best model; bail out if training has never produced one.
    best_path = os.path.join(args.checkpoint_dir, 'best.pth')
    if os.path.isfile(best_path):
        # map_location remaps saved tensors to the current device, so a
        # GPU-saved checkpoint still loads on a CPU-only host.
        checkpoint = torch.load(best_path, map_location=device)
        net.load_state_dict(checkpoint['net'])
        print(f'Loaded best model from epoch {checkpoint["epoch"]} with accuracy {checkpoint["acc"]:.2f}%')
    else:
        print('No best model found! Please train first.')
        return

    # Evaluate; epoch=0 makes test() print the detailed classification report.
    test_loss, test_acc, precision, recall, f1 = test(0, 0)
    print(f'Final Test Accuracy: {test_acc:.2f}%')
    print(f'Precision: {precision:.3f} | Recall: {recall:.3f} | F1 Score: {f1:.3f}')

    # Confusion matrix and per-class accuracy plots.
    cm = visualizer.plot_confusion_matrix(net, device, testloader, class_names)
    class_acc = visualizer.plot_class_accuracy(cm, class_names)

    # Per-class accuracy breakdown on the console.
    print("\n=== 详细准确率分析 ===")
    for i, (class_name, acc) in enumerate(zip(class_names, class_acc)):
        print(f"{class_name}: {acc:.2f}%")

    print(f"\n平均准确率: {np.mean(class_acc):.2f}%")
    print(f"最佳准确率: {np.max(class_acc):.2f}% ({class_names[np.argmax(class_acc)]})")
    print(f"最差准确率: {np.min(class_acc):.2f}% ({class_names[np.argmin(class_acc)]})")

    # Persist the same summary to a text file for later inspection.
    with open(os.path.join(args.result_dir, 'test_results.txt'), 'w') as f:
        f.write(f"Final Test Accuracy: {test_acc:.2f}%\n")
        f.write(f"Precision: {precision:.3f}\n")
        f.write(f"Recall: {recall:.3f}\n")
        f.write(f"F1 Score: {f1:.3f}\n\n")
        f.write("=== 详细准确率分析 ===\n")
        for i, (class_name, acc) in enumerate(zip(class_names, class_acc)):
            f.write(f"{class_name}: {acc:.2f}%\n")
        f.write(f"\n平均准确率: {np.mean(class_acc):.2f}%\n")
        f.write(f"最佳准确率: {np.max(class_acc):.2f}% ({class_names[np.argmax(class_acc)]})\n")
        f.write(f"最差准确率: {np.min(class_acc):.2f}% ({class_names[np.argmin(class_acc)]})\n")

# Main training loop
def main():
    """Train for args.epochs epochs with checkpointing, then produce the
    final plots and write a results summary to '<result_dir>/final_results.txt'."""
    start_epoch = 0
    best_acc = 0  # best test accuracy seen so far

    # Optionally restore a previous run's state.
    if args.resume:
        start_epoch, best_acc = load_checkpoint()

    for epoch in range(start_epoch, args.epochs):
        tic = time.time()

        # One training pass and one evaluation pass per epoch.
        train_loss, train_acc = train(epoch)
        test_loss, test_acc, precision, recall, f1 = test(epoch, best_acc)

        # Advance the cosine LR schedule.
        scheduler.step()

        epoch_time = time.time() - tic
        visualizer.update(epoch, train_loss, train_acc, test_loss, test_acc,
                          precision, recall, f1, epoch_time)

        is_best = test_acc > best_acc
        if is_best:
            best_acc = test_acc

        # Checkpoint every 10 epochs, plus whenever a new best is reached.
        if epoch % 10 == 0 or is_best:
            save_checkpoint(epoch, test_acc, is_best)

        print(f'Epoch time: {epoch_time:.2f}s')

    print(f'Training completed. Best accuracy: {best_acc:.2f}%')

    # Final curves, confusion matrix, and per-class accuracy plots.
    visualizer.plot_training_history()
    cm = visualizer.plot_confusion_matrix(net, device, testloader, class_names)
    class_acc = visualizer.plot_class_accuracy(cm, class_names)

    # Per-class accuracy breakdown on the console.
    print("\n=== 详细准确率分析 ===")
    for class_name, acc in zip(class_names, class_acc):
        print(f"{class_name}: {acc:.2f}%")

    print(f"\n平均准确率: {np.mean(class_acc):.2f}%")
    print(f"最佳准确率: {np.max(class_acc):.2f}% ({class_names[np.argmax(class_acc)]})")
    print(f"最差准确率: {np.min(class_acc):.2f}% ({class_names[np.argmin(class_acc)]})")

    # Persist the final summary for later inspection.
    with open(os.path.join(args.result_dir, 'final_results.txt'), 'w') as f:
        f.write(f"Best accuracy: {best_acc:.2f}%\n\n")
        f.write("=== 详细准确率分析 ===\n")
        for class_name, acc in zip(class_names, class_acc):
            f.write(f"{class_name}: {acc:.2f}%\n")
        f.write(f"\n平均准确率: {np.mean(class_acc):.2f}%\n")
        f.write(f"最佳准确率: {np.max(class_acc):.2f}% ({class_names[np.argmax(class_acc)]})\n")
        f.write(f"最差准确率: {np.min(class_acc):.2f}% ({class_names[np.argmin(class_acc)]})\n")

if __name__ == '__main__':
    # Dispatch on --test-only: evaluate the saved best model, or run training.
    entry = test_only if args.test_only else main
    entry()