import os
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
from torch.utils.data import Dataset, DataLoader, random_split
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from MLP_Mixer import mixer_s16
from our_new_model import create_combined_model
from mobilevitv2 import create_mobilevitv2_model

# 1. Dataset: flower images on disk + single-column CSV of 1-based labels
class FlowerDataset(Dataset):
    """Loads image_XXXXX.jpg files and their class labels (returned 0-based)."""

    def __init__(self, img_dir, label_file, transform=None):
        self.img_dir = img_dir
        # Single-column, header-less CSV -> Series of integer class labels
        self.labels = pd.read_csv(label_file, header=None).squeeze(1)
        self.transform = transform

    def __len__(self):
        return len(self.labels)

    def num_class(self):
        # Count of distinct label values present in the CSV
        return len(np.unique(self.labels.iloc[:]))

    def __getitem__(self, idx):
        # Image files are named image_00001.jpg, image_00002.jpg, ... (1-based)
        img_path = os.path.join(self.img_dir, f"image_{idx+1:05d}.jpg")
        sample = Image.open(img_path).convert('RGB')
        label = self.labels.iloc[idx]

        if self.transform:
            sample = self.transform(sample)

        return sample, label - 1  # labels stored 1-based; shift to 0-based

# Transform that perturbs a tensor with additive Gaussian noise
class AddGaussianNoise(object):
    """Adds N(mean, std^2) noise element-wise to an input tensor."""

    def __init__(self, mean=0., std=0.01):
        self.std = std
        self.mean = mean

    def __call__(self, tensor):
        # Sample unit-normal noise of the same shape, then scale and shift
        noise = torch.randn(tensor.size())
        return tensor + noise * self.std + self.mean

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)

# Training pipeline: augmentation (random crop/flip/jitter/noise) + normalization
transform_train = transforms.Compose([
    transforms.Resize(255),
    transforms.RandomCrop(224, padding=4),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # ImageNet channel means
                         std=[0.229, 0.224, 0.225]),   # ImageNet channel stds
    AddGaussianNoise(0, 0.01)
])

# Eval pipeline: deterministic center crop, no augmentation
transform_test = transforms.Compose([
    transforms.Resize(255),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], 
                         std=[0.229, 0.224, 0.225])
])

# 2. Build datasets and split 8:1:1.
# BUGFIX: the original used random_split and then set
# `val_dataset.dataset.transform = transform_test` — but all three Subsets
# returned by random_split share ONE underlying dataset object, so that
# assignment also disabled augmentation for the TRAINING subset. We instead
# build two views of the same files (augmented / plain) and split both with
# one shared index permutation.
dataset = FlowerDataset('D://桌面//data//jpg', 'D://桌面//data//label.csv', transform_train)
eval_view = FlowerDataset('D://桌面//data//jpg', 'D://桌面//data//label.csv', transform_test)
num_classes = dataset.num_class()

# Split sizes (8:1:1); the remainder goes to the test split
train_size = int(0.8 * len(dataset))
val_size = int(0.1 * len(dataset))
test_size = len(dataset) - train_size - val_size

# Reproducible permutation shared by both dataset views
_perm = torch.randperm(len(dataset), generator=torch.Generator().manual_seed(42)).tolist()
train_dataset = torch.utils.data.Subset(dataset, _perm[:train_size])
val_dataset = torch.utils.data.Subset(eval_view, _perm[train_size:train_size + val_size])
test_dataset = torch.utils.data.Subset(eval_view, _perm[train_size + val_size:])

# 3. Model factory — uniform construction of every candidate architecture
def create_model(model_name: str, num_classes: int):
    """
    Build a model by name and replace its classification head with a
    `num_classes`-way linear layer.

    Supported names:
    - 'resnet18', 'alexnet', 'vgg16', 'mobilenetv3', 'vit'
      (torchvision models, initialized with ImageNet-1k pretrained weights)
    - 'mobilevitv2'       (custom, mobilevitv2_100 variant)
    - 'NewCombinedModel'  (custom combined model)
    - 'mixer_s16'         (MLP-Mixer S/16)

    Raises:
        ValueError: if `model_name` is not one of the names above.
    """
    if model_name == 'resnet18':
        model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
    elif model_name == 'alexnet':
        model = models.alexnet(weights=models.AlexNet_Weights.IMAGENET1K_V1)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)
    elif model_name == 'vgg16':
        model = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
        num_ftrs = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(num_ftrs, num_classes)
    elif model_name == 'mobilenetv3':
        model = models.mobilenet_v3_large(weights=models.MobileNet_V3_Large_Weights.IMAGENET1K_V1)
        num_ftrs = model.classifier[3].in_features
        model.classifier[3] = nn.Linear(num_ftrs, num_classes)
    elif model_name == 'vit':
        model = models.vit_b_16(weights=models.ViT_B_16_Weights.IMAGENET1K_V1)
        num_ftrs = model.heads.head.in_features
        model.heads.head = nn.Linear(num_ftrs, num_classes)
    elif model_name == 'mobilevitv2':
        model  = create_mobilevitv2_model(num_classes=num_classes, variant='mobilevitv2_100')
    elif model_name == 'NewCombinedModel':  
        model = create_combined_model(num_classes=num_classes)
    elif model_name == 'mixer_s16':  
        model = mixer_s16(num_classes=num_classes)
    else:
        raise ValueError(f"未知的模型名称: {model_name}")
    
    return model

# 4. Training loop (with tqdm progress bars)
def train_model(model, model_name, criterion, optimizer, train_loader, val_loader, 
               train_dataset, val_dataset, num_epochs=20):
    """
    Train `model` for `num_epochs`, checkpointing the weights with the best
    validation accuracy to save_dir/best_<model_name>.pth and saving
    loss/accuracy curves as a PNG.

    Returns:
        The model with its last-epoch weights (the best checkpoint stays on
        disk; the caller reloads it before evaluation).
    """
    best_acc = 0.0
    train_losses, val_losses = [], []
    train_accs, val_accs = [], []

    # BUGFIX: save_dir used to be defined only inside the
    # `if val_acc > best_acc:` branch, so plt.savefig() at the end raised
    # NameError whenever no epoch improved on best_acc (or num_epochs == 0).
    # Define and create it once up front instead.
    save_dir = 'E:/Model'
    os.makedirs(save_dir, exist_ok=True)

    # Halve the LR when validation loss stops improving for 3 epochs
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3)

    # Epoch-level progress bar
    epoch_pbar = tqdm(range(num_epochs), desc=f"训练 {model_name}", position=0, leave=True)

    for epoch in epoch_pbar:
        epoch_start = time.time()
        model.train()
        running_loss = 0.0
        running_corrects = 0

        # Batch-level progress bar
        batch_pbar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs}", position=1, leave=False)
        for inputs, labels in batch_pbar:
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            _, preds = torch.max(outputs, 1)
            # Weight batch loss by batch size so the epoch mean is per-sample
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)

            batch_pbar.set_postfix(loss=loss.item())

        epoch_loss = running_loss / len(train_dataset)
        epoch_acc = running_corrects.double() / len(train_dataset)
        train_losses.append(epoch_loss)
        train_accs.append(epoch_acc.item())

        # Validation pass
        val_loss, val_acc = validate_model(model, criterion, val_loader, val_dataset)
        val_losses.append(val_loss)
        val_accs.append(val_acc)

        scheduler.step(val_loss)

        # Checkpoint on best validation accuracy
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), os.path.join(save_dir, f'best_{model_name}.pth'))

        epoch_time = time.time() - epoch_start
        epoch_pbar.set_postfix(
            train_loss=f"{epoch_loss:.4f}", 
            val_loss=f"{val_loss:.4f}",
            val_acc=f"{val_acc:.4f}",
            time=f"{epoch_time:.2f}s"
        )

    # Loss / accuracy curves, side by side
    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.plot(train_losses, label='Train Loss')
    plt.plot(val_losses, label='Val Loss')
    plt.legend()
    plt.title(f'{model_name} Loss Curve')

    plt.subplot(1, 2, 2)
    plt.plot(train_accs, label='Train Acc')
    plt.plot(val_accs, label='Val Acc')
    plt.legend()
    plt.title(f'{model_name} Accuracy Curve')
    plt.savefig(os.path.join(save_dir, f'training_curve_{model_name}.png'))
    plt.close()

    return model

# Validation pass (with tqdm progress bar)
def validate_model(model, criterion, loader, dataset):
    """Run one no-grad pass over `loader`; return (mean loss, accuracy)."""
    model.eval()
    total_loss = 0.0
    total_correct = 0

    pbar = tqdm(loader, desc="验证中", position=1, leave=False)

    with torch.no_grad():
        for batch_inputs, batch_labels in pbar:
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)

            logits = model(batch_inputs)
            batch_loss = criterion(logits, batch_labels)

            # Per-sample weighting so the final division yields the mean loss
            _, predicted = torch.max(logits, 1)
            total_loss += batch_loss.item() * batch_inputs.size(0)
            total_correct += torch.sum(predicted == batch_labels.data)

            pbar.set_postfix(loss=batch_loss.item())

    n = len(dataset)
    return total_loss / n, (total_correct.double() / n).item()

# 5. Test-set evaluation (with tqdm progress bar)
def evaluate_model(model, model_name, loader, dataset):
    """
    Collect predictions over `loader`, print accuracy/precision/recall/F1,
    parameter count and throughput, save a confusion-matrix heatmap, and
    return the metrics as a dict.
    """
    model.eval()
    predictions, targets = [], []
    forward_times = []

    pbar = tqdm(loader, desc=f"评估 {model_name}", position=0, leave=True)

    with torch.no_grad():
        for images, labels in pbar:
            images = images.to(device)
            labels = labels.to(device)

            # Time only the forward pass
            tic = time.time()
            logits = model(images)
            forward_times.append(time.time() - tic)

            _, batch_preds = torch.max(logits, 1)
            predictions.extend(batch_preds.cpu().numpy())
            targets.extend(labels.cpu().numpy())

    # Aggregate metrics (macro-averaged across classes)
    accuracy = np.mean(np.array(predictions) == np.array(targets))
    precision = precision_score(targets, predictions, average='macro')
    recall = recall_score(targets, predictions, average='macro')
    f1 = f1_score(targets, predictions, average='macro')
    avg_inference_time = np.mean(forward_times) * 1000  # milliseconds
    speed = len(dataset) / np.sum(forward_times)  # samples per second

    # Trainable-parameter count
    params = sum(p.numel() for p in model.parameters() if p.requires_grad)

    # Confusion-matrix heatmap
    cm = confusion_matrix(targets, predictions)
    plt.figure(figsize=(10, 8))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title(f'{model_name} Confusion Matrix')
    plt.colorbar()
    plt.savefig(os.path.join('E:/Model', f'confusion_matrix_{model_name}.png'))
    plt.close()

    print('-' * 50)
    print(f'[{model_name}] 测试集准确率: {accuracy:.4f}')
    print(f'精确率 (Precision): {precision:.4f}')
    print(f'召回率 (Recall): {recall:.4f}')
    print(f'F1 分数: {f1:.4f}')
    print(f'参数数量: {params:,}')
    print(f'平均推理时间: {avg_inference_time:.4f} 毫秒/样本')
    print(f'推理速度: {speed:.2f} 样本/秒')
    print('-' * 50)

    return {
        'model': model_name,
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1': f1,
        'params': params,
        'inference_time': avg_inference_time,
        'speed': speed
    }


# 6. Entry point: train, checkpoint, and compare every model in the list
if __name__ == '__main__':
  
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")
    
    # Models to compare (custom models first)
    model_names = ['mixer_s16','NewCombinedModel','mobilevitv2','resnet18', 'alexnet', 'vgg16', 'mobilenetv3', 'vit']
    results = []
    
    # One loader per split is enough — the original built one identical
    # loader per model, spawning redundant worker processes for no benefit.
    batch_size = 64
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=2)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=2)
    
    # Checkpoint directory, created once
    save_dir = 'E:/Model'
    os.makedirs(save_dir, exist_ok=True)
    
    # Outer progress bar over models
    model_pbar = tqdm(model_names, desc="训练模型", position=0, leave=True)
    
    for model_name in model_pbar:
        model_pbar.set_description(f"训练 {model_name}")
        
        # Fresh model / loss / optimizer per architecture
        model = create_model(model_name, num_classes).to(device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        
        trained_model = train_model(
            model, model_name, criterion, optimizer,
            train_loader, val_loader,
            train_dataset, val_dataset,
            num_epochs=1
        )
        
        # Reload the best validation checkpoint before testing
        trained_model.load_state_dict(torch.load(os.path.join(save_dir, f'best_{model_name}.pth')))
        
        print(f"\n测试 {model_name} 模型性能...")
        test_result = evaluate_model(
            trained_model, model_name,
            test_loader, test_dataset
        )
        results.append(test_result)
    
        # Persist the final (best) weights with an explicit filename
        final_model_path = os.path.join(save_dir, f'final_{model_name}.pth')
        torch.save(trained_model.state_dict(), final_model_path)
        print(f"{model_name} 模型已保存至: {final_model_path}")
        
        model_pbar.set_postfix(acc=f"{test_result['accuracy']:.4f}")
    
    # Comparison table
    print("\n\n模型比较结果:")
    print("="*80)
    print(f"{'模型':<15} {'准确率':<10} {'F1分数':<10} {'参数量':<15} {'推理速度(样本/秒)':<20}")
    print("-"*80)
    for res in results:
        print(f"{res['model']:<15} {res['accuracy']:.4f}    {res['f1']:.4f}    {res['params']:<15,} {res['speed']:<20.2f}")
    print("="*80)
    
    # Save the comparison to CSV (the original left this as a dangling comment)
    pd.DataFrame(results).to_csv(os.path.join(save_dir, 'model_comparison.csv'), index=False)