import os
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
from classification_dataset import create_classifier_loaders, create_test_classifier_loaders
from resnet3d import generate_model
from tqdm import tqdm

os.environ['CUDA_VISIBLE_DEVICES']='1'

class LabelSmoothCrossEntropy(nn.Module):
    """Cross-entropy loss with uniform label smoothing.

    The effective target distribution mixes the one-hot label with a
    uniform distribution over all classes, weighted by ``smoothing``.
    """

    def __init__(self, smoothing=0.1):
        super().__init__()
        self.smoothing = smoothing

    def forward(self, pred, target):
        """Return the mean smoothed cross-entropy for a batch.

        pred: raw logits, shape (batch, num_classes).
        target: integer class indices, shape (batch,).
        """
        logp = F.log_softmax(pred, dim=-1)
        # Negative log-likelihood of the true class, one value per sample.
        per_sample_nll = -logp.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        # Uniform component: mean negative log-prob over every class.
        uniform_term = -logp.mean(dim=-1)
        combined = (1.0 - self.smoothing) * per_sample_nll + self.smoothing * uniform_term
        return combined.mean()
        
def train_model(model, train_loader, val_loader, criterion, optimizer, device, 
                num_epochs=50, scheduler=None):
    """Train `model`, validating every epoch, and checkpoint the best weights.

    Args:
        model: network producing class logits.
        train_loader / val_loader: yield (images, labels, identifier) batches.
        criterion: loss taking (logits, labels).
        optimizer: optimizer over `model.parameters()`.
        device: device batches are moved to.
        num_epochs: number of training epochs.
        scheduler: optional LR scheduler stepped with the validation loss
            (ReduceLROnPlateau-style); skipped when None.

    Side effects: appends to ./train_classfication/train_log.txt, saves the
    best model (by validation accuracy) and the last model in that directory.
    """
    min_valid_loss = np.inf
    best_acc = 0
    # The log/checkpoint directory may not exist on a fresh clone; the
    # original crashed on the first `open(..., 'a')` in that case.
    os.makedirs('./train_classfication', exist_ok=True)

    def process_epoch(data_loader, is_train, epoch):
        """Run one epoch over `data_loader`; return (mean batch loss, accuracy)."""
        model.train() if is_train else model.eval()
        stage = "训练" if is_train else "验证"
        total_loss = 0.0
        total_correct = 0
        total_samples = 0
        all_preds = []
        all_labels = []
        progress_bar = tqdm(data_loader, desc=f"{stage} Epoch {epoch+1}", leave=False)

        # Gradients are tracked only during training.
        with torch.set_grad_enabled(is_train):
            for batch_idx, (images, labels, identifer) in enumerate(progress_bar):
                imgs = images.to(device)
                labels = labels.to(device)
                if is_train:
                    optimizer.zero_grad()
                outputs = model(imgs)
                loss = criterion(outputs, labels)

                if is_train:
                    loss.backward()
                    optimizer.step()

                total_loss += loss.item()
                probs = F.softmax(outputs, dim=1)
                preds = probs.argmax(dim=1)
                total_correct += (preds == labels).sum().item()
                total_samples += labels.size(0)

                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

                # Periodically show raw predictions next to the ground truth.
                # (The original `50 if is_train else 50` was a dead ternary.)
                log_interval = 50
                if batch_idx % log_interval == 0:
                    progress_bar.write(
                        f"Batch {batch_idx}: 预测 {preds} | 真实 {labels.cpu().numpy()}"
                    )
        epoch_loss = total_loss / len(data_loader)
        epoch_acc = total_correct / total_samples

        if not is_train:
            # Fixed 5-class label set keeps the matrix shape stable even when
            # a class is absent from this validation epoch.
            cm = confusion_matrix(all_labels, all_preds, labels=[0, 1, 2, 3, 4])
            cm_str = np.array2string(cm, separator='\t', 
                                    formatter={'int': lambda x: f"{x:3d}"},
                                    prefix='\t')
            print(f"验证混淆矩阵:\n{cm_str}\n")

        return epoch_loss, epoch_acc

    for e in range(num_epochs):
        train_loss, train_acc = process_epoch(train_loader, True, e)
        val_loss, val_acc = process_epoch(val_loader, False, e)
        if scheduler is not None:
            # ReduceLROnPlateau-style schedulers consume the monitored metric;
            # the original called this unconditionally and crashed when the
            # default scheduler=None was used.
            scheduler.step(val_loss)
        print(f'Epoch {e+1}')
        print(f'训练 Loss: {train_loss:.8f} Acc: {train_acc:.4f}')
        print(f'验证 Loss: {val_loss:.8f} Acc: {val_acc:.4f}')

        with open('./train_classfication/train_log.txt', 'a') as f:
            f.write(f'Epoch {e+1}\n')
            f.write(f'训练 Loss: {train_loss:.8f} Acc: {train_acc:.4f}\n')
            f.write(f'验证 Loss: {val_loss:.8f} Acc: {val_acc:.4f}\n')
            f.write('\n')

        if val_acc > best_acc:
            # Report the accuracy change; the original message said accuracy
            # rose but printed the loss values instead.
            print(f'✅验证准确率上升 ({best_acc:.4f} → {val_acc:.4f})')
            best_acc = val_acc
            torch.save(model.state_dict(), './train_classfication/3d_best_model.pth')
            min_valid_loss = val_loss

    torch.save(model.state_dict(), './train_classfication/3d_last_model.pth')

def test_model(model, test_loader, criterion, device, 
               cmap_path='confusion_matrix.png', class_names=None):
    """Evaluate `model` on the test set and report loss, accuracy and a
    confusion matrix.

    Saves the confusion-matrix figure to `cmap_path`, prints a sklearn
    classification report, and returns (mean batch loss, accuracy, cm).
    """
    model.eval()
    running_loss = 0.0
    correct = 0
    seen = 0
    predictions = []
    targets = []

    progress_bar = tqdm(test_loader, desc="测试进度")
    with torch.no_grad():
        for images, labels, identifier in progress_bar:
            images, labels = images.to(device), labels.to(device)

            logits = model(images)
            running_loss += criterion(logits, labels).item()
            batch_preds = F.softmax(logits, dim=1).argmax(dim=1)
            correct += (batch_preds == labels).sum().item()
            seen += labels.size(0)

            predictions.extend(batch_preds.cpu().numpy())
            targets.extend(labels.cpu().numpy())
    test_loss = running_loss / len(test_loader)  # averaged over batches
    test_acc = correct / seen
    cm = confusion_matrix(targets, predictions)
    plot_confusion_matrix(cm, class_names, cmap_path)

    names = class_names or [f'Class {i}' for i in range(cm.shape[0])]
    report = classification_report(targets, predictions, target_names=names)

    print(f'\n测试结果:')
    print(f' - 平均Loss: {test_loss:.6f}')
    print(f' - 准确率: {test_acc*100:.2f}%')
    print(f' - 混淆矩阵已保存至: {cmap_path}')
    print('\n分类报告:')
    print(report)

    return test_loss, test_acc, cm

def plot_confusion_matrix(cm, class_names=None, save_path='confusion_matrix.png', dpi=150):
    """Render confusion matrix `cm` as an annotated heatmap and save it.

    Args:
        cm: square 2-D array of counts (rows = true labels, cols = predicted).
        class_names: axis tick labels; defaults to stringified class indices.
        save_path: output image path.
        dpi: output image resolution.
    """
    if class_names is None:
        class_names = [str(i) for i in range(len(cm))]
    
    fig, ax = plt.subplots(figsize=(10, 8))
    im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    cbar = ax.figure.colorbar(im, ax=ax)
    cbar.ax.set_ylabel('sample number', rotation=-90, va="bottom")

    ax.set(
        xticks=np.arange(len(class_names)),
        yticks=np.arange(len(class_names)),
        xticklabels=class_names,
        yticklabels=class_names,
        ylabel='true label',
        xlabel='pred label'  # fixed typo: was 'prep label'
    )

    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    # Annotate every cell; use white text on dark (high-count) cells so the
    # numbers stay readable against the Blues colormap.
    fmt = 'd'
    thresh = cm.max() / 2.
    for i in range(len(class_names)):
        for j in range(len(class_names)):
            ax.text(
                j, i, 
                format(cm[i, j], fmt),
                ha="center", va="center",
                color="white" if cm[i, j] > thresh else "black"
            )
    ax.set_title("confusion matrix", pad=20)
    fig.tight_layout()
    
    plt.savefig(save_path, dpi=dpi, bbox_inches='tight')
    plt.close(fig)

if __name__ == "__main__":
    # CLI entry point: trains or evaluates the 3D ResNet-50 classifier
    # depending on --mode.
    import argparse
    parser = argparse.ArgumentParser(description="训练ResNet模型")
    parser.add_argument("--mode", type=str, default="test", help="训练模式|测试模式")
    parser.add_argument("--data_root", type=str, default="./ribfrac-dataset/exam_data", help="数据集根目录")  # switch the dataset directory here
    parser.add_argument("--model_dir", type=str, default="./train_classfication/3d_best_model.pth", help="模型文件路径")
    parser.add_argument("--batch_size", type=int, default=8, help="批次大小")
    parser.add_argument("--num_workers", type=int, default=2, help="数据加载线程数")
    args = parser.parse_args()

    data_root = args.data_root
    model_dir = args.model_dir
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Only query the GPU name when CUDA is actually available; the original
    # unconditional get_device_name(0) crashed on CPU-only machines even
    # though the line above falls back to CPU.
    if device.type == "cuda":
        print(f"使用设备: {device} - {torch.cuda.get_device_name(0)}")
    else:
        print(f"使用设备: {device}")
    model = generate_model(50, n_classes=5, dropout_prob=0.3).to(device)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"模型参数数量: {num_params/1e6:.2f}M")
    if args.mode == 'train':
        # Honor the --batch_size CLI flag (was hard-coded to 8).
        train_loader, val_loader = create_classifier_loaders(
            data_root,
            batch_size=args.batch_size
        )

        criterion = LabelSmoothCrossEntropy(smoothing=0.1)
        optimizer = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.01)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5, verbose=True)

        train_model(model, train_loader, val_loader, criterion, optimizer, 
                    device, num_epochs=50, scheduler=scheduler)
    if args.mode == 'test':
        # Honor the --batch_size CLI flag (was hard-coded to 8).
        test_loader = create_test_classifier_loaders(
            data_root=data_root,
            batch_size=args.batch_size
        )
        criterion = LabelSmoothCrossEntropy(smoothing=0.1)
        # map_location lets a GPU-trained checkpoint load on a CPU-only host.
        model.load_state_dict(torch.load(model_dir, map_location=device))
        test_model(model, test_loader, criterion, device,
                   cmap_path='./train_classfication/confusion_matrix.png')
