import os
import torch
from torch import nn
from tqdm import tqdm
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torchvision.models import resnet50
from dataloader_roi import MedicalDataset
# from dataset_25d import create_classifier_loaders, create_test_classifier_loaders
from torch.utils.data import DataLoader

os.environ["CUDA_VISIBLE_DEVICES"]="0"

class Backbone(nn.Module):
    """ResNet-50 feature extractor with spatial dropout.

    Keeps every ResNet-50 child up to and including the global average
    pool (drops only the final fc head), so the output is a pooled
    2048-channel feature map.
    """

    def __init__(self):
        super().__init__()
        resnet = resnet50(weights=None)  # weights loaded externally by the caller
        # children() order: stem, layer1-4, avgpool, fc — keep the first 9 (no fc).
        self.backbone = nn.Sequential(*list(resnet.children())[:9])
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        """Return dropout-regularized ResNet features for a [B, 3, H, W] batch."""
        return self.dropout(self.backbone(x))
    
class ModalitySpecificNet(nn.Module):
    """Classifier over three image modalities per sample.

    Each modality is encoded by a shared RadImageNet-pretrained ResNet-50
    backbone and projected to 512 channels.  The spatial tokens of all
    three modalities are jointly attended by a transformer encoder; the
    attended features (averaged across modalities) are fused residually
    with the raw per-modality features, then classified.

    Args:
        num_classes: number of output classes.
    """

    def __init__(self, num_classes):
        super().__init__()

        self.backbone = Backbone()
        # map_location='cpu' makes a GPU-saved checkpoint loadable on
        # CPU-only machines; parameters are moved later via .to(device).
        self.backbone.load_state_dict(
            torch.load("./pretrained_model/RadImageNet_pytorch/ResNet50.pt",
                       map_location="cpu"))

        # 1x1 conv projection: ResNet's 2048 channels -> 512.
        self.projection = nn.Sequential(
            nn.Conv2d(2048, 512, 1),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )

        # batch_first is left at the default (False): tokens are [seq, B, 512].
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=512,
            nhead=8,
            dim_feedforward=1024,
            dropout=0.2
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=3)

        # Fuses [transformer-avg | modality0 | modality1 | modality2] -> 512 ch.
        self.fusion = nn.Sequential(
            nn.Conv2d(512 * 4, 512, 1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.Dropout(0.3),
            nn.ReLU()
        )

        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(256, num_classes)
        )

    def forward(self, x):
        """Classify a batch.

        Args:
            x: tensor of shape [B, 3, C, H, W] — three modalities per sample.
               # assumes C == 3 so the ResNet stem accepts it — TODO confirm

        Returns:
            Logits of shape [B, num_classes].
        """
        # Encode and project each modality independently (shared backbone).
        modalities = []
        for i in range(3):
            img = x[:, i, :, :, :]
            feat = self.backbone(img)
            projected = self.projection(feat)  # [B, 512, H, W]
            modalities.append(projected)

        b, c, h, w = modalities[0].shape
        # Flatten each modality's spatial map into transformer tokens.
        spatial_tokens = []
        for mod in modalities:
            spatial_embed = mod.flatten(2).permute(2, 0, 1)  # [H*W, B, C]
            spatial_tokens.append(spatial_embed)

        all_tokens = torch.cat(spatial_tokens, dim=0)  # [3*H*W, B, C]
        fused = self.transformer(all_tokens)

        # Restore spatial layout, then average-fuse across the 3 modalities.
        fused = fused.permute(1, 2, 0).view(b, c, 3, h, w)
        fused = fused.mean(dim=2)  # [B, C, H, W]

        # Residual fusion of the attended features with per-modality features.
        fused = self.fusion(torch.cat([
            fused,
            modalities[0],
            modalities[1],
            modalities[2]
        ], dim=1)) + fused

        return self.classifier(fused)

def train_model(model, train_loader, val_loader, criterion, optimizer, device,
                num_epochs=50, scheduler=None):
    """Train and validate `model`, checkpointing the best validation accuracy.

    Per-epoch metrics go to stdout and ./model_roi_radimagenet/train_log.txt;
    the best-accuracy weights are saved to best_model.pth and the final
    weights to last_model.pth.

    Args:
        model: network to optimize (already on `device`).
        train_loader / val_loader: iterables yielding dicts with at least
            'images' and 'labels' tensors.
        criterion: loss function taking (logits, labels).
        optimizer: optimizer over model.parameters().
        device: torch device to run on.
        num_epochs: number of training epochs.
        scheduler: optional LR scheduler stepped with the validation loss
            (e.g. ReduceLROnPlateau); skipped when None.
    """
    # Create the checkpoint/log directory up front so the first write can't fail.
    os.makedirs('./model_roi_radimagenet', exist_ok=True)
    min_valid_loss = np.inf
    best_acc = 0

    def process_epoch(data_loader, is_train, epoch):
        # One full pass over data_loader; gradients enabled only when training.
        model.train() if is_train else model.eval()
        stage = "训练" if is_train else "验证"
        total_loss = 0.0
        total_correct = 0
        total_samples = 0
        all_preds = []
        all_labels = []
        progress_bar = tqdm(data_loader, desc=f"{stage} Epoch {epoch+1}", leave=False)

        with torch.set_grad_enabled(is_train):
            for batch_idx, batch in enumerate(progress_bar):
                images = batch['images'].to(device)
                labels = batch['labels'].to(device)
                if is_train:
                    optimizer.zero_grad()
                outputs = model(images)
                loss = criterion(outputs, labels)

                if is_train:
                    loss.backward()
                    optimizer.step()

                total_loss += loss.item()
                probs = F.softmax(outputs, dim=1)
                preds = probs.argmax(dim=1)
                total_correct += (preds == labels).sum().item()
                total_samples += labels.size(0)

                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

                # Periodically show raw predictions vs. ground truth.
                if batch_idx % 50 == 0:
                    progress_bar.write(
                        f"Batch {batch_idx}: 预测 {preds} | 真实 {labels.cpu().numpy()}"
                    )
        epoch_loss = total_loss / len(data_loader)
        epoch_acc = total_correct / total_samples

        if not is_train:
            # Binary (labels 0/1) confusion matrix for the validation pass.
            cm = confusion_matrix(all_labels, all_preds, labels=[0, 1])
            cm_str = np.array2string(cm, separator='\t',
                                    formatter={'int': lambda x: f"{x:3d}"},
                                    prefix='\t')
            print(f"验证混淆矩阵:\n{cm_str}\n")

        return epoch_loss, epoch_acc

    for e in range(num_epochs):
        train_loss, train_acc = process_epoch(train_loader, True, e)
        val_loss, val_acc = process_epoch(val_loader, False, e)
        # scheduler defaults to None; stepping unconditionally would crash.
        if scheduler is not None:
            scheduler.step(val_loss)
        print(f'Epoch {e+1}')
        print(f'训练 Loss: {train_loss:.8f} Acc: {train_acc:.4f}')
        print(f'验证 Loss: {val_loss:.8f} Acc: {val_acc:.4f}')

        with open('./model_roi_radimagenet/train_log.txt', 'a') as f:
            f.write(f'Epoch {e+1}\n')
            f.write(f'Train Loss: {train_loss:.8f} Acc: {train_acc:.4f}\n')
            f.write(f'Valid Loss: {val_loss:.8f} Acc: {val_acc:.4f}\n')
            f.write('\n')

        if val_acc > best_acc:
            print(f'✅验证准确率上升 - loss变化：({min_valid_loss:.6f} → {val_loss:.6f})')
            best_acc = val_acc
            torch.save(model.state_dict(), './model_roi_radimagenet/best_model.pth')
            min_valid_loss = val_loss

    torch.save(model.state_dict(), './model_roi_radimagenet/last_model.pth')

def test_model(model, test_loader, criterion, device,
               cmap_path=None, class_names=None):
    """Evaluate `model` on `test_loader` and report loss/accuracy/report.

    Args:
        model: trained network (already on `device`).
        test_loader: iterable yielding dicts with 'images' and 'labels'.
        criterion: loss function taking (logits, labels).
        device: torch device to run on.
        cmap_path: where to save the confusion-matrix plot; when None the
            plotter's default path is used.
        class_names: optional class labels for the report and plot.

    Returns:
        (test_loss, test_acc, cm) — batch-averaged loss, accuracy in [0, 1],
        and the confusion matrix.
    """
    model.eval()
    total_loss = 0.0
    total_correct = 0
    total_samples = 0
    all_preds = []
    all_labels = []

    progress_bar = tqdm(test_loader, desc="测试进度")
    with torch.no_grad():
        for batch_idx, batch in enumerate(progress_bar):
            images = batch['images'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(images)
            loss = criterion(outputs, labels)
            total_loss += loss.item()
            probs = F.softmax(outputs, dim=1)
            preds = probs.argmax(dim=1)
            total_correct += (preds == labels).sum().item()
            total_samples += labels.size(0)

            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())
    test_loss = total_loss / len(test_loader)  # averaged over batches
    test_acc = total_correct / total_samples
    cm = confusion_matrix(all_labels, all_preds)
    # Fall back to the plotter's default save path when cmap_path is None;
    # forwarding None directly would make plt.savefig fail.
    if cmap_path is not None:
        plot_confusion_matrix(cm, class_names, cmap_path)
    else:
        plot_confusion_matrix(cm, class_names)

    report = classification_report(
        all_labels, all_preds,
        target_names=class_names or [f'Class {i}' for i in range(cm.shape[0])]
    )

    print(f'\n测试结果:')
    print(f' - 平均Loss: {test_loss:.6f}')
    print(f' - 准确率: {test_acc*100:.2f}%')
    print(f' - 混淆矩阵已保存至: {cmap_path}')
    print('\n分类报告:')
    print(report)

    return test_loss, test_acc, cm

def plot_confusion_matrix(cm, class_names=None, save_path='./model_roi_radimagenet/confusion_matrix.png', dpi=150):
    """Render a confusion matrix as an annotated heatmap and save it to disk.

    Args:
        cm: square integer confusion matrix (rows = true, cols = predicted).
        class_names: axis tick labels; defaults to stringified indices.
        save_path: output image file path.
        dpi: output resolution.
    """
    if class_names is None:
        class_names = [str(i) for i in range(len(cm))]

    fig, ax = plt.subplots(figsize=(10, 8))
    im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    cbar = ax.figure.colorbar(im, ax=ax)
    cbar.ax.set_ylabel('sample number', rotation=-90, va="bottom")

    ax.set(
        xticks=np.arange(len(class_names)),
        yticks=np.arange(len(class_names)),
        xticklabels=class_names,
        yticklabels=class_names,
        ylabel='true label',
        xlabel='predicted label'  # fixed label typo: was 'prep label'
    )

    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    # Annotate every cell; switch text color to white past half the max count
    # so the number stays readable on dark cells.
    fmt = 'd'
    thresh = cm.max() / 2.
    for i in range(len(class_names)):
        for j in range(len(class_names)):
            ax.text(
                j, i,
                format(cm[i, j], fmt),
                ha="center", va="center",
                color="white" if cm[i, j] > thresh else "black"
            )
    ax.set_title("confusion matrix", pad=20)
    fig.tight_layout()

    plt.savefig(save_path, dpi=dpi, bbox_inches='tight')
    plt.close(fig)

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Only query the GPU name when CUDA is actually available —
    # torch.cuda.get_device_name(0) raises on CPU-only machines.
    if device.type == "cuda":
        print(f"使用设备: {device} - {torch.cuda.get_device_name(0)}")
    else:
        print(f"使用设备: {device}")
    model = ModalitySpecificNet(num_classes=2).to(device)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"模型参数数量: {num_params/1e6:.2f}M")

    data_path = "./datasets/pT/three_label_data"
    train_dataset = MedicalDataset(data_path, phase='train')
    val_dataset = MedicalDataset(data_path, phase='val')
    train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=4, shuffle=False)
    test_dataset = MedicalDataset(data_path, phase='test')
    test_loader = DataLoader(test_dataset, batch_size=4, shuffle=False)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=0.0001, weight_decay=0.005)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5, verbose=True)
    # train_model(model, train_loader,val_loader, criterion, optimizer, device, num_epochs=50, scheduler=scheduler)
    # map_location keeps a GPU-saved checkpoint loadable on the current device.
    model.load_state_dict(torch.load('./model_roi_radimagenet/last_model.pth', map_location=device))
    test_model(model, test_loader, criterion, device, cmap_path='./model_roi_radimagenet/confusion_matrix.png',class_names=['0','1'])


