import os
import numpy as np
import pandas as pd
import nibabel as nib
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.models as models
import timm
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import matplotlib.pyplot as plt  # 新增导入
import torch.nn.functional as F 
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import datetime

# File/path configuration
DATA_DIR = './datasets/single_model'
# Training hyperparameters
BATCH_SIZE = 8
EPOCHS = 30
LEARNING_RATE = 1e-4
IMG_SIZE = 224  # target square resolution (matches the 224x224 ViT backbone)

ADAM_EPS = 1e-8  # optimizer epsilon
GRAD_CLIP = 1.0  # gradient clipping threshold
WARMUP_EPOCHS = 3  # number of LR warm-up epochs
MIN_LR = 1e-6   # learning-rate lower bound
LABEL_SMOOTHING = 0.1  # label smoothing factor

# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MedicalDataset(Dataset):
    """T2-axial MRI slice dataset with ROI-focused preprocessing.

    Each item is the single axial slice containing the largest ROI area,
    cropped around the ROI (with extra context), resized with preserved
    aspect ratio, padded to IMG_SIZE x IMG_SIZE, ROI-intensity-enhanced,
    and percentile-normalized to [0, 1].
    """

    def __init__(self, phase='train', scaler=None, ori_img=0.6):
        """
        phase: dataset split, one of 'train', 'val', 'test'
        scaler: StandardScaler handed down from the training split
        ori_img: base intensity weight for non-ROI pixels in the
                 ROI-enhancement step (ROI pixels get ori_img + 0.5)
        """
        self.phase = phase
        self.data_dir = os.path.join(DATA_DIR, phase)
        self.ori_img = ori_img  # ROI enhancement weight; benefit unverified (original author's note)
        # Load the label file for this split.
        self.label_df = pd.read_csv(os.path.join(self.data_dir, 'label.csv'))
        self.labels = (self.label_df['label'] - 1).tolist()  # convert labels to 0-based

        # Data normalization scaler.
        # NOTE(review): the scaler is created/propagated but never fit or
        # applied anywhere in the visible code — confirm whether it is needed.
        if scaler is None and phase == 'train':
            self.scaler = StandardScaler()
        else:
            self.scaler = scaler

    def load_nifti(self, p_id):
        """Load, crop, resize and enhance one patient's image/ROI pair.

        Returns:
            roi_weighted: enhanced image, shape (IMG_SIZE, IMG_SIZE), float32 in [0, 1]
            padded_roi: processed ROI mask, shape (IMG_SIZE, IMG_SIZE), uint8 in {0, 1}
        """
        p_id = str(p_id)
        img_path = os.path.join(self.data_dir, "images", f"{p_id}_T2_axi_000.nii.gz")
        roi_path = os.path.join(self.data_dir, "masks", f"{p_id}_T2_axi_roi.nii.gz")
        
        # Load the NIfTI volumes.
        img = nib.load(img_path).get_fdata().astype(np.float32)
        roi = nib.load(roi_path).get_fdata().astype(np.uint8)
        # Pick the axial slice with the largest ROI area.
        roi_sums = roi.sum(axis=(0, 1))
        slice_idx = np.argmax(roi_sums)
        
        # Extract that slice.
        img_slice = img[..., slice_idx]
        roi_slice = roi[..., slice_idx]
        
        # ROI processing pipeline
        # 1. Find the ROI bounding box (falls back to the full slice extent
        #    when the mask is empty).
        rows = np.any(roi_slice, axis=1)
        cols = np.any(roi_slice, axis=0)
        ymin, ymax = np.where(rows)[0][[0, -1]] if rows.any() else (0, img_slice.shape[0])
        xmin, xmax = np.where(cols)[0][[0, -1]] if cols.any() else (0, img_slice.shape[1])

        # 2. Expand the box for context — factor 0.2, i.e. ~20% of the ROI
        #    size per side (the original comment said 10%; the code uses 0.2).
        height = ymax - ymin
        width = xmax - xmin
        ymin = max(0, int(ymin - 0.2 * height))
        ymax = min(img_slice.shape[0], int(ymax + 0.2 * height))
        xmin = max(0, int(xmin - 0.2 * width))
        xmax = min(img_slice.shape[1], int(xmax + 0.2 * width))

        # 3. Crop to the expanded ROI region.
        cropped_img = img_slice[ymin:ymax, xmin:xmax]
        cropped_roi = roi_slice[ymin:ymax, xmin:xmax]

        # 4. Aspect-ratio-preserving resize so the crop fits inside IMG_SIZE.
        h, w = cropped_img.shape
        scale_ratio = min(IMG_SIZE/h, IMG_SIZE/w)
        new_h, new_w = int(h * scale_ratio), int(w * scale_ratio)
        
        # Bilinear interpolation for the image ...
        scaled_img = cv2.resize(cropped_img, (new_w, new_h), 
                               interpolation=cv2.INTER_LINEAR)
        # ... nearest-neighbor for the mask so it stays binary.
        scaled_roi = cv2.resize(cropped_roi.astype(np.uint8), (new_w, new_h),
                               interpolation=cv2.INTER_NEAREST)

        # 5. Center-pad to IMG_SIZE x IMG_SIZE; the image border uses the 5th
        #    intensity percentile so the padding blends with dark background.
        pad_top = (IMG_SIZE - new_h) // 2
        pad_bottom = IMG_SIZE - new_h - pad_top
        pad_left = (IMG_SIZE - new_w) // 2
        pad_right = IMG_SIZE - new_w - pad_left
        padded_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT, 
                                       value=np.percentile(scaled_img, 5))
        padded_roi = cv2.copyMakeBorder(scaled_roi, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT,
                                       value=0)

        # 6. ROI-weighted enhancement: ROI pixels are scaled by
        #    (ori_img + 0.5), background pixels by ori_img alone.
        roi_weighted = padded_img * (self.ori_img + 0.5*(padded_roi > 0))
        
        # 7. Percentile normalization using ROI-pixel statistics only, then
        #    clip the whole image into [0, 1].
        roi_pixels = roi_weighted[padded_roi > 0]
        if len(roi_pixels) > 0:
            p2, p98 = np.percentile(roi_pixels, (2, 98))
            roi_weighted = np.clip((roi_weighted - p2) / (p98 - p2 + 1e-8), 0, 1)

        return roi_weighted.astype(np.float32), padded_roi.astype(np.uint8)
    
    def __len__(self):
        # One sample per row of the label file.
        return len(self.label_df)

    def __getitem__(self, idx):
        """Return one sample: image (1,H,W) float tensor, roi_mask (H,W),
        0-based integer label, and the patient id."""
        row = self.label_df.iloc[idx]
        p_id = row['p_id']
        proc_img, proc_roi = self.load_nifti(p_id)
        return {
            'image': torch.FloatTensor(proc_img).unsqueeze(0),
            'roi_mask': torch.FloatTensor(proc_roi),
            'label': row['label'] - 1,
            'p_id': p_id
        }

class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention (Hu et al., 2018).

    Global-average-pools each channel, runs the pooled vector through a
    two-layer bottleneck MLP with a sigmoid gate, and rescales the input
    channels by the resulting attention weights.
    """

    def __init__(self, channels, reduction=16):
        """
        channels: number of input (and output) channels
        reduction: bottleneck reduction ratio; the hidden width is
                   channels // reduction, floored at 4 to avoid
                   over-compressing narrow feature maps
        """
        super().__init__()
        # Bug fix: the original overwrote `reduction` with
        # max(channels // 16, 4), silently ignoring the caller's argument
        # and pinning the hidden width at 16 for any channels >= 64. Honor
        # the parameter and floor the hidden dimension itself instead,
        # matching the author's stated intent ("minimum hidden dim of 4").
        hidden = max(channels // reduction, 4)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, hidden),
            nn.ReLU(),
            nn.Linear(hidden, channels),
            nn.Sigmoid()
        )
    
    def forward(self, x):
        """Return `x` rescaled per channel by the learned attention weights."""
        b, c, _, _ = x.size()
        y = self.avgpool(x).view(b, c)          # squeeze: (B, C)
        y = self.fc(y).view(b, c, 1, 1)         # excite:  (B, C, 1, 1)
        return x * y.expand_as(x)

class FusionModel(nn.Module):
    """Three-stage fusion classifier: ResNet-34 → ViT blocks → SE-CNN head.

    Input: dict with key 'image' holding a (B, 1, 224, 224) tensor.
    Output: (B, num_classes) logits.
    """

    def __init__(self, num_classes):
        super().__init__()
        # Stage 1: ResNet-34 trunk with a fresh single-channel stem conv.
        # NOTE(review): the replacement conv is randomly initialized, so the
        # pretrained stem weights are discarded — confirm this is intended.
        # The final avgpool/fc are dropped, leaving a 512-channel feature map.
        original_resnet = models.resnet34(pretrained=True)
        self.resnet = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False),
            *list(original_resnet.children())[1:-2]
        )
        
        # Stage 2: 1x1 adapter lifting 512 → 768 channels so the ResNet map
        # can be fed into the ViT transformer blocks as tokens.
        self.vit_adapter = nn.Sequential(
            nn.Conv2d(512, 768, 1),
            nn.BatchNorm2d(768),
            nn.GELU()
        )
        self.vit = timm.create_model('vit_base_patch16_224', pretrained=True)
        
        # Stage 3: SE-augmented CNN head ending in global average pooling.
        self.cnn = nn.Sequential(
            nn.Conv2d(768, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.GELU(),
            SEBlock(256),
            nn.MaxPool2d(2),
            
            nn.Conv2d(256, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.GELU(),
            SEBlock(512),
            nn.AdaptiveAvgPool2d(1)
        )
        
        # MLP classifier with batch norm and heavy dropout.
        self.classifier = nn.Sequential(
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.GELU(),
            nn.Dropout(0.6),
            
            nn.Linear(512, 256),
            # no-op placeholder (original comment called it a "dimension check layer")
            nn.Identity(),
            nn.BatchNorm1d(256),
            nn.GELU(),
            nn.Dropout(0.4),
            
            nn.Linear(256, num_classes)
        )
    def forward(self, x):
        """x: dict with 'image' → (B, 1, H, W); returns (B, num_classes) logits."""
        img = x['image']

        # ResNet feature extraction.
        resnet_feat = self.resnet(img)
        
        # ViT feature refinement: flatten the map into tokens, prepend the
        # CLS token, and run the transformer blocks.
        # NOTE(review): the ViT positional embedding (self.vit.pos_embed) is
        # never added to the token sequence — confirm this is intentional.
        vit_feat = self.vit_adapter(resnet_feat)
        B, C, H, W = vit_feat.shape
        vit_feat = vit_feat.flatten(2).permute(0, 2, 1)
        vit_feat = torch.cat([self.vit.cls_token.expand(B,-1,-1), vit_feat], dim=1)
        vit_feat = self.vit.blocks(self.vit.norm_pre(vit_feat))
        # Drop the CLS token and restore the (B, C, H, W) spatial layout.
        vit_feat = vit_feat[:, 1:].permute(0, 2, 1).view(B, C, H, W)
        
        # Fine-grained CNN processing + global pooling.
        cnn_feat = self.cnn(vit_feat)
        # Flatten the pooled features to (B, C) regardless of trailing dims.
        if cnn_feat.dim() == 4:  # [B, C, 1, 1]
            cnn_feat = cnn_feat.view(B, -1)  # reshape to [B, C]
        elif cnn_feat.dim() == 3:  # [B, C, 1]
            cnn_feat = cnn_feat.squeeze(-1)
        return self.classifier(cnn_feat)

class FocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017) with optional per-class alpha weights.

    FL = alpha_t * (1 - p_t)**gamma * CE, where p_t is the model's softmax
    probability for the true class.
    """

    def __init__(self, alpha=None, gamma=2.0):
        """
        alpha: optional 1D tensor of per-class weights (same device as the
               logits), or None for uniform weighting
        gamma: focusing parameter; gamma=0 recovers (alpha-weighted)
               cross-entropy
        """
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, inputs, targets):
        # Bug fix: p_t must be derived from the UNWEIGHTED cross-entropy.
        # The original passed weight=alpha into the same CE used for p_t,
        # so exp(-ce) no longer equaled the true-class probability and the
        # (1 - p_t)**gamma modulation was distorted by the class weights.
        ce_loss = F.cross_entropy(inputs, targets, reduction='none')
        pt = torch.exp(-ce_loss)  # true-class probability per sample
        focal_loss = (1 - pt) ** self.gamma * ce_loss
        if self.alpha is not None:
            # Apply the per-class alpha weighting after the modulation.
            focal_loss = focal_loss * self.alpha[targets]
        return focal_loss.mean()

def load_train_val_test_data():
    """Build the train/val/test datasets and wrap each in a DataLoader.

    The StandardScaler created by the training split is shared with the
    val/test splits so all three use the same normalization statistics.
    """
    train_dataset = MedicalDataset(phase='train')
    # Hand the training scaler down to the evaluation splits.
    val_dataset = MedicalDataset(phase='val', scaler=train_dataset.scaler)
    test_dataset = MedicalDataset(phase='test', scaler=train_dataset.scaler)

    def _make_loader(dataset, shuffle):
        # All loaders share batch size, pinned memory, and drop_last.
        return DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=shuffle,
                          pin_memory=True, drop_last=True)

    train_loader = _make_loader(train_dataset, True)
    val_loader = _make_loader(val_dataset, False)
    test_loader = _make_loader(test_dataset, False)
    # show_samples(train_dataset)  # visualize samples before/after augmentation
    return train_loader, val_loader, test_loader

def train_and_evaluate(model, train_loader, val_loader):
    """Train with focal loss + mixed precision, validating every epoch.

    Logs each epoch to a timestamped text file, steps an LR-plateau
    scheduler on validation macro-F1, and checkpoints the best-F1 weights
    to 'model_resnet_vit.pth'. Returns 0 on completion.
    """
    # Initialize the training log file.
    start_time = datetime.datetime.now()
    timestamp = start_time.strftime("%Y%m%d_%H%M%S")
    log_filename = f"training_log_{timestamp}.txt"
    
    # Record the run configuration.
    with open(log_filename, 'w') as f:
        f.write(f"Training Start Time: {start_time}\n")
        f.write(f"Epochs: {EPOCHS}, Initial LR: {LEARNING_RATE}, Device: {device}\n")
        f.write("Hyperparameters:\n")
        f.write(f"- Focal Loss Gamma: 2.0\n")
        f.write(f"- Optimizer: AdamW\n")
        f.write(f"- Weight Decay: 1e-4\n")
        f.write(f"- Gradient Clip: {GRAD_CLIP}\n\n")

    # Inverse-sqrt class-frequency weights for the focal loss.
    class_counts = np.bincount(train_loader.dataset.labels)
    class_weights = 1 / np.sqrt(class_counts)
    class_weights = torch.tensor(class_weights / class_weights.sum(), dtype=torch.float).to(device)
    
    criterion = FocalLoss(alpha=class_weights, gamma=2.0)

    # Stage-wise optimization: pretrained stages get reduced LRs and the
    # classifier gets extra weight decay. Fix: pass the declared ADAM_EPS
    # constant (it was previously defined but never used).
    optimizer = torch.optim.AdamW([
        {'params': model.resnet.parameters(), 'lr': LEARNING_RATE*0.1},
        {'params': model.vit_adapter.parameters(), 'lr': LEARNING_RATE},
        {'params': model.vit.parameters(), 'lr': LEARNING_RATE*0.5},
        {'params': model.cnn.parameters()},
        {'params': model.classifier.parameters(), 'weight_decay': 0.05}
    ], lr=LEARNING_RATE, weight_decay=1e-4, eps=ADAM_EPS)
    
    # Halve LRs when validation F1 plateaus. The deprecated `verbose`
    # kwarg was dropped (removed in recent PyTorch releases).
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='max', factor=0.5, patience=3
    )

    # Fix: the original used autocast WITHOUT a GradScaler, risking fp16
    # gradient underflow. The scaler is a no-op (enabled=False) on CPU.
    amp_enabled = torch.cuda.is_available()
    scaler = torch.cuda.amp.GradScaler(enabled=amp_enabled)

    best_val_f1 = 0.0
    for epoch in range(EPOCHS):
        epoch_start = datetime.datetime.now()
        model.train()
        train_loss = 0.0
        
        for batch in tqdm(train_loader, desc=f'Epoch {epoch+1}'):
            inputs = {'image': batch['image'].to(device)}
            labels = batch['label'].to(device)
            
            optimizer.zero_grad()
            
            with torch.cuda.amp.autocast(enabled=amp_enabled):
                outputs = model(inputs)
                loss = criterion(outputs, labels)
            
            # Backward on the scaled loss; unscale before clipping so the
            # GRAD_CLIP threshold applies to true gradient magnitudes
            # (fix: the clip value was hardcoded as 1.0 instead of GRAD_CLIP).
            scaler.scale(loss).backward()
            scaler.unscale_(optimizer)
            nn.utils.clip_grad_norm_(model.parameters(), GRAD_CLIP)
            scaler.step(optimizer)
            scaler.update()
            
            train_loss += loss.item()
        
        # Validation phase.
        model.eval()
        val_loss = 0.0
        all_preds, all_labels = [], []
        
        with torch.no_grad():
            for batch in val_loader:
                inputs = {'image': batch['image'].to(device)}
                labels = batch['label'].to(device)
                
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                
                val_loss += loss.item()
                preds = outputs.argmax(dim=1)
                
                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())
        
        # Epoch metrics.
        val_acc = accuracy_score(all_labels, all_preds)
        val_f1 = f1_score(all_labels, all_preds, average='macro')
        cm = confusion_matrix(all_labels, all_preds)
        current_lrs = [group['lr'] for group in optimizer.param_groups]
        epoch_duration = datetime.datetime.now() - epoch_start
        
        # Build the log entry.
        log_entry = f"\nEpoch {epoch+1}/{EPOCHS} [{epoch_duration}]"
        log_entry += f"\nTrain Loss: {train_loss/len(train_loader):.4f}"
        log_entry += f"\nVal Loss: {val_loss/len(val_loader):.4f}"
        log_entry += f"\nVal Acc: {val_acc:.4f} | Val F1: {val_f1:.4f}"
        log_entry += f"\nLearning Rates: {[f'{lr:.2e}' for lr in current_lrs]}"
        log_entry += "\nConfusion Matrix:\n" + np.array2string(cm, separator=', ')
        
        # Print and persist the log entry.
        print(log_entry)
        with open(log_filename, 'a') as f:
            f.write(log_entry + "\n")
        
        # Step the scheduler on F1 and checkpoint the best model so far.
        scheduler.step(val_f1)
        if val_f1 > best_val_f1:
            best_val_f1 = val_f1
            save_msg = f"\n ********** New Best Model (F1: {val_f1:.4f})********** "
            print(save_msg)
            with open(log_filename, 'a') as f:
                f.write(save_msg + "\n")
            torch.save(model.state_dict(), f'model_resnet_vit.pth')

    # Final summary.
    end_time = datetime.datetime.now()
    total_duration = end_time - start_time
    final_log = f"\n\nTraining Complete!\nTotal Duration: {total_duration}\n"
    final_log += f"Best Validation F1: {best_val_f1:.4f}\n"
    final_log += f"Model Saved: model_resnet_vit.pth"
    
    print(final_log)
    with open(log_filename, 'a') as f:
        f.write(final_log)
    return 0

if __name__ == "__main__":
    # Fix: build the loaders ONCE. The original called
    # load_train_val_test_data() a second time right before training,
    # re-reading every dataset split for no benefit.
    train_loader, val_loader, test_loader = load_train_val_test_data()
    # Infer the number of classes from the training labels.
    num_classes = train_loader.dataset.label_df['label'].nunique()
    model = FusionModel(num_classes).to(device)

    train_and_evaluate(model, train_loader, val_loader)
    #test_model(model, test_loader, device, num_classes)