import os
import numpy as np
import pandas as pd
import nibabel as nib
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.models as models
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import torch.nn.functional as F 
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import datetime
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize

# File/configuration parameters
DATA_DIR = './datasets/single_mod'
# Training hyperparameters
BATCH_SIZE = 8
EPOCHS = 30
LEARNING_RATE = 1e-3
IMG_SIZE = 224  # square side (pixels) every slice is scaled/padded to

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MedicalDataset(Dataset):
    # Dataset yielding one preprocessed 2D axial slice per patient (the
    # slice with the largest ROI area), ROI-enhanced and scaled to [0, 1].
    def __init__(self, phase='train', scaler=None, ori_img=0.6):
        """
        phase: dataset split, one of 'train', 'val', 'test', 'foreign'
        scaler: StandardScaler shared from the training split.
                NOTE(review): stored only -- never fitted or applied
                anywhere in this class; kept for interface compatibility.
        ori_img: base weight of the raw image in the ROI-weighted
                 enhancement (original author was unsure of its benefit)
        """
        self.phase = phase
        self.data_dir = os.path.join(DATA_DIR, phase)
        self.ori_img = ori_img  # ROI enhancement base weight; benefit unclear (original author's note)
        # Load the label file for this split
        self.label_df = pd.read_csv(os.path.join(self.data_dir, 'label.csv'))
        self.labels = (self.label_df['label'] - 1).tolist()  # convert 1-based labels to 0-based

        # Normalisation scaler (only created on the training split; other
        # splits receive the training split's scaler)
        if scaler is None and phase == 'train':
            self.scaler = StandardScaler()
        else:
            self.scaler = scaler

    def load_nifti(self, p_id):
        """
        Load one patient's image + ROI mask and preprocess a single slice.

        Returns:
        roi_weighted: enhanced image; shape (IMG_SIZE, IMG_SIZE); values in [0, 1]
        (the ROI mask mentioned in the original docstring is computed but
        not returned -- only the weighted image is)
        """
        p_id = str(p_id)
        # Foreign (external validation) data uses a different file naming scheme
        if self.phase == 'foreign':
            img_path = os.path.join(self.data_dir, "images", f"{p_id}_000.nii.gz")
            roi_path = os.path.join(self.data_dir, "masks", f"{p_id}_roi.nii.gz")
        else:
            img_path = os.path.join(self.data_dir, "images", f"{p_id}_T2_axi_000.nii.gz")
            roi_path = os.path.join(self.data_dir, "masks", f"{p_id}_T2_axi_roi.nii.gz")
        
        # Load NIfTI volumes
        img = nib.load(img_path).get_fdata().astype(np.float32)
        roi = nib.load(roi_path).get_fdata().astype(np.uint8)
        # Pick the slice (last axis) containing the largest ROI area
        roi_sums = roi.sum(axis=(0, 1))
        slice_idx = np.argmax(roi_sums)
        
        # Extract the selected slice
        img_slice = img[..., slice_idx]
        roi_slice = roi[..., slice_idx]
        
        # ROI processing pipeline
        # 1. Bounding box of the ROI (falls back to the full slice when the
        #    ROI is empty).  NOTE(review): ymax/xmax are the last *inclusive*
        #    ROI indices, yet step 3 slices with an exclusive upper bound;
        #    the 20% expansion below usually hides this off-by-one.
        rows = np.any(roi_slice, axis=1)
        cols = np.any(roi_slice, axis=0)
        ymin, ymax = np.where(rows)[0][[0, -1]] if rows.any() else (0, img_slice.shape[0])
        xmin, xmax = np.where(cols)[0][[0, -1]] if cols.any() else (0, img_slice.shape[1])

        # 2. Expand the box by 20% of its size for context (the original
        #    comment said 10%, but the code uses a 0.2 factor)
        height = ymax - ymin
        width = xmax - xmin
        ymin = max(0, int(ymin - 0.2 * height))
        ymax = min(img_slice.shape[0], int(ymax + 0.2 * height))
        xmin = max(0, int(xmin - 0.2 * width))
        xmax = min(img_slice.shape[1], int(xmax + 0.2 * width))

        # 3. Crop the ROI region
        cropped_img = img_slice[ymin:ymax, xmin:xmax]
        cropped_roi = roi_slice[ymin:ymax, xmin:xmax]

        # 4. Aspect-ratio-preserving resize to fit inside IMG_SIZE x IMG_SIZE
        h, w = cropped_img.shape
        scale_ratio = min(IMG_SIZE/h, IMG_SIZE/w)
        new_h, new_w = int(h * scale_ratio), int(w * scale_ratio)
        
        # Bilinear interpolation for the image
        scaled_img = cv2.resize(cropped_img, (new_w, new_h), 
                               interpolation=cv2.INTER_LINEAR)
        # Nearest-neighbour interpolation for the binary ROI mask
        scaled_roi = cv2.resize(cropped_roi.astype(np.uint8), (new_w, new_h),
                               interpolation=cv2.INTER_NEAREST)

        # 5. Pad symmetrically to the target size; image padding uses the
        #    5th-percentile intensity as a background value, mask uses 0
        pad_top = (IMG_SIZE - new_h) // 2
        pad_bottom = IMG_SIZE - new_h - pad_top
        pad_left = (IMG_SIZE - new_w) // 2
        pad_right = IMG_SIZE - new_w - pad_left
        padded_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT, 
                                       value=np.percentile(scaled_img, 5))
        padded_roi = cv2.copyMakeBorder(scaled_roi, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT,
                                       value=0)

        # 6. ROI-weighted enhancement: boost the ROI interior and give the
        #    ROI boundary (mask minus its erosion) an extra weight
        kernel = np.ones((3, 3), np.uint8)
        eroded_roi = cv2.erode(padded_roi, kernel, iterations=1)
        boundary_mask = (padded_roi - eroded_roi).astype(np.float32) 
        
        roi_weighted = padded_img * (self.ori_img + 0.5*(padded_roi > 0)+ 0.4 * boundary_mask)  # ROI region enhancement
        
        # 7. Percentile normalisation (2nd/98th) computed from ROI pixels
        #    only, then applied to the whole image and clipped to [0, 1]
        roi_pixels = roi_weighted[padded_roi > 0]
        if len(roi_pixels) > 0:
            p2, p98 = np.percentile(roi_pixels, (2, 98))
            roi_weighted = np.clip((roi_weighted - p2) / (p98 - p2 + 1e-8), 0, 1)

        return roi_weighted.astype(np.float32)
    
    def __len__(self):
        # One sample per row of the label file
        return len(self.label_df)

    def __getitem__(self, idx):
        """Return a dict with the (1, H, W) image tensor, 0-based label and patient id."""
        row = self.label_df.iloc[idx]
        p_id = row['p_id']
        proc_img = self.load_nifti(p_id)
        # Data augmentation, training split only
        if self.phase == 'train':
            # Random horizontal flip
            if np.random.rand() < 0.5:
                proc_img = cv2.flip(proc_img, 1)
            
            # Random rotation (-15 to 15 degrees), always applied
            angle = np.random.uniform(-15, 15)
            M = cv2.getRotationMatrix2D((IMG_SIZE//2, IMG_SIZE//2), angle, 1.0)
            proc_img = cv2.warpAffine(proc_img, M, (IMG_SIZE, IMG_SIZE),
                                    flags=cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_REFLECT)
            
            # Random brightness jitter
            brightness = np.random.uniform(0.8, 1.2)
            proc_img = np.clip(proc_img * brightness, 0, 1)
            
            # Additive Gaussian noise
            if np.random.rand() < 0.5:
                noise = np.random.normal(0, 0.03, proc_img.shape).astype(np.float32)
                proc_img = np.clip(proc_img + noise, 0, 1)
            
            # Random Gaussian blur
            if np.random.rand() < 0.3:
                ksize = np.random.choice([3,5])
                proc_img = cv2.GaussianBlur(proc_img, (ksize,ksize), 0)
        return {
            'image': torch.FloatTensor(proc_img).unsqueeze(0),
            'label': row['label'] - 1,
            'p_id': p_id
        }
    
    def visualize_sample(self, idx, show_original=False):
        """
        Visualize a single sample.
        idx: sample index
        show_original: also show the raw slice and ROI mask alongside the
                       processed image
        """
        row = self.label_df.iloc[idx]
        p_id = row['p_id']
        label = row['label'] - 1  # 0-based label
        
        # Processed image (without training augmentation)
        proc_img = self.load_nifti(p_id)
        
        # Optionally reload the raw slice for side-by-side display
        if show_original:
            # Duplicates the path/slice-selection logic of load_nifti
            if self.phase == 'foreign':
                img_path = os.path.join(self.data_dir, "images", f"{p_id}_000.nii.gz")
                roi_path = os.path.join(self.data_dir, "masks", f"{p_id}_roi.nii.gz")
            else:
                img_path = os.path.join(self.data_dir, "images", f"{p_id}_T2_axi_000.nii.gz")
                roi_path = os.path.join(self.data_dir, "masks", f"{p_id}_T2_axi_roi.nii.gz")
            
            img = nib.load(img_path).get_fdata().astype(np.float32)
            roi = nib.load(roi_path).get_fdata().astype(np.uint8)
            roi_sums = roi.sum(axis=(0, 1))
            slice_idx = np.argmax(roi_sums)
            img_slice = img[..., slice_idx]
            roi_slice = roi[..., slice_idx]
        
        # Build the figure (3 panels when showing the original, else 1)
        fig, axes = plt.subplots(1, 3 if show_original else 1, figsize=(15, 5))
        
        if show_original:
            # Raw slice
            axes[0].imshow(img_slice, cmap='gray')
            axes[0].set_title(f'Original Slice\nPatient ID: {p_id}')
            axes[0].axis('off')
            
            # ROI mask
            axes[1].imshow(roi_slice, cmap='jet')
            axes[1].set_title('ROI Mask')
            axes[1].axis('off')
            
            # Processed image
            axes[2].imshow(proc_img, cmap='gray')
            axes[2].set_title(f'Processed Image\nLabel: {label}')
            axes[2].axis('off')
        else:
            # Only the processed image
            axes.imshow(proc_img, cmap='gray')
            axes.set_title(f'Patient ID: {p_id}\nLabel: {label}')
            axes.axis('off')
        
        plt.tight_layout()
        plt.show()

    def visualize_random_samples(self, num_samples=3):
        """Visualize several randomly chosen samples (with originals)."""
        indices = np.random.choice(len(self), num_samples, replace=False)
        for idx in indices:
            self.visualize_sample(idx, show_original=True)

class SimplifiedModel(nn.Module):
    """ResNet-34 backbone adapted to single-channel input, followed by a
    small convolutional refinement head and an MLP classifier.

    Input is a dict with key 'image' holding a (B, 1, H, W) tensor.
    """

    def __init__(self, num_classes):
        super().__init__()

        # Pretrained ResNet-34; the stem conv is replaced by a 1-channel
        # version (its pretrained RGB weights are discarded).
        self.backbone = models.resnet34(pretrained=True)
        self.backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)

        # Remove the original classification head.
        self.backbone.fc = nn.Identity()

        # Lightweight feature refinement over the layer4 feature map.
        self.feature_processor = nn.Sequential(
            nn.Conv2d(512, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1)
        )

        # Final classifier.
        self.classifier = nn.Sequential(
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(128, num_classes)
        )

    def forward(self, x):
        # Run the ResNet stem and stages explicitly so the (replaced) fc
        # layer is bypassed and we keep the 2D feature map for the head.
        feats = x['image']
        stages = (
            self.backbone.conv1,
            self.backbone.bn1,
            self.backbone.relu,
            self.backbone.maxpool,
            self.backbone.layer1,
            self.backbone.layer2,
            self.backbone.layer3,
            self.backbone.layer4,
        )
        for stage in stages:
            feats = stage(feats)

        feats = self.feature_processor(feats)
        return self.classifier(feats.flatten(1))

class FocalLoss(nn.Module):
    """Multi-class focal loss with label smoothing.

    Combines smoothed soft-target cross entropy with the focal modulation
    (1 - p_t)^gamma; optional per-class `alpha` weights indexed by the
    (argmax of the smoothed) target class.
    """

    def __init__(self, alpha=None, gamma=2.0, label_smoothing=0.1):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.label_smoothing = label_smoothing

    def forward(self, inputs, targets):
        n_cls = inputs.size(-1)
        log_probs = F.log_softmax(inputs, dim=-1)

        # Smoothed one-hot targets: true class gets 1 - s + s/C, rest s/C.
        one_hot = F.one_hot(targets, n_cls).float()
        soft_targets = (1 - self.label_smoothing) * one_hot + self.label_smoothing / n_cls

        # Per-sample smoothed cross entropy.
        ce = -(soft_targets * log_probs).sum(dim=-1)

        # Focal term down-weights confident (easy) samples.
        modulating = (1 - torch.exp(-ce)) ** self.gamma
        per_sample = modulating * ce

        if self.alpha is not None:
            per_sample = self.alpha[soft_targets.argmax(dim=1)] * per_sample
        return per_sample.mean()

def load_train_val_test_data():
    """Build the train/val/test datasets and return their DataLoaders.

    The training dataset creates the scaler; the val/test datasets are
    handed the same object so all splits would share one normalisation
    (the scaler is currently unused inside MedicalDataset).
    """
    train_dataset = MedicalDataset(phase='train')
    val_dataset = MedicalDataset(phase='val', scaler=train_dataset.scaler)
    test_dataset = MedicalDataset(phase='test', scaler=train_dataset.scaler)

    # Only the training loader shuffles.
    loaders = []
    for dataset, shuffle in ((train_dataset, True),
                             (val_dataset, False),
                             (test_dataset, False)):
        loaders.append(DataLoader(dataset, batch_size=BATCH_SIZE,
                                  shuffle=shuffle, pin_memory=True))
    # show_samples(train_dataset)  # preview augmented training samples
    return tuple(loaders)

def load_foreign_data():
    """Build and return the DataLoader for the external-validation
    ('foreign') dataset, previewing a few samples on the way.

    NOTE(review): unlike val/test, this dataset is created without the
    training scaler -- harmless today because MedicalDataset never applies
    the scaler, but worth confirming if normalisation is ever enabled.
    """
    foreign_dataset = MedicalDataset(phase='foreign')
    foreign_dataset.visualize_random_samples(num_samples=3)
    # Evaluation loader: shuffle=False for consistency with the val/test
    # loaders (was shuffle=True; metrics are order-independent, but a
    # shuffled evaluation set is misleading and harder to compare across runs).
    foreign_loader = DataLoader(foreign_dataset, batch_size=BATCH_SIZE,
                                shuffle=False, pin_memory=True)
    return foreign_loader

def train_and_evaluate(model, train_loader, val_loader):
    """Train `model` with a class-weighted focal loss and evaluate on
    `val_loader` after each epoch.

    Side effects:
      - writes a timestamped training log (training_log_<ts>.txt) in cwd
      - saves the best model (by macro F1) to
        ./focal_loss_resnet/best_model_simple.pth

    Returns:
      dict of per-epoch lists: 'train_loss' and 'val_loss' (summed over
      batches, not averaged) and 'val_f1'.
    """
    # Training log setup
    start_time = datetime.datetime.now()
    timestamp = start_time.strftime("%Y%m%d_%H%M%S")
    log_filename = f"training_log_{timestamp}.txt"

    # Per-epoch metric history
    metrics = {
        'train_loss': [],
        'val_loss': [],
        'val_f1': []
    }

    # Record run configuration
    with open(log_filename, 'w') as f:
        f.write(f"Training Start Time: {start_time}\n")
        f.write(f"Epochs: {EPOCHS}, Initial LR: {LEARNING_RATE}, Device: {device}\n")
        f.write("Hyperparameters:\n")
        f.write(f"- Focal Loss Gamma: 2.0\n")
        f.write(f"- Optimizer: AdamW\n")
        f.write(f"- Weight Decay: 1e-4\n")
        f.write(f"- Gradient Clip: 1.0\n\n")

    # Inverse-sqrt class weights from the training label distribution
    class_counts = np.bincount(train_loader.dataset.labels)
    class_weights = 1 / np.sqrt(class_counts)
    class_weights = torch.tensor(class_weights / class_weights.sum(), dtype=torch.float).to(device)

    class DynamicFocalLoss(nn.Module):
        """Class-weighted focal loss.

        NOTE(review): the intended per-epoch decay multiplies *all* class
        weights by the same scalar (decay_factor ** epoch) and then
        renormalises, so it cancels out -- the effective weights are static.
        `epoch` was also never updated by the loop. Behaviour preserved.
        """
        def __init__(self, init_weights, gamma=2.0, decay_factor=0.95):
            super().__init__()
            # clone().detach() instead of torch.tensor(tensor): numerically
            # identical, avoids the copy-construct warning
            self.init_weights = init_weights.clone().detach().float().to(device)
            self.gamma = gamma
            self.decay_factor = decay_factor
            self.epoch = 0  # would need external updates to have any effect (see NOTE)

        def forward(self, inputs, targets):
            # Decayed-then-renormalised weights (a no-op; see class NOTE)
            current_weights = self.init_weights * (self.decay_factor ** self.epoch)
            current_weights = current_weights / current_weights.sum()

            ce_loss = F.cross_entropy(inputs, targets, reduction='none')
            pt = torch.exp(-ce_loss)
            focal_loss = (1 - pt)**self.gamma * ce_loss
            weighted_loss = focal_loss * current_weights[targets]
            return weighted_loss.mean()

    criterion = DynamicFocalLoss(class_weights)

    # Discriminative LRs: smaller LR for the pretrained backbone, stronger
    # weight decay for the classifier head
    optimizer = torch.optim.AdamW([
        {'params': model.backbone.parameters(), 'lr': LEARNING_RATE*0.1},
        {'params': model.feature_processor.parameters()},
        {'params': model.classifier.parameters(), 'weight_decay': 0.05}
    ], lr=LEARNING_RATE, weight_decay=1e-4)

    # OneCycleLR is a per-batch scheduler (total_steps covers every batch),
    # so it is stepped once after each optimizer update below.
    # Bug fix: previously `scheduler.step(val_f1)` was called once per epoch,
    # passing the F1 score as OneCycleLR's `epoch` argument and leaving the
    # schedule desynchronised from total_steps.
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer, 
        max_lr=LEARNING_RATE*10,
        total_steps=EPOCHS*len(train_loader),
        pct_start=0.3
    )

    # Bug fix: autocast was used without a GradScaler; fp16 gradients can
    # underflow, and clipping was applied to (un)scaled grads inconsistently.
    use_amp = device.type == 'cuda'
    amp_scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

    best_val_f1 = 0.0
    for epoch in range(EPOCHS):
        epoch_start = datetime.datetime.now()
        model.train()
        train_loss = 0.0

        for batch in tqdm(train_loader, desc=f'Epoch {epoch+1}'):
            inputs = {'image': batch['image'].to(device)}
            labels = batch['label'].to(device)

            optimizer.zero_grad()

            with torch.cuda.amp.autocast(enabled=use_amp):
                outputs = model(inputs)
                loss = criterion(outputs, labels)

            amp_scaler.scale(loss).backward()
            # Unscale before clipping so the threshold applies to true gradients
            amp_scaler.unscale_(optimizer)
            nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            amp_scaler.step(optimizer)
            amp_scaler.update()
            scheduler.step()  # per-batch LR update (see scheduler note above)

            train_loss += loss.item()

        # Validation pass
        model.eval()
        val_loss = 0.0
        all_preds, all_labels = [], []

        with torch.no_grad():
            for batch in val_loader:
                inputs = {'image': batch['image'].to(device)}
                labels = batch['label'].to(device)

                outputs = model(inputs)
                loss = criterion(outputs, labels)

                val_loss += loss.item()
                preds = outputs.argmax(dim=1)

                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        # Epoch metrics
        val_acc = accuracy_score(all_labels, all_preds)
        val_f1 = f1_score(all_labels, all_preds, average='macro')
        cm = confusion_matrix(all_labels, all_preds)
        current_lrs = [group['lr'] for group in optimizer.param_groups]
        epoch_duration = datetime.datetime.now() - epoch_start

        # Build the log entry
        log_entry = f"\nEpoch {epoch+1}/{EPOCHS} [{epoch_duration}]"
        log_entry += f"\nTrain Loss: {train_loss/len(train_loader):.4f}"
        log_entry += f"\nVal Loss: {val_loss/len(val_loader):.4f}"
        log_entry += f"\nVal Acc: {val_acc:.4f} | Val F1: {val_f1:.4f}"
        log_entry += f"\nLearning Rates: {[f'{lr:.2e}' for lr in current_lrs]}"
        log_entry += "\nConfusion Matrix:\n" + np.array2string(cm, separator=', ')

        # Print and persist the log
        print(log_entry)
        with open(log_filename, 'a') as f:
            f.write(log_entry + "\n")

        metrics['train_loss'].append(train_loss)
        metrics['val_loss'].append(val_loss)
        metrics['val_f1'].append(val_f1)

        # Save the best model by macro F1
        if val_f1 > best_val_f1:
            best_val_f1 = val_f1
            save_msg = f"\n ********** New Best Model (F1: {val_f1:.4f})********** "
            print(save_msg)
            with open(log_filename, 'a') as f:
                f.write(save_msg + "\n")
            # Bug fix: ensure the target directory exists before saving
            os.makedirs('./focal_loss_resnet', exist_ok=True)
            torch.save(model.state_dict(), './focal_loss_resnet/best_model_simple.pth')

    # Final summary
    end_time = datetime.datetime.now()
    total_duration = end_time - start_time
    final_log = f"\n\nTraining Complete!\nTotal Duration: {total_duration}\n"
    final_log += f"Best Validation F1: {best_val_f1:.4f}\n"
    # Bug fix: message previously claimed best_model_ori.pth, but the file
    # actually saved above is best_model_simple.pth
    final_log += f"Model Saved: best_model_simple.pth"

    print(final_log)
    with open(log_filename, 'a') as f:
        f.write(final_log)
    return metrics

def test_model(model, test_loader, device, num_classes):
    """Run inference over `test_loader` and return per-sample results.

    Args:
        model: module taking {'image': tensor} and returning (B, C) logits
        test_loader: DataLoader yielding dicts with 'image', 'label', 'p_id'
        device: torch device for inference
        num_classes: number of classes (one probability column per class)

    Returns:
        DataFrame with columns p_id, true_label, pred_label and
        prob_class_{0..num_classes-1}.
    """
    model.eval()
    all_probs = []
    all_preds = []
    all_labels = []
    all_pids = []

    with torch.no_grad():
        for batch in tqdm(test_loader, desc='Testing'):
            inputs = {'image': batch['image'].to(device)}
            labels = batch['label'].cpu().numpy()
            # Bug fix: the default collate_fn turns numeric patient ids into
            # a tensor but leaves string ids as a plain list; the old
            # unconditional .cpu().numpy() crashed on string ids.
            p_ids = batch['p_id']
            if torch.is_tensor(p_ids):
                p_ids = p_ids.cpu().numpy()

            outputs = model(inputs)
            probs = F.softmax(outputs, dim=1).cpu().numpy()
            preds = np.argmax(probs, axis=1)

            all_probs.extend(probs)
            all_preds.extend(preds)
            all_labels.extend(labels)
            all_pids.extend(p_ids)

    # Assemble the results table
    results_df = pd.DataFrame({
        'p_id': all_pids,
        'true_label': all_labels,
        'pred_label': all_preds
    })

    # One probability column per class
    for cls in range(num_classes):
        results_df[f'prob_class_{cls}'] = [prob[cls] for prob in all_probs]

    return results_df

def save_test_report(results_df, num_classes, outputpath, auc_png_path):
    """Save test results to an Excel workbook and plot one-vs-rest ROC curves.

    Args:
        results_df: DataFrame from test_model() with true_label, pred_label
                    and prob_class_{i} columns
        num_classes: number of classes
        outputpath: destination .xlsx path
        auc_png_path: destination .png path for the ROC figure
    """
    # Excel workbook: raw predictions, summary metrics, confusion matrix.
    # Context manager guarantees the writer is closed even on a failed write.
    with pd.ExcelWriter(outputpath, engine='xlsxwriter') as writer:
        # Raw per-sample predictions
        results_df.to_excel(writer, sheet_name='Raw Data', index=False)

        # Summary statistics
        stats_df = pd.DataFrame({
            'Metric': ['Accuracy', 'Macro F1', 'Weighted F1'],
            'Value': [
                accuracy_score(results_df.true_label, results_df.pred_label),
                f1_score(results_df.true_label, results_df.pred_label, average='macro'),
                f1_score(results_df.true_label, results_df.pred_label, average='weighted')
            ]
        })
        stats_df.to_excel(writer, sheet_name='Statistics', index=False)

        # Confusion matrix
        cm = confusion_matrix(results_df.true_label, results_df.pred_label)
        cm_df = pd.DataFrame(cm,
            columns=[f'Pred {i}' for i in range(num_classes)],
            index=[f'True {i}' for i in range(num_classes)])
        cm_df.to_excel(writer, sheet_name='Confusion Matrix')

    # One-vs-rest ROC curves
    plt.figure(figsize=(10, 8))
    y_true = label_binarize(results_df.true_label, classes=range(num_classes))
    y_score = results_df[[f'prob_class_{i}' for i in range(num_classes)]].values

    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(num_classes):
        fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    colors = ['#4285f4', '#db4437', '#f4b400', '#0f9d58', '#ab47bc']
    # Bug fix: zip() against the fixed 5-colour list silently dropped the
    # ROC curves of any class beyond the 5th; cycle the palette instead.
    for i in range(num_classes):
        plt.plot(fpr[i], tpr[i], color=colors[i % len(colors)], lw=2,
                 label=f'Class {i} (AUC = {roc_auc[i]:.2f})')

    plt.plot([0, 1], [0, 1], 'k--', lw=2)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Multiclass ROC Curves')
    plt.legend(loc="lower right")
    plt.savefig(auc_png_path, bbox_inches='tight')
    plt.close()

if __name__ == "__main__":
    train_loader, val_loader, test_loader = load_train_val_test_data()
    train_dataset = train_loader.dataset
    num_classes = train_dataset.label_df['label'].nunique()
    model = SimplifiedModel(num_classes).to(device)

    # train_and_evaluate(model, train_loader, val_loader)

    print("\nStarting Final Testing...")
    model.load_state_dict(torch.load('./focal_loss_resnet/best_model_simple.pth'))
    
    # # 获取测试集结果
    # test_results = test_model(model, val_loader, device, num_classes)
    
    # # # 生成测试报告
    # save_test_report(test_results, num_classes)
    
    # print("Test report saved to test_report.xlsx")
    # print("AUC curves saved to auc_curves.png")

    # 加载外部验证数据集并进行测试
    foreign_loader = load_foreign_data()
    foreign_results = test_model(model, foreign_loader, device, num_classes)
    save_test_report(foreign_results, num_classes, './focal_loss_resnet/foreign_pred.xlsx', './focal_loss_resnet/foreign_auc_curves.png')
    print("foreign report saved to foreign_report.xlsx")
    print("AUC curves saved to foreign_auc_curves.png")
