import os
import numpy as np
import pandas as pd
import nibabel as nib
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.models as models
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import torch.nn.functional as F 
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import datetime
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize

# File/configuration parameters
DATA_DIR = './datasets/single_mod'
# Training hyperparameters
BATCH_SIZE = 8
EPOCHS = 30
LEARNING_RATE = 1e-3
IMG_SIZE = 224

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MedicalDataset(Dataset):
    """Single-modality NIfTI dataset with ROI masks.

    Each sample is the 2D slice containing the largest ROI area, cropped
    around the (expanded) ROI bounding box, aspect-ratio-preserving resized
    and padded to IMG_SIZE x IMG_SIZE, ROI-weight enhanced, and
    percentile-normalized to [0, 1].
    """

    def __init__(self, phase='train', scaler=None, ori_img=0.6):
        """
        Args:
            phase: dataset split, one of 'train', 'val', 'test', 'foreign'
            scaler: StandardScaler shared from the training split.
                NOTE(review): the scaler is created/stored here but never
                fitted or applied anywhere in this file — confirm whether
                standardization was intended.
            ori_img: base weight applied to the whole image before ROI
                enhancement (original author noted its effect is unclear)
        """
        self.phase = phase
        self.data_dir = os.path.join(DATA_DIR, phase)
        self.ori_img = ori_img  # base ROI-enhancement weight; effectiveness unverified per original author
        # Load the label file for this split
        self.label_df = pd.read_csv(os.path.join(self.data_dir, 'label.csv'))
        self.labels = (self.label_df['label'] - 1).tolist()  # convert labels to 0-based

        # Standardization scaler: only instantiated for the training split,
        # otherwise reuse whatever was passed in (may be None).
        if scaler is None and phase == 'train':
            self.scaler = StandardScaler()
        else:
            self.scaler = scaler

    def load_nifti(self, p_id):
        """
        Load one patient's image and ROI mask, and return the processed slice.

        Returns:
            roi_weighted: enhanced image; shape (224, 224), float32 in [0, 1].
            (A padded binary ROI mask of the same shape is computed internally
            but not returned.)
        """
        p_id = str(p_id)
        # File-name conventions differ between the external ('foreign')
        # dataset and the in-house splits.
        if self.phase == 'foreign':
            img_path = os.path.join(self.data_dir, "images", f"{p_id}_000.nii.gz")
            roi_path = os.path.join(self.data_dir, "masks", f"{p_id}_roi.nii.gz")
        else:
            img_path = os.path.join(self.data_dir, "images", f"{p_id}_T2_axi_000.nii.gz")
            roi_path = os.path.join(self.data_dir, "masks", f"{p_id}_T2_axi_roi.nii.gz")
        
        # Load the NIfTI volumes
        img = nib.load(img_path).get_fdata().astype(np.float32)
        roi = nib.load(roi_path).get_fdata().astype(np.uint8)
        # Pick the axial slice with the largest ROI area
        roi_sums = roi.sum(axis=(0, 1))
        slice_idx = np.argmax(roi_sums)
        
        # Extract that slice
        img_slice = img[..., slice_idx]
        roi_slice = roi[..., slice_idx]
        
        # ROI processing pipeline
        # 1. Bounding box of the ROI.
        #    NOTE(review): ymax/xmax are inclusive indices (from np.where)
        #    but are later used as exclusive slice bounds; the 20% expansion
        #    below makes this off-by-one mostly irrelevant in practice.
        rows = np.any(roi_slice, axis=1)
        cols = np.any(roi_slice, axis=0)
        ymin, ymax = np.where(rows)[0][[0, -1]] if rows.any() else (0, img_slice.shape[0])
        xmin, xmax = np.where(cols)[0][[0, -1]] if cols.any() else (0, img_slice.shape[1])

        # 2. Expand the box by 20% of its size on each side for context
        #    (the original comment said 10%, but the code uses 0.2).
        height = ymax - ymin
        width = xmax - xmin
        ymin = max(0, int(ymin - 0.2 * height))
        ymax = min(img_slice.shape[0], int(ymax + 0.2 * height))
        xmin = max(0, int(xmin - 0.2 * width))
        xmax = min(img_slice.shape[1], int(xmax + 0.2 * width))

        # 3. Crop the ROI region
        cropped_img = img_slice[ymin:ymax, xmin:xmax]
        cropped_roi = roi_slice[ymin:ymax, xmin:xmax]

        # 4. Aspect-ratio-preserving rescale so the longer side fits IMG_SIZE
        h, w = cropped_img.shape
        scale_ratio = min(IMG_SIZE/h, IMG_SIZE/w)
        new_h, new_w = int(h * scale_ratio), int(w * scale_ratio)
        
        # Bilinear interpolation for the image
        scaled_img = cv2.resize(cropped_img, (new_w, new_h), 
                               interpolation=cv2.INTER_LINEAR)
        # Nearest-neighbor interpolation for the mask (keeps it binary)
        scaled_roi = cv2.resize(cropped_roi.astype(np.uint8), (new_w, new_h),
                               interpolation=cv2.INTER_NEAREST)

        # 5. Center-pad to the target size; the image is padded with its
        #    5th-percentile intensity (approximate background), the mask with 0.
        pad_top = (IMG_SIZE - new_h) // 2
        pad_bottom = IMG_SIZE - new_h - pad_top
        pad_left = (IMG_SIZE - new_w) // 2
        pad_right = IMG_SIZE - new_w - pad_left
        padded_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT, 
                                       value=np.percentile(scaled_img, 5))
        padded_roi = cv2.copyMakeBorder(scaled_roi, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT,
                                       value=0)

        # 6. ROI-weighted enhancement: erode the mask and take the difference
        #    to get a thin boundary band, then up-weight the ROI interior
        #    (+0.5) and the boundary band (+0.4) on top of the base weight.
        kernel = np.ones((3, 3), np.uint8)
        eroded_roi = cv2.erode(padded_roi, kernel, iterations=1)
        boundary_mask = (padded_roi - eroded_roi).astype(np.float32)
        
        roi_weighted = padded_img * (self.ori_img + 0.5*(padded_roi > 0)+ 0.4 * boundary_mask)  # ROI enhancement
        
        # 7. Percentile normalization using only ROI pixels (2nd/98th
        #    percentiles), then clip the whole image to [0, 1]
        roi_pixels = roi_weighted[padded_roi > 0]
        if len(roi_pixels) > 0:
            p2, p98 = np.percentile(roi_pixels, (2, 98))
            roi_weighted = np.clip((roi_weighted - p2) / (p98 - p2 + 1e-8), 0, 1)

        return roi_weighted.astype(np.float32)
    
    def __len__(self):
        return len(self.label_df)

    def __getitem__(self, idx):
        """Return a dict: 1x224x224 float image, 0-based label, patient id."""
        row = self.label_df.iloc[idx]
        p_id = row['p_id']
        proc_img = self.load_nifti(p_id)
        # Data augmentation for the training split only
        if self.phase == 'train':
            # Random horizontal flip
            if np.random.rand() < 0.5:
                proc_img = cv2.flip(proc_img, 1)
            
            # Random rotation (-15 to 15 degrees) around the image center
            angle = np.random.uniform(-15, 15)
            M = cv2.getRotationMatrix2D((IMG_SIZE//2, IMG_SIZE//2), angle, 1.0)
            proc_img = cv2.warpAffine(proc_img, M, (IMG_SIZE, IMG_SIZE),
                                    flags=cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_REFLECT)
            
            # Random brightness scaling
            brightness = np.random.uniform(0.8, 1.2)
            proc_img = np.clip(proc_img * brightness, 0, 1)
            
            # Additive Gaussian noise
            if np.random.rand() < 0.5:
                noise = np.random.normal(0, 0.03, proc_img.shape).astype(np.float32)
                proc_img = np.clip(proc_img + noise, 0, 1)
            
            # Random Gaussian blur
            if np.random.rand() < 0.3:
                ksize = np.random.choice([3,5])
                proc_img = cv2.GaussianBlur(proc_img, (ksize,ksize), 0)
        return {
            'image': torch.FloatTensor(proc_img).unsqueeze(0),
            'label': row['label'] - 1,
            'p_id': p_id
        }

class SimplifiedModel(nn.Module):
    """ResNet-34 backbone adapted to single-channel input, followed by a
    small convolutional feature processor and an MLP classifier.

    ``forward`` expects a dict with key 'image' of shape (B, 1, H, W) and
    returns logits of shape (B, num_classes). The pooled 256-d features are
    captured into ``self.features`` (shape (B, 256, 1, 1)) via a forward
    hook, for later extraction.
    """

    def __init__(self, num_classes):
        super().__init__()

        # Backbone: ImageNet-pretrained ResNet-34, adapted to 1-channel input.
        self.backbone = models.resnet34(pretrained=True)

        # Fix: the original replaced conv1 with a randomly-initialized layer,
        # discarding the pretrained first-layer filters. Sum the RGB kernels
        # into one channel instead: a single-channel input then produces the
        # same response the pretrained filter gives to a gray RGB input.
        pretrained_conv1 = self.backbone.conv1.weight.data  # (64, 3, 7, 7)
        self.backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        with torch.no_grad():
            self.backbone.conv1.weight.copy_(pretrained_conv1.sum(dim=1, keepdim=True))

        # Drop the original ImageNet classifier head (forward() bypasses it anyway).
        self.backbone.fc = nn.Identity()

        # Simplified feature processor: 512 -> 256 channels, then global pool.
        self.feature_processor = nn.Sequential(
            nn.Conv2d(512, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1)
        )

        # Classifier head
        self.classifier = nn.Sequential(
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(128, num_classes)
        )

        # Forward hook to capture the pooled features for extract_features().
        self.features = None
        self.feature_processor.register_forward_hook(self._hook_features)

    def _hook_features(self, module, input, output):
        # Stores the feature_processor output on every forward pass.
        self.features = output

    def forward(self, x):
        """x: dict with 'image' -> (B, 1, H, W) tensor. Returns logits."""
        # Reset captured features so stale values from a previous pass
        # cannot be read if this forward fails partway through.
        self.features = None

        x = self.backbone.conv1(x['image'])
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)

        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)

        x = self.feature_processor(x)  # hook fires here, filling self.features
        x = x.view(x.size(0), -1)
        return self.classifier(x)

def extract_features(model, loader, device):
    """Run ``model`` over ``loader`` and collect the hooked feature maps.

    Args:
        model: module exposing ``model.features`` after each forward pass
            (see SimplifiedModel's forward hook).
        loader: DataLoader yielding dicts with 'image' and 'p_id'.
        device: torch device to run inference on.

    Returns:
        DataFrame with a 'p_id' column followed by 'feature_i' columns,
        one row per sample.
    """
    model.eval()
    all_features = []
    all_pids = []

    with torch.no_grad():
        for batch in tqdm(loader, desc='Extracting Features'):
            inputs = {'image': batch['image'].to(device)}
            # p_id may be collated as a tensor (numeric ids) or a plain list
            # (string ids) — handle both instead of assuming a tensor.
            p_ids = batch['p_id']
            if torch.is_tensor(p_ids):
                p_ids = p_ids.cpu().numpy()

            # Forward pass; the hook stores features on the model.
            _ = model(inputs)

            features = model.features.cpu().numpy()
            # Fix: flatten per sample instead of squeeze(). squeeze() also
            # removed the batch axis when the last batch held exactly one
            # sample, producing a 1-D array and corrupting np.vstack below.
            features = features.reshape(features.shape[0], -1)

            all_features.append(features)
            all_pids.extend(p_ids)

    # Concatenate all batches into one (num_samples, num_features) array.
    all_features = np.vstack(all_features)

    feature_df = pd.DataFrame(all_features)
    feature_df.columns = [f'feature_{i}' for i in range(feature_df.shape[1])]
    feature_df.insert(0, 'p_id', all_pids)

    return feature_df

def load_train_val_test_data():
    """Build the train/val/test datasets and wrap each in a DataLoader.

    The training dataset owns the StandardScaler instance, which is shared
    with the validation and test datasets.
    """
    train_ds = MedicalDataset(phase='train')
    shared_scaler = train_ds.scaler
    val_ds = MedicalDataset(phase='val', scaler=shared_scaler)
    test_ds = MedicalDataset(phase='test', scaler=shared_scaler)

    def _make_loader(ds, do_shuffle):
        # pin_memory speeds up host-to-GPU transfers
        return DataLoader(ds, batch_size=BATCH_SIZE,
                          shuffle=do_shuffle, pin_memory=True)

    # show_samples(train_ds)  # visualize training samples before/after augmentation
    return (_make_loader(train_ds, True),
            _make_loader(val_ds, False),
            _make_loader(test_ds, False))

def load_foreign_data():
    """Build a DataLoader over the external ('foreign') validation dataset.

    NOTE(review): no scaler is passed here, so ``foreign_dataset.scaler`` is
    None (the original comment claimed the training scaler was reused).
    Harmless as long as the scaler stays unused — confirm intent.
    """
    foreign_dataset = MedicalDataset(phase='foreign')
    # Fix: do not shuffle an evaluation/extraction loader — matches the
    # val/test loaders and keeps the output row order deterministic.
    foreign_loader = DataLoader(foreign_dataset, batch_size=BATCH_SIZE,
                                shuffle=False, pin_memory=True)
    return foreign_loader

if __name__ == "__main__":
    # Build loaders and a model sized to the number of distinct labels.
    train_loader, val_loader, test_loader = load_train_val_test_data()
    foreign_loader = load_foreign_data()
    train_dataset = train_loader.dataset
    num_classes = train_dataset.label_df['label'].nunique()
    model = SimplifiedModel(num_classes).to(device)

    print("\nStarting Feature Extraction...")
    # Fix: pass map_location so a checkpoint saved on GPU also loads on
    # CPU-only hosts instead of raising a deserialization error.
    model.load_state_dict(torch.load('./focal_loss_resnet/best_model_simple.pth',
                                     map_location=device))

    # Extract features from the external validation set
    feature_df = extract_features(model, foreign_loader, device)

    # Save to Excel (pandas requires openpyxl for .xlsx output)
    out_path = './focal_loss_resnet/extracted_foreign_features.xlsx'
    feature_df.to_excel(out_path, index=False)
    # Fix: the original message named a different file than the one written.
    print(f"Features saved to {out_path}")

