import os
import numpy as np
import pandas as pd
import nibabel as nib
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.models as models
import timm
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import torch.nn.functional as F 
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import datetime

from test_method import test_model, save_test_report

from visualization import show_samples
from visualization import visualize_gradcam

# File/path configuration
DATA_DIR = './datasets/single_mod'
# Training hyperparameters
BATCH_SIZE = 8
EPOCHS = 30
LEARNING_RATE = 1e-3
IMG_SIZE = 224  # side length of the square model input

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MedicalDataset(Dataset):
    """Single-modality T2-axial MRI dataset.

    For each patient, loads the NIfTI image and ROI mask, selects the axial
    slice with the largest ROI area, crops around the ROI, resizes/pads to
    IMG_SIZE x IMG_SIZE, applies ROI-weighted enhancement, and (for the
    training split only) random augmentation.
    """
    def __init__(self, phase='train', scaler=None, ori_img=0.6):
        """
        phase: dataset split, one of 'train', 'val', 'test'
        scaler: StandardScaler shared from the training split, or None
        ori_img: base weight of the original image in the ROI-weighted
            enhancement (whether this factor actually helps is unverified)
        """
        self.phase = phase
        self.data_dir = os.path.join(DATA_DIR, phase)
        self.ori_img = ori_img  # ROI enhancement base weight
        # Load this split's label file
        self.label_df = pd.read_csv(os.path.join(self.data_dir, 'label.csv'))
        self.labels = (self.label_df['label'] - 1).tolist()  # convert labels to 0-based

        # Data standardization
        # NOTE(review): the scaler is created/propagated but never fitted or
        # applied anywhere in this class -- confirm whether standardization
        # was actually intended.
        if scaler is None and phase == 'train':
            self.scaler = StandardScaler()
        else:
            self.scaler = scaler

    def load_nifti(self, p_id):
        """
        Load one patient's volume and preprocess its best ROI slice.

        Returns:
            roi_weighted: enhanced image, shape (IMG_SIZE, IMG_SIZE),
                float32; values clipped to [0, 1] after percentile
                normalization over the ROI pixels.
        """
        p_id = str(p_id)
        img_path = os.path.join(self.data_dir, "images", f"{p_id}_T2_axi_000.nii.gz")
        roi_path = os.path.join(self.data_dir, "masks", f"{p_id}_T2_axi_roi.nii.gz")
        
        # Load the NIfTI files
        img = nib.load(img_path).get_fdata().astype(np.float32)
        roi = nib.load(roi_path).get_fdata().astype(np.uint8)
        # Find the slice containing the largest ROI area
        roi_sums = roi.sum(axis=(0, 1))
        slice_idx = np.argmax(roi_sums)
        
        # Extract that slice
        img_slice = img[..., slice_idx]
        roi_slice = roi[..., slice_idx]
        
        # Improved ROI processing pipeline
        # 1. Find the ROI bounding box (falls back to the full slice when
        #    the mask is empty)
        rows = np.any(roi_slice, axis=1)
        cols = np.any(roi_slice, axis=0)
        ymin, ymax = np.where(rows)[0][[0, -1]] if rows.any() else (0, img_slice.shape[0])
        xmin, xmax = np.where(cols)[0][[0, -1]] if cols.any() else (0, img_slice.shape[1])

        # 2. Expand the box by 20% on each side for surrounding context
        height = ymax - ymin
        width = xmax - xmin
        ymin = max(0, int(ymin - 0.2 * height))
        ymax = min(img_slice.shape[0], int(ymax + 0.2 * height))
        xmin = max(0, int(xmin - 0.2 * width))
        xmax = min(img_slice.shape[1], int(xmax + 0.2 * width))

        # 3. Crop the ROI region
        cropped_img = img_slice[ymin:ymax, xmin:xmax]
        cropped_roi = roi_slice[ymin:ymax, xmin:xmax]

        # 4. Aspect-ratio-preserving resize
        h, w = cropped_img.shape
        scale_ratio = min(IMG_SIZE/h, IMG_SIZE/w)
        new_h, new_w = int(h * scale_ratio), int(w * scale_ratio)
        
        # Bilinear interpolation for the image
        scaled_img = cv2.resize(cropped_img, (new_w, new_h), 
                               interpolation=cv2.INTER_LINEAR)
        # Nearest-neighbor interpolation for the mask (keeps it binary)
        scaled_roi = cv2.resize(cropped_roi.astype(np.uint8), (new_w, new_h),
                               interpolation=cv2.INTER_NEAREST)

        # 5. Pad to the target size; the image is padded with its 5th
        #    percentile (a low background intensity) rather than zero
        pad_top = (IMG_SIZE - new_h) // 2
        pad_bottom = IMG_SIZE - new_h - pad_top
        pad_left = (IMG_SIZE - new_w) // 2
        pad_right = IMG_SIZE - new_w - pad_left
        padded_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT, 
                                       value=np.percentile(scaled_img, 5))
        padded_roi = cv2.copyMakeBorder(scaled_roi, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT,
                                       value=0)

        # 6. ROI-weighted enhancement: boost the ROI interior by 0.5 and its
        #    boundary ring (mask minus its erosion) by a further 0.4
        kernel = np.ones((3, 3), np.uint8)
        eroded_roi = cv2.erode(padded_roi, kernel, iterations=1)
        boundary_mask = (padded_roi - eroded_roi).astype(np.float32) 
        
        roi_weighted = padded_img * (self.ori_img + 0.5*(padded_roi > 0)+ 0.4 * boundary_mask)  # ROI region enhancement
        
        # 7. Percentile normalization based on ROI pixels only
        roi_pixels = roi_weighted[padded_roi > 0]
        if len(roi_pixels) > 0:
            p2, p98 = np.percentile(roi_pixels, (2, 98))
            roi_weighted = np.clip((roi_weighted - p2) / (p98 - p2 + 1e-8), 0, 1)

        return roi_weighted.astype(np.float32)
    
    def __len__(self):
        # One sample per row of the label file.
        return len(self.label_df)

    def __getitem__(self, idx):
        row = self.label_df.iloc[idx]
        p_id = row['p_id']
        proc_img = self.load_nifti(p_id)
        # Data augmentation for the training split only
        if self.phase == 'train':
            # Random horizontal flip
            if np.random.rand() < 0.5:
                proc_img = cv2.flip(proc_img, 1)
            
            # Random rotation (-15 to 15 degrees) about the image center
            angle = np.random.uniform(-15, 15)
            M = cv2.getRotationMatrix2D((IMG_SIZE//2, IMG_SIZE//2), angle, 1.0)
            proc_img = cv2.warpAffine(proc_img, M, (IMG_SIZE, IMG_SIZE),
                                    flags=cv2.INTER_LINEAR,
                                    borderMode=cv2.BORDER_REFLECT)
            
            # Random brightness scaling
            brightness = np.random.uniform(0.8, 1.2)
            proc_img = np.clip(proc_img * brightness, 0, 1)
            
            # Additive Gaussian noise
            if np.random.rand() < 0.5:
                noise = np.random.normal(0, 0.03, proc_img.shape).astype(np.float32)
                proc_img = np.clip(proc_img + noise, 0, 1)
            
            # Random Gaussian blur
            if np.random.rand() < 0.3:
                ksize = np.random.choice([3,5])
                proc_img = cv2.GaussianBlur(proc_img, (ksize,ksize), 0)
        return {
            'image': torch.FloatTensor(proc_img).unsqueeze(0),  # add channel dim -> (1, H, W)
            'label': row['label'] - 1,  # 0-based class index
            'p_id': p_id
        }

# Squeeze-and-Excitation attention module
class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Pools each channel to a scalar, passes the channel descriptor through a
    small bottleneck MLP with a sigmoid gate, and rescales the input feature
    map channel-wise.

    Args:
        channels: number of input (and output) channels.
        reduction: bottleneck reduction ratio; the hidden width is
            max(channels // reduction, 4), keeping a floor of 4 so small
            channel counts are never over-compressed.
    """
    def __init__(self, channels, reduction=16):
        super().__init__()
        # BUGFIX: `reduction` was previously ignored (overwritten with a
        # hard-coded 16). Honor the caller's ratio while keeping the floor
        # of 4 on the hidden width. For the in-file call SEBlock(256) with
        # the default ratio this yields the same hidden width (16) as before.
        hidden = max(channels // reduction, 4)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, hidden),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden, channels),
            nn.Sigmoid()
        )
    
    def forward(self, x):
        # Squeeze: global average pool -> (B, C); Excite: MLP gate in (0, 1);
        # Scale: broadcast-multiply the gate back onto the input map.
        b, c, _, _ = x.size()
        y = self.avgpool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y.expand_as(x)

class FusionModel(nn.Module):
    """Three-stage ResNet -> ViT -> residual-CNN fusion classifier.

    Stage 1 extracts multi-scale features with a ResNet-34 trunk (stem conv
    replaced for single-channel input), stage 2 runs the adapted feature map
    through ViT-Base transformer blocks, and stage 3 fuses both branches
    with attention and classifies via a residual CNN head.
    """
    def __init__(self, num_classes):
        super().__init__()
        
        # Stage 1: modified ResNet feature extractor.
        # NOTE(review): the replacement 1-channel stem conv is randomly
        # initialized while the rest of the trunk keeps pretrained weights.
        original_resnet = models.resnet34(pretrained=True)
        self.resnet = nn.ModuleDict({
            'layer0': nn.Sequential(
                nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False),
                original_resnet.bn1,
                original_resnet.relu,
                original_resnet.maxpool
            ),
            'layer1': original_resnet.layer1,
            'layer2': original_resnet.layer2,
            'layer3': original_resnet.layer3,
        })
        
        # Residual feature adapter (256 -> 512 channels)
        self.res_adapter = nn.Sequential(
            nn.Conv2d(256, 512, 3, padding=1),  # padding=1 preserves spatial size
            nn.BatchNorm2d(512),
            nn.GELU()
        )
        
        # Stage 2: ViT integration (512 -> 768 channels to match ViT width)
        self.vit_adapter = nn.Sequential(
            nn.Conv2d(512, 768, 3, padding=1),  # padding=1 preserves spatial size
            nn.BatchNorm2d(768),
            nn.GELU(),
            nn.Dropout2d(0.2)
        )
        self.vit = timm.create_model('vit_base_patch16_224', pretrained=True)
        
        # Cross-stage feature fusion module
        self.cross_fusion = CrossStageFusion(768, 512)
        
        # Stage 3: deep residual CNN head
        self.cnn = nn.Sequential(
            ResidualBlock(768, 256),
            nn.Dropout2d(0.2),
            SEBlock(256),
            ResidualBlock(256, 512),
            nn.AdaptiveAvgPool2d(1)
        )
        
        # Classifier head with partial residual connection
        self.classifier = ResidualClassifier(512, num_classes)
        
    def forward(self, x):
        # x: dict with key 'image'; shape comments assume (B, 1, 224, 224) input.
        img = x['image']
        
        # Multi-scale ResNet features
        res0 = self.resnet['layer0'](img)  # [B,64,56,56]
        res1 = self.resnet['layer1'](res0) # [B,64,56,56]
        res2 = self.resnet['layer2'](res1) # [B,128,28,28]
        res3 = self.resnet['layer3'](res2) # [B,256,14,14]
        
        # Residual feature enhancement
        res_feat = self.res_adapter(res3)  # [B,512,14,14]
        
        # ViT feature processing: flatten the 14x14 map into 196 tokens
        vit_feat = self.vit_adapter(res_feat)  # [B,768,14,14]
        B, C, H, W = vit_feat.shape
        vit_tokens = vit_feat.flatten(2).permute(0, 2, 1)  # [B,196,768]
        # NOTE(review): tokens are fed into the transformer blocks without
        # adding self.vit.pos_embed, and self.vit.norm is not applied after
        # the blocks -- confirm this is intentional.
        vit_tokens = torch.cat([self.vit.cls_token.expand(B,-1,-1), vit_tokens], dim=1)
        vit_tokens = self.vit.blocks(self.vit.norm_pre(vit_tokens))
        vit_feat = vit_tokens[:, 1:].permute(0, 2, 1).view(B, C, H, W)  # drop cls token, back to a map
        
        # Cross-stage feature fusion
        fused_feat = self.cross_fusion(vit_feat, res_feat)  # [B,768,14,14]
        
        # Residual CNN head + global average pooling
        cnn_feat = self.cnn(fused_feat)
        cnn_feat = cnn_feat.view(B, -1)  # [B,512]
        
        return self.classifier(cnn_feat)

class CrossStageFusion(nn.Module):
    """Fuse ViT features with ResNet features at matching spatial size.

    The ResNet branch is projected up to the ViT channel width, the ViT
    branch is gated by a CBAM attention map, and the two are summed.
    """
    def __init__(self, in_channels, res_channels):
        super().__init__()
        # Project the ResNet features to the ViT channel count without
        # changing the 14x14 spatial resolution (3x3 conv, padding=1).
        self.res_upsample = nn.Sequential(
            nn.Conv2d(res_channels, in_channels, 3, padding=1),
            nn.BatchNorm2d(in_channels),
            nn.Dropout2d(0.2)
        )
        # Channel + spatial attention over the ViT branch.
        self.attention = CBAM(in_channels)

    def forward(self, vit_feat, res_feat):
        # Gate the ViT features with the attention map, then add the
        # channel-matched ResNet features as a residual path. Both operands
        # are [B, in_channels, H, W] at this point.
        projected = self.res_upsample(res_feat)
        gate = self.attention(vit_feat)
        return vit_feat * gate + projected

class CBAM(nn.Module):
    """Combined channel/spatial attention map generator.

    NOTE: unlike the canonical CBAM, this variant returns the attention
    weights themselves (channel gate broadcast against spatial gate) rather
    than the attended features; callers multiply the map onto their input.
    """
    def __init__(self, channels, reduction=16):
        super().__init__()
        squeezed = channels // reduction
        # Channel gate: global average pool followed by a 1x1 bottleneck MLP.
        self.channel_att = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, squeezed, 1),
            nn.ReLU(),
            nn.Conv2d(squeezed, channels, 1),
            nn.Sigmoid()
        )
        # Spatial gate: single 7x7 conv collapsing all channels to one map.
        self.spatial_att = nn.Sequential(
            nn.Conv2d(channels, 1, 7, padding=3),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Broadcast (B,C,1,1) * (B,1,H,W) -> (B,C,H,W) attention weights.
        return self.channel_att(x) * self.spatial_att(x)

class ResidualBlock(nn.Module):
    """Two-convolution residual block with GELU activations and 2D dropout.

    When the channel count changes, the identity path is projected with a
    1x1 conv + BatchNorm; otherwise it is passed through unchanged.
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.dropout = nn.Dropout2d(0.3)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        if in_channels != out_channels:
            # Channel-matching projection on the skip path.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        identity = self.shortcut(x)
        h = F.gelu(self.bn1(self.conv1(x)))
        h = self.dropout(h)
        h = self.bn2(self.conv2(h))
        # Add the skip path before the final activation.
        return F.gelu(h + identity)

class ResidualClassifier(nn.Module):
    """Three-layer MLP classification head with a partial skip connection.

    The first 256 features of the raw input are added to the second hidden
    layer's activations, so `in_features` must be at least 256.
    """
    def __init__(self, in_features, num_classes):
        super().__init__()
        self.fc1 = nn.Linear(in_features, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.dropout1 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.dropout2 = nn.Dropout(0.5)
        self.fc3 = nn.Linear(256, num_classes)

    def forward(self, x):
        skip = x
        hidden = self.dropout1(F.gelu(self.bn1(self.fc1(x))))
        # Partial residual: re-inject the first 256 raw input features.
        hidden = F.gelu(self.bn2(self.fc2(hidden))) + skip[:, :256]
        return self.fc3(self.dropout2(hidden))

class FocalLoss(nn.Module):
    """Multi-class focal loss with label smoothing.

    Down-weights well-classified samples by (1 - p_t)^gamma so training
    concentrates on hard examples; optional per-class `alpha` weights
    counteract class imbalance.
    """
    def __init__(self, alpha=None, gamma=2.0, label_smoothing=0.1):
        super().__init__()
        self.alpha = alpha                      # optional per-class weight tensor
        self.gamma = gamma                      # focusing exponent
        self.label_smoothing = label_smoothing  # probability mass spread over all classes

    def forward(self, inputs, targets):
        n_classes = inputs.size(-1)
        log_probs = F.log_softmax(inputs, dim=-1)
        # Build smoothed one-hot target distributions.
        smooth = F.one_hot(targets, n_classes).float()
        smooth = (1 - self.label_smoothing) * smooth + self.label_smoothing / n_classes
        # Soft cross-entropy per sample.
        ce = - (smooth * log_probs).sum(dim=-1)
        # exp(-ce) approximates the probability assigned to the target.
        prob_target = torch.exp(-ce)
        loss = (1 - prob_target) ** self.gamma * ce
        if self.alpha is not None:
            # argmax of the smoothed distribution recovers the hard label.
            loss = self.alpha[torch.argmax(smooth, dim=1)] * loss
        return loss.mean()

def load_train_val_test_data():
    """Build the train/val/test datasets and their DataLoaders.

    The scaler created by the training split is shared with the validation
    and test splits so all phases would use the same normalization.

    Returns:
        (train_loader, val_loader, test_loader)
    """
    # Create the training set (and its scaler) first.
    train_dataset = MedicalDataset(phase='train')
    # Initialize val/test with the training scaler to avoid leakage.
    val_dataset = MedicalDataset(phase='val', scaler=train_dataset.scaler)
    test_dataset = MedicalDataset(phase='test', scaler=train_dataset.scaler)

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, 
                            shuffle=True, pin_memory=True, drop_last=True)
    # BUGFIX: the evaluation loaders previously used drop_last=True, which
    # silently discarded the final partial batch (and produced ZERO batches
    # whenever a split was smaller than BATCH_SIZE), skewing val/test
    # metrics. Keep every sample for evaluation.
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, 
                          shuffle=False, pin_memory=True, drop_last=False)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                           shuffle=False, pin_memory=True, drop_last=False)
    # show_samples(train_dataset)  # preview augmented training samples
    return train_loader, val_loader, test_loader

def train_and_evaluate(model, train_loader, val_loader):
    """Train `model` and evaluate on the validation set every epoch.

    Uses focal loss with inverse-sqrt class weighting, AdamW, mixed
    precision (CUDA only), gradient clipping at 1.0, and ReduceLROnPlateau
    driven by validation macro-F1. Per-epoch metrics are printed and
    appended to a timestamped log file; the best checkpoint (by validation
    macro-F1) is saved to 'best_model_ori.pth'.

    Returns:
        dict with per-epoch 'train_loss', 'val_loss' (batch-averaged) and
        'val_f1' lists.
    """
    # Initialize the training log.
    start_time = datetime.datetime.now()
    timestamp = start_time.strftime("%Y%m%d_%H%M%S")
    log_filename = f"training_log_{timestamp}.txt"
    
    # Per-epoch metric history returned to the caller.
    metrics = {
        'train_loss': [],
        'val_loss': [],
        'val_f1': []
    }

    # Record the run configuration.
    with open(log_filename, 'w') as f:
        f.write(f"Training Start Time: {start_time}\n")
        f.write(f"Epochs: {EPOCHS}, Initial LR: {LEARNING_RATE}, Device: {device}\n")
        f.write("Hyperparameters:\n")
        f.write(f"- Focal Loss Gamma: 2.0\n")
        f.write(f"- Optimizer: AdamW\n")
        # BUGFIX: the log previously claimed 1e-4 while the optimizer below
        # actually uses 1e-5.
        f.write(f"- Weight Decay: 1e-5\n")
        f.write(f"- Gradient Clip: 1.0\n\n")

    # Focal-loss alpha from the (imbalanced) class distribution:
    # inverse-sqrt frequency, normalized to sum to 1.
    class_counts = np.bincount(train_loader.dataset.labels)
    class_weights = 1 / np.sqrt(class_counts)
    class_weights = torch.tensor(class_weights / class_weights.sum(), dtype=torch.float).to(device)
    
    criterion = FocalLoss(alpha=class_weights, gamma=2.0)

    optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)
    
    # Halve the LR when validation macro-F1 plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='max', factor=0.5, patience=3, verbose=True
    )

    # BUGFIX: autocast was previously used without a GradScaler, risking
    # fp16 gradient underflow. Enable proper loss scaling on CUDA; on CPU
    # both autocast and the scaler are disabled no-ops.
    use_amp = device.type == 'cuda'
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

    best_val_f1 = 0.0
    for epoch in range(EPOCHS):
        epoch_start = datetime.datetime.now()
        model.train()
        train_loss = 0.0
        
        for batch in tqdm(train_loader, desc=f'Epoch {epoch+1}'):
            inputs = {'image': batch['image'].to(device)}
            labels = batch['label'].to(device)
            
            optimizer.zero_grad()
            
            with torch.cuda.amp.autocast(enabled=use_amp):
                outputs = model(inputs)
                loss = criterion(outputs, labels)
            
            scaler.scale(loss).backward()
            # BUGFIX: the logged "Gradient Clip: 1.0" was never applied.
            # Unscale first so clipping sees true gradient magnitudes.
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            scaler.step(optimizer)
            scaler.update()
            
            train_loss += loss.item()
        
        # Validation phase.
        model.eval()
        val_loss = 0.0
        all_preds, all_labels = [], []
        
        with torch.no_grad():
            for batch in val_loader:
                inputs = {'image': batch['image'].to(device)}
                labels = batch['label'].to(device)
                
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                
                val_loss += loss.item()
                preds = outputs.argmax(dim=1)
                
                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())
        
        # Compute epoch metrics.
        avg_train_loss = train_loss / len(train_loader)
        avg_val_loss = val_loss / len(val_loader)
        val_acc = accuracy_score(all_labels, all_preds)
        val_f1 = f1_score(all_labels, all_preds, average='macro')
        cm = confusion_matrix(all_labels, all_preds)
        current_lrs = [group['lr'] for group in optimizer.param_groups]
        epoch_duration = datetime.datetime.now() - epoch_start
        
        # Build the log entry.
        log_entry = f"\nEpoch {epoch+1}/{EPOCHS} [{epoch_duration}]"
        log_entry += f"\nTrain Loss: {avg_train_loss:.4f}"
        log_entry += f"\nVal Loss: {avg_val_loss:.4f}"
        log_entry += f"\nVal Acc: {val_acc:.4f} | Val F1: {val_f1:.4f}"
        log_entry += f"\nLearning Rates: {[f'{lr:.2e}' for lr in current_lrs]}"
        log_entry += "\nConfusion Matrix:\n" + np.array2string(cm, separator=', ')
        
        # Print and persist the log entry.
        print(log_entry)
        with open(log_filename, 'a') as f:
            f.write(log_entry + "\n")
        
        # Store batch-averaged losses so the history matches the logged
        # values (raw sums were previously stored, inconsistent with the log).
        metrics['train_loss'].append(avg_train_loss)
        metrics['val_loss'].append(avg_val_loss)
        metrics['val_f1'].append(val_f1)

        # Adjust the LR and checkpoint on improvement.
        scheduler.step(val_f1)
        if val_f1 > best_val_f1:
            best_val_f1 = val_f1
            save_msg = f"\n ********** New Best Model (F1: {val_f1:.4f})********** "
            print(save_msg)
            with open(log_filename, 'a') as f:
                f.write(save_msg + "\n")
            torch.save(model.state_dict(), f'best_model_ori.pth')

    # Final summary.
    end_time = datetime.datetime.now()
    total_duration = end_time - start_time
    final_log = f"\n\nTraining Complete!\nTotal Duration: {total_duration}\n"
    final_log += f"Best Validation F1: {best_val_f1:.4f}\n"
    final_log += f"Model Saved: best_model_ori.pth"
    
    print(final_log)
    with open(log_filename, 'a') as f:
        f.write(final_log)
    return metrics

if __name__ == "__main__":
    # BUGFIX: load_train_val_test_data() was previously called twice,
    # constructing every dataset and re-reading every label CSV a second
    # time for no benefit. Build the loaders once and reuse them.
    train_loader, val_loader, test_loader = load_train_val_test_data()
    # Infer the number of classes from the training labels.
    num_classes = train_loader.dataset.label_df['label'].nunique()
    model = FusionModel(num_classes).to(device)

    train_and_evaluate(model, train_loader, val_loader)

    # model.load_state_dict(torch.load('best_model_ori.pth'))
    
    # # Evaluate on the held-out set
    # test_results = test_model(model, val_loader, device, num_classes)
    
    # # Generate the test report
    # save_test_report(test_results, num_classes)
    # visualize_gradcam(model, test_loader, device, IMG_SIZE, num_samples=5)
