import os
import nibabel as nib
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import cv2
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tqdm import tqdm
import torch.nn.functional as F

# Augmentation helpers for data preprocessing

def augment_rotate(img, max_degree=15):
    """Rotate a 2D image about its center by a random angle drawn
    uniformly from [-max_degree, max_degree] degrees."""
    angle = np.random.uniform(-max_degree, max_degree)
    height, width = img.shape
    center = (width / 2, height / 2)
    rotation = cv2.getRotationMatrix2D(center, angle, 1)
    return cv2.warpAffine(img, rotation, (width, height))

def augment_flip(img):
    """Mirror the image horizontally with probability 0.5; otherwise
    return it unchanged."""
    do_flip = np.random.rand() > 0.5
    return cv2.flip(img, 1) if do_flip else img

def augment_gamma(img, gamma_range=(0.5, 2.0)):
    """Apply a random sign-preserving gamma transform:
    sign(x) * |x|**gamma, with gamma drawn uniformly from gamma_range.
    Works on data normalized to [-1, 1] without losing the sign."""
    lo, hi = gamma_range
    gamma = np.random.uniform(lo, hi)
    magnitude = np.abs(img) ** gamma
    return np.sign(img) * magnitude

# Data loading: one (image, mask, label) triple per patient, built from the
# slice with the largest ROI area.
class MedicalDataset(Dataset):
    """Serves the largest-ROI 2D slice of paired NIfTI image/mask volumes.

    Expects `df` with columns `p_ID` (patient id matching the file names
    `<p_ID>.nii.gz` / `<p_ID>mask.nii.gz`) and `label` (1-based class index).
    Rows whose image or mask file is missing on disk are silently skipped.
    """

    def __init__(self, df, img_dir, mask_dir, transform=None, is_train=True):
        self.df = df.copy()
        self.img_dir = img_dir
        self.mask_dir = mask_dir
        self.is_train = is_train
        # NOTE(review): `transform` is only checked for truthiness to enable
        # the numpy augmentations below; the torchvision transforms passed in
        # are never actually applied.
        self.transform = transform
        # NOTE(review): kept for interface stability but unused — resizing is
        # done with cv2.resize in __load_volume.
        self.resize = transforms.Resize((224, 224))
        self.valid_indices = self._filter_valid_samples()

    def _filter_valid_samples(self):
        """Return df row positions whose image AND mask files both exist."""
        valid_indices = []
        for idx in range(len(self.df)):
            p_id = str(self.df.iloc[idx]['p_ID'])
            img_path = os.path.join(self.img_dir, f"{p_id}.nii.gz")
            mask_path = os.path.join(self.mask_dir, f"{p_id}mask.nii.gz")
            if os.path.exists(img_path) and os.path.exists(mask_path):
                valid_indices.append(idx)
        return valid_indices

    def __load_volume(self, p_id):
        """Load one patient and return (image, mask) float tensors of the
        largest-ROI slice, each shaped [1, 224, 224]."""
        img = nib.load(os.path.join(self.img_dir, f"{p_id}.nii.gz")).get_fdata().astype(np.float32)
        mask = nib.load(os.path.join(self.mask_dir, f"{p_id}mask.nii.gz")).get_fdata().astype(np.uint8)

        # Pick the slice (last axis) with the largest ROI area.
        roi_sums = mask.sum(axis=(0, 1))
        slice_idx = np.argmax(roi_sums)

        img_slice = img[..., slice_idx]
        mask_slice = mask[..., slice_idx]

        # Normalize intensities with ROI statistics; fall back to whole-slice
        # statistics when the mask slice is empty (BUGFIX: the original
        # produced NaNs on an empty ROI).
        roi = mask_slice > 0
        ref = img_slice[roi] if roi.any() else img_slice
        img_slice = (img_slice - ref.mean()) / (ref.std() + 1e-8)
        img_slice = np.clip(img_slice, -3, 3) / 3  # normalize to [-1, 1]

        # Augmentation (training only)
        if self.is_train and self.transform:
            # One shared seed so image and mask draw identical parameters.
            seed = torch.randint(0, 2**32, size=(1,)).item()

            # Rotation — BUGFIX: reseed before *each* call. Previously the
            # mask consumed the next value in the RNG stream and was rotated
            # by a different angle than the image.
            np.random.seed(seed)
            img_slice = augment_rotate(img_slice)
            np.random.seed(seed)
            mask_slice = augment_rotate(mask_slice)

            # Horizontal flip — same reseeding so both flip (or not) together.
            np.random.seed(seed)
            img_slice = augment_flip(img_slice)
            np.random.seed(seed)
            mask_slice = augment_flip(mask_slice)

            # Gamma touches intensities only; the mask stays untouched.
            np.random.seed(seed)
            img_slice = augment_gamma(img_slice)

        # Resize and convert to tensors; re-binarize the mask because
        # rotation/resizing interpolation produces fractional values.
        img_slice = cv2.resize(img_slice, (224, 224))
        mask_slice = cv2.resize(mask_slice.astype(float), (224, 224)) > 0.5

        return (
            torch.FloatTensor(img_slice).unsqueeze(0),
            torch.FloatTensor(mask_slice.astype(np.float32)).unsqueeze(0)
        )

    def __getitem__(self, idx):
        row = self.df.iloc[self.valid_indices[idx]]
        img, mask = self.__load_volume(row['p_ID'])
        label = row['label'] - 1  # labels stored 1-based; model expects 0-based
        return img, mask, label

    def __len__(self):
        return len(self.valid_indices)

# Improved dual-branch model: a small residual CNN over the image and a
# pretrained ResNet34 over the ROI mask, fused into one classifier.
class DualPathModel(nn.Module):
    """Two-branch classifier over (image, mask) pairs.

    Inputs: img and mask, each [B, 1, 224, 224].
    Output: logits [B, num_classes].
    """

    def __init__(self, num_classes=3):
        super().__init__()

        # Image branch (with residual connections)
        self.img_cnn = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),
            ResidualBlock(32, 64),
            nn.MaxPool2d(2),
            ResidualBlock(64, 128),
            nn.MaxPool2d(2)
        )

        # ROI branch: pretrained ResNet34 with a 1-channel stem, truncated
        # before avgpool/fc so it emits a [B, 512, 7, 7] feature map.
        self.mask_resnet = models.resnet34(pretrained=True)
        self.mask_resnet.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.mask_resnet = nn.Sequential(*list(self.mask_resnet.children())[:-2])

        # Feature fusion module
        # NOTE(review): SEBlock receives a 2D [B, 512] tensor at this point —
        # verify SEBlock supports 2D input (a 4D-only SE block would crash).
        self.fusion = nn.Sequential(
            nn.Linear(128 + 512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(0.5),
            SEBlock(512),
            nn.Linear(512, 256)
        )

        self.classifier = nn.Sequential(
            nn.LayerNorm(256),
            nn.Linear(256, 128),
            nn.GELU(),
            nn.Dropout(0.3),
            nn.Linear(128, num_classes)
        )

    def forward(self, img, mask):
        # Image features [B, 128, 28, 28] -> [B, 128].
        # BUGFIX: the original `.squeeze()` also collapsed the batch dimension
        # when B == 1, breaking torch.cat / Linear; flatten(1) keeps it.
        img_feat = F.adaptive_avg_pool2d(self.img_cnn(img), 1).flatten(1)

        # ROI features [B, 512, 7, 7] -> [B, 512]
        mask_feat = F.adaptive_avg_pool2d(self.mask_resnet(mask), 1).flatten(1)

        # Fuse and classify
        combined = torch.cat([img_feat, mask_feat], dim=1)
        fused = self.fusion(combined)
        return self.classifier(fused)

class ResidualBlock(nn.Module):
    """Basic residual unit: two 3x3 conv+BN layers plus a shortcut.

    The shortcut is the identity when channel counts match, otherwise a
    1x1 conv + BN projection. ReLU is applied after the addition.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
        )
        if in_channels == out_channels:
            # Identity shortcut: nothing to learn.
            self.shortcut = nn.Sequential()
        else:
            # Channel-matching projection for the skip path.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels),
            )

    def forward(self, x):
        out = self.conv(x)
        out = out + self.shortcut(x)
        return F.relu(out)

class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Accepts either a 4D feature map [B, C, H, W] (standard SE: global
    average pool -> bottleneck MLP -> sigmoid gate, broadcast over H, W)
    or a 2D feature vector [B, C], in which case the gate is applied
    directly with no spatial squeeze.

    BUGFIX: the original only handled 4D input and crashed with a tuple
    unpack error when fed the 2D output of a Linear layer (as done in
    this file's fusion module).
    """

    def __init__(self, channel, reduction=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid()
        )

    def forward(self, x):
        if x.dim() == 2:
            # [B, C]: nothing to pool — gate the features directly.
            return x * self.fc(x)
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y

# Stratified data-splitting helper
def train_test_split_wrapper(df):
    """Split `df` into stratified train / val / test frames (~80/10/10).

    First peels off 10% as the test set, then ~11.11% of the remainder as
    validation (0.9 * 0.1111 ≈ 0.10 of the full data). Both splits are
    stratified on the 'label' column with a fixed random_state for
    reproducibility.
    """
    remainder, test = train_test_split(
        df, test_size=0.1, stratify=df['label'], random_state=42)
    train, val = train_test_split(
        remainder, test_size=0.1111, stratify=remainder['label'], random_state=42)
    return train, val, test

# Initialization: load the label table, split it, build datasets and loaders.
df = pd.read_csv('./MRI_all_labels/label.csv')
train_df, val_df, test_df = train_test_split_wrapper(df)

# NOTE(review): this Compose is handed to MedicalDataset, which only checks
# `transform` for truthiness to enable its own numpy augmentations — the
# affine/flip transforms listed here are never actually applied. Confirm
# whether they were meant to be used.
transform = transforms.Compose([
    transforms.RandomAffine(degrees=15, translate=(0.2, 0.2)),
    transforms.RandomVerticalFlip(),
])

# NOTE(review): the test dataset points at the *val* image/mask directories —
# confirm this is intentional and not a copy-paste slip.
train_dataset = MedicalDataset(train_df, './datasets/single_mod/train/images', './datasets/single_mod/train/masks', transform=transform, is_train=True)
val_dataset = MedicalDataset(val_df, './datasets/single_mod/val/images', './datasets/single_mod/val/masks', is_train=False)
test_dataset = MedicalDataset(test_df, './datasets/single_mod/val/images', './datasets/single_mod/val/masks', is_train=False)

batch_size = 16
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size)
test_loader = DataLoader(test_dataset, batch_size=batch_size)

# Training pipeline: class-weighted loss, per-branch learning rates, plateau
# LR scheduling, early stopping on validation accuracy, final test report.
def train_model():
    """Train DualPathModel end-to-end, checkpoint the best validation-accuracy
    weights to 'best_model_cat.pth', then evaluate on the test loader.

    Relies on module-level globals: df, train_loader, val_loader, test_loader.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = DualPathModel().to(device)
    
    # Class-weighted cross-entropy: inverse-frequency weights computed from
    # the FULL dataframe (not just the training split), ordered by class index.
    class_counts = df['label'].value_counts().sort_index().values
    class_weights = 1. / torch.tensor(class_counts, dtype=torch.float)
    criterion = nn.CrossEntropyLoss(weight=class_weights.to(device))
    
    # Layered learning rates: smallest for the pretrained ResNet branch,
    # medium for the from-scratch CNN, the 1e-3 default for fusion/classifier.
    optimizer = optim.AdamW([
        {'params': model.img_cnn.parameters(), 'lr': 1e-4},
        {'params': model.mask_resnet.parameters(), 'lr': 1e-5},
        {'params': model.fusion.parameters()},
        {'params': model.classifier.parameters()}
    ], lr=1e-3, weight_decay=1e-4)
    
    # Halve all LRs after 3 epochs without val-accuracy improvement ('max').
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=3, factor=0.5)
    
    best_val_acc = 0.0
    early_stop_counter = 0
    PATIENCE = 10

    for epoch in range(50):
        model.train()
        train_loss = 0.0
        for imgs, masks, labels in tqdm(train_loader, desc=f'Epoch {epoch+1}'):
            imgs, masks, labels = imgs.to(device), masks.to(device), labels.to(device)
            
            optimizer.zero_grad()
            outputs = model(imgs, masks)
            loss = criterion(outputs, labels)
            loss.backward()
            # Gradient clipping to keep early training stable.
            nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            train_loss += loss.item()

        # Validation phase
        model.eval()
        val_loss = 0.0
        correct = 0
        total = 0
        with torch.no_grad():
            for imgs, masks, labels in val_loader:
                imgs, masks, labels = imgs.to(device), masks.to(device), labels.to(device)
                outputs = model(imgs, masks)
                loss = criterion(outputs, labels)
                val_loss += loss.item()
                
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        val_acc = correct / total
        scheduler.step(val_acc)
        
        # Early stopping: checkpoint on improvement, give up after PATIENCE
        # consecutive epochs without one.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), 'best_model_cat.pth')
            early_stop_counter = 0
        else:
            early_stop_counter += 1
            if early_stop_counter >= PATIENCE:
                print(f"Early stopping at epoch {epoch+1}")
                break

        print(f'Epoch {epoch+1} | Train Loss: {train_loss/len(train_loader):.4f} | '
              f'Val Loss: {val_loss/len(val_loader):.4f} | Val Acc: {val_acc:.4f}')

    # Final test evaluation with the best checkpoint.
    # NOTE(review): torch.load without map_location assumes loading on the same
    # device type as training — add map_location=device if that can differ.
    model.load_state_dict(torch.load('best_model_cat.pth'))
    model.eval()
    correct = 0
    total = 0
    all_preds = []
    all_labels = []
    with torch.no_grad():
        for imgs, masks, labels in test_loader:
            imgs, masks, labels = imgs.to(device), masks.to(device), labels.to(device)
            outputs = model(imgs, masks)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    print(f'Test Accuracy: {correct / total:.4f}')
    print(classification_report(all_labels, all_preds, target_names=['Class 0', 'Class 1', 'Class 2']))

# Script entry point: run the full training/validation/test pipeline.
if __name__ == "__main__":
    train_model()