import os

import nibabel as nib
import numpy as np
import pandas as pd
import timm
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from PIL import Image
from sklearn import metrics
from torch.autograd import Function
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, Dataset

class MedicalDataset(Dataset):
    """Dataset of single-slice NIfTI medical images with CSV-provided labels.

    Each CSV row supplies a patient id (column 0) and an integer class
    label (column 1). The image for a row is loaded from
    ``<root_dir>/img/<p_id>_T2_axi_000.nii.gz``, min-max normalized to
    [0, 255], and replicated to 3 channels so ImageNet-style transforms
    and pretrained backbones can consume it.
    """

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the CSV file with ids and labels.
            root_dir (string): Root directory containing the ``img`` subdirectory.
            transform (callable, optional): Optional transform applied to the image.
        """
        self.data_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        # One sample per CSV row.
        return len(self.data_frame)

    def __getitem__(self, idx):
        """Return ``(image, label, img_path)`` for sample ``idx``."""
        if torch.is_tensor(idx):
            idx = idx.tolist()

        # Patient id and label come from CSV columns 0 and 1.
        p_id = self.data_frame.iloc[idx, 0]
        label = self.data_frame.iloc[idx, 1]

        # Build the image path; files follow the <p_id>_T2_axi_000.nii.gz convention.
        img_path = os.path.join(self.root_dir, 'img', f'{p_id}_T2_axi_000.nii.gz')

        # Load the NIfTI file and drop singleton dimensions.
        img_data = nib.load(img_path).get_fdata().squeeze()

        # Min-max normalize to [0, 255]. Guard against a constant image,
        # which would otherwise cause a division by zero.
        lo = float(np.min(img_data))
        hi = float(np.max(img_data))
        if hi > lo:
            img_data = (img_data - lo) / (hi - lo) * 255
        else:
            img_data = np.zeros_like(img_data)
        img_data = img_data.astype(np.uint8)
        image = Image.fromarray(img_data).convert('RGB')

        # Apply the optional preprocessing/augmentation pipeline.
        if self.transform:
            image = self.transform(image)

        return image, label, img_path

# Data preprocessing.
# Per-channel statistics (identical across channels — presumably computed on
# the grayscale training images replicated to RGB; verify against the dataset).
normalize = transforms.Normalize(mean=[0.5115, 0.5115, 0.5115],
                                 std=[0.1316, 0.1316, 0.1316])


def _build_pipeline(augment):
    """Return the preprocessing pipeline; random flips are added only when augment=True."""
    steps = [transforms.Resize((224, 224))]
    if augment:
        steps.append(transforms.RandomHorizontalFlip())
        steps.append(transforms.RandomVerticalFlip())
    steps.append(transforms.ToTensor())
    steps.append(normalize)
    return transforms.Compose(steps)


# Training uses flip augmentation; validation/test are deterministic.
train_data_transforms = _build_pipeline(augment=True)
val_test_data_transforms = _build_pipeline(augment=False)

# Dataset path configuration: each split directory contains label.csv
# and an img/ subdirectory with the NIfTI files.
base_dir = './datasets/single_mod'

# Augmentation is applied to the training split only.
_split_transforms = {
    'train': train_data_transforms,
    'val': val_test_data_transforms,
    'test': val_test_data_transforms,
}

image_datasets = {
    split: MedicalDataset(
        csv_file=os.path.join(base_dir, split, 'label.csv'),
        root_dir=os.path.join(base_dir, split),
        transform=split_transform,
    )
    for split, split_transform in _split_transforms.items()
}

# Data loaders: only the training split is shuffled; pin_memory speeds up
# host-to-device transfers when CUDA is available.
_loader_settings = {
    'train': {'batch_size': 16, 'shuffle': True},
    'val': {'batch_size': 8, 'shuffle': False},
    'test': {'batch_size': 8, 'shuffle': False},
}

dataloaders = {
    split: DataLoader(image_datasets[split], pin_memory=True, **settings)
    for split, settings in _loader_settings.items()
}

# Model initialization (Swin Transformer backbone).
# Pretrained weights are loaded from a local checkpoint file via
# pretrained_cfg_overlay instead of being downloaded; the classification
# head is replaced with a 2-class head.
model = timm.create_model(
    'swin_base_patch4_window7_224',
    pretrained=True,
    num_classes=2,
    pretrained_cfg_overlay=dict(file='Pretrained_model/swin_base_patch4_window7_224_22kto1k.pth')
)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

def training_Swin_transformer():
    """Train the Swin Transformer classifier, then evaluate on the test set.

    Trains for 100 epochs with Adam + StepLR (x0.7 every 5 epochs),
    checkpoints the weights with the best validation accuracy, saves the
    final weights, and writes per-sample test predictions
    (path, label, prediction, class probabilities) to
    ``results/SwinTrans_results.npy``.

    Uses the module-level ``model``, ``device``, ``dataloaders`` and
    ``image_datasets``.
    """
    save_dir = 'Swin_Transformer_weights/GorC_2_label'
    os.makedirs(save_dir, exist_ok=True)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=3e-5)
    scheduler = StepLR(optimizer, step_size=5, gamma=0.7)
    best_acc = 0.0

    for epoch in range(100):
        print(f'Epoch {epoch+1}/100')
        print('-' * 10)

        # FIX: the dataloaders/image_datasets dicts are keyed 'val', not
        # 'validation' — the original loop raised KeyError on the second phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            for inputs, labels, _ in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Gradients are only tracked during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)

                    if phase == 'train':
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()

                _, preds = torch.max(outputs, 1)
                # Accumulate sample-weighted loss and correct-prediction counts.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(image_datasets[phase])
            epoch_acc = running_corrects.double() / len(image_datasets[phase])

            # Checkpoint the best weights seen on the validation split.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                torch.save(model.state_dict(), os.path.join(save_dir, 'best_weight.pth'))

            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

        # FIX: step the LR scheduler once per epoch — it was created but
        # never stepped, so the learning rate never decayed.
        scheduler.step()

    # Save the final (last-epoch) model weights.
    torch.save(model.state_dict(), os.path.join(save_dir, 'final_weight.pth'))

    # Test-set evaluation.
    model.eval()
    softmax = nn.Softmax(dim=1)
    all_labels = []
    all_preds = []
    all_probs = []
    all_paths = []

    with torch.no_grad():
        for inputs, labels, paths in dataloaders['test']:
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            probs = softmax(outputs)
            _, preds = torch.max(outputs, 1)

            all_labels.extend(labels.cpu().numpy())
            all_preds.extend(preds.cpu().numpy())
            all_probs.extend(probs.cpu().numpy())
            all_paths.extend(paths)

    # Persist results as [path, label, prediction, prob_class0, prob_class1]
    # rows. FIX: create the output directory first — np.save does not.
    os.makedirs('results', exist_ok=True)
    results = np.column_stack((
        np.array(all_paths).reshape(-1, 1),
        np.array(all_labels).reshape(-1, 1),
        np.array(all_preds).reshape(-1, 1),
        np.array(all_probs)
    ))
    np.save('results/SwinTrans_results.npy', results)

# Script entry point: run the full training + test-evaluation pipeline.
if __name__ == '__main__':
    training_Swin_transformer()