import os
import pydicom
import wandb
import datetime
import numpy as np
import pandas as pd
from tqdm import tqdm
from einops import rearrange
import torch
import torchvision
import torch.nn as nn
from torch.utils.data.dataset import Dataset
from torch.utils.data import Dataset, DataLoader, Subset
from torchvision.transforms.v2 import Resize, Compose, Normalize, ColorJitter, RandomAffine, RandomErasing, ToTensor, GaussianBlur
import sklearn
from sklearn.model_selection import KFold  # 交叉验证划分
from sklearn.metrics import accuracy_score, roc_auc_score
import warnings
warnings.filterwarnings("ignore")


class CNNModel(nn.Module):
    """ResNet-18 backbone adapted to `depth`-channel 2.5D input, with a small
    MLP head producing 14 logits (pair with BCEWithLogitsLoss)."""

    def __init__(self, mode="train", depth=16):
        """
        Args:
            mode: "train" loads ImageNet-pretrained backbone weights; any
                other value builds a randomly initialised backbone.
            depth: number of stacked DICOM slices, used as input channels.
        """
        super().__init__()
        # Single call replaces the original duplicated if/else branches.
        # NOTE(review): the boolean `pretrained` flag is deprecated in recent
        # torchvision (use `weights=ResNet18_Weights.IMAGENET1K_V1` there);
        # kept for compatibility with the version this file targets.
        self.model = torchvision.models.resnet18(pretrained=(mode == "train"))
        # Replace the RGB stem so the network accepts `depth` channels.
        self.model.conv1 = nn.Conv2d(depth, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        self.model.fc = nn.Linear(512, 256)
        self.bn1 = nn.BatchNorm1d(256)
        self.lu = nn.GELU()
        self.l1 = nn.Linear(256, 64)
        self.bn2 = nn.BatchNorm1d(64)
        self.fc = nn.Linear(64, 14)

    def forward(self, x):
        """Return raw logits of shape (batch, 14) — no sigmoid applied."""
        x = self.model(x)
        x = self.bn1(x)
        x = self.lu(x)
        x = self.l1(x)
        x = self.bn2(x)
        return self.fc(x)


'''
Build the dataset: stack multiple DICOM slices of one scan into a single
multi-channel image.
'''
TRAIN_IMG_PATH = 'train_test_data/datasets/train_images'  # TODO: update path for the local environment

def fetch_img_paths(train_img_path):
    """Collect, per scan, the sorted list of usable DICOM file paths.

    Walks `train_img_path/<patient>/<scan>/` and keeps `.dcm` files whose
    numeric stem lies in [10, 1500], ordered by slice number.

    Args:
        train_img_path: root directory of the training images.

    Returns:
        list[list[str]]: one list of file paths per scan (scans with no
        usable slice are skipped; the original crashed on them when
        slicing an empty numpy array).
    """
    img_paths = []
    print('Scanning directories...')
    # sorted() makes traversal deterministic across filesystems so the
    # downstream KFold splits are reproducible run-to-run.
    for patient in tqdm(sorted(os.listdir(train_img_path))):
        patient_dir = os.path.join(train_img_path, patient)
        for scan in sorted(os.listdir(patient_dir)):
            scan_dir = os.path.join(patient_dir, scan)
            slices = [
                (int(filename[:-4]), os.path.join(scan_dir, filename))
                for filename in os.listdir(scan_dir)
                if filename.endswith('.dcm') and 10 <= int(filename[:-4]) <= 1500
            ]
            slices.sort(key=lambda item: item[0])  # order by slice number
            if slices:
                img_paths.append([path for _, path in slices])
    return img_paths


def select_elements_with_spacing(input_list, depth):
    """Pick up to `depth` elements at evenly spaced positions.

    Short lists (len <= depth) are returned unchanged. Otherwise `depth`
    indices are spread linearly from the first to the last position
    (fractional indices are truncated; both endpoints are always kept).
    """
    total = len(input_list)
    if total <= depth:
        return input_list
    positions = np.linspace(0, total - 1, depth).astype(int)
    return [input_list[pos] for pos in positions]


def preprocess_jpeg(dicm_img_path):
    """Load one DICOM file and return its windowed, rescaled pixel array.

    Args:
        dicm_img_path: path to a .dcm file.

    Returns:
        int16 ndarray scaled so its maximum is 255
        (see clip_rescale_extract_dicom_image).
    """
    # pydicom.read_file was a deprecated alias, removed in pydicom 3.0;
    # dcmread is the supported entry point.
    dataset = pydicom.dcmread(dicm_img_path)
    return clip_rescale_extract_dicom_image(dataset)


def clip_rescale_extract_dicom_image(dicom_ds):
    """Apply DICOM rescale slope/intercept and window clipping to an image.

    Args:
        dicom_ds: a pydicom Dataset (or any object exposing `pixel_array`,
            `WindowCenter`/`WindowWidth`, optional `RescaleSlope`/
            `RescaleIntercept`, and `in` membership for tag names).

    Returns:
        int16 ndarray, clipped to the window and scaled so its maximum
        becomes 255.
    """
    image = dicom_ds.pixel_array

    # Default to the identity transform when the rescale tags are absent.
    # The original read `slope`/`intercept` unconditionally and raised
    # UnboundLocalError for datasets missing those tags.
    intercept, slope = 0.0, 1.0
    if ("RescaleIntercept" in dicom_ds) and ("RescaleSlope" in dicom_ds):
        intercept = float(dicom_ds.RescaleIntercept)
        slope = float(dicom_ds.RescaleSlope)

    # NOTE(review): int() assumes scalar WindowCenter/WindowWidth; DICOM
    # allows multi-valued window tags, which would need indexing — confirm.
    center = int(dicom_ds.WindowCenter)
    width = int(dicom_ds.WindowWidth)
    low = center - width / 2
    high = center + width / 2

    image = (image * slope) + intercept
    image = np.clip(image, low, high)
    # Scale by the post-clip maximum; truncation to int16 matches the
    # original behaviour.
    image = (image / np.max(image) * 255).astype(np.int16)
    return image

# dataset


class AbdominalData(Dataset):  # TODO: consider adding segmentation masks
    """2.5D abdominal CT dataset.

    For each scan, `depth` evenly spaced DICOM slices are stacked into a
    (depth, 224, 224) float tensor, paired with the patient's multi-label
    targets looked up from the training CSV.
    """

    def __init__(self, df_path, current_fold, num_fold=5, depth=8):
        """
        Args:
            df_path: label CSV; first column is patient_id, remaining
                columns are the per-target labels.
            current_fold: fold index served by get_splits().
            num_fold: total number of KFold splits.
            depth: slices sampled per scan. The original read a
                module-level global named `depth` inside __getitem__; it is
                now an explicit parameter whose default matches the value
                the training script uses.
        """
        super().__init__()
        # Scan the image tree once per instance; one entry per scan.
        self.img_paths = fetch_img_paths(TRAIN_IMG_PATH)
        self.df = pd.read_csv(df_path)
        self.num_fold = num_fold
        self.current_fold = current_fold
        self.depth = depth
        self.kf = KFold(n_splits=num_fold)
        # NOTE(review): these augmentations run for both the train and the
        # val Subset, and ToTensor after Normalize is unusual ordering for
        # torchvision v2 — confirm both are intended.
        self.transform = Compose([
            Resize((224, 224), antialias=True),  # train small, fine-tune at higher resolution later
            ColorJitter(brightness=0.2),  # Randomly adjust brightness
            ColorJitter(contrast=0.2),  # Randomly adjust contrast
            GaussianBlur(kernel_size=3, sigma=(0.1, 2.0)),  # Gaussian blur augmentation
            RandomAffine(degrees=0, scale=(0.8, 1.2)),
            RandomErasing(p=0.2, scale=(0.02, 0.2)),  # Coarse dropout
            Normalize([0.5], [0.5]),
            ToTensor(),
        ])

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        # Evenly sample self.depth slices from the scan (was a global).
        dicom_images = select_elements_with_spacing(self.img_paths[idx], self.depth)
        # Path layout is <root>/<patient_id>/<scan>/<slice>.dcm.
        patient_id = dicom_images[0].split('/')[-3]
        images = [preprocess_jpeg(path) for path in dicom_images]
        images = np.stack(images) / 1.0  # force float for torch.tensor
        imgs = torch.tensor(images, dtype=torch.float).unsqueeze(1)
        imgs = self.transform(imgs).squeeze()
        # All label columns after patient_id, as a float tensor.
        labels = self.df[self.df.patient_id == int(patient_id)].values[0][1:]
        labels = torch.from_numpy(labels.astype(float)).float()
        return imgs, labels

    def get_splits(self):
        """Return (train Subset, val Subset) for self.current_fold."""
        fold_data = list(self.kf.split(self.img_paths))
        train_indices, val_indices = fold_data[self.current_fold]
        return self._get_subset(train_indices), self._get_subset(val_indices)

    def _get_subset(self, indices):
        # Thin wrapper so both splits share this dataset instance.
        return Subset(self, indices)


def train(train_loader, model, criterion, optimizer, Regularization=True, a=0.1, b=1):
    """Run one training epoch on GPU and return the mean per-batch loss.

    Args:
        train_loader: yields (inputs, targets) batches.
        model: the network being optimised (bare or DataParallel-wrapped).
        criterion: base loss on (output, target).
        Regularization: when True, add a * L1(activations)
            + b * L1(final-layer weights) to each batch loss.
        a: coefficient on the output-activation L1 penalty.
        b: coefficient on the final-layer weight L1 penalty.

    Returns:
        float: accumulated loss divided by the number of batches.
    """
    l1_loss = nn.L1Loss()
    penalty = 0
    model.train()
    train_loss = 0.0
    for inputs, target in tqdm(train_loader):  # renamed: `input` shadowed a builtin
        inputs = inputs.cuda()
        target = target.cuda()
        output = model(inputs)
        if Regularization:
            # Support both a bare model and a DataParallel wrapper; the
            # original hard-coded `model.module.fc` and broke otherwise.
            fc = model.module.fc if hasattr(model, 'module') else model.fc
            penalty = a * l1_loss(output, torch.zeros_like(output)) \
                + b * l1_loss(fc.weight, torch.zeros_like(fc.weight))
        loss = criterion(output, target) + penalty
        # Standard SGD step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()

    return train_loss / len(train_loader)


def validate(val_loader, model, criterion):
    """Evaluate the model on GPU without gradients.

    Args:
        val_loader: yields (inputs, targets) batches.
        model: the network to evaluate.
        criterion: loss on (output, target).

    Returns:
        tuple: (loss divided by len(val_loader.dataset) — note this is
        per-sample while train() reports per-batch; list of per-batch
        sigmoid-probability arrays; list of per-batch label arrays).
    """
    model.eval()
    val_loss = 0.0
    val_pred = []
    val_label = []
    with torch.no_grad():
        for inputs, target in tqdm(val_loader):  # renamed: `input` shadowed a builtin
            inputs = inputs.cuda()
            target = target.cuda()
            output = model(inputs)
            # .item() keeps the accumulator a plain float; the original
            # summed 0-dim tensors and returned a tensor.
            val_loss += criterion(output, target).item()

            probs = torch.sigmoid(output)  # logits -> per-class probabilities
            val_pred.append(probs.data.cpu().numpy())
            val_label.append(target.data.cpu().numpy())
    return val_loss / len(val_loader.dataset), val_pred, val_label


def normalize_probabilities_to_one(df: pd.DataFrame, group_columns: list) -> pd.DataFrame:
    """Normalise each row of `group_columns` so the group sums to 1.

    Mutates `df` in place and also returns it (the score() caller relies
    on the in-place behaviour).

    Raises:
        ValueError: if any row of the group sums to zero. The original
            raised ParticipantVisibleError, a name defined nowhere in this
            file, so the intended error would itself crash with NameError.
    """
    row_totals = df[group_columns].sum(axis=1)
    if row_totals.min() == 0:
        raise ValueError(
            'All rows must contain at least one non-zero prediction')
    for col in group_columns:
        df[col] /= row_totals
    return df


def score(solution: pd.DataFrame, submission: pd.DataFrame, row_id_column_name: str) -> float:
    """Competition metric: mean of per-label-group weighted log losses plus
    a derived 'any_injury' log loss.

    Warning: mutates BOTH input frames in place (drops the id column and
    renormalises probability groups).

    Args:
        solution: ground truth with the probability columns, per-category
            `*_weight` columns and `any_injury_weight`.
        submission: predictions with the same probability columns.
        row_id_column_name: id column dropped from both frames.

    Returns:
        float: mean log loss over the six label groups.

    Raises:
        ValueError: on a missing submission column or an all-zero
            probability row. (The original raised ParticipantVisibleError,
            which is undefined in this file and would crash with NameError.)
    """
    del solution[row_id_column_name]
    del submission[row_id_column_name]

    # Binary targets have healthy/injury columns; organ targets have
    # healthy/low/high severity columns.
    binary_targets = ['bowel', 'extravasation']
    triple_level_targets = ['kidney', 'liver', 'spleen']
    all_target_categories = binary_targets + triple_level_targets

    label_group_losses = []
    for category in all_target_categories:
        if category in binary_targets:
            col_group = [f'{category}_healthy', f'{category}_injury']
        else:
            col_group = [f'{category}_healthy',
                         f'{category}_low', f'{category}_high']
        solution = normalize_probabilities_to_one(solution, col_group)

        for col in col_group:
            if col not in submission.columns:
                raise ValueError(f'Missing submission column {col}')
        submission = normalize_probabilities_to_one(submission, col_group)

        label_group_losses.append(
            sklearn.metrics.log_loss(
                y_true=solution[col_group].values,
                y_pred=submission[col_group].values,
                sample_weight=solution[f'{category}_weight'].values
            )
        )

    # Derive any_injury as the max of 1 - p(healthy) over all label groups.
    healthy_cols = [x + '_healthy' for x in all_target_categories]
    any_injury_labels = (1 - solution[healthy_cols]).max(axis=1)
    any_injury_predictions = (1 - submission[healthy_cols]).max(axis=1)
    any_injury_loss = sklearn.metrics.log_loss(
        y_true=any_injury_labels.values,
        y_pred=any_injury_predictions.values,
        sample_weight=solution['any_injury_weight'].values
    )

    label_group_losses.append(any_injury_loss)
    return np.mean(label_group_losses)


if __name__ == '__main__':

    # Hyper-parameters.
    depth = 8            # slices per scan = model input channels
    BATCH_SIZE = 128
    LR = 0.004
    NUM_FOLDS = 5
    NUM_EPOCHS = 500     # the original shadowed this with the loop variable `epoch`

    torch.manual_seed(2023)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True

    # Multi-label BCE with per-class positive weights matching the
    # competition injury weighting.
    pos_weight = torch.Tensor([1, 2, 1, 6, 1, 2, 4, 1, 2, 4, 1, 2, 4, 6]).cuda()
    criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)

    # Model, replicated across three GPUs.
    device_id = [0, 1, 2]
    model = CNNModel(mode="train", depth=depth)  # TODO: swap in other backbones
    model = model.cuda()
    model = torch.nn.parallel.DataParallel(model, device_ids=device_id)

    optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=0.0001)

    # One (train, val) dataloader pair per fold; the original spelled out
    # all five pairs by hand.
    train_csv_path = 'train_test_data/datasets/train.csv'
    dataloaders = []
    for fold in range(NUM_FOLDS):
        train_data, val_data = AbdominalData(
            train_csv_path, current_fold=fold).get_splits()
        dataloaders.append((
            DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=32),
            DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=False, num_workers=32),
        ))

    # Experiment tracking.
    wandb.init(
        # set the wandb project where this run will be logged
        project="rasnac",
        entity="bravebobo",
        # track hyperparameters and run metadata
        config={
            "architecture": "transformer",
            "dataset": "rasnac",
            "epochs": 1000,
        },
        name=f'baseline_2_5D_r_a_1_{depth}_{BATCH_SIZE}_momuntum_0.9'
    )

    # The model emits 14 logits; the last ('any_injury') is dropped from
    # predictions before scoring, while labels keep it for weighting.
    PRED_COLUMNS = ['bowel_healthy', 'bowel_injury', 'extravasation_healthy',
                    'extravasation_injury', 'kidney_healthy', 'kidney_low', 'kidney_high',
                    'liver_healthy', 'liver_low', 'liver_high', 'spleen_healthy',
                    'spleen_low', 'spleen_high']
    LABEL_COLUMNS = PRED_COLUMNS + ['any_injury']

    # Training loop: rotate through the folds, one fold per epoch.
    prev_val_best_loss = float('inf')
    remove_path = ''
    for epoch in tqdm(range(NUM_EPOCHS)):
        # Decay LR by 10% every 5 epochs after warm-up, floored near 3e-4.
        if epoch > 20 and epoch % 5 == 0 and optimizer.param_groups[0]['lr'] > 0.0003:
            for p in optimizer.param_groups:
                p['lr'] *= 0.9

        train_dataloader, val_dataloader = dataloaders[epoch % NUM_FOLDS]
        train_loss = train(train_dataloader, model, criterion, optimizer)
        val_loss, val_pred, val_label = validate(val_dataloader, model, criterion)

        # Assemble prediction/label frames for the competition metric.
        val_pred = np.vstack(val_pred)[:, :-1]  # drop any_injury logit
        val_label = np.vstack(val_label)

        val_pred = pd.DataFrame(val_pred, columns=PRED_COLUMNS)
        val_pred['patient_id'] = range(len(val_pred))

        val_label = pd.DataFrame(val_label, columns=LABEL_COLUMNS)
        val_label['patient_id'] = range(len(val_label))

        # Metric sample weights: healthy rows weigh 1, injuries more.
        # (The original computed extravasation_weight twice; once suffices.)
        val_label['bowel_weight'] = val_label['bowel_healthy'].map({1: 1}).fillna(2)
        val_label['extravasation_weight'] = val_label['extravasation_healthy'].map({1: 1}).fillna(6)
        for organ in ('kidney', 'liver', 'spleen'):
            organ_level = val_label[[f'{organ}_healthy', f'{organ}_low', f'{organ}_high']].values.argmax(1)
            val_label[f'{organ}_weight'] = pd.Series(organ_level).map({0: 1, 1: 2, 2: 4})
        val_label['any_injury_weight'] = val_label['any_injury'].map({0: 1, 1: 6})

        # Replace the raw BCE number with the competition metric.
        val_loss = score(val_label, val_pred, 'patient_id')

        print(train_loss, val_loss)
        if val_loss < prev_val_best_loss:
            # Keep only the best checkpoint on disk.
            if remove_path:
                try:
                    os.remove(remove_path)
                except OSError:
                    pass
            ckpt_path = f'checkpoints/{depth}_{BATCH_SIZE}_a_1_{val_loss:.5f}_model_{epoch}.pth'
            torch.save({'epoch': epoch,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict()},
                       ckpt_path)
            remove_path = ckpt_path
            prev_val_best_loss = val_loss

        wandb.log({"train_loss": train_loss,
                   "val_loss": val_loss,
                   "lRS": optimizer.param_groups[0]['lr']})
    wandb.finish()