import os
import pydicom
import numpy as np
import pandas as pd
from tqdm import tqdm
from einops import rearrange
import sklearn
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import KFold
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data.dataset import Dataset
from torch.utils.data import Dataset, DataLoader, Subset
from torchvision.transforms.v2 import Resize, Compose, Normalize, ColorJitter, RandomAffine, RandomErasing, ToTensor, GaussianBlur
from torchsummary import summary
from video_swin_transformer_dropkey import SwinTransformer3D
import warnings
import wandb
warnings.filterwarnings("ignore")

# dataset

TRAIN_IMG_PATH = '/home/fd_chen/kaggle/rasac/train_test_data/datasets/train_images'  # TODO: update this path

def train(train_loader, model, criterion, optimizer):
    """Run one training epoch and return the mean per-batch loss.

    Args:
        train_loader: yields (images, targets) batches.
        model: network being trained; must already live on the GPU.
        criterion: loss function taking (logits, targets).
        optimizer: optimizer stepping the model parameters.

    Returns:
        float: accumulated loss divided by the number of batches.
    """
    model.train()
    train_loss = 0.0
    # `images` renamed from `input`, which shadowed the builtin.
    for images, target in tqdm(train_loader):
        images = images.cuda()
        target = target.cuda()
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    return train_loss / len(train_loader)


def validate(val_loader, model, criterion):
    """Evaluate the model on a validation loader.

    Args:
        val_loader: yields (images, targets) batches.
        model: network to evaluate; must already live on the GPU.
        criterion: loss function taking (logits, targets).

    Returns:
        tuple: (mean loss, list of per-batch sigmoid prediction arrays,
        list of per-batch label arrays).
    """
    model.eval()
    val_loss = 0.0
    val_pred = []
    val_label = []
    with torch.no_grad():
        for images, target in tqdm(val_loader):
            images = images.cuda()
            target = target.cuda()
            output = model(images)
            # .item() keeps the accumulator a plain float; the original
            # summed tensors and returned a tensor instead of a number.
            val_loss += criterion(output, target).item()
            output = torch.sigmoid(output)
            val_pred.append(output.cpu().numpy())
            val_label.append(target.cpu().numpy())
    # NOTE(review): divides by dataset size while train() divides by batch
    # count, so the two losses are on different scales. Kept as-is because
    # the caller overwrites this value with the competition score.
    return val_loss / len(val_loader.dataset), val_pred, val_label


def normalize_probabilities_to_one(df: pd.DataFrame, group_columns: list) -> pd.DataFrame:
    """Rescale ``group_columns`` so each row sums to 1 (modifies ``df`` in place).

    Args:
        df: frame holding the probability columns; mutated in place.
        group_columns: columns forming one mutually-exclusive label group.

    Returns:
        pd.DataFrame: the same frame with the group columns renormalized.

    Raises:
        ValueError: if any row's group probabilities sum to zero. (The
            original raised the undefined name ``ParticipantVisibleError``,
            which would itself fail with a NameError.)
    """
    row_totals = df[group_columns].sum(axis=1)
    if row_totals.min() == 0:
        raise ValueError(
            'All rows must contain at least one non-zero prediction')
    for col in group_columns:
        df[col] /= row_totals
    return df


def score(solution: pd.DataFrame, submission: pd.DataFrame, row_id_column_name: str) -> float:
    """Compute the competition metric: mean of per-label-group log losses.

    Both frames are mutated in place: the row-id column is dropped and each
    probability group is renormalized to sum to 1 per row.

    Args:
        solution: one-hot ground-truth columns plus ``*_weight`` columns.
        submission: predicted probabilities for the same label columns.
        row_id_column_name: identifier column removed before scoring.

    Returns:
        float: mean of the weighted log losses over the five label groups
        plus a derived ``any_injury`` log loss.

    Raises:
        ValueError: if a required submission column is missing, or if a
            probability group sums to zero in some row.
    """
    del solution[row_id_column_name]
    del submission[row_id_column_name]

    # Calculate the label group log losses
    binary_targets = ['bowel', 'extravasation']
    triple_level_targets = ['kidney', 'liver', 'spleen']
    all_target_categories = binary_targets + triple_level_targets

    label_group_losses = []
    for category in all_target_categories:
        if category in binary_targets:
            col_group = [f'{category}_healthy', f'{category}_injury']
        else:
            col_group = [f'{category}_healthy',
                         f'{category}_low', f'{category}_high']
        solution = normalize_probabilities_to_one(solution, col_group)

        for col in col_group:
            if col not in submission.columns:
                # The original raised the undefined ParticipantVisibleError
                # name (NameError at runtime); use a standard exception.
                raise ValueError(f'Missing submission column {col}')
        submission = normalize_probabilities_to_one(submission, col_group)

        label_group_losses.append(
            sklearn.metrics.log_loss(
                y_true=solution[col_group].values,
                y_pred=submission[col_group].values,
                sample_weight=solution[f'{category}_weight'].values
            )
        )

    # Derive a new any_injury label by taking the max of 1 - p(healthy) for each label group
    healthy_cols = [x + '_healthy' for x in all_target_categories]
    any_injury_labels = (1 - solution[healthy_cols]).max(axis=1)
    any_injury_predictions = (1 - submission[healthy_cols]).max(axis=1)
    any_injury_loss = sklearn.metrics.log_loss(
        y_true=any_injury_labels.values,
        y_pred=any_injury_predictions.values,
        sample_weight=solution['any_injury_weight'].values
    )
    label_group_losses.append(any_injury_loss)
    return np.mean(label_group_losses)


def fetch_img_paths(train_img_path):
    """Collect per-scan, instance-number-sorted DICOM file paths.

    Expects a ``<root>/<patient>/<scan>/<instance>.dcm`` directory layout.
    Only files whose numeric stem is in [10, 1500] are kept; each scan's
    paths are sorted by instance number.

    Args:
        train_img_path: root directory of the training images.

    Returns:
        list[list[str]]: one list of file paths per scan.
    """
    img_paths = []
    print('Scanning directories...')
    for patient in tqdm(os.listdir(train_img_path)):
        for scan in os.listdir(os.path.join(train_img_path, patient)):
            scan_dir = os.path.join(train_img_path, patient, scan)
            scans = [
                [filename[:-4], os.path.join(scan_dir, filename)]
                for filename in os.listdir(scan_dir)
                if filename.endswith('.dcm') and 10 <= int(filename[:-4]) <= 1500
            ]
            # Skip scans with no usable instances; the original would
            # crash indexing an empty array here. (Also removed the dead
            # len_list / min_length bookkeeping that was never returned.)
            if not scans:
                continue
            scans.sort(key=lambda pair: int(pair[0]))
            img_paths.append([path for _, path in scans])
    return img_paths

def select_images_p(numbers, sigma=0.01):
    """Return ``numbers`` sampling probabilities from a Gaussian profile.

    Draws ``numbers`` normal samples centred at ``numbers / 2`` and rescales
    them so they sum to 1.

    Args:
        numbers: how many probabilities to produce.
        sigma: standard deviation of the Gaussian. Bug fixed: this parameter
            was previously ignored (the scale was hard-coded to 1).

    Returns:
        list[float]: probabilities summing to 1.
    """
    samples = np.random.normal(loc=numbers / 2, scale=sigma, size=numbers)
    normalized_samples = samples / np.sum(samples)
    return normalized_samples.tolist()

def select_elements_with_spacing(input_list, depth):
    """Pick ``depth`` evenly spaced elements from ``input_list``.

    Returns the list unchanged when it already has at most ``depth``
    elements; otherwise samples ``depth`` indices (endpoints included)
    with ``np.linspace``.

    Bug fixed: the linspace count was hard-coded to 32, so any other
    requested ``depth`` was silently ignored.

    Args:
        input_list: sequence to subsample (e.g. slice file paths).
        depth: number of elements to keep.

    Returns:
        list: ``depth`` evenly spaced elements (or the whole input).
    """
    length = len(input_list)
    if length <= depth:
        return input_list
    indices = np.linspace(0, length - 1, depth).astype(int)
    return np.array(input_list)[indices].tolist()
     

def preprocess_jpeg(dicm_img_path):
    """Load one DICOM slice and return it as an (H, W, 3) array.

    The windowed grayscale image is replicated into three identical
    channels so it can feed an RGB-pretrained backbone.
    """
    ds = pydicom.read_file(dicm_img_path)
    slice_2d = np.array(clip_rescale_extract_dicom_image(ds))
    # Replicate the single grayscale plane into three channels.
    return np.stack([slice_2d, slice_2d, slice_2d], axis=-1)


def clip_rescale_extract_dicom_image(dicom_ds):
    """Apply rescale + windowing to a DICOM dataset's pixel array.

    Args:
        dicom_ds: pydicom dataset with ``pixel_array``, ``WindowCenter``
            and ``WindowWidth``; ``RescaleSlope``/``RescaleIntercept`` are
            used when present.

    Returns:
        np.ndarray: windowed image scaled to [0, 255] as int16.
    """
    image = dicom_ds.pixel_array

    # Find rescale params; fall back to identity when the tags are absent.
    # Bug fixed: previously slope/intercept were undefined (NameError) for
    # datasets without RescaleIntercept/RescaleSlope.
    if ("RescaleIntercept" in dicom_ds) and ("RescaleSlope" in dicom_ds):
        intercept = float(dicom_ds.RescaleIntercept)
        slope = float(dicom_ds.RescaleSlope)
    else:
        intercept = 0.0
        slope = 1.0
    center = int(dicom_ds.WindowCenter)
    width = int(dicom_ds.WindowWidth)
    low = center - width / 2
    high = center + width / 2

    image = (image * slope) + intercept
    image = np.clip(image, low, high)
    # Scale by the post-clip maximum into [0, 255].
    image = (image / np.max(image) * 255).astype(np.int16)
    return image

# dataset
class AbdominalData(Dataset):  # TODO: consider adding segmentation images
    """K-fold dataset of abdominal CT scans.

    Each item is an augmented stack of `depth` DICOM slices rearranged to
    (C, T, H, W), paired with the patient's label vector from the CSV.
    """

    def __init__(self, df_path, current_fold, num_fold=5,depth=32):
        """Scan TRAIN_IMG_PATH for slice paths and load the label CSV.

        Args:
            df_path: CSV with `patient_id` first, then label columns.
            current_fold: which KFold split `get_splits()` returns.
            num_fold: number of cross-validation folds.
            depth: number of slices sampled per scan.
        """
        super().__init__()
        # collect all the image instance paths
        self.depth = depth
        self.img_paths = fetch_img_paths(TRAIN_IMG_PATH)
        self.df = pd.read_csv(df_path)
        self.num_fold = num_fold
        self.current_fold = current_fold
        # NOTE(review): KFold without shuffle — folds follow scan/dir order.
        self.kf = KFold(n_splits=num_fold)
        # NOTE(review): this augmentation pipeline also runs for validation
        # items (there is no train/val transform split) — confirm intended.
        self.transform = Compose([
            Resize((224, 224), antialias=True),  # retrain: use a small resolution first, then a larger one
            ColorJitter(brightness=0.2),  # Randomly adjust brightness
            ColorJitter(contrast=0.2),  # Randomly adjust contrast
            GaussianBlur(kernel_size=3, sigma=(0.1, 2.0)),  # add Gaussian blur
            RandomAffine(degrees=0, scale=(0.8, 1.2)),
            RandomErasing(p=0.2, scale=(0.02, 0.2)),  # Coarse dropout
            Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
            # NOTE(review): ToTensor AFTER Normalize is unusual — the input
            # is already a tensor by this point; confirm the ordering.
            ToTensor(),
        ])

    def __len__(self):
        # One item per scan (a patient can contribute several scans).
        return len(self.img_paths)

    def __getitem__(self, idx):
        # Sample `depth` evenly spaced slice paths from this scan.
        dicom_images = select_elements_with_spacing(self.img_paths[idx],depth=self.depth)
        # Path layout is .../<patient_id>/<scan_id>/<instance>.dcm.
        patient_id = dicom_images[0].split('/')[-3]
        images = []

        for d in dicom_images:
            image = preprocess_jpeg(d)
            images.append(image)
        images = np.array(images)/1.0  # cast to float
        image = torch.tensor(images, dtype=torch.float)
        image = rearrange(image, 't  h w c ->   t c h w')
        image = self.transform(image).squeeze(dim=0)
        # The model consumes channel-first video: (C, T, H, W).
        image = rearrange(image, ' t c h w ->  c t h w')
        # Labels: every CSV column after patient_id for this patient.
        labels = self.df[self.df.patient_id == int(patient_id)].values[0][1:]
        labels = torch.from_numpy(labels).float()
        return image, labels

    def get_splits(self):
        """Return (train_subset, val_subset) for `current_fold`."""
        fold_data = list(self.kf.split(self.img_paths))
        train_indices, val_indices = fold_data[self.current_fold]
        train_data = self._get_subset(train_indices)
        val_data = self._get_subset(val_indices)
        return train_data, val_data

    def _get_subset(self, indices):
        # View of this dataset restricted to a fixed index set.
        return Subset(self, indices)


class TransformerMode(nn.Module):
    """Video Swin Transformer backbone with a small 3D conv head.

    Produces 14 logits per clip; the training script treats the last one
    as the `any_injury` output.
    """

    def __init__(self,mode='train'):
        """
        Args:
            mode: 'train' loads the pretrained Swin checkpoint from disk;
                any other value leaves the backbone randomly initialised.
        """
        super().__init__()
        model = SwinTransformer3D()
        if mode == 'train':
            # strict=False: mismatched/missing checkpoint keys are ignored.
            model.load_state_dict(torch.load("checkpoints/swin_base_patch244_window1677_sthv2.pth", map_location=torch.device('cpu')), strict=False)
        self.backbone = model
        self.drop = nn.Dropout3d(p=0.2)

        # NOTE(review): assumes the backbone emits 768 feature channels —
        # confirm against the SwinTransformer3D configuration.
        self.conv3d1 = nn.Conv3d(768, 64, kernel_size=(3, 3, 3), padding=(2, 2, 2), bias=False)
        # batch norm
        self.bn1 = nn.BatchNorm3d(64)
        # NOTE(review): named `relu` but is actually GELU.
        self.relu = nn.GELU()
        self.pool = nn.AdaptiveMaxPool3d((1, 1, 1))
        self.output = nn.Linear(64, 14)

    def forward(self, x):
        """Map an input clip to (B, 14) logits."""
        x = self.backbone(x)
        x = self.drop(x)
        x = self.conv3d1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.pool(x)  # global max pool -> (B, 64, 1, 1, 1)
        x = torch.flatten(x, 1)
        x = self.output(x)
        return x


if __name__ == "__main__":
    # Experiment configuration and tracking.
    depth = 4  # slices sampled per scan
    wandb.init(
        project="rasnac",
        entity="bravebobo",
        config={
            "learning_rate": 0.002,
            "architecture": "transformer",
            "dataset": "rasnac",
            "epochs": 1000,
        }
    )

    model = TransformerMode(mode='train')
    model = model.cuda()

    BATCH_SIZE = 128
    LR = 0.002
    NUM_EPOCHS = 1000
    NUM_FOLDS = 5

    # Build one (train, val) dataloader pair per CV fold. This replaces
    # five copy-pasted dataset/dataloader setup stanzas.
    train_csv_path = 'train_test_data/datasets/train.csv'
    dataloaders = []
    for fold in range(NUM_FOLDS):
        train_data, val_data = AbdominalData(
            train_csv_path, current_fold=fold, depth=depth).get_splits()
        dataloaders.append((
            DataLoader(train_data, batch_size=BATCH_SIZE,
                       shuffle=True, num_workers=28),
            DataLoader(val_data, batch_size=BATCH_SIZE,
                       shuffle=False, num_workers=28),
        ))

    # optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=1e-4)

    device_id = [0, 1, 2]
    # Positive-class weights for the 14 BCE outputs (label order matches
    # PRED_COLUMNS below, plus the trailing any_injury output).
    pos_weight = torch.Tensor([1, 2, 1, 6, 1, 2, 4, 1, 2, 4, 1, 2, 4, 6]).cuda()
    criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    prev_val_best_loss = float('inf')
    model = torch.nn.parallel.DataParallel(model, device_ids=device_id)

    PRED_COLUMNS = ['bowel_healthy', 'bowel_injury', 'extravasation_healthy',
                    'extravasation_injury', 'kidney_healthy', 'kidney_low', 'kidney_high',
                    'liver_healthy', 'liver_low', 'liver_high', 'spleen_healthy',
                    'spleen_low', 'spleen_high']

    # Path of the current best checkpoint, deleted when a better one lands.
    remove_path = ''
    for epoch in tqdm(range(NUM_EPOCHS)):
        # After epoch 50, decay the LR by 10% every 10 epochs, floored at 4e-4.
        if epoch > 50 and epoch % 10 == 0 and optimizer.state_dict()['param_groups'][0]['lr'] > 0.0004:
            for p in optimizer.param_groups:
                p['lr'] *= 0.9

        # Rotate through the folds, one fold per epoch.
        train_dataloader, val_dataloader = dataloaders[epoch % NUM_FOLDS]
        train_loss = train(train_dataloader, model, criterion, optimizer)
        val_loss, val_pred, val_label = validate(val_dataloader, model, criterion)

        # Assemble predictions/labels into the competition scoring format.
        val_pred = np.vstack(val_pred)
        val_pred = val_pred[:, :-1]  # drop the trailing any_injury output
        val_label = np.vstack(val_label)

        val_pred = pd.DataFrame(val_pred, columns=PRED_COLUMNS)
        val_pred['patient_id'] = range(len(val_pred))

        val_label = pd.DataFrame(val_label, columns=PRED_COLUMNS + ['any_injury'])
        val_label['patient_id'] = range(len(val_pred))

        # Per-sample weights used by the competition metric. (The original
        # computed extravasation_weight twice; the duplicate is removed.)
        val_label['bowel_weight'] = val_label['bowel_healthy'].map({
            1: 1}).fillna(2)
        val_label['extravasation_weight'] = val_label['extravasation_healthy'].map({
            1: 1}).fillna(6)

        # Three-level organs: weight 1/2/4 for healthy/low/high severity.
        for organ in ('kidney', 'liver', 'spleen'):
            organ_label = val_label[[f'{organ}_healthy',
                                     f'{organ}_low', f'{organ}_high']].values.argmax(1)
            val_label[f'{organ}_weight'] = pd.Series(organ_label).map({0: 1, 1: 2, 2: 4})

        val_label['any_injury_weight'] = val_label['any_injury'].map({0: 1, 1: 6})
        val_loss = score(val_label, val_pred, 'patient_id')

        print(train_loss, val_loss)
        if val_loss < prev_val_best_loss:
            try:
                # Drop the previous best checkpoint; missing file is fine
                # (first epoch, or already removed).
                os.remove(remove_path)
            except OSError:
                pass
            finally:
                ckpt_path = f'./checkpoints/{depth}_{LR}_{val_loss:.5f}_model_{epoch}.pth'
                torch.save(model.state_dict(), ckpt_path)
                remove_path = ckpt_path
                prev_val_best_loss = val_loss
        if epoch % 10 == 0:
            # Periodic snapshot regardless of validation score.
            torch.save(model.state_dict(), f'./checkpoints/{depth}_{LR}_{train_loss:.5f}_model_{epoch}.pth')

        wandb.log({"train_loss": train_loss,
                   "val_loss": val_loss,
                   "lRS": optimizer.state_dict()['param_groups'][0]['lr']})

    wandb.finish()
