import glob
import os
import cv2
import random
import pydicom
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt

import torch
import torchvision
import torch.nn as nn
from torch.optim import Adam
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data.dataset import Dataset
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset, DataLoader, Subset
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchvision.transforms.v2 import Resize, Compose, RandomHorizontalFlip, ColorJitter, RandomAffine, RandomErasing, ToTensor
import sklearn
from sklearn.model_selection import KFold, StratifiedKFold  # cross-validation fold splitting
from sklearn.metrics import accuracy_score, roc_auc_score

from torch.utils.tensorboard import SummaryWriter

import warnings
warnings.filterwarnings("ignore")

'''
Build the dataset and assemble (stack) the image slices per scan.
'''
TRAIN_IMG_PATH = '/home/fd_chen/kaggle/rasac/data/trian_images' # TODO: update this path ("trian" looks like a typo for "train" — confirm against the actual directory)

def fetch_img_paths(train_img_path):
    '''
    Group image file paths by scan, one list per scan.

    Walks train_img_path/<patient>/<scan>/ and returns a list with one entry
    per scan; each entry is the list of full image paths inside that scan.

    Fix: the original body read the module-level TRAIN_IMG_PATH instead of
    this parameter, so the argument was silently ignored.
    '''
    img_paths = []

    print('Scanning directories...')
    for patient in tqdm(os.listdir(train_img_path)):
        patient_dir = os.path.join(train_img_path, patient)
        for scan in os.listdir(patient_dir):
            scan_dir = os.path.join(patient_dir, scan)
            # One flat list of image paths per scan.
            img_paths.append(
                [os.path.join(scan_dir, img) for img in os.listdir(scan_dir)]
            )
    return img_paths

def select_elements_with_spacing(input_list, spacing):
    """Pick 4 roughly evenly spaced elements from the central 40%-60% band.

    NOTE: the caller-supplied `spacing` is only used for the minimum-length
    check; the actual stride is derived from the band width below. The
    original code silently overwrote the parameter, which hid this fact.

    Raises:
        ValueError: if input_list has fewer than 4 * spacing elements.
    """
    if len(input_list) < spacing * 4:
        raise ValueError("List should contain at least 4 * spacing elements.")
    lower_bound = int(len(input_list) * 0.4)
    upper_bound = int(len(input_list) * 0.6)

    # Stride that places two interior picks between the band edges.
    step = (upper_bound - lower_bound) // 3
    selected_indices = [lower_bound, lower_bound + step, lower_bound + 2 * step, upper_bound]
    return [input_list[index] for index in selected_indices]

def preprocess_jpeg(jpeg_path):
    """Load a JPEG, convert to grayscale, and scale pixel values to [0, 1].

    Raises:
        FileNotFoundError: if the image cannot be read. cv2.imread returns
        None instead of raising, which previously caused a cryptic
        cv2.cvtColor TypeError on missing/corrupt files.
    """
    img = cv2.imread(jpeg_path)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {jpeg_path}")
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)/255

# dataset
# dataset
class AbdominalData(Dataset): # TODO: consider adding segmentation images
    """Per-scan CT dataset: selects 4 slices from the middle of each scan,
    augments them, and returns an (image stack, label vector) pair.

    NOTE(review): every instance re-scans TRAIN_IMG_PATH on construction,
    so building one instance per fold repeats the full directory walk.
    """
    def __init__(self, df_path, current_fold, num_fold = 5):
        """
        df_path: path to the labels CSV (read with pandas).
        current_fold: which KFold split this instance's get_splits() returns.
        num_fold: number of cross-validation folds.
        """
        super().__init__()
        # collect all the image instance paths
        self.img_paths = fetch_img_paths(TRAIN_IMG_PATH) 
        # Labels table; __getitem__ assumes patient_id is the first column
        # and all remaining columns are label values — TODO confirm schema.
        self.df = pd.read_csv(df_path)
        self.num_fold = num_fold
        self.current_fold = current_fold
        # KFold defaults to shuffle=False, so the splits are deterministic
        # and identical across separately constructed instances.
        self.kf = KFold(n_splits=num_fold)
        self.transform = Compose([
#                             Resize((256, 256), antialias=True),
                            RandomHorizontalFlip(),  # Randomly flip images left-right
                            ColorJitter(brightness=0.2),  # Randomly adjust brightness
                            ColorJitter(contrast=0.2),  # Randomly adjust contrast
                            RandomAffine(degrees=0, shear=10),  # Apply shear transformation
                            RandomAffine(degrees=0, scale=(0.8, 1.2)),  # Apply zoom transformation
                            RandomErasing(p=0.2, scale=(0.02, 0.2)), # Coarse dropout
                            # NOTE(review): input is already a torch tensor at this
                            # point; v2.ToTensor on tensors is deprecated — verify intent.
                            ToTensor(),
                        ])
    
    def __len__(self):
        # One dataset item per scan (not per patient).
        return len(self.img_paths)
    
    def __getitem__(self, idx):
        """Return (image, labels): a stacked/augmented slice tensor and the
        float label vector for the scan's patient."""
        # `spacing` here only gates the minimum-length check in the helper;
        # the actual slice stride comes from the 40%-60% band.
        dicom_images = select_elements_with_spacing(self.img_paths[idx],spacing = 2)
        # Third-from-last path component is the patient directory
        # (assumes '/'-separated paths, i.e. POSIX — TODO confirm).
        patient_id = dicom_images[0].split('/')[-3]
        images = []

        for d in dicom_images:
            image = preprocess_jpeg(d)
            images.append(image)
        images = np.stack(images)
        # Add a channel dim so each of the 4 slices is transformed as a
        # 1-channel image, then drop it again after augmentation.
        image = torch.tensor(images, dtype = torch.float).unsqueeze(dim = 1)
        image = self.transform(image).squeeze(dim = 1)
        # labels=torch.from_numpy(self.df.loc[patient_id].values[:]).float()
        # All columns after patient_id become the label vector.
        labels = self.df[self.df.patient_id == int(patient_id)].values[0][1:]
        labels= torch.from_numpy(labels).float()
        return image,labels
    
    def get_splits(self):
        """Return (train_subset, val_subset) for this instance's fold."""
        fold_data = list(self.kf.split(self.img_paths))
        train_indices, val_indices = fold_data[self.current_fold]
        train_data = self._get_subset(train_indices)
        val_data = self._get_subset(val_indices)
        return train_data, val_data

    def _get_subset(self, indices):
        # Wrap index lists as torch Subsets sharing this dataset instance.
        return Subset(self, indices)
    
    

def train(train_loader, model, criterion, optimizer):
    """Run one training epoch.

    Returns the mean per-batch loss. Moves each batch to CUDA; fix renames
    the loop variable so it no longer shadows the builtin `input`.
    """
    model.train()
    train_loss = 0.0
    for inputs, targets in tqdm(train_loader):
        inputs = inputs.cuda()
        targets = targets.cuda()
        # compute output
        output = model(inputs)
        loss = criterion(output, targets)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()

    return train_loss/len(train_loader)

def validate(val_loader, model, criterion):
    """Evaluate the model on val_loader.

    Returns:
        (mean per-batch loss, list of per-batch sigmoid prediction arrays,
         list of per-batch label arrays).

    Fixes: accumulate the loss as a Python float via .item() (the original
    kept CUDA tensors alive across the epoch), and normalize by the number
    of batches to match train() (the original divided by dataset size; the
    caller discards this value before use, so this is safe).
    """
    model.eval()
    val_loss = 0.0
    val_pred = []
    val_label = []
    with torch.no_grad():
        for inputs, targets in tqdm(val_loader):
            inputs = inputs.cuda()
            targets = targets.cuda()
            output = model(inputs)
            val_loss += criterion(output, targets).item()

            output = torch.sigmoid(output)
            val_pred.append(output.data.cpu().numpy())
            val_label.append(targets.data.cpu().numpy())
    return val_loss / len(val_loader), val_pred, val_label


def normalize_probabilities_to_one(df: pd.DataFrame, group_columns: list) -> pd.DataFrame:
    """Normalize group_columns in-place so each row sums to 1.

    Raises:
        ValueError: if any row's group total is zero. (The original raised
        ParticipantVisibleError, which is undefined in this file and would
        surface as a NameError.)
    """
    row_totals = df[group_columns].sum(axis=1)
    if row_totals.min() == 0:
        raise ValueError('All rows must contain at least one non-zero prediction')
    for col in group_columns:
        df[col] /= row_totals
    return df

def score(solution: pd.DataFrame, submission: pd.DataFrame, row_id_column_name: str) -> float:
    """Competition metric: mean of per-category weighted log losses plus a
    derived any_injury log loss.

    solution must carry per-category `<cat>_weight` columns and
    `any_injury_weight`. Fixes: operate on copies so the callers'
    dataframes are not mutated by the `del`/normalization below, and raise
    ValueError instead of the undefined ParticipantVisibleError.
    """
    # Work on copies — the original `del` mutated the caller's dataframes.
    solution = solution.copy()
    submission = submission.copy()
    del solution[row_id_column_name]
    del submission[row_id_column_name]

    # Calculate the label group log losses
    binary_targets = ['bowel', 'extravasation']
    triple_level_targets = ['kidney', 'liver', 'spleen']
    all_target_categories = binary_targets + triple_level_targets

    label_group_losses = []
    for category in all_target_categories:
        if category in binary_targets:
            col_group = [f'{category}_healthy', f'{category}_injury']
        else:
            col_group = [f'{category}_healthy', f'{category}_low', f'{category}_high']
        solution = normalize_probabilities_to_one(solution, col_group)

        for col in col_group:
            if col not in submission.columns:
                raise ValueError(f'Missing submission column {col}')
        submission = normalize_probabilities_to_one(submission, col_group)

        label_group_losses.append(
            sklearn.metrics.log_loss(
                y_true=solution[col_group].values,
                y_pred=submission[col_group].values,
                sample_weight=solution[f'{category}_weight'].values
            )
        )

    # Derive a new any_injury label by taking the max of 1 - p(healthy) for each label group
    healthy_cols = [x + '_healthy' for x in all_target_categories]
    any_injury_labels = (1 - solution[healthy_cols]).max(axis=1)
    any_injury_predictions = (1 - submission[healthy_cols]).max(axis=1)
    any_injury_loss = sklearn.metrics.log_loss(
        y_true=any_injury_labels.values,
        y_pred=any_injury_predictions.values,
        sample_weight=solution['any_injury_weight'].values
    )

    label_group_losses.append(any_injury_loss)
    return np.mean(label_group_losses)


torch.manual_seed(0)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True

summaryWriter = SummaryWriter()

# Per-logit positive weights for the 14 outputs (order matches the label
# columns assembled in the training loop; last entry is any_injury).
pos_weight = torch.Tensor([1,2,1,6,1,2,4,1,2,4,1,2,4,6]).cuda()
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
BATCH_SIZE=64

# Dataset split: one (train, val) loader pair per cross-validation fold.
# (The original spelled out all five folds by hand; this loop is equivalent.)
# NOTE(review): the space in 'train .csv' looks suspicious — confirm the filename.
train_csv_path='/home/fd_chen/kaggle/rasac/data/train .csv'
NUM_FOLDS = 5
dataloaders = []
for fold in range(NUM_FOLDS):
    train_data, val_data = AbdominalData(train_csv_path, current_fold=fold).get_splits()
    dataloaders.append((
        DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True),
        DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=False),
    ))


# Model definition
device_id = [0, 1, 2]  # NOTE(review): assumes 3 visible GPUs — confirm for this machine
# Positional `pretrained=True`; newer torchvision deprecates this in favor of `weights=`.
model = torchvision.models.resnet18(True)
# 14 outputs: the 13 per-category label columns plus any_injury (dropped from
# predictions later before scoring).
model.fc = torch.nn.Linear(512, 14)
# Replace the stem conv so the network accepts the 4 stacked slices as channels.
model.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
model=model.cuda()
model = torch.nn.parallel.DataParallel(model, device_ids=device_id)

LR=0.0001
optimizer = torch.optim.SGD(model.parameters(), lr=LR)


# Training loop
train_losses = []
val_losses = []
LRS=[]

prev_val_best_loss = float('inf')
remove_path = ''  # previous best checkpoint; deleted when a better one is saved

# The original reused the name `epoch` for both the total and the loop index.
NUM_EPOCHS = 200

for epoch in tqdm(range(NUM_EPOCHS)):
    # Manual LR decay: x0.95 every 5 epochs once past epoch 120.
    if epoch>120 and epoch%5==0:
        for p in optimizer.param_groups:
                p['lr'] *= 0.95

    LRS.append(optimizer.state_dict()['param_groups'][0]['lr'])

    # Cycle through the 5 CV folds, one fold per epoch.
    train_dataloader, val_dataloader = dataloaders[epoch%5]
    train_loss = train(train_dataloader, model, criterion, optimizer)
    val_loss, val_pred, val_label = validate(val_dataloader, model, criterion)

    # metrics: drop the model's 14th output (any_injury) from predictions;
    # score() derives any_injury from the healthy columns itself.
    val_pred = np.vstack(val_pred)
    val_pred = val_pred[:, :-1]
    val_label = np.vstack(val_label)

    val_pred = pd.DataFrame(val_pred)
    val_pred.columns = ['bowel_healthy', 'bowel_injury', 'extravasation_healthy',
           'extravasation_injury', 'kidney_healthy', 'kidney_low', 'kidney_high',
           'liver_healthy', 'liver_low', 'liver_high', 'spleen_healthy',
           'spleen_low', 'spleen_high']
    val_pred['patient_id'] = range(len(val_pred))

    val_label = pd.DataFrame(val_label)
    val_label.columns = ['bowel_healthy', 'bowel_injury', 'extravasation_healthy',
           'extravasation_injury', 'kidney_healthy', 'kidney_low', 'kidney_high',
           'liver_healthy', 'liver_low', 'liver_high', 'spleen_healthy',
           'spleen_low', 'spleen_high', 'any_injury']
    val_label['patient_id'] = range(len(val_pred))

    # Competition sample weights. Binary targets: healthy=1, injured=2 (bowel)
    # or 6 (extravasation). (The original assigned extravasation_weight twice.)
    val_label['bowel_weight'] = val_label['bowel_healthy'].map({1:1}).fillna(2)
    val_label['extravasation_weight'] = val_label['extravasation_healthy'].map({1:1}).fillna(6)

    # Three-level organs: healthy=1, low=2, high=4 (factored loop; identical
    # to the original per-organ copies).
    for organ in ('kidney', 'liver', 'spleen'):
        organ_label = val_label[[f'{organ}_healthy', f'{organ}_low', f'{organ}_high']].values.argmax(1)
        val_label[f'{organ}_weight'] = pd.Series(organ_label).map({0: 1, 1: 2, 2: 4})
    val_label['any_injury_weight'] = val_label['any_injury'].map({0:1, 1:6})

    # Replace the raw BCE validation loss with the competition metric.
    val_loss = score(val_label, val_pred, 'patient_id')

    train_losses.append(train_loss)
    val_losses.append(val_loss)

    print(train_loss, val_loss)  # (the original printed this line twice)

    # Keep only the single best checkpoint on disk.
    if val_loss < prev_val_best_loss:
        os.makedirs('./checkpoints', exist_ok=True)
        try:
            os.remove(remove_path)
        except OSError:
            pass  # first improvement (empty path) or file already removed
        torch.save(model.state_dict(),
                   f'./checkpoints/{LR}_model_{epoch}.pth')
        remove_path = f'./checkpoints/{LR}_model_{epoch}.pth'
        prev_val_best_loss = val_loss

    summaryWriter.add_scalar("training_loss",train_loss, epoch )
    summaryWriter.add_scalar("val_loss",val_loss, epoch )
    summaryWriter.add_scalar("LRS",optimizer.state_dict()['param_groups'][0]['lr'], epoch )

summaryWriter.close()
scale_x = range(len(train_losses))
plt.plot(scale_x, train_losses, c='blue', marker='o',linestyle=':', label='train_losses')
plt.plot(scale_x, val_losses, c='green', marker='+',linestyle='--', label='val_losses')
plt.legend()
os.makedirs('./figs', exist_ok=True)  # savefig does not create directories
plt.savefig(f'./figs/{LR}_result.png')
