import glob
import tqdm
import os
import pydicom
import sklearn.metrics
import pandas.api.types

import pandas as pd
import numpy as np
from PIL import Image
import seaborn as sns

import matplotlib.pyplot as plt
from ipywidgets import interact
import matplotlib.animation as animation
from IPython.display import HTML


import torchvision
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from torch.utils.data.dataset import Dataset
from torch.utils.tensorboard import SummaryWriter

import warnings
warnings.filterwarnings("ignore")

def normalize_probabilities_to_one(df: pd.DataFrame, group_columns: list) -> pd.DataFrame:
    """Rescale the probabilities in ``group_columns`` so each row sums to one.

    Mutates ``df`` in place and returns it for convenience.

    Args:
        df: DataFrame with one probability column per class.
        group_columns: Column names belonging to a single label group.

    Returns:
        The same DataFrame with the group columns normalized row-wise.

    Raises:
        ParticipantVisibleError: if any row's group total is zero.
            NOTE(review): ``ParticipantVisibleError`` is not defined in this
            file — confirm it is in scope where this metric runs.
    """
    row_totals = df[group_columns].sum(axis=1)
    if row_totals.min() == 0:
        raise ParticipantVisibleError(
            'All rows must contain at least one non-zero prediction')
    # Vectorized row-wise division replaces the original per-column loop.
    df[group_columns] = df[group_columns].div(row_totals, axis=0)
    return df

# Model Architecture

class CNNModel(nn.Module):
    """EfficientNet-B0 backbone adapted to single-channel images.

    A 1->3 channel convolutional stem maps grayscale input into the
    three-channel space the pretrained backbone expects; a fresh linear
    head produces the 13 target logits.
    """

    def __init__(self):
        super().__init__()

        # Stem: lift 1-channel input to the 3 channels of the backbone.
        self.input = nn.Conv2d(1, 3, kernel_size=3)

        backbone = models.efficientnet_b0(weights='IMAGENET1K_V1')
        self.features = backbone.features
        self.avgpool = backbone.avgpool

        # 13 logits — trained on the cleaned data (translated from original note).
        self.output = nn.Linear(1280, 13)

    def forward(self, x):
        """Return raw (un-sigmoided) logits of shape (batch, 13)."""
        feats = self.features(self.input(x))
        pooled = torch.flatten(self.avgpool(feats), 1)
        return self.output(pooled)

class RSNADataset(Dataset):
    """Dataset over per-slice PNG paths; labels come from the module-level ``train_csv``.

    Each path is expected to look like ``.../<patient_id>/<series_id>/<slice>.png``,
    so the patient id is the third-from-last ``/``-separated component.
    """

    def __init__(self, img_path, transform=None):
        # img_path: list of PNG file paths; transform: optional image transform.
        self.img_path = img_path
        # Fixed: original used the redundant `transform if transform is not None else None`.
        self.transform = transform

    def __getitem__(self, index):
        """Return ``(image, label_tensor)`` for the slice at ``index``.

        NOTE(review): the ``return`` inside ``finally`` swallows any exception
        raised in the ``except`` branch too, yielding all-zero labels (and a
        possibly untransformed image) as a best-effort fallback — confirm
        this is intentional before tightening it.
        """
        pid = self.img_path[index].split('/')[-3]
        pid = int(pid)
        labels = np.zeros(13)
        img = Image.open(self.img_path[index])
        try:
            if self.transform is not None:
                img = self.transform(img)
            # Patients with several rows: .loc returns a frame, .values is 2-D.
            labels = train_csv.loc[pid].values[0][:13]
            labels = labels.astype(float)

        except Exception:
            # Single-row patients: .values is already 1-D.
            labels = train_csv.loc[pid].values[:13]
            labels = labels.astype(float)

        finally:
            return img, torch.from_numpy(labels)

    def __len__(self):
        return len(self.img_path)

def train(train_loader, model, criterion, optimizer):
    """Run one training epoch; return the mean per-batch loss.

    Relies on the module-level ``device``.

    Args:
        train_loader: DataLoader yielding (image_batch, target_batch).
        model: Network producing logits for ``criterion``.
        criterion: Loss taking (output, target).
        optimizer: Optimizer stepping ``model``'s parameters.

    Returns:
        Mean loss across all batches (float).
    """
    model.train()
    train_loss = 0.0
    # Loop variable renamed from `input`, which shadowed the builtin.
    for images, target in tqdm.tqdm(train_loader):
        images = images.to(device)
        target = target.to(device)

        # compute output
        output = model(images)
        loss = criterion(output, target)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()

    return train_loss / len(train_loader)


def validate(val_loader, model, criterion):
    """Evaluate the model on ``val_loader``.

    Relies on the module-level ``device``.

    Returns:
        Tuple of (mean per-batch loss as a float, list of per-batch sigmoid
        probability arrays, list of per-batch label arrays).
    """
    model.eval()
    val_loss = 0.0

    val_pred = []
    val_label = []
    with torch.no_grad():
        # Loop variable renamed from `input`, which shadowed the builtin.
        for images, target in tqdm.tqdm(val_loader):
            images = images.to(device)
            target = target.to(device)
            output = model(images)
            # .item() keeps the accumulator a plain float rather than a GPU tensor.
            val_loss += criterion(output, target).item()

            output = torch.sigmoid(output)
            val_pred.append(output.cpu().numpy())
            val_label.append(target.cpu().numpy())
    # Average over batches to match train(); the original divided a sum of
    # per-batch means by the dataset size, mixing the two scales. (The caller
    # overwrites this value with score() before using it.)
    return val_loss / len(val_loader), val_pred, val_label


def score(solution: pd.DataFrame, submission: pd.DataFrame, row_id_column_name: str) -> float:
    """Competition metric: mean of the log losses over the five label groups
    (bowel, extravasation, kidney, liver, spleen), weighted per sample.

    Args:
        solution: Ground-truth one-hot columns plus per-group ``*_weight``
            columns and the row-id column.
        submission: Predicted probability columns plus the row-id column.
        row_id_column_name: Name of the id column; removed before scoring.

    Returns:
        Mean log loss across the five label groups (float).

    Raises:
        ParticipantVisibleError: if a required submission column is missing,
            or a probability group sums to zero (raised inside
            normalize_probabilities_to_one). NOTE(review): this exception
            class is not defined in this file — confirm it is in scope.
    """
    # Drop the id column on copies: the original `del` statements removed the
    # column from the caller's DataFrames as a side effect.
    solution = solution.drop(columns=[row_id_column_name])
    submission = submission.drop(columns=[row_id_column_name])

    # Calculate the label group log losses
    binary_targets = ['bowel', 'extravasation']
    triple_level_targets = ['kidney', 'liver', 'spleen']
    all_target_categories = binary_targets + triple_level_targets

    label_group_losses = []
    for category in all_target_categories:
        if category in binary_targets:
            col_group = [f'{category}_healthy', f'{category}_injury']
        else:
            col_group = [f'{category}_healthy', f'{category}_low', f'{category}_high']

        solution = normalize_probabilities_to_one(solution, col_group)

        for col in col_group:
            if col not in submission.columns:
                raise ParticipantVisibleError(
                    f'Missing submission column {col}')
        submission = normalize_probabilities_to_one(submission, col_group)

        label_group_losses.append(
            sklearn.metrics.log_loss(
                y_true=solution[col_group].values,
                y_pred=submission[col_group].values,
                sample_weight=solution[f'{category}_weight'].values
            )
        )
    return np.mean(label_group_losses)


# --- Data & environment setup (runs at import time) ---
DATA_PATH = '/home/fd_chen/kaggle/rasac/dataset'  # TODO: make this path configurable

# Patient-level labels, indexed by patient_id for the .loc lookups in RSNADataset.
train_csv = pd.read_csv(f'{DATA_PATH}/train.csv')
train_csv = train_csv.set_index('patient_id')

train_series_meta = pd.read_csv(f'{DATA_PATH}/train_series_meta.csv')
test_series_meta = pd.read_csv(f'{DATA_PATH}/test_series_meta.csv')

image_level_labels = pd.read_csv(f'{DATA_PATH}/image_level_labels.csv')

train_dicom_tags = pd.read_parquet(f'{DATA_PATH}/train_dicom_tags.parquet')
test_dicom_tags = pd.read_parquet(f'{DATA_PATH}/test_dicom_tags.parquet')

# All slice PNGs laid out as train_images/<patient_id>/<series_id>/<slice>.png;
# shuffled so the loaders see a random slice order.
train_512_pngs = glob.glob(os.path.join(DATA_PATH,'train_images/*/*/*.png'))
np.random.shuffle(train_512_pngs)

torch.manual_seed(0)
# NOTE(review): benchmark=True with deterministic=False trades the
# reproducibility of the manual seed above for cudnn autotuning speed.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
device = torch.device("cuda")


# Define the preprocessing / augmentation pipeline
transform = transforms.Compose([
    transforms.Resize((512, 512)),  # Resize the image to 512x512 pixels
    transforms.RandomVerticalFlip(0.5),  # Random vertical flip with p=0.5
    transforms.RandomHorizontalFlip(0.5),  # Random horizontal flip with p=0.5
    transforms.ToTensor()  # Convert the image to a PyTorch tensor
])


pids = list(train_csv.index)

# Split by patient: the last 200 patients are held out for validation.
train_loader = torch.utils.data.DataLoader(
    RSNADataset([x for x in train_512_pngs if int(
        x.split('/')[-3]) in pids[:-200]], transform),
    batch_size=256, shuffle=True, num_workers=28, pin_memory=False
)

val_loader = torch.utils.data.DataLoader(
    RSNADataset([x for x in train_512_pngs if int(
        x.split('/')[-3]) in pids[-200:]], transform),
    batch_size=256, shuffle=True, num_workers=28, pin_memory=False
)

# Data-parallel training across three GPUs.
device_id = [0, 1, 2]
model = CNNModel().to(device)
model = torch.nn.parallel.DataParallel(model, device_ids=device_id)

# Per-class positive weights for BCE; injury/low/high classes are up-weighted.
pos_weight = torch.Tensor([1, 2, 1, 6, 1, 2, 4, 1, 2, 4, 1, 2, 4]).to(device)
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
LR=1e-4
optimizer = torch.optim.SGD(model.parameters(),lr=LR)

summaryWriter = SummaryWriter()

# Training-state trackers.
best_loss = 100
train_losses = []
val_losses = []
epochs = 200
LRS=[]

# --- Main training loop ---
for epoch in tqdm.tqdm(list(range(epochs))):
    # Learning-rate decay, adjusted manually: after epoch 100, multiply the
    # LR by 0.9 on every even epoch. (Translated from the original comment.)
    if epoch>100 and epoch%2==0:
        for p in optimizer.param_groups:
                p['lr'] *= 0.9
    LRS.append(optimizer.state_dict()['param_groups'][0]['lr'])


    train_loss = train(train_loader, model, criterion, optimizer)
    val_loss, val_pred, val_label = validate(val_loader, model, criterion)

    # Stack the per-batch arrays into (num_val_samples, 13) matrices.
    val_pred = np.vstack(val_pred)
    val_label = np.vstack(val_label)

    val_pred = pd.DataFrame(val_pred)
    
    val_pred.columns = ['bowel_healthy', 'bowel_injury', 'extravasation_healthy',
                        'extravasation_injury', 'kidney_healthy', 'kidney_low', 'kidney_high',
                        'liver_healthy', 'liver_low', 'liver_high', 'spleen_healthy',
                        'spleen_low', 'spleen_high']
    # NOTE(review): a synthetic row index stands in for patient_id; it only
    # serves as the row-id column that score() removes before scoring.
    val_pred['patient_id'] = range(len(val_pred))

    val_label = pd.DataFrame(val_label)
    val_label.columns = ['bowel_healthy', 'bowel_injury', 'extravasation_healthy',
                         'extravasation_injury', 'kidney_healthy', 'kidney_low', 'kidney_high',
                         'liver_healthy', 'liver_low', 'liver_high', 'spleen_healthy',
                         'spleen_low', 'spleen_high']
    val_label['patient_id'] = range(len(val_pred))

    # Per-sample metric weights: healthy rows get weight 1; injured rows get
    # 2 (bowel) / 6 (extravasation); organ low/high injuries get 2/4.
    val_label['bowel_weight'] = val_label['bowel_healthy'].map({1: 1}).fillna(2)
    val_label['extravasation_weight'] = val_label['extravasation_healthy'].map({1: 1}).fillna(6)

    # argmax over [healthy, low, high] picks the organ's injury level.
    kidney_label = val_label[['kidney_healthy','kidney_low', 'kidney_high']].values.argmax(1)
    kidney_label = pd.Series(kidney_label)
    val_label['kidney_weight'] = kidney_label.map({0: 1, 1: 2, 2: 4})

    liver_label = val_label[['liver_healthy','liver_low', 'liver_high']].values.argmax(1)
    liver_label = pd.Series(liver_label)
    val_label['liver_weight'] = liver_label.map({0: 1, 1: 2, 2: 4})

    spleen_label = val_label[['spleen_healthy','spleen_low', 'spleen_high']].values.argmax(1)
    spleen_label = pd.Series(spleen_label)
    val_label['spleen_weight'] = spleen_label.map({0: 1, 1: 2, 2: 4})
    
    
    # Replace the raw BCE validation loss with the competition metric.
    val_loss = score(val_label, val_pred, 'patient_id')
    train_losses.append(train_loss)
    val_losses.append(val_loss)

    print(train_loss, val_loss)
    # Checkpoint whenever the competition metric improves.
    if val_loss < best_loss:
        torch.save(model.state_dict(), f'rasac/checkpoints/model_{epoch}.pth')
        best_loss = val_loss

    # TensorBoard logging.
    summaryWriter.add_scalar("training_loss",train_loss, epoch )
    summaryWriter.add_scalar("val_loss",val_loss, epoch )
    summaryWriter.add_scalar("LRS",optimizer.state_dict()['param_groups'][0]['lr'], epoch )

# Plot the training and validation loss curves and save the figure.
epoch_axis = range(len(train_losses))
plt.plot(epoch_axis, train_losses, c='blue', marker='o', linestyle=':', label='train_losses')
plt.plot(epoch_axis, val_losses, c='green', marker='+', linestyle='--', label='val_losses')
plt.legend()
plt.savefig('figs/result.png')
