import torch
import torchvision
import numpy as np
import random
from torch import nn
from tqdm import trange, tqdm
from warmup_scheduler import GradualWarmupScheduler
import argparse
from sklearn.metrics import f1_score,average_precision_score,recall_score
import wandb
import copy

from preprocess.kfolder import kfolder_resnet_single,kfolder_resnet_double
from preprocess.traindataset import TrainDataset
from preprocess.testdataset import TestDataset
from preprocess import traindataset_double,traindataset_3d,testdataset_double,testdataset_3d
from model.get_model import get_single_model,get_double_model,get_3d_model
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score
from pylab import mpl
import matplotlib 

from sklearn.metrics import roc_auc_score,accuracy_score,multilabel_confusion_matrix
import os

def seed_everything(seed):
    """Make every random-number source deterministic for the given seed.

    Seeds Python's `random`, NumPy, and torch (CPU plus every CUDA device),
    records the hash seed in the environment, and forces deterministic
    cuDNN kernels for reproducible training runs.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    # The individual RNGs are independent, so seeding order is irrelevant.
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.deterministic = True


import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score
import pandas as pd


def save_results_to_csv(true_labels, predicted_probabilities, file_path):
    """
    Save the true labels and predicted probabilities side by side in a CSV.

    Parameters:
    - true_labels: 2-D array of true labels, one column per class.
    - predicted_probabilities: 2-D array of predicted probabilities,
      one column per class.
    - file_path: Destination path for the CSV file.

    Columns are named ``TrueLabel_<i>`` and ``PredictedProb_<i>``; the
    default integer row index is written to the file as well.
    """
    labels_part = pd.DataFrame(
        true_labels,
        columns=[f'TrueLabel_{col}' for col in range(true_labels.shape[1])],
    )
    probs_part = pd.DataFrame(
        predicted_probabilities,
        columns=[f'PredictedProb_{col}' for col in range(predicted_probabilities.shape[1])],
    )
    # Labels first, probabilities second — column-wise concatenation.
    pd.concat([labels_part, probs_part], axis=1).to_csv(file_path)




def evaluate_classification(labels, predictions, model_name, threshold=0.5):
    """
    Evaluate multi-label classification metrics and save a confusion-matrix
    plot.

    The 5-dimensional multi-hot vectors are collapsed into 8 mutually
    exclusive composite classes (blank / no expression / single or paired
    compartments) and single-label metrics are computed on those.

    Parameters:
    labels (np.array): True labels, shape (N, 5), multi-hot indicators.
    predictions (np.array): Predicted probabilities, shape (N, 5).
    model_name (str): Used as the plot title and for "<model_name>.png".
    threshold (float): Binarization cutoff, default 0.5. Previously the
        function read a module-level THRESHOLD global that only exists when
        this file runs as a script, which raised NameError on import; the
        default keeps existing call sites behaving identically.

    Returns:
    tuple: (accuracy, recall, specificity, precision, f1) — recall,
    precision and f1 are macro-averaged over the 8 composite classes.
    """

    # Map each recognized 5-bit multi-hot pattern to a composite class id.
    label_mapping = {
        '10000': 0,  # blank
        '01000': 1,  # no expression
        '00100': 2,  # nucleus only
        '00010': 3,  # membrane only
        '00001': 4,  # cytoplasm only
        '00110': 5,  # nucleus + membrane co-expression
        '00101': 6,  # nucleus + cytoplasm co-expression
        '00011': 7,  # membrane + cytoplasm co-expression
    }

    def convert_prob_to_labels(probabilities, threshold):
        """Binarize each row at `threshold` and map it to a composite class.

        Rows whose binarized pattern is not a recognized combination fall
        back to the single channel with the highest raw probability.
        """
        binary_labels = (probabilities >= threshold).astype(int)
        adjusted_labels = []

        for i, label in enumerate(binary_labels):
            label_str = ''.join(map(str, label))
            if label_str in label_mapping:
                adjusted_labels.append(label_mapping[label_str])
            else:
                # Unrecognized pattern: use the argmax channel, which always
                # produces a valid single-channel key of the mapping.
                max_index = np.argmax(probabilities[i])
                fallback_label_str = '0' * max_index + '1' + '0' * (4 - max_index)
                adjusted_labels.append(label_mapping[fallback_label_str])

        return np.array(adjusted_labels)

    def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues):
        """Render `cm` as an annotated heatmap and save "<model_name>.png"."""
        plt.figure(figsize=(10, 9))
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)

        # Pick the annotation color for contrast against the cell shade.
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j, i, f'{cm[i, j]:.0f}', horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

        plt.tight_layout()
        plt.ylabel('真实值')
        plt.xlabel('预测值')
        plt.savefig(f"{model_name}.png")
        plt.close('all')

    # Collapse ground truth and predictions to composite class ids.
    true_labels_single = convert_prob_to_labels(labels, threshold)
    preds_labels_single = convert_prob_to_labels(predictions, threshold)
    class_names = ['空白', '无表达', '细胞核', '细胞膜', 
                   '细胞浆', '细胞核+细胞膜', '细胞核+细胞浆', '细胞膜+细胞浆']
    # Fix the label order so the matrix is always 8x8, even when some
    # composite classes are absent from this batch.
    cm = confusion_matrix(true_labels_single, preds_labels_single,
                          labels=list(range(len(class_names))))

    # model_name doubles as the plot title.
    plot_confusion_matrix(cm, class_names, model_name)

    # Standard single-label metrics on the collapsed classes.
    accuracy = accuracy_score(true_labels_single, preds_labels_single)
    recall = recall_score(true_labels_single, preds_labels_single, average='macro')
    precision = precision_score(true_labels_single, preds_labels_single, average='macro')
    f1 = f1_score(true_labels_single, preds_labels_single, average='macro')

    # Specificity computed manually, treating class 0 ("blank") as the
    # negative class and all other composite classes as positive.
    tn = cm[0, 0]
    fp = cm[0, 1:].sum()
    fn = cm[1:, 0].sum()
    tp = cm[1:, 1:].sum()
    specificity = tn / (tn + fp) if (tn + fp) > 0 else 0

    return accuracy, recall, specificity, precision, f1




def CrossEntropyLoss_label_smooth(outputs, targets,
                                  num_classes=2, epsilon=0.1):
    """Cross-entropy loss with label smoothing.

    Each target class receives probability ``1 - epsilon`` and the
    remaining ``epsilon`` is spread evenly over the other classes; the
    loss is the batch-mean negative log-likelihood of the logits under
    that smoothed distribution. The computation is performed on CPU
    regardless of the inputs' device (the returned loss lives on CPU).
    """
    batch_size = targets.size(0)
    off_value = epsilon / (num_classes - 1)
    # Dense smoothed target distribution, built on CPU.
    soft_targets = torch.full(size=(batch_size, num_classes),
                              fill_value=off_value)
    index = targets.data.cpu().unsqueeze(1)
    soft_targets.scatter_(1, index, 1 - epsilon)
    log_probs = nn.functional.log_softmax(outputs, dim=1).cpu()
    return -(soft_targets * log_probs).sum() / batch_size


if __name__ == '__main__':

    # ------------------------------------------------------------------
    # Command-line configuration for k-fold multi-label training.
    # ------------------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--modelname_list', default = ['resnet50'],nargs='+')
    parser.add_argument('--random_seed', type=int, default=2022)
    # NOTE: store_false means warmup defaults to True; passing --warmup turns it OFF.
    parser.add_argument('--warmup',action="store_false")
    parser.add_argument('--LabelSmooth',action="store_true")
    parser.add_argument('--need_val',action="store_true")
    parser.add_argument('--schedulerType',type=str,default="cosine")
    parser.add_argument('--epoch_num',type=int, default=50)
    parser.add_argument('--lr',type=float, default=1e-2)
    parser.add_argument('--folder_num',type=int, default=1)
    parser.add_argument('--batchsize',type=int, default=4)
    parser.add_argument('--warmup_ratio',type=int, default=100)
    parser.add_argument('--model_type',type=str, default="single") # single ,double or 3d
    parser.add_argument('--double_model_ratio',type=float, default=0.7)
    parser.add_argument('--warmup_epoch',type=int, default=5)
    parser.add_argument('--loss_fuction',type=str, default="BCE") # CE or BCE
    parser.add_argument('--num_classes',type=int, default=5)
    parser.add_argument('--multi_class',type=str, default="ovr")
    parser.add_argument('--roc_average',type=str, default="macro")

    args = parser.parse_args()

    # Unpack the parsed arguments into local names used below.
    model_type = args.model_type
    modelname_list = args.modelname_list
    loss_fuction = args.loss_fuction
    seed = args.random_seed
    warmup = args.warmup
    epoch_num = args.epoch_num
    ifLabelSmooth = args.LabelSmooth
    schedulerType = args.schedulerType
    num_folder = args.folder_num
    batch_size = args.batchsize
    warmup_ratio = args.warmup_ratio
    num_classes = args.num_classes
    multi_class = args.multi_class
    roc_average = args.roc_average
    need_val = args.need_val
    if warmup:
        # Start from a reduced LR; GradualWarmupScheduler multiplies it
        # back up to args.lr over warmup_epoch epochs.
        lr = args.lr/warmup_ratio
        warmup_epoch = args.warmup_epoch
    else:
        lr = args.lr
        warmup_ratio = 0
        warmup_epoch = 0

    seed_everything(seed)
    # Probability cutoff used to binarize sigmoid outputs below.
    THRESHOLD = 0.5


    # One full k-fold training run per requested architecture.
    for modelname in modelname_list:
        # modelname_list = ['resnet18','resnet34','resnet50','resnext50_32x4d','resnext101_32x8d']
        
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        # Per-model wandb run; hyperparameters are recorded in the config.
        wandb.init(project=f"细胞染色质控多标签分类", entity="dogewandb")
        wandb.run.name = modelname
        wandb.config.update ( {
        "model_type":model_type,
        "modelname":modelname,
        "need_val" :need_val,
        "num_folder" : num_folder,
        "epochs": epoch_num,
        "batch_size": batch_size,
        "learning_rate": lr,
        "loss_fuction" : loss_fuction,
        "warmup" :warmup,
        "warmup_ratio" : warmup_ratio,
        "warmup_epoch" : warmup_epoch,
        "LabelSmmoth" :ifLabelSmooth,
        "scheduler" : schedulerType,
        "num_classes" : num_classes,
        "roc_multi_class" : multi_class,
        "roc_average" :roc_average
        })

        # ------------------------------------------------------------------
        # Build the k-fold image path splits for the chosen model type.
        # ------------------------------------------------------------------
        if model_type == "single":
            # train_image_path_lists, test_image_path_lists = kfolder_resnet_single(num_folder=num_folder,image_dir='../data/dataset_npy_single',seed=seed)
            if need_val:
                train_image_path_lists,val_image_path_lists, test_image_path_lists = kfolder_resnet_single(num_folder=num_folder,image_dir='../1.数据集预处理/dataset',seed=seed,need_val=need_val,shuffle=True)
            else:
                # train_image_path_lists,test_image_path_lists =  kfolder_resnet_single(num_folder=num_folder,image_dir='../data/dataset_resnet_single',seed=seed,need_val=need_val)
                train_image_path_lists,test_image_path_lists =  kfolder_resnet_single(num_folder=num_folder,image_dir='../1.数据集预处理/dataset',seed=seed,need_val=need_val,shuffle=True)
            # train_image_path_lists, test_image_path_lists = kfolder_resnet_single(num_folder=num_folder,image_dir='../data/dataset_origin_without_roi',seed=seed)
        elif model_type == "double":
            wandb.config.update ( {"double_model_ratio" : args.double_model_ratio})
            train_image_path_lists, test_image_path_lists = kfolder_resnet_double(num_folder=num_folder,image_dir='../data/dataset_resnet_double',seed=seed,need_val=need_val)
        elif model_type == "3d":
            # NOTE(review): the 3d branch reuses the "double" folder helper and
            # directory — confirm this is intentional.
            train_image_path_lists, test_image_path_lists = kfolder_resnet_double(num_folder=num_folder,image_dir='../data/dataset_resnet_double',seed=seed,need_val=need_val)
        elif model_type == "origin":
            if need_val:
                    train_image_path_lists,val_image_path_lists, test_image_path_lists = kfolder_resnet_single(num_folder=num_folder,image_dir='../data/dataset_origin_without_roi',seed=seed,need_val=need_val)
            else:
                train_image_path_lists,test_image_path_lists =  kfolder_resnet_single(num_folder=num_folder,image_dir='../data/dataset_origin_without_roi',seed=seed,need_val=need_val)


        # Accumulators across folds (several are only used by the
        # commented-out wandb table/ROC logging at the bottom).
        val_AUC_all = 0
        val_preds_allfolder = []
        val_labels_allfolder = []
        val_preds_allfolder_wandb = []
        test_AUC_all = 0
        test_preds_allfolder = []
        test_labels_allfolder = []
        test_preds_allfolder_wandb = []

        for folder_index in range(num_folder):

            # --------------------------------------------------------------
            # Model and dataset construction for this fold.
            # --------------------------------------------------------------
            model = None
            if model_type =="single" or model_type == "origin":
                model = get_single_model(modelname,num_classes=num_classes)
                traindataset = TrainDataset(train_image_path_lists[folder_index])
                testdataset = TestDataset(test_image_path_lists[folder_index])
                if need_val:
                    valdataset = TestDataset(val_image_path_lists[folder_index])
            elif model_type == "double":
                model = get_double_model(modelname,args.double_model_ratio,num_classes=num_classes)
                traindataset = traindataset_double.TrainDataset(train_image_path_lists[folder_index])
                testdataset = testdataset_double.TestDataset(test_image_path_lists[folder_index])
            elif model_type == "3d":
                model = get_3d_model(modelname,num_classes=num_classes)
                traindataset = traindataset_3d.TrainDataset(train_image_path_lists[folder_index])
                testdataset = testdataset_3d.TestDataset(test_image_path_lists[folder_index])
            model = model.to(device)
            model.train(mode=True)

            # Dataloaders; only training data is shuffled.
            trainloader = torch.utils.data.DataLoader(traindataset, batch_size=batch_size, shuffle=True,num_workers=12)
            testloader = torch.utils.data.DataLoader(testdataset, batch_size=batch_size, shuffle=False,num_workers=12)
            if need_val:
                valloader = torch.utils.data.DataLoader(valdataset, batch_size=batch_size, shuffle=False,num_workers=12)

            # Loss, optimizer and LR schedule. BCEWithLogitsLoss implies the
            # datasets yield multi-hot float label vectors — confirm with
            # TrainDataset/TestDataset.
            if loss_fuction =='CE':
                criterion = nn.CrossEntropyLoss().to(device)
            elif loss_fuction =='BCE':
                criterion = nn.BCEWithLogitsLoss().to(device)
            optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
            if schedulerType == 'step':
                scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.8)
            elif schedulerType == 'cosine':
                # Cosine anneal only over the post-warmup epochs.
                scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epoch_num-warmup_epoch, eta_min=0, last_epoch=-1)
            if warmup:
                # Wrap the base scheduler: LR ramps up by `warmup_ratio` over
                # `warmup_epoch` epochs, then hands off to the base scheduler.
                scheduler = GradualWarmupScheduler(optimizer, multiplier=warmup_ratio, total_epoch=warmup_epoch, after_scheduler=scheduler)


            # --------------------------------------------------------------
            # Epoch loop: train, then evaluate on the test split.
            # --------------------------------------------------------------
            for epoch in trange(epoch_num):

                correct = 0
                total = 0
                correct_val = 0
                total_val = 0
                correct_test = 0
                total_test = 0

                # ---- training pass ----
                model.train()
                for i, data, in (enumerate(trainloader, 0)):
                    if model_type !='double'  :
                        img,label=data
                        # get the input
                        inputs = img.to(device)
                        labels = label.to(device).float()
                        # zeros the paramster gradients
                        optimizer.zero_grad()
                        # forward + backward + optimizer
                        outputs = model(inputs)
                    else:
                        # Double-branch model consumes an extra mask tensor.
                        img,mask,label=data
                        # get the input
                        img = img.to(device)
                        mask = mask.to(device)
                        labels = label.to(device).float()
                        # zeros the paramster gradients
                        optimizer.zero_grad()
                        # forward + backward + optimizer
                        outputs = model(img,mask)
                    
                    if ifLabelSmooth:
                        # NOTE(review): the smoothing loss scatters on integer
                        # class indices, while `labels` here is a float
                        # multi-hot vector — verify this path is exercised.
                        loss = CrossEntropyLoss_label_smooth(outputs, labels, num_classes=num_classes,epsilon=0.1)
                    elif loss_fuction=='BCE':
                        # criterion_labels = nn.functional.one_hot(labels,num_classes=num_classes).float()
                        loss = criterion(outputs, labels)
                    else:
                        loss = criterion(outputs, labels)
                    # backward loss 
                    loss.backward()  
                    optimizer.step()  

                    # Exact-match accuracy: a sample counts as correct only if
                    # every one of its binarized outputs matches the label.
                    prediction = outputs > THRESHOLD
                
                    correct += (prediction == labels.byte()).all(1).sum().float()
                    # print (prediction ,labels.byte(),(prediction == labels.byte()).all(1).sum().float())
                    total += len(labels)
                scheduler.step()
                train_acc = (correct / total).cpu().detach().data.numpy()
                print('train_acc:',train_acc)
                # Train log; `loss` here is the last batch's loss tensor, not
                # an epoch average.
                wandb.log({
                f"folder{folder_index}/LearningRate" :optimizer.param_groups[0]['lr'],
                f"folder{folder_index}/trainloss": loss,
                f"folder{folder_index}/trainAccuracy":train_acc},step=epoch+folder_index*epoch_num)
                
                # ---- evaluation (condition is always true: every epoch) ----
                if (epoch+1)%1==0:

                    model.eval()
                    # test
                    with torch.no_grad():
                        prob_all = []
                        label_all = []
                        test_list = []
                        prob_all_wandb = []
                        for index, data in (enumerate(testloader)):
                            # get pred
                            if model_type !='double'  :
                                test_img,test_label = data
                                test_inputs = test_img.to(device)
                                test_labels = test_label.to(device).float()
                                test_outputs = model(test_inputs)
                            else:
                                test_img,test_mask,test_label = data
                                test_img = test_img.to(device)
                                test_mask = test_mask.to(device)
                                test_labels = test_label.to(device).float()
                                test_outputs = model(test_img,test_mask)
                            # save results
                            # if loss_fuction=='BCE':
                            #     test_labels = nn.functional.one_hot(test_labels,num_classes=2).float()
                            test_loss = criterion(test_outputs, test_labels)
                            # test_outputs = nn.functional.softmax(test_outputs, dim=1)
                            # Multi-class: keep all logits; binary: keep only
                            # the positive-class column.
                            if num_classes >2:
                                prob_all.extend(test_outputs[:,].cpu().numpy())
                            else:
                                prob_all.extend(test_outputs[:,1].cpu().numpy())


                            prob_all_wandb.extend(test_outputs.cpu().numpy())
                            label_all.extend(test_label.cpu().numpy())
                            
                            # val_restult_np = (prediction_val == val_labels).cpu().detach().data.numpy()
                            # val_list+= np.argwhere(val_restult_np==False)[:,0].tolist()


                        # Collapse multi-hot outputs to composite classes and
                        # compute accuracy/sensitivity/specificity/precision/F1.
                        acc , sen ,spec ,pre , f1=  evaluate_classification(np.array(label_all), np.array(prob_all_wandb),model_name=modelname)
                        save_results_to_csv(np.array(label_all), np.array(prob_all_wandb),f'{modelname}.csv')
                        # test log
                        wandb.log({
                        # f"folder{folder_index}/testloss": test_loss,
                        # f"folder{folder_index}/testAUC":test_auc,
                        f"folder{folder_index}/testAccuracy":acc,
                        },step=epoch+folder_index*epoch_num)

                        # Checkpoint after every epoch (overwrites per fold).
                        torch.save(model.state_dict(), f"./pth/{modelname}_{folder_index}.pth")
                        if (epoch+1) ==epoch_num :
                            # wrong_list = [test_image_path_lists[folder_index][i] for i in val_list]
                            # print(f'{test_acc}',f'{test_auc}',f'{correct_val}',f'{total_val}',f'{folder_index} {modelname} {wrong_list}')

                            # Save the final model weights. NOTE(review): the
                            # model was already saved just above, so this
                            # second save is redundant but harmless.
                            torch.save(model.state_dict(), f"./pth/{modelname}_{folder_index}.pth")
                            test_preds_allfolder += prob_all
                            test_preds_allfolder_wandb += prob_all_wandb
                            test_labels_allfolder += label_all
                            # test_AUC_all += test_auc

                            # wandb.run.summary["average_test_AUC"] = test_AUC_all/(folder_index+1)

                            # Final-epoch metrics published to the run summary.
                            wandb.run.summary["test_accuracy"] = acc
                            # wandb.run.summary["AllData_test_AUC"] = 
                            wandb.run.summary["sensitivity"]=  sen
                            wandb.run.summary["specificity"] = spec 
                            wandb.run.summary["F1"] = f1



                            
                            # wandb_table_data =  np.concatenate((np.array(test_preds_allfolder_wandb),np.array(test_labels_allfolder )[:, np.newaxis]), axis=1)
                            # wandb_table = wandb.Table(data=wandb_table_data,columns=["空白","未染色","细胞核", "细胞膜","细胞浆","label"])
                            # wandb.log({
                            #     "results": wandb_table,
                            #     "roc_curve" : wandb.plot.roc_curve( test_labels_allfolder,test_preds_allfolder_wandb, labels=["空白","未染色","细胞核", "细胞膜","细胞浆",], classes_to_plot=None),#"空白","未染色","细胞核", "细胞膜","细胞浆",
                            #     "conf_mat" : wandb.plot.confusion_matrix(probs=np.array(test_preds_allfolder_wandb),y_true=np.array(test_labels_allfolder),  class_names=["空白","未染色","细胞核", "细胞膜","细胞浆",])
                            #     },step=epoch+folder_index*epoch_num)

