import matplotlib.pyplot as plt
import random,os,torch
import numpy as np
import copy
from sklearn.metrics import multilabel_confusion_matrix
from pathlib import Path
from os.path import splitext
import torch
from torch.fft import fft2, ifft2

def undersample(image, mask, norm='ortho'):
    """Simulate k-space undersampling of `image` with a sampling `mask`.

    Args:
        image: input image tensor.
        mask: sampling mask, same shape as `image` (element-wise applied
            in k-space).
        norm: FFT normalization mode passed to torch.fft.

    Returns:
        Tuple `(x_und, k_und, k)`: the undersampled image, the masked
        k-space, and the fully sampled k-space.
    """
    assert image.shape == mask.shape, (image.shape, mask.shape)

    kspace_full = fft2(image, norm=norm)
    kspace_masked = mask * kspace_full
    image_und = ifft2(kspace_masked, norm=norm)

    return image_und, kspace_masked, kspace_full

def plot_img_and_mask(img, mask):
    """Show the input image next to one subplot per predicted mask class.

    Args:
        img: input image, displayable by `imshow`.
        mask: prediction mask; a 3-D array is treated as one 2-D mask per
            class along axis 0, anything 2-D or lower as a single mask.
    """
    n_classes = mask.shape[0] if len(mask.shape) > 2 else 1
    fig, axes = plt.subplots(1, n_classes + 1)
    axes[0].set_title('Input image')
    axes[0].imshow(img)
    if n_classes == 1:
        axes[1].set_title('Output mask')
        axes[1].imshow(mask)
    else:
        for cls, axis in enumerate(axes[1:]):
            axis.set_title(f'Output mask (class {cls + 1})')
            axis.imshow(mask[cls, :, :])
    # NOTE: only clears the ticks of the last-active axes, as the original did.
    plt.xticks([])
    plt.yticks([])
    plt.show()

def seed_everything(seed):
    """Seed every RNG source (Python, hash, NumPy, torch CPU/CUDA) for
    reproducible runs, and force deterministic cuDNN kernels.

    Args:
        seed: integer seed applied to all generators.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # CUDA seeding is a no-op on CPU-only installs, so always safe to call.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True

def specificityCalc(Predictions, Labels):
    """Macro-averaged specificity (true-negative rate) over all classes.

    Args:
        Predictions: predicted labels, in any form accepted by
            sklearn.metrics.multilabel_confusion_matrix.
        Labels: ground-truth labels of matching shape.

    Returns:
        float: mean over classes of TN / (TN + FP + 1e-6).
    """
    # One 2x2 matrix per class, laid out [[TN, FP], [FN, TP]].
    # FIX: `samplewise` is documented as a bool; pass False instead of None
    # (None only worked because it is falsy). Unused TP/FN extraction removed.
    MCM = multilabel_confusion_matrix(Labels, Predictions,
                                      sample_weight=None,
                                      labels=None, samplewise=False)
    tn_sum = MCM[:, 0, 0]
    fp_sum = MCM[:, 0, 1]

    # Epsilon keeps the division finite for a class with no negative samples.
    Condition_negative = tn_sum + fp_sum + 1e-6

    Specificity = tn_sum / Condition_negative
    macro_specificity = np.average(Specificity, weights=None)

    return macro_specificity


def kfolder(num_folder=10,image_dir='data/imgs_3c',seed=2022,need_val=False, shuffle = False):
    """Build stratified k-fold train/(val)/test filename splits.

    Filenames in `image_dir` are grouped by class prefix ("HCM", "HTN",
    "NOR") so every fold keeps the class balance; each fold uses one slice
    per class as test and the rest as train.

    Args:
        num_folder: number of folds k.
        image_dir: directory containing images whose names start with a
            class label.
        seed: RNG seed for the optional shuffles (applied per class and
            before the train/val split).
        need_val: if True, carve out ~1/4 of each fold's training names as
            a validation list and return three lists.
        shuffle: if True, shuffle each class's filenames before splitting.

    Returns:
        (train_lists, test_lists) when `need_val` is False, otherwise
        (train_lists, val_lists, test_lists); element i holds the stem
        filenames (no directory, no extension) of fold i.
    """
    labels = ["HCM","HTN","NOR"]
    image_path = Path(image_dir)

    # Collect sorted filename stems per class.
    image_path_dict = {}
    for label_dir_name in labels:
        stems = [splitext(os.path.split(str(path))[-1])[0]
                 for path in image_path.glob(label_dir_name + '*')]
        stems.sort()
        image_path_dict[label_dir_name] = stems

    if shuffle:
        for label in image_path_dict.keys():
            stems = list(image_path_dict[label])
            random.seed(seed)  # reseed per class so runs are reproducible
            random.shuffle(stems)
            image_path_dict[label] = stems

    # Precompute per-class fold boundaries once (the original recomputed
    # them inside the triple-nested loop). The first `len % k` folds get
    # one extra sample each.
    # BUG FIX: fold sizes were built with `range(10)` instead of
    # `range(num_folder)`, silently producing empty folds for num_folder > 10.
    fold_bounds = {}
    for label, stems in image_path_dict.items():
        base, extra = divmod(len(stems), num_folder)
        sizes = [base + 1 if i < extra else base for i in range(num_folder)]
        bounds = [0]
        for size in sizes:
            bounds.append(bounds[-1] + size)
        fold_bounds[label] = bounds

    train_image_path_lists = []
    val_image_path_lists = []
    test_image_path_lists = []

    for folder_index in range(num_folder):
        train_image_path_list = []
        test_image_path_list = []

        for split_index in range(num_folder):
            for label, stems in image_path_dict.items():
                bounds = fold_bounds[label]
                split_list = stems[bounds[split_index]:bounds[split_index + 1]]
                if split_index == folder_index:
                    test_image_path_list += split_list
                else:
                    train_image_path_list += split_list

        if not need_val:
            train_image_path_lists.append(train_image_path_list)
            test_image_path_lists.append(test_image_path_list)
        else:
            len_train = len(train_image_path_list)
            random.seed(seed)
            random.shuffle(train_image_path_list)
            cut = len_train - len_train // 4
            train_image_path_lists.append(train_image_path_list[:cut])
            # BUG FIX: the original sliced [cut+1 : len_train-1], dropping
            # the first and last validation samples (an empty val set for
            # small folds). Keep the whole remainder as validation.
            val_image_path_lists.append(train_image_path_list[cut:])
            test_image_path_lists.append(test_image_path_list)

    if not need_val:
        return train_image_path_lists,test_image_path_lists
    else:
        return train_image_path_lists,val_image_path_lists,test_image_path_lists

# print(kfolder())

def split_dataset(image_dir='data/t1',percentage = 0.8):
    """Split the sorted filename stems of `image_dir` into train and test.

    Indices i with i < n * percentage go to the training list, the rest to
    the test list (so roughly the first `percentage` fraction trains).

    Args:
        image_dir: directory whose files are listed and split.
        percentage: fraction of files assigned to training.

    Returns:
        (train_list, test_list): filename stems (no directory, no
        extension) in sorted order.
    """
    stems = [splitext(os.path.split(str(path))[-1])[0]
             for path in Path(image_dir).glob('*')]
    stems.sort()

    # Same float threshold as the original per-item counter comparison.
    cutoff = len(stems) * percentage
    train_image_path_list = [s for i, s in enumerate(stems) if i < cutoff]
    test_image_path_list = [s for i, s in enumerate(stems) if i >= cutoff]

    return train_image_path_list,test_image_path_list

# print (split_dataset())