"""
Perform K-means clustering on the training data to extract per-fold prototypes.

Good reference for clustering
https://github.com/facebookresearch/faiss/wiki/FAQ#questions-about-training
"""

from __future__ import print_function

import argparse

import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader

# from src.utils.DEC import dec_cluster

from dataset_modules.dataset_generic import Generic_MIL_Dataset
# from file_utils import save_pkl
from utils.file_utils import save_pkl
from utils.proto_utils import cluster

import os

from utils.utils import get_split_loader


def main(args, dataset):
    """Cluster the training features of each cross-validation fold and save
    the resulting prototypes to disk.

    Args:
        args: parsed CLI namespace (fold range, clustering and I/O settings).
        dataset: dataset object whose ``return_splits`` yields the per-fold
            (train, val, test) splits.

    Side effects:
        Writes one ``prototypes_*.pkl`` file per fold under
        ``args.results_dir/<fold>/``.
    """
    # Create the results directory (including missing parents) if necessary.
    # os.mkdir would fail when intermediate directories do not exist.
    os.makedirs(args.results_dir, exist_ok=True)

    # -1 means "use the default bound": first fold (0) / last fold (args.k).
    start = 0 if args.k_start == -1 else args.k_start
    end = args.k if args.k_end == -1 else args.k_end

    for i in np.arange(start, end):
        seed_torch(args.seed)
        # Only the training split is clustered; val/test splits are unused here.
        train_dataset, _, _ = dataset.return_splits(
            from_id=False,
            csv_path='{}/splits_{}.csv'.format(args.split_dir, i))
        print(f"Training on {len(train_dataset)} samples")
        train_loader = get_split_loader(train_dataset, training=True,
                                        testing=args.testing,
                                        weighted=args.weighted_sample)

        _, weights = cluster(train_loader,
                             i,
                             n_proto=args.n_proto,
                             n_iter=args.n_iter,
                             n_init=args.n_init,
                             feature_dim=args.in_dim,
                             mode=args.mode,
                             n_proto_patches=args.n_proto_patches,
                             use_cuda=torch.cuda.is_available())

        fold_result_dir = os.path.join(args.results_dir, f'{i}')
        os.makedirs(fold_result_dir, exist_ok=True)

        filename = os.path.join(fold_result_dir,
                                f"prototypes_{args.mode}_num_{args.n_proto_patches}.pkl")
        save_pkl(filename, {'prototypes': weights})


# Generic training settings.
# NOTE: the -1 sentinel for --k_start/--k_end means "first fold" (0) and
# "last fold" (k) respectively — the original help strings had them swapped.
parser = argparse.ArgumentParser(description='Configurations for WSI Training')
parser.add_argument('--data_root_dir', type=str, default=None,
                    help='manually specify the data source')
parser.add_argument('--k', type=int, default=10, help='number of folds (default: 10)')
parser.add_argument('--k_start', type=int, default=-1, help='start fold (default: -1, first fold)')
parser.add_argument('--k_end', type=int, default=-1, help='end fold (default: -1, last fold)')
parser.add_argument('--seed', type=int, default=1,
                    help='random seed for reproducible experiment (default: 1)')
# model / loss fn args ###
parser.add_argument('--n_proto', type=int, help='Number of prototypes')
parser.add_argument('--n_proto_patches', type=int, default=10000,
                    help='Number of patches per prototype to use. Total patches = n_proto * n_proto_patches')
parser.add_argument('--n_init', type=int, default=5,
                    help='Number of different KMeans initialization (for FAISS)')
parser.add_argument('--n_iter', type=int, default=50,
                    help='Number of iterations for Kmeans clustering')
parser.add_argument('--in_dim', type=int)
parser.add_argument('--mode', type=str, choices=['kmeans', 'faiss'], default='kmeans')
parser.add_argument('--task', type=str, choices=['MSI_PAIP','MSI_TCGA','MSI_HCH','MSI_EXT_HCH','MSI_EXT_PAIP','camelyon','camelyon16','TCGA_NSCLC'])

# dataset / split args ###
parser.add_argument('--exp_code', type=str, help='experiment code for saving results')
parser.add_argument('--split_dir', type=str, default=None,
                    help='manually specify the set of splits to use, '
                    +'instead of infering from the task and label_frac argument (default: None)')
parser.add_argument('--results_dir', default='./results', help='results directory (default: ./results)')
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--testing', action='store_true', default=False, help='debugging tool')
parser.add_argument('--weighted_sample', action='store_true', default=False, help='enable weighted sampling')

# Parse command-line options once at import time; `args` is consumed by the
# module-level seeding call below and by the __main__ entry point.
args = parser.parse_args()

# Prefer GPU execution when CUDA is available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

def seed_torch(seed=7):
    """Seed every RNG (Python, NumPy, torch, CUDA) for reproducibility.

    Args:
        seed: integer seed applied to all generators (default: 7).
    """
    import random
    random.seed(seed)
    # Fix hashing so hash-based iteration orders are reproducible too.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Guard on CUDA availability directly instead of the module-level
    # `device` global; equivalent because `device` is cuda iff available,
    # and this removes a hidden dependency on module import order.
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # covers multi-GPU setups
    # Trade cuDNN autotuning speed for deterministic kernel selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

# Seed all RNGs once at import time so any module-level setup is reproducible
# (main() re-seeds per fold as well).
seed_torch(args.seed)
def load_data(args):
    """Build the MIL dataset for ``args.task`` and prepare result/split dirs.

    Side effects:
        Sets ``args.n_classes``; rewrites ``args.results_dir`` to include the
        experiment code; resolves ``args.split_dir``; creates the results
        directory on disk.

    Returns:
        (dataset, settings): the ``Generic_MIL_Dataset`` and a dict
        summarising the run configuration (printed for logging).

    Raises:
        NotImplementedError: for an unrecognised ``args.task``.
        AssertionError: when the resolved split directory does not exist.
    """
    settings = {'num_splits': args.k,
                'k_start': args.k_start,
                'k_end': args.k_end,
                'task': args.task,
                'results_dir': args.results_dir,
                'experiment': args.exp_code,
                'seed': args.seed,
                'n_proto': args.n_proto,
                'n_proto_patches': args.n_proto_patches,
                'mode': args.mode,
                }

    print('\nLoad Dataset')

    # Per-task dataset configuration, replacing the previous copy-paste
    # if/elif chain: task -> (n_classes, csv_path, feature_subdir,
    # print_info, label_dict). All datasets share shuffle=False,
    # patient_strat=False and ignore=[].
    task_configs = {
        'task_1_tumor_vs_normal': (
            2, 'dataset_csv/tumor_vs_normal_dummy_clean.csv',
            'tumor_vs_normal_resnet_features', True,
            {'normal_tissue': 0, 'tumor_tissue': 1}),
        'task_2_tumor_subtyping': (
            3, 'dataset_csv/tumor_subtyping_dummy_clean.csv',
            'tumor_subtyping_resnet_features', True,
            {'subtype_1': 0, 'subtype_2': 1, 'subtype_3': 2}),
        'MSI_PAIP': (
            2, '/mnt/sda1/dataset_csv/MSI_classification_PAIP.csv',
            'uni/extracted_mag20x_patch256_fp', False,
            {'MSS': 0, 'MSI-H': 1}),
        'MSI_TCGA': (
            2, '/mnt/sda1/dataset_csv/MSI_classification_TCGA_S1e9+.csv',
            'uni/extracted_mag20x_patch256_fp', False,
            {'MSS': 0, 'MSI-H': 1}),
        'MSI_HCH': (
            3, '/mnt/sda1/yxy_project/CLAM2024/dataset_csv/MSI_classification_HCH20221202.csv',
            'uni/extracted_mag20x_patch256_fp', False,
            {'MSS': 0, 'MSI-H': 1, 'Normal': 2}),
        'camelyon': (
            2, '/mnt/sda1/yxy_project/CLAM2024/dataset_csv/filtered_camelyon_data.csv',
            'extracted_mag20x_patch256_fp', False,
            {'normal_tissue': 0, 'tumor_tissue': 1}),
        'camelyon16': (
            2, '/mnt/sda1/yxy_project/CLAM2024/dataset_csv/camelyon16_add.csv',
            'features_uni', False,
            {'normal': 0, 'tumor': 1}),
        'TCGA_NSCLC': (
            2, '/mnt/sda2/WSI/TCGA-NSCLC/TCGA-NSCLC.csv',
            'uni_features', False,
            {'LUAD': 0, 'LUSC': 1}),
    }
    # NOTE(review): 'MSI_EXT_HCH' / 'MSI_EXT_PAIP' are accepted by the CLI
    # parser's choices but have no configuration here, so they raise
    # NotImplementedError — same behavior as the original if/elif chain.
    if args.task not in task_configs:
        raise NotImplementedError

    n_classes, csv_path, feature_subdir, print_info, label_dict = task_configs[args.task]
    args.n_classes = n_classes
    dataset = Generic_MIL_Dataset(csv_path=csv_path,
                                  data_dir=os.path.join(args.data_root_dir, feature_subdir),
                                  shuffle=False,
                                  seed=args.seed,
                                  print_info=print_info,
                                  label_dict=label_dict,
                                  patient_strat=False,
                                  ignore=[])

    if args.task == 'task_2_tumor_subtyping' and args.model_type in ['clam_sb', 'clam_mb']:
        # NOTE(review): args.model_type / args.subtyping are not defined by
        # this script's parser; this branch only works when ``args`` is
        # supplied by another caller — TODO confirm.
        assert args.subtyping

    # exist_ok=True already makes these safe; no isdir pre-check needed.
    os.makedirs(args.results_dir, exist_ok=True)

    # Nest results under "<exp_code>_c<n_proto>" to separate experiments.
    args.results_dir = os.path.join(args.results_dir, str(args.exp_code) + '_c{}'.format(args.n_proto))
    os.makedirs(args.results_dir, exist_ok=True)

    # Resolve the split directory: infer from the task when not given.
    if args.split_dir is None:
        args.split_dir = os.path.join('splits', args.task + '_c{}'.format(int(args.n_proto)))
    else:
        args.split_dir = os.path.join('splits', args.split_dir)

    print('split_dir: ', args.split_dir)
    assert os.path.isdir(args.split_dir)

    settings.update({'split_dir': args.split_dir})
    print("################# Settings ###################")
    for key, val in settings.items():
        print("{}:  {}".format(key, val))
    return dataset, settings


if __name__ == "__main__":
    dataset, settings = load_data(args)
    results = main(args, dataset)
    print("finished!")
    print("end script")