import os
from itertools import tee
import numpy as np
import sklearn.metrics as skl_metrics
from dataset import map, repeat, concatenate
from processor2 import random_crop, middle_crop, vertical_rotation_and_flip
from easy_io import read_pkl_file
from utils import groupby, read_from_h5_file, normalization, append_axis, astype
# import global_config


# Intensity clipping bounds passed to `normalization` in `preprocessing`.
# Presumably Hounsfield units for CT volumes (air ~ -1000 HU, dense bone
# ~ +800 HU) — TODO confirm against the volume data.
VMIN = -1024
VMAX = 800


def get_candidates(candidate_file):
    """Load and return the candidate list pickled at `candidate_file`."""
    return read_pkl_file(candidate_file)


def divide_candidates(candidates, valid_folds):
    """Split candidates into (train, valid) lists by fold membership.

    A candidate whose 'fold' is in `valid_folds` goes to the validation
    split; all others go to training.
    """
    by_validity = groupby(candidates, lambda c: c['fold'] in valid_folds)
    return by_validity[False], by_validity[True]


def label_candidates(candidates):
    """Partition candidates by 'polarity'.

    Returns (positive, middle, negative) candidate lists, corresponding to
    polarity values 1, 0 and -1 respectively.
    """
    by_polarity = groupby(candidates, lambda c: c['polarity'])
    return by_polarity[1], by_polarity[0], by_polarity[-1]


def candidates_to_dataset(candidates, candidate_vol_file):
    """Build a dataset that reads each candidate's volume from an h5 file.

    NOTE: `map` here is the project-level dataset map imported at the top
    of this file, not the Python builtin.
    """
    reader = read_from_h5_file(candidate_vol_file, with_cache=False)
    paths = [candidate['path'] for candidate in candidates]
    return map(reader, paths)


def balance_datasets(dataset1, dataset2):
    """Repeat the shorter dataset so the two have roughly equal length.

    The shorter dataset is repeated round(longer/shorter) times; the longer
    one passes through unchanged. When lengths are equal, dataset1 is
    repeated with factor 1 (a no-op).
    """
    n1, n2 = len(dataset1), len(dataset2)
    if n1 > n2:
        return dataset1, repeat(dataset2, round(n1 / n2))
    return repeat(dataset1, round(n2 / n1)), dataset2


def preprocessing(dataset, **kwargs):
    """Normalize to the [vmin, vmax] window, add a channel axis, cast to float32.

    Requires kwargs 'vmin' and 'vmax' (KeyError if missing). `map` is the
    project-level dataset map, not the builtin.
    """
    steps = (
        normalization(vmin=kwargs['vmin'], vmax=kwargs['vmax']),
        append_axis(),
        astype('float32'),
    )
    for step in steps:
        dataset = map(step, dataset)
    return dataset


def dataset_to_generator(dataset, *more, shuffle, seed=None):
    """Turn equal-length indexable dataset(s) into endless sample generators.

    Cycles over the samples forever; when `shuffle` is true a seedable RNG
    re-permutes the index order on every pass. With several datasets the
    same (tee'd) index stream drives all of them, so the returned tuple of
    generators stays aligned sample-for-sample. A single dataset yields a
    single generator instead of a tuple.
    """
    datasets = (dataset,) + more
    nb_samples = len(dataset)
    assert all(len(d) == nb_samples for d in more)

    def _index_stream():
        rng = np.random.RandomState(seed) if shuffle else None
        while True:
            if shuffle:
                order = rng.permutation(nb_samples)
            else:
                order = np.arange(nb_samples)
            yield from order

    def _sample_stream(data, indices):
        for idx in indices:
            yield data[idx]

    if len(datasets) == 1:
        return _sample_stream(dataset, _index_stream())
    streams = tee(_index_stream(), len(datasets))
    return tuple(_sample_stream(d, s) for d, s in zip(datasets, streams))


def train_postprocessing(train_generator, **kwargs):
    """Training-time augmentation pipeline.

    Center-crops to crop_shape + shift_range, then takes a seeded random
    crop of crop_shape (random translation within the margin), then applies
    seeded vertical-axis rotation and flips. Requires kwargs 'crop_shape',
    'shift_range', 'ndim' and 'seed'.
    """
    padded_shape = np.asarray(kwargs['crop_shape']) + np.asarray(kwargs['shift_range'])
    pipeline = middle_crop(train_generator, crop_shape=padded_shape)
    pipeline = random_crop(pipeline, crop_shape=kwargs['crop_shape'], seed=kwargs['seed'])
    return vertical_rotation_and_flip(pipeline, ndim=kwargs['ndim'], seed=kwargs['seed'])


def valid_postprocessing(valid_generator, **kwargs):
    """Validation pipeline: a deterministic center-crop to kwargs['crop_shape']."""
    return middle_crop(valid_generator, crop_shape=kwargs['crop_shape'])


def batched_generator(generator, batchsize, nb_samples=None):
    """Yield numpy-array batches drawn from an endless sample `generator`.

    Without `nb_samples`, yields full batches of `batchsize` forever.
    With `nb_samples`, yields the batches of one epoch of exactly
    `nb_samples` samples — full batches followed by a short remainder
    batch — then repeats for the next epoch.

    Bug fix: the remainder batch is now yielded only when `nb_samples` is
    not a multiple of `batchsize`; previously an empty array was emitted
    at the end of every epoch in the evenly-divisible case.
    """
    if nb_samples is None:
        while True:
            yield np.asarray(tuple(next(generator) for _ in range(batchsize)))
    else:
        nb_batches, nb_left_samples = divmod(nb_samples, batchsize)
        while True:
            for _ in range(nb_batches):
                yield np.asarray(tuple(next(generator) for _ in range(batchsize)))
            if nb_left_samples:  # skip the spurious zero-length batch
                yield np.asarray(tuple(next(generator) for _ in range(nb_left_samples)))


def get_info(info_file, folds):
    """Sum the per-fold scan/label/malignancy counts over `folds`.

    The pickle at `info_file` holds three dicts keyed by fold id. Returns
    (nb_total_scans, nb_total_labels, nb_total_maligns).
    """
    scans_by_fold, labels_by_fold, maligns_by_fold = read_pkl_file(info_file)
    return tuple(
        sum(per_fold[fold] for fold in folds)
        for per_fold in (scans_by_fold, labels_by_fold, maligns_by_fold)
    )


def get_statistical_function_and_logging_str(valid_candidates, nb_total_scans, nb_total_labels, nb_total_maligns):
    """Build a validation-metrics function and the matching log-format string.

    Precomputes per-candidate polarity / nodule-id / malignancy arrays from
    `valid_candidates`, then returns:
      * ``helper(probs)`` — maps an (n_candidates, 2) array of class scores
        to a dict of losses, accuracies and FROC-style recall arrays;
      * a ``str.format`` template that consumes that dict plus epoch/loss
        bookkeeping fields supplied by the caller.
    """
    candidate_polarities = np.asarray([c['polarity'] for c in valid_candidates], 'int')
    candidate_noduleids = np.asarray([c['noduleid'] for c in valid_candidates], 'int')
    # Malignancy threshold depends on the data source ('lidc' vs. others),
    # and only positive candidates (polarity == 1) can count as malignant.
    # Presumably the two sources use different rating scales — TODO confirm.
    candidate_maligns = np.asarray([c['malign'] > 3.0 if c['source'] == 'lidc' else c['malign'] > 6.0 for c in valid_candidates]) & (candidate_polarities == 1)
    assert len(candidate_polarities) == len(candidate_noduleids)
    # Sanity: exactly the negative candidates (polarity -1) have noduleid -1.
    assert np.all((candidate_polarities == -1) == (candidate_noduleids == -1))
    assert np.all((candidate_polarities >= 0) == (candidate_noduleids >= 0))
    # NOTE(review): 10e-8 equals 1e-7; if 1e-8 was intended this is 10x off.
    eps = 10e-8

    def helper(probs):
        # probs: one (neg, pos) score pair per validation candidate.
        probs = np.asarray(probs)
        assert probs.shape == (len(candidate_polarities), 2)

        # Cross-entropy-style losses on the renormalized, clipped probabilities,
        # averaged separately over positive and negative candidates.
        normed_probs = probs / np.sum(probs, axis=-1, keepdims=True)
        clipped_probs = np.clip(normed_probs, eps, 1 - eps)
        log_probs = np.log(clipped_probs)
        losses = -1.0 * log_probs
        pos_loss = np.mean(losses[candidate_polarities == 1][:, 1])
        neg_loss = np.mean(losses[candidate_polarities == -1][:, 0])

        # Per-class accuracies from the argmax prediction.
        preds = np.argmax(probs, axis=-1)
        pos_accuracy = np.mean(preds[candidate_polarities == 1] == 1)
        neg_accuracy = np.mean(preds[candidate_polarities == -1] == 0)

        # Sort candidates by descending positive-class confidence so that,
        # below, np.unique(..., return_index=True) keeps the FIRST (i.e.
        # highest-confidence) detection per nodule id.
        confidence = probs[:, 1]
        sorting_idx = np.argsort(confidence)[::-1]
        sorted_confidence = confidence[sorting_idx]
        sorted_candidate_noduleids = candidate_noduleids[sorting_idx]
        sorted_candidate_polarities = candidate_polarities[sorting_idx]
        sorted_candidate_maligns = candidate_maligns[sorting_idx]

        # One confidence per negative candidate; one (the best) per unique
        # positive nodule; likewise per unique malignant nodule.
        neg_confidence = sorted_confidence[sorted_candidate_polarities == -1]
        _, unique_idx = np.unique(sorted_candidate_noduleids[sorted_candidate_polarities == 1], return_index=True)
        pos_confidence = sorted_confidence[sorted_candidate_polarities == 1][unique_idx]
        _, malign_unique_idx = np.unique(sorted_candidate_noduleids[sorted_candidate_maligns], return_index=True)
        malign_confidence = sorted_confidence[sorted_candidate_maligns][malign_unique_idx]

        # ROC of best-per-nodule positives vs. all negatives.
        fpr, tpr, _ = skl_metrics.roc_curve(
            np.concatenate([np.zeros((len(neg_confidence),), 'float'), np.ones((len(pos_confidence),), 'float')], axis=0),
            np.concatenate([neg_confidence, pos_confidence], axis=0),
        )

        # NOTE(review): this error path returns a length-7 zeros array while
        # the success path returns a dict — a caller formatting with the
        # template below would fail on this value; confirm intended.
        if np.any(np.isnan(fpr)):
            print('ERROR: zero false positives')
            return np.zeros((7,), 'float')
        # Rescale ROC axes to FROC units: false positives per scan on x,
        # fraction of all annotated labels recalled on y; then sample the
        # curve at the standard 0.125..8 FPs/scan operating points.
        fpr_multiplier = np.sum(candidate_polarities == -1) / nb_total_scans
        tpr_multiplier = len(unique_idx) / nb_total_labels
        fps = fpr * fpr_multiplier
        sens = tpr * tpr_multiplier
        recalls = np.interp([0.125, 0.25, 0.5, 1, 2, 4, 8], fps, sens)

        # Same FROC computation restricted to malignant nodules.
        fpr, tpr, _ = skl_metrics.roc_curve(
            np.concatenate([np.zeros((len(neg_confidence),), 'float'), np.ones((len(malign_confidence),), 'float')],
                           axis=0),
            np.concatenate([neg_confidence, malign_confidence], axis=0),
        )

        if np.any(np.isnan(fpr)):
            print('ERROR: zero false positives')
            return np.zeros((7,), 'float')
        fpr_multiplier = np.sum(candidate_polarities == -1) / nb_total_scans
        tpr_multiplier = len(malign_unique_idx) / nb_total_maligns
        fps = fpr * fpr_multiplier
        sens = tpr * tpr_multiplier
        malign_recalls = np.interp([0.125, 0.25, 0.5, 1, 2, 4, 8], fps, sens)

        return dict(
            pos_loss=pos_loss,
            neg_loss=neg_loss,
            pos_accuracy=pos_accuracy,
            neg_accuracy=neg_accuracy,
            recalls=recalls,
            malign_recalls=malign_recalls,
        )

    # Format template keyed to the dict above plus now/epoch/time/loss/acc
    # fields supplied by the training loop.
    return helper, '{now:%H:%M:%S} epoch {epoch:04d} time {time:05d} loss {loss:.6f} acc {acc:.4f} ' \
                   'val_pos_loss {pos_loss:.6f} val_neg_loss {neg_loss:.6f} ' \
                   'val_pos_acc {pos_accuracy:.4f} val_neg_acc {neg_accuracy:.4f} ' \
                   'val_recalls {recalls[0]:.4f} {recalls[1]:.4f} {recalls[2]:.4f} {recalls[3]:.4f} ' \
                   '{recalls[4]:.4f} {recalls[5]:.4f} {recalls[6]:.4f} ' \
                   'val_malign_recalls {malign_recalls[0]:.4f} {malign_recalls[1]:.4f} {malign_recalls[2]:.4f} ' \
                   '{malign_recalls[3]:.4f} {malign_recalls[4]:.4f} {malign_recalls[5]:.4f} {malign_recalls[6]:.4f}'


def get_config(candidate_file, candidate_vol_file, batchsize, crop_shape, logger=None):
    """Assemble the validation data pipeline and log its key sizes.

    Loads candidates, wraps them in a preprocessed dataset, and chains the
    unshuffled sample generator through center-cropping and batching.
    Counts and the input shape are logged via ``logger.info`` when a logger
    is supplied, otherwise printed. Returns a dict with the generator,
    the sample count, and the input shape.
    """
    input_shape = tuple(crop_shape) + (1,)

    valid_candidates = get_candidates(candidate_file)
    valid_dataset = preprocessing(
        candidates_to_dataset(valid_candidates, candidate_vol_file),
        vmin=VMIN,
        vmax=VMAX,
    )
    nb_valid_samples = len(valid_dataset)

    valid_generator = batched_generator(
        valid_postprocessing(
            dataset_to_generator(valid_dataset, shuffle=False),
            crop_shape=input_shape,
        ),
        batchsize,
        nb_valid_samples,
    )

    emit = logger.info if logger else print
    emit('')
    emit('*' * 20)
    for key, value in (
        ('nb_valid_candidates', len(valid_candidates)),
        ('nb_valid_samples', nb_valid_samples),
        ('input_shape', input_shape),
    ):
        emit('{k}: {v}'.format(k=key, v=value))
    emit('*' * 20)
    emit('')

    return dict(
        valid_generator=valid_generator,
        nb_valid_samples=nb_valid_samples,
        input_shape=input_shape,
    )
