import functools
from typing import Union, List, Type, Callable, Dict
import numpy as np
import pandas as pd
import pyod.utils.example

from .utils import *
from src.visualize import *
from sklearn.model_selection import KFold
import pyod.models as pyod_models


class Model:
    """
    Abstract base class for every model in this module.

    Subclasses may either implement ``fit`` + ``predict``, or override
    ``fit_predict`` directly (e.g. ensemble/stacking models).
    """

    def __init__(self, **kwargs):
        raise NotImplementedError

    def fit(self, X_train: np.ndarray, y_train: np.ndarray):
        """Train on the given features/labels."""
        raise NotImplementedError

    def predict(self, X_test: np.ndarray):
        """Return predictions (typically class probabilities) for X_test."""
        raise NotImplementedError

    def fit_predict(self, X_train: np.ndarray, y_train: np.ndarray, X_test: np.ndarray, y_test=None):
        """Default pipeline: train, then predict on the held-out features."""
        self.fit(X_train, y_train)
        return self.predict(X_test)

    @classmethod
    def validate(cls, data: 'ValidateData', vis=False, **kwargs):
        """Run a one-shot validation of this model class on ``data``."""
        raise NotImplementedError


def preprocess(drop_EJ=False):
    """
    Prepare standardized train/test feature matrices.

    The categorical column ``EJ`` is either dropped or encoded A->0, B->1,
    then all features are standardized.

    :param drop_EJ: drop the ``EJ`` column instead of encoding it.
    :return: (train array, test array), both standardized with the SAME scaler.
    """
    if drop_EJ:
        train_features_ = train_features.drop(columns='EJ')
        test_features_ = test_features.drop(columns='EJ')
    else:
        train_features_ = train_features.copy()
        train_features_["EJ"] = train_features["EJ"].replace({"A": 0, "B": 1})
        test_features_ = test_features.copy()
        test_features_["EJ"] = test_features["EJ"].replace({"A": 0, "B": 1})

    # BUG FIX: fit the scaler on the training data only and reuse its
    # statistics for the test data.  Previously two independent scalers were
    # fit, so train and test were standardized with different means/variances
    # and were not comparable.
    scaler = StandardScaler().fit(train_features_)
    train_features_ = scaler.transform(train_features_)
    test_features_ = scaler.transform(test_features_)
    return train_features_, test_features_


class ValidateData:
    """
    Fixed data setup for fast & fair model validation.

    Targets depend on ``label_name``:

    - ``'Class'``: the main binary target;
    - ``'All'``: every auxiliary target plus the main target (list of arrays);
    - a list of names: those auxiliary targets plus the main target;
    - a single name: that auxiliary target only.

    With ``use_cv=True`` only ``X_train``/``y_train`` are kept and
    :meth:`do_cross_validate` must be used; with ``use_cv=False`` a single
    train/test split is created for :meth:`do_validate`.
    """

    def __init__(self, random_seed: int = 42, test_size=0.3, label_name: str = 'Class', use_cv=True):
        self.use_cv = use_cv
        self.label_name = label_name

        if label_name == 'Class':
            y = train_main_target
            num_classes = 2
        elif label_name == 'All':
            y = list(train_targets_label.values()) + [train_main_target]
            num_classes = -1  # sentinel: mixed multi-target case
        elif isinstance(label_name, list):
            y = [train_targets_label[lb] for lb in label_name] + [train_main_target]
            num_classes = [train_targets_num_classes[lb] for lb in label_name] + [2]
        else:
            y = train_targets_label[label_name]
            num_classes = train_targets_num_classes[label_name]

        train_features_, test_features_ = preprocess()
        # BUG FIX: set these BEFORE the early CV return so they exist in both
        # modes.  Previously, with use_cv=True, num_features/num_classes were
        # never set and callers such as MLP.validate crashed.
        self.num_features = train_features_.shape[1]
        self.num_classes = num_classes

        if use_cv:
            self.X_train = train_features_
            self.y_train = y
            self.y_test = self.X_test = None
            return

        if label_name != 'All' and not isinstance(label_name, list):
            X_train, X_test, y_train, y_test = train_test_split(train_features_, y, test_size=test_size,
                                                                random_state=random_seed)
        else:
            # Multiple targets: train_test_split returns an interleaved
            # (train, test) pair for every array passed in.
            X_train, X_test, *y_star = train_test_split(train_features_, *y, test_size=test_size,
                                                        random_state=random_seed)
            y_train = y_star[0::2]
            y_test = y_star[1::2]

        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test

    def do_validate(self, model: Model):
        """
        One-shot validation of ``model`` on the held-out split.

        :return: metric dict from ``evaluate`` (scored on the main target
            when multiple targets are used).
        """
        assert not self.use_cv
        ypred = model.fit_predict(self.X_train, self.y_train, self.X_test, self.y_test)
        if self._many_labels:
            # Only the last (main) target is scored.
            return evaluate(self.y_test[-1], ypred)
        return evaluate(self.y_test, ypred)

    @property
    def _many_labels(self):
        # True when y is a list of target arrays rather than a single array.
        return self.label_name == 'All' or isinstance(self.label_name, list)

    def do_cross_validate(self, model: Model):
        """K-fold cross validation (KFold default folds); returns averaged metric dict."""
        assert self.use_cv
        res = []
        for train_idx, test_idx in KFold().split(self.X_train):
            X_train, X_test = self.X_train[train_idx], self.X_train[test_idx]

            if self._many_labels:
                y_test = [y[test_idx] for y in self.y_train]
                y_train = [y[train_idx] for y in self.y_train]
            else:
                y_train, y_test = self.y_train[train_idx], self.y_train[test_idx]

            ypred = model.fit_predict(X_train, y_train, X_test, y_test)

            if self._many_labels:
                out = evaluate(y_test[-1], ypred)
            else:
                out = evaluate(y_test, ypred)
            res.append(out)

        # Average every metric across folds.
        res = {key: np.mean([x[key] for x in res]) for key in res[0].keys()}
        print(f'Cross-validate: {res}')
        return res


class PyODModel(Model):
    """
    Outlier-detection baseline built on ``pyod``, treating the minority
    class as outliers (contamination set from the training label ratio).
    """

    def __init__(self):
        # Other detectors tried here: pyod.models.knn.KNN,
        # pyod.models.ocsvm.OCSVM (swap the import to experiment).
        from pyod.models.auto_encoder import AutoEncoder

        # contamination = expected outlier fraction in the data.
        self.model = AutoEncoder(contamination=get_outlier_ratio())

    def fit(self, X_train: np.ndarray, y_train: np.ndarray):
        # pyod detectors are unsupervised; y is accepted but ignored by fit.
        self.model.fit(X_train, y_train)

    def predict(self, X_test: np.ndarray):
        # NOTE(review): presumably shape (n_samples, 2) with
        # [inlier, outlier] probabilities — confirm against pyod docs.
        return self.model.predict_proba(X_test)

    @classmethod
    def validate(cls, data: 'ValidateData', vis=False, **kwargs):
        """Validate the detector on ``data``'s held-out split."""
        model = cls()
        out = data.do_validate(model)
        print(out)


def do_submit(model_type: Type[Model], all_labels=False, **kwargs):
    """
    One-shot: train ``model_type`` on all training data, predict the test
    set and write a submission file.

    :param all_labels: feed all auxiliary targets plus the main target
        (for stacking models); otherwise only the main target.
    :param kwargs: extra keyword arguments forwarded to the model constructor.
    """
    targets = (
        list(train_targets_label.values()) + [train_main_target]
        if all_labels
        else train_main_target
    )

    X_train, X_test = preprocess()
    model = model_type(input_dim=X_train.shape[1], num_classes=2, **kwargs)
    make_submission(model.fit_predict(X_train, targets, X_test))


class MLP(Model):
    """
    Small fully-connected Keras classifier: two ReLU hidden layers of
    10 units and a softmax output.
    """

    def __init__(self, input_dim: int, num_classes: int = 2):
        model = Sequential()

        # Two hidden layers, 10 units each, ReLU activation.
        model.add(Dense(10, activation='relu', input_dim=input_dim))
        model.add(Dense(10, activation='relu'))

        # Softmax output for (multi-)class probabilities.
        model.add(Dense(num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        self.model = model
        self.num_classes = num_classes

    def fit(self, X_train: np.ndarray, y_train: np.ndarray):
        # categorical_crossentropy expects one-hot targets.
        y_train = to_categorical(y_train)
        # batch_size=128 for 100 epochs; history kept for later plotting.
        # (No validation data is used during training.)
        self.history = self.model.fit(X_train, y_train, verbose=0,
                                      batch_size=128, epochs=100, validation_data=None)

    def predict(self, X_test: np.ndarray):
        return self.model.predict(X_test).squeeze()

    @classmethod
    def validate(cls, label_name='Alpha', vis=False):
        """Train on a fresh split for ``label_name`` and report metrics."""
        # BUG FIX: use_cv=False — the default CV mode leaves X_test as None
        # (and previously left num_features unset), so this method crashed.
        data = ValidateData(label_name=label_name, use_cv=False)
        model = cls(input_dim=data.num_features, num_classes=data.num_classes)
        model.fit(data.X_train, data.y_train)
        history = model.history

        if vis:
            plt.figure(figsize=(10, 5))
            # Loss and accuracy training curves.
            plt.subplot(121)
            plt.plot(history.history['loss'], label='loss', color='red')
            plt.title('Loss')
            plt.legend()

            plt.subplot(122)
            plt.plot(history.history['accuracy'], label='acc', color='blue')
            plt.title(f'Accuracy-{data.label_name}')
            plt.legend()
            plt.show()

        # Score on the held-out split.
        score = evaluate(data.y_test, model.predict(data.X_test))
        print(score)
        return score


class TreeModel(Model):
    """
    sklearn baselines: random forest ('rf'), gradient boosting ('gb')
    and logistic regression ('lr').
    """
    TREE_TYPE = dict(
        rf=RandomForestClassifier,
        gb=GradientBoostingClassifier,
        lr=LogisticRegression,
    )
    model: Union[RandomForestClassifier, GradientBoostingClassifier]

    def __init__(self, tree_type: str = 'gb', num_classes: int = 2, input_dim=None):
        # input_dim is accepted only for interface parity with MLP; unused.
        self.model = self.TREE_TYPE[tree_type]()
        self.num_classes = num_classes

    def fit(self, X_train: np.ndarray, y_train: np.ndarray):
        self.model.fit(X_train, y_train)

    def predict(self, X_test):
        # Class probabilities, shape (n_samples, num_classes).
        return self.model.predict_proba(X_test)

    @classmethod
    def validate(cls, data: ValidateData, tree_type: str):
        """Validate one estimator type on ``data``'s held-out split."""
        model = cls(num_classes=data.num_classes, tree_type=tree_type)
        return data.do_validate(model)

    @classmethod
    def validate_tree_type(cls):
        """Compare all three estimator types on the same split."""
        # BUG FIX: use_cv=False — do_validate() asserts a non-CV split, so
        # the previous default-constructed ValidateData always failed.
        data = ValidateData(use_cv=False)
        for tt in ['rf', 'gb', 'lr']:
            res = cls.validate(data, tt)
            print(f'{tt} {res}')


def make_basic_model(name: str, input_dim: int, num_classes: int):
    """Sub-model factory: ``'mlp'`` builds an MLP, any other name a TreeModel."""
    if name != 'mlp':
        return TreeModel(tree_type=name, num_classes=num_classes)
    return MLP(input_dim=input_dim, num_classes=num_classes)


# Mapping from resample-type name to imblearn sampler class.
# NOTE(review): duplicated as ResampleEnsemble.RESAMPLE_TYPE — consider
# consolidating.
RESAMPLE_TYPE = dict(
    up=RandomOverSampler,
    down=RandomUnderSampler,
)


def do_resample(type: str, X, y):
    """Resample (X, y) with the sampler registered under ``type`` ('up'/'down')."""
    sampler = RESAMPLE_TYPE[type]()
    return sampler.fit_resample(X, y)


class ResampleEnsemble(Model):
    """
    Resampling ensemble for imbalanced data.

    Each round resamples the training set ('up' = random oversampling,
    'down' = random undersampling) and trains one sub-model per entry in
    ``model_list`` ('mlp', 'rf', 'gb'); predicted probabilities are averaged
    over all rounds and sub-models.
    """
    RESAMPLE_TYPE = dict(
        up=RandomOverSampler,
        down=RandomUnderSampler,
    )

    def _make_model(self, name: str):
        # Build one sub-model using the current feature dimension.
        return make_basic_model(name, input_dim=self.input_dim, num_classes=self.num_classes)

    def __init__(self, num_classes: int = 2, resample_type: str = 'up', num_ensemble: int = 5,
                 model_list: List[str] = None, input_dim=None):
        if model_list is None:
            model_list = 'mlp gb'.split()
        self.num_classes = num_classes
        self.resample_type = self.RESAMPLE_TYPE[resample_type]
        self.num_ensemble = num_ensemble
        self.model_list = model_list
        self.input_dim = input_dim
        self.num_models = len(self.model_list)

    def fit_predict(self, X_train: np.ndarray, y_train: np.ndarray, X_test: np.ndarray, y_test=None):
        """
        Train the ensemble and return averaged probabilities for ``X_test``.

        When ``y_test`` is given, metrics of the running average are recorded
        in ``self.history`` after each round.
        """
        self.input_dim = X_train.shape[1]
        target_ensemble = np.zeros((X_test.shape[0], self.num_classes))
        history = defaultdict(list)
        for i in range(self.num_ensemble):
            # One resample shared by all sub-models of this round.
            X, y = self.resample_type().fit_resample(X_train, y_train)
            # TODO: move resample into inner loop
            for model_type in self.model_list:
                model = self._make_model(model_type)
                model.fit(X, y)
                target_ensemble += model.predict(X_test)

            if y_test is not None:
                # Score the running average so history shows the effect of
                # adding more ensemble rounds.
                target_ensemble_each = target_ensemble / (i + 1) / self.num_models
                for key, val in evaluate(y_test, target_ensemble_each).items():
                    history[key].append(val)
            print(f'Done {i}')

        target_ensemble /= self.num_ensemble * self.num_models
        self.history = history
        return target_ensemble

    @classmethod
    def validate(cls, data: ValidateData, vis=False, **model_params):
        """
        Validate once; optionally plot metric history vs ensemble count.
        """
        model = cls(**model_params)
        model.fit_predict(data.X_train, data.y_train, data.X_test, data.y_test)
        history = model.history
        if vis:
            plt.plot(history['bll'], label='bll', color='blue')
            plt.plot(history['cll'], label='cll', color='red')
            plt.xlabel('#Ensembles')
            plt.title('main_target_ensemble')
            plt.legend()
            plt.show()
        res = {key: val[-1] for key, val in history.items()}
        print(res)
        return res

    @classmethod
    def validate_num(cls):
        """Study the effect of the number of ensemble rounds."""
        # BUG FIX: use_cv=False — validate() needs an explicit held-out
        # split (CV mode leaves X_test as None).
        data = ValidateData(use_cv=False)
        cls.validate(data, vis=True, num_ensemble=20)

    @classmethod
    def validate_model_list(cls):
        """Compare different sub-model combinations."""
        def fn(model_list):
            print(model_list)
            return cls.validate(data=data, model_list=model_list, num_ensemble=10)

        # BUG FIX: use_cv=False — validate() needs an explicit held-out split.
        data = ValidateData(use_cv=False)
        times = 1
        out = [
            do_repeated(lambda: fn(['mlp']), times),
            do_repeated(lambda: fn(['mlp', 'rf']), times),
            do_repeated(lambda: fn(['mlp', 'gb']), times),
            do_repeated(lambda: fn(['mlp', 'rf', 'gb']), times),
        ]
        pprint(out)


class StackModel(Model):
    """
    Two-layer stacking model.

    Lower layer: for each auxiliary label (Alpha, Beta, Gamma, Delta) an
    ensemble predicts that label from the raw features.
    Upper layer: predicts the main ``Class`` label from the one-hot encoded
    auxiliary labels.
    """

    def __init__(self, lower_resample='up', model_list=None,
                 upper_resample='up',
                 num_ensemble: int = 5,
                 num_classes: int = 2, input_dim: int = None):
        if model_list is None:
            model_list = ['mlp', 'gb']
        self.input_dim = input_dim
        self.model_list = model_list
        self.num_ensemble = num_ensemble
        self.num_classes = num_classes
        self.lower_sample = lower_resample
        self.upper_sample = upper_resample

    def _make_lower_model(self, mod: str, num_classes: int) -> Model:
        # Lower layer: raw features -> one auxiliary label.
        # (Renamed: this builder was previously misnamed _make_upper_model.)
        return make_basic_model(name=mod, input_dim=self.input_dim, num_classes=num_classes)

    def _make_upper_model(self, mod: str, input_dim: int) -> Model:
        # Upper layer: concatenated one-hot auxiliary labels -> binary Class.
        # (Renamed: this builder was previously misnamed _make_lower_model.)
        return make_basic_model(name=mod, input_dim=input_dim, num_classes=2)

    def fit_predict(self, X_train: np.ndarray, y_train: List[np.ndarray], X_test: np.ndarray, y_test=None):
        """
        ``y_train`` is a list: auxiliary label arrays followed by the main
        label array (the format produced by ValidateData with a label list).
        Returns averaged probabilities for the main label on ``X_test``.
        """
        mid_labels, end_labels = y_train[:-1], y_train[-1]
        self.input_dim = X_train.shape[1]
        lower_score = {}
        mid_labels_test = []
        mid_labels_onehot = []

        # Lower models: features => mid-labels
        for i, y in enumerate(mid_labels):
            num_classes = len(np.unique(y))
            mid_labels_test_ensemble = np.zeros((X_test.shape[0], num_classes))

            for c in range(self.num_ensemble):
                x_sample, y_sample = do_resample(self.lower_sample, X_train, y)
                for mod in self.model_list:
                    model = self._make_lower_model(mod, num_classes=num_classes)
                    model.fit(x_sample, y_sample)
                    mid_labels_test_ensemble += model.predict(X_test)
                print(f'[StackModel] done class {i} ensemble {c}')

            mid_labels_test_ensemble /= self.num_ensemble * len(self.model_list)
            if y_test is not None:
                lower_score[i] = evaluate_multiclass(y_test[i], mid_labels_test_ensemble)

            # Convert probas to labels.
            mid_labels_test_ensemble = np.argmax(mid_labels_test_ensemble, 1)
            # Convert labels to onehot
            mid_labels_test_ensemble = np.eye(num_classes)[mid_labels_test_ensemble]
            mid_labels_test.append(mid_labels_test_ensemble)
            # Convert train mid label to onehot
            mid_labels_onehot.append(np.eye(num_classes)[y])

        # Upper models: mid-labels => end-labels
        # Train on train_targets.
        mid_labels_test = np.concatenate(mid_labels_test, 1)
        end_probas_ensemble = np.zeros((X_test.shape[0], self.num_classes))
        mid_labels = np.concatenate(mid_labels_onehot, 1)
        for c in range(self.num_ensemble):
            x_sample, y_sample = do_resample(self.upper_sample, mid_labels, end_labels)
            for mod in self.model_list:
                model = self._make_upper_model(mod, input_dim=mid_labels.shape[1])
                model.fit(x_sample, y_sample)
                end_probas_ensemble += model.predict(mid_labels_test)

        print(f'[StackModel] done upper_model')
        end_probas_ensemble /= self.num_ensemble * len(self.model_list)
        self.lower_score = lower_score
        self.end_probas = end_probas_ensemble
        return end_probas_ensemble

    @classmethod
    def validate(cls, data: ValidateData, vis=True, **kwargs):
        """Validate the stack; note that down-sampling tends to work better."""
        model = cls(**kwargs)
        out = data.do_validate(model)
        print('Score of lower models')
        pprint(model.lower_score)
        print('Score of upper models')
        print(out)
        if vis:
            y_pred = np.argmax(model.end_probas, 1)
            Y = pd.DataFrame({'ypred': y_pred, 'ytrue': data.y_test[-1]})
            tsne_2D_many_labels(data.X_test, Y)
            plt.show()
        return out

    @classmethod
    def validate_resample(cls, data: ValidateData):
        """Grid-search the resampling direction for both layers."""
        do_grid_search(
            params_space=dict(upper_resample='up down'.split(),
                              lower_resample='up down'.split()),
            model_type=cls,
            data=data,
        )

    @classmethod
    def validate_num_ensemble(cls, data: ValidateData):
        """Grid-search the ensemble size."""
        do_grid_search(
            params_space=dict(num_ensemble=[5, 6, 7, 8]),
            model_type=cls,
            data=data,
            times=1
        )

    @classmethod
    def validate_submodels(cls, data: ValidateData):
        """Grid-search the sub-model combinations."""
        do_grid_search(
            params_space=dict(
                model_list=[
                    ['rf'], ['rf', 'gb'], ['rf', 'gb', 'mlp'],
                ],
            ),
            model_type=cls,
            data=data,
            times=1
        )

    @classmethod
    def validate_targets(cls):
        """Compare different auxiliary-target subsets."""
        # BUG FIX: use_cv=False — validate() -> do_validate() asserts a
        # non-CV split, so the previous default construction always failed.
        data = ValidateData(label_name='Alpha,Beta,Gamma,Delta'.split(','), use_cv=False)
        cls.validate(data)
        data = ValidateData(label_name='Beta,Gamma,Delta'.split(','), use_cv=False)
        cls.validate(data)


class StackModelV2(Model):
    """
    Two-layer stacking model, variant 2.

    Unlike StackModel, every (ensemble round, sub-model type) pair trains a
    complete lower+upper pipeline, and the final main-label probabilities are
    averaged over those pipelines.
    """

    def __init__(self, lower_resample='up', model_list=None,
                 upper_resample='up',
                 num_ensemble: int = 5,
                 num_classes: int = 2, input_dim: int = None):
        if model_list is None:
            model_list = ['mlp', 'gb']
        self.input_dim = input_dim
        self.model_list = model_list
        self.num_ensemble = num_ensemble
        self.num_classes = num_classes
        self.lower_sample = lower_resample
        self.upper_sample = upper_resample

    def _make_lower_model(self, mod: str, num_classes: int) -> Model:
        # Lower layer: raw features -> one auxiliary label.
        # (Renamed: this builder was previously misnamed _make_upper_model.)
        return make_basic_model(name=mod, input_dim=self.input_dim, num_classes=num_classes)

    def _make_upper_model(self, mod: str, input_dim: int) -> Model:
        # Upper layer: concatenated one-hot auxiliary labels -> binary Class.
        # (Renamed: this builder was previously misnamed _make_lower_model.)
        return make_basic_model(name=mod, input_dim=input_dim, num_classes=2)

    def fit_predict(self, X_train: np.ndarray, y_train: List[np.ndarray], X_test: np.ndarray, y_test=None):
        """
        ``y_train`` is a list: auxiliary label arrays followed by the main
        label array.  Returns averaged probabilities for the main label.
        """
        mid_labels, end_labels = y_train[:-1], y_train[-1]
        self.input_dim = X_train.shape[1]
        end_probas_ensemble = np.zeros((X_test.shape[0], self.num_classes))
        num_classes = [len(np.unique(y)) for y in mid_labels]
        mid_labels_onehot = [np.eye(c)[y] for c, y in zip(num_classes, mid_labels)]
        mid_labels_cat = np.concatenate(mid_labels_onehot, 1)

        for j in range(self.num_ensemble):
            for mod in self.model_list:
                mid_preds_list = []

                # Lower models: features => mid-labels
                for i, y in enumerate(mid_labels):
                    c = num_classes[i]
                    x_sample, y_sample = do_resample(self.lower_sample, X_train, y)
                    model = self._make_lower_model(mod, num_classes=c)
                    model.fit(x_sample, y_sample)
                    x = model.predict(X_test)
                    if y_test is not None:
                        metric = evaluate(y_test[i], x)
                        print(f'Eval: #{j} mod {mod} lb {i} upper {metric}')
                    # Hard labels, then one-hot, as the upper model's input.
                    x = np.argmax(x, 1)
                    mid_preds = np.eye(c)[x]
                    mid_preds_list.append(mid_preds)

                # Upper models: mid-labels => end-labels
                x_sample, y_sample = do_resample(self.upper_sample, mid_labels_cat, end_labels)
                model = self._make_upper_model(mod, input_dim=x_sample.shape[1])
                model.fit(x_sample, y_sample)
                mid_preds_cat = np.concatenate(mid_preds_list, 1)
                preds = model.predict(mid_preds_cat)
                if y_test is not None:
                    metric = evaluate(y_test[-1], preds)
                    print(f'Eval: #{j} mod {mod} lb main lower {metric}')
                end_probas_ensemble += preds
                print(f'[StackModelV2] done model {mod} ensemble {j}')

        end_probas_ensemble /= self.num_ensemble * len(self.model_list)
        self.end_probas = end_probas_ensemble
        return end_probas_ensemble

    @classmethod
    def validate(cls, data: ValidateData, vis=True, **kwargs):
        """Validate the stack; note that down-sampling tends to work better."""
        model = cls(**kwargs)
        out = data.do_validate(model)
        print(out)
        if vis:
            y_pred = np.argmax(model.end_probas, 1)
            Y = pd.DataFrame({'ypred': y_pred, 'ytrue': data.y_test[-1]})
            tsne_2D_many_labels(data.X_test, Y)
            plt.show()
        return out

    @classmethod
    def validate_resample(cls, data: ValidateData):
        """Grid-search the resampling direction for both layers."""
        do_grid_search(
            params_space=dict(upper_resample='up down'.split(),
                              lower_resample='up down'.split()),
            model_type=cls,
            data=data,
        )

    @classmethod
    def validate_num_ensemble(cls, data: ValidateData):
        """Grid-search the ensemble size."""
        do_grid_search(
            params_space=dict(num_ensemble=[2, 3, 4, 5, 6, 7, 8]),
            model_type=cls,
            data=data,
            times=1
        )

    @classmethod
    def validate_submodels(cls, data: ValidateData):
        """Grid-search the sub-model combinations."""
        do_grid_search(
            params_space=dict(
                model_list=[
                    ['rf'], ['rf', 'gb'], ['rf', 'gb', 'mlp'],
                ],
            ),
            model_type=cls,
            data=data,
            times=1
        )

    @classmethod
    def validate_targets(cls):
        """Compare different auxiliary-target subsets."""
        # BUG FIX: use_cv=False — validate() -> do_validate() asserts a
        # non-CV split, so the previous default construction always failed.
        data = ValidateData(label_name='Alpha,Beta,Gamma,Delta'.split(','), use_cv=False)
        cls.validate(data)
        data = ValidateData(label_name='Beta,Gamma,Delta'.split(','), use_cv=False)
        cls.validate(data)


def do_repeated(fn: Callable[[], Union[dict, float]], times: int = 5):
    """
    Run ``fn`` several times and average the results to smooth out
    random variation.

    If ``fn`` returns dicts, each key is averaged independently;
    otherwise the scalar results are averaged.
    """
    outputs = [fn() for _ in range(times)]
    first = outputs[0]
    if not isinstance(first, dict):
        return np.mean(outputs)
    return {k: np.mean([o[k] for o in outputs]) for k in first.keys()}


def do_grid_search(params_space: Dict[str, list], model_type: Type[Model],
                   times: int = 1, model_params: dict = None,
                   data=None):
    """
    One-shot hyper-parameter grid search via ``model_type.validate``.

    :param params_space: mapping param name -> list of candidate values.
    :param model_type: model class whose ``validate`` classmethod is run.
    :param times: repetitions per configuration (metrics are averaged).
    :param model_params: fixed parameters merged into every configuration.
    :param data: shared ValidateData; a non-CV split is created when omitted.
    :return: DataFrame with one row per configuration (metrics + params).
    """
    # Avoid the mutable-default-argument pitfall.
    if model_params is None:
        model_params = {}
    if data is None:
        # use_cv=False: the validate() implementations need an explicit
        # held-out split.
        data = ValidateData(use_cv=False)

    metric_df = []
    params_df = []
    for params in kv_product(**params_space):
        params.update(model_params)  # merge in the fixed parameters
        # BUG FIX: pass the searched ``params`` to validate().  Previously
        # ``**model_params`` was passed, so every grid point was evaluated
        # with identical arguments and the search was a no-op.
        metrics = do_repeated(lambda params=params: model_type.validate(data=data, vis=False, **params),
                              times=times)
        print(f'Params {params} Metrics {metrics}')
        metric_df.append(metrics)
        params_df.append(params)

    metric_df = pd.DataFrame.from_records(metric_df)
    params_df = pd.DataFrame.from_records(params_df)
    df = pd.concat((metric_df, params_df), axis=1)
    print('--------------- Search Results ------------------')
    print(df)
    return df
