# coding=utf-8
import numpy as np
import time
import matplotlib.pyplot as plt
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from scipy import sparse
# from sklearn.pipeline import
from scipy.sparse import csr_matrix
import sklearn as sk
from sklearn.base import BaseEstimator
try:
    from joblib import Parallel, delayed
except ImportError:
    from sklearn.externals.joblib import Parallel, delayed

from sklearn.pipeline import FeatureUnion, _fit_one_transformer, _fit_transform_one, _transform_one
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, RobustScaler, MaxAbsScaler, LabelEncoder


class LSTMSeriesModel(BaseEstimator):
    """Scikit-learn style wrapper around a stacked-LSTM Keras regressor.

    The network is built lazily on the first ``fit`` call, once the input
    feature depth (``X.shape[2]``) is known.
    """

    def __init__(self, layes_after_input=None, batch_size=512, nb_epoch=100, validation_split=0.05):
        # type: (list, int, int, float) -> None
        # NOTE: the misspelled parameter name `layes_after_input` is kept
        # for backward compatibility with existing callers.
        if layes_after_input is None:
            layes_after_input = [50, 100, 1]
        self.model = None  # built lazily on first fit()
        self.layes_after_input = layes_after_input
        self.batch_size = batch_size
        self.nb_epoch = nb_epoch
        self.validation_split = validation_split

    def fit(self, X, y):
        """Train the network; builds it on the first call.

        X is expected to be 3-D (samples, timesteps, features); the feature
        depth X.shape[2] becomes the network's input size.

        BUG FIX: the original if/else duplicated an identical model.fit call
        in both branches; collapsed to one call. Also returns ``self`` per
        sklearn convention (the original returned None).
        """
        if self.model is None:
            layers = [int(X.shape[2])]
            layers.extend(self.layes_after_input)
            self.model = LSTMSeriesModel.build_model(layers)
        self.model.fit(X, y, batch_size=self.batch_size, epochs=self.nb_epoch,
                       validation_split=self.validation_split)
        return self

    def predict(self, X):
        """Return the model's predictions for X."""
        return self.model.predict(X)

    def transform(self, X):
        """Transformer-style alias for predict(), for pipeline use."""
        return self.predict(X)

    @staticmethod
    def build_model(layers):
        """Build and compile the stacked-LSTM regression network.

        ``layers`` is [input_depth, first_units, hidden_units, output_units],
        e.g. [1, 50, 100, 1].

        The dead Keras-1 branch (guarded by a hard-coded ``keras2 = True``
        flag) was removed, and the four copy-pasted hidden layers are now
        added in a loop.
        """
        model = Sequential()
        # timesteps dimension is None: accepts variable-length sequences.
        model.add(LSTM(return_sequences=True, input_shape=(None, layers[0]), units=layers[1]))
        model.add(Dropout(0.2))

        # Four stacked hidden LSTM layers that keep the full sequence.
        for _ in range(4):
            model.add(LSTM(layers[2], return_sequences=True))
            model.add(Dropout(0.2))

        # Final LSTM collapses the sequence to a single vector per sample.
        model.add(LSTM(layers[2], return_sequences=False))
        model.add(Dropout(0.2))

        model.add(Dense(units=layers[3]))
        model.add(Activation("linear"))

        start = time.time()
        model.compile(loss="mse", optimizer="rmsprop")
        print("Compilation Time : ", time.time() - start)
        return model


class ReShapeInGroup(BaseEstimator):
    """Stateless transformer that reshapes a flat 2-D matrix into 3-D groups.

    Every ``num_in_group`` consecutive rows become one group, turning input
    of shape (rows, depth) into (rows / num_in_group, num_in_group, depth).
    """

    def __init__(self, num_in_group):
        # type: (int) -> None
        self.num_in_group = num_in_group

    def transform(self, X):
        """Return X reshaped to (rows / num_in_group, num_in_group, depth).

        Sparse input is densified first. Raises ValueError (after printing
        the offending data) when the row count is not a multiple of
        ``num_in_group``.
        """
        if isinstance(X, csr_matrix):
            # reshape() needs a plain ndarray, so densify sparse input.
            X = X.toarray()
        rows = int(X.shape[0] / self.num_in_group)
        depth = X.shape[1]
        try:
            return X.reshape((rows, self.num_in_group, depth))
        except ValueError:
            # Surface the offending data before re-raising, to ease debugging.
            print(X)
            raise

    def fit(self, X, y=None):
        """No-op: this transformer holds no fitted state.

        BUG FIX: the original implementation printed debug output and called
        exit(), terminating the whole process the first time fit() was
        invoked; fit now follows the sklearn convention and returns self.
        """
        return self

    def fit_transform(self, X, y=None, **fit_params):
        # type: (np.ndarray, np.ndarray, dict) -> np.ndarray
        """Equivalent to transform(X); y and fit_params are ignored."""
        return self.transform(X)


class FeatureUnionExt(FeatureUnion):
    # Extension of FeatureUnion: adds an ``idx_list`` parameter listing, for
    # each parallel transformer, which columns of the feature matrix that
    # transformer should read.
    def __init__(self, transformer_list, idx_list, n_jobs=1, transformer_weights=None):
        # idx_list[i] holds the column indices routed to transformer_list[i].
        self.idx_list = idx_list
        if not isinstance(transformer_list, list):
            transformer_list = list(transformer_list)
        # print("++++++++++++++ original input++++++++++++++++++")
        # print(transformer_list)


        super(FeatureUnionExt, self).__init__(transformer_list=list(map(lambda trans: (trans[0], trans[1]), transformer_list)),
                                              n_jobs=n_jobs, transformer_weights=transformer_weights)
        # a = FeatureUnion.__init__(self, transformer_list=map(lambda trans: (trans[0], trans[1]), transformer_list),
        #                           n_jobs=n_jobs, transformer_weights=transformer_weights)

        # NOTE(review): debug scaffolding below — the inner isinstance branch
        # can never execute, because self.transformer_list is None inside the
        # outer `is None` branch.
        print("--------------------------transformer_list----------------------------")
        if self.transformer_list is None:
            if isinstance(self.transformer_list, list) and len(self.transformer_list) == 0:
                self.transformer_list = super(FeatureUnionExt, self).__dict__['transformer_list']
            else:
                print(self.transformer_list, '=-===== WTF ??????????????????')
        else:
            print(self.transformer_list, '-------- is not None!!!!')
            print(list(self.transformer_list))

        # print(list(map(lambda trans: (trans[0], trans[1]), transformer_list)))
        # print()
        # print(a)

    # fit is overridden because each transformer reads only a slice
    # (the idx_list columns) of the feature matrix.
    def fit(self, X, y=None):
        # Pair every (name, transformer) with its column indices.
        transformer_idx_list = map(lambda trans, idx: (trans[0], trans[1], idx), self.transformer_list, self.idx_list)
        transformers = Parallel(n_jobs=self.n_jobs)(
            # Slice out this transformer's columns before fitting it.
            delayed(_fit_one_transformer)(trans, X[:, idx], y)
            for name, trans, idx in transformer_idx_list)
        self._update_transformer_list(transformers)
        return self

    # fit_transform is overridden because each transformer reads only a
    # slice of the feature matrix.
    def fit_transform(self, X, y=None, **fit_params):
        if isinstance(X, list):
            X = np.array(X)
        if not isinstance(self.transformer_list, list):
            self.transformer_list = list(self.transformer_list)
        print('***************$%^%&%^&^%&%^&%^&')
        print(self.transformer_list)
        print(self.idx_list)

        transformer_idx_list = map(lambda trans, idx: (trans[0], trans[1], idx), self.transformer_list, self.idx_list)
        print(type(transformer_idx_list))
        print()
        # exit()
        # Materialize the map object: it is printed and then iterated, and a
        # Python 3 map can only be consumed once.
        if not isinstance(transformer_idx_list, list):
            transformer_idx_list = list(transformer_idx_list)
        print("--------------transformer_idx_list in fit_transform in FeatureUnionExt-----------------------")
        print(transformer_idx_list)
        print("-----------------X--------------------")
        print(X)
        # print(type(X))
        # print(np.array(X)[:, [1, 2]])
        # exit()

        # NOTE(review): lexicographic string comparison, not a real version
        # compare — adequate only for the sklearn versions this targets.
        # NOTE(review): the whole transformer_weights mapping is passed as
        # `weight`; upstream FeatureUnion passes a per-transformer scalar —
        # verify against the sklearn version in use.
        if sk.__version__ >= '0.19.1':
            result = Parallel(n_jobs=self.n_jobs)(
                # Slice out this transformer's columns before fit_transform.
                delayed(_fit_transform_one)(trans,  # the name kwarg was dropped in sklearn 0.19.1
                                            X=X[:, idx] if len(X.shape) == 2 else X,
                                            y=y,
                                            weight=self.transformer_weights, **fit_params)
                for name, trans, idx in transformer_idx_list)
        else:
            result = Parallel(n_jobs=self.n_jobs)(
                # Slice out this transformer's columns before fit_transform.
                delayed(_fit_transform_one)(trans, name=name,
                                            X=X[:, idx] if len(X.shape) == 2 else X,
                                            y=y,
                                            weight=self.transformer_weights, **fit_params)
                for name, trans, idx in transformer_idx_list)

        # print("--------result in FeatureUnionExt----------")
        # for name, trans, idx in transformer_idx_list:
        #     print(name)
        #     print(trans)
        #     print(idx)
        #     print(_fit_transform_one(trans,  # name=name,
        #                              X=X[:, idx] if len(X.shape) == 2 else X,
        #                              y=y,
        #                              weight=self.transformer_weights, **fit_params))
        # print("--------result in FeatureUnionExt----------")
        # print(result)
        # exit()
        Xs, transformers = zip(*result)
        self._update_transformer_list(transformers)
        # Stack the per-transformer outputs side by side; stay sparse if any
        # transformer produced a sparse matrix.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs

    # transform is overridden because each transformer reads only a slice
    # of the feature matrix.
    def transform(self, X):
        transformer_idx_list = map(lambda trans, idx: (trans[0], trans[1], idx), self.transformer_list, self.idx_list)
        if sk.__version__ >= '0.19.1':
            Xs = Parallel(n_jobs=self.n_jobs)(
                # Slice out this transformer's columns before transform.
                delayed(_transform_one)(trans,  # name=name,
                                        X=X[:, idx] if len(X.shape) == 2 else X,
                                        weight=self.transformer_weights)
                for name, trans, idx in transformer_idx_list)
        else:
            Xs = Parallel(n_jobs=self.n_jobs)(
                # Slice out this transformer's columns before transform.
                delayed(_transform_one)(trans, name=name,
                                        X=X[:, idx] if len(X.shape) == 2 else X,
                                        weight=self.transformer_weights)
                for name, trans, idx in transformer_idx_list)
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs


class LabelOnehotEncodePipe:
    """Label-encode every column of X, then one-hot encode the result.

    Keeps one LabelEncoder per column (grown on demand) followed by a single
    OneHotEncoder over the integer-encoded matrix.
    """

    def __init__(self):
        self.multi_encoder = [LabelEncoder()]  # one per column, grown on demand
        self.onehot = OneHotEncoder()

    def _ensure_encoders(self, col_total):
        # Grow the per-column LabelEncoder list to cover col_total columns.
        while len(self.multi_encoder) < col_total:
            self.multi_encoder.append(LabelEncoder())

    def _label_encode(self, X, fitting):
        # Apply per-column label encoding; fit the encoders first when
        # fitting is True. Returns an array shaped like X (2-D stays 2-D).
        shape = X.shape
        if len(shape) == 2 and shape[1] > 1:
            self._ensure_encoders(shape[1])
            result = X.copy()
            for i in range(shape[1]):
                enc = self.multi_encoder[i]
                result[:, i] = enc.fit_transform(result[:, i]) if fitting else enc.transform(result[:, i])
        else:
            enc = self.multi_encoder[0]
            result = enc.fit_transform(X) if fitting else enc.transform(X)
        if len(shape) == 2:
            # Restore the 2-D shape so OneHotEncoder sees a column matrix.
            result = result.reshape(shape)
        return result

    def fit(self, X, y=None):
        """Fit the label encoders and the one-hot encoder on X; return self.

        BUG FIX: the single-column branch originally called
        ``self.multi_encoder[0].fit(X)``, which returns the *encoder object*
        (not encoded labels) and then fed that object to OneHotEncoder.fit.
        It now uses fit_transform so the one-hot encoder is fitted on the
        integer-encoded values, mirroring transform(). The reshape applied
        in transform() is now also applied here for consistency.
        """
        result = self._label_encode(X, fitting=True)
        self.onehot.fit(result, y)
        return self

    def transform(self, X):
        """Label-encode each column of X, then one-hot encode the result."""
        result = self._label_encode(X, fitting=False)
        return self.onehot.transform(result)

    def fit_transform(self, X, y=None, **fit_params):
        """Fit on X, then transform it; fit_params accepted but ignored."""
        self.fit(X, y)
        return self.transform(X)


if __name__ == "__main__":
    # Demo / smoke test: label+one-hot encode the categorical columns, scale
    # the numeric one, group rows into fixed-length sequences, and train the
    # LSTM regressor on a tiny hand-made dataset.
    import sklearn as sk
    print(sk.__version__)
    aaa = [1, 2, 3, 4, 5]
    print(list(map(lambda x: x + 1, aaa)))
    # exit()
    epochs = 100
    seq_len = 50

    col_list = ['name', 'quantity', 'shift']
    # Grouped (3-D) version of the training data; kept for reference only.
    X_train = [[[1, 2, 3],
                [1, 3, 0]],
               [[2, 2, 0],
                [2, 3, 6]],
               [[3, 2, 1],
                [3, 3, 2]],
               [[1, 3, 1],
                [1, 4, 2]]]
    # print(np.array(X_train))
    cate = 'abc'
    # cate = 1
    # Flat 2-D matrix actually fed to the pipeline; column 0 is categorical.
    X_train_flat = [[cate, 2, 3],
                    [cate, 3, 0],
                    [2, 2, 0],
                    [2, 3, 6],
                    [3, 2, 1],
                    [3, 3, 2],
                    [cate, 3, 1],
                    [cate, 4, 2]]
    X_train_flat = np.array(X_train_flat)
    # print(X_train_flat[:, 2])
    # X_train_flat[:, 2] = np.array([2, 2, 3, 3, 2, 2, 3, 3])
    # print(X_train_flat)
    # print("0000000000000000000000000000")
    # print(np.array(X_train_flat).reshape([int(len(X_train_flat)/2), 2, 3]))
    # exit()

    # One regression target per 2-row group of X_train_flat.
    y_train = [4 + 3.0 / 2.0,
               0.4 + 6.0 / 2.0,
               20 + 3.0 / 2.0,
               5 + 3.0 / 2.0]
    y_train = np.array(y_train)
    print(y_train)

    X_predict = [[[cate, 4 + i, 3],
                  [cate, 5 + i, 0]] for i in range(32)]
    X_predict = np.array(X_predict)
    y_true = [6 + i + 3.0 / 2.0 for i in range(32)]

    # NOTE(review): `categorical_features` was deprecated in sklearn 0.20 and
    # removed in 0.22 — this script targets an older sklearn release.
    ohe = OneHotEncoder(categorical_features=[0])  # equivalent to [True, False, True]
    # ohe.fit(X_train_flat, y_train)
    # ans = ohe.transform(X_train_flat).toarray()
    # print("----------------------")
    # print(ans)

    rbs = RobustScaler()
    # scaler = MinMaxScaler(feature_range=(0, 1))
    scaler = MaxAbsScaler()
    # scaler.fit(ans, y_train)
    print(np.array(X_train_flat))
    print('++++++++++++++++++++++++++')
    # print(scaler.transform(ans))
    string_processor = ("LabelOnehotEncodePipe", LabelOnehotEncodePipe())
    # string_processor = ("OneHot", OneHotEncoder())
    # Columns [0, 2] go to the label/one-hot pipe, column [1] to the scaler;
    # the union's output is then regrouped 2 rows per sample for the LSTM.
    pipe = Pipeline([('FeatureUnionNum', FeatureUnionExt(transformer_list=[string_processor,
                                                                           ("MinMaxScaler", scaler)],
                                                         idx_list=[[0, 2], [1]])),
                     ("Reshaper", ReShapeInGroup(num_in_group=2)),
                     ("LSTM", LSTMSeriesModel(layes_after_input=[50, 100, 1], nb_epoch=100))
                     ])

    def expand_y_to_period_times(np_y, times):
        # type: (np.ndarray, int) -> np.ndarray
        """Repeat every row of np_y `times` times (e.g. times=2 doubles the rows)."""
        r_list = []
        for row in np_y:
            for i in range(times):
                r_list.append(row)
        return np.array(r_list)
    print(y_train.shape)
    # print()
    # y_train_in = expand_y_to_period_times(y_train, 2)
    y_train_in = y_train
    # exit()

    pipe.fit(X_train_flat, y_train_in)

    print(scaler.__dict__)
    print(pipe.predict(X_train_flat))
    print(y_train)
    # print(X_predict)
    print("--------------------------------------=")
    # Flatten the grouped prediction input back to 2-D for the pipeline.
    X_predict = X_predict.reshape((-1, 3))
    print(X_predict)
    # exit()
    y_predict = pipe.predict(X_predict)
    print(y_predict.reshape((-1)).tolist())
    print(y_true)
    plt.plot(y_predict)
    plt.plot(y_true)
    plt.show()
    exit()







    # NOTE(review): everything below is unreachable dead code — the exit()
    # above terminates the script first.
    lstm_model = LSTMSeriesModel.build_model([3, 50, 100, 1])
    lstm_model.fit(X_train, y_train, batch_size=512, nb_epoch=epochs, validation_split=0.05)
    print('----------------------------------')
    # for x_step in X_predict:
    print(lstm_model.predict(X_predict))
    print(y_true)
