"""
书籍 FTRL 算法
"""

import sys
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler


NEAR_0 = 1e-10


class LrCls:
    """
    Logistic regression for binary classification: sigmoid decision
    function, cross-entropy loss, and its gradient w.r.t. the weights.
    """

    @staticmethod
    def fn_sigmoid(w, x):
        """
        Sigmoid decision function: y = 1 / (1 + e^{-x.w}).
        :param w: weight vector
        :param x: feature matrix, one sample per row
        :return: predicted probabilities, shape (n_samples, 1)
        """
        logits = np.dot(x, w)
        return 1.0 / (1.0 + np.exp(-logits)).reshape(x.shape[0], 1)

    @staticmethod
    def loss(y, y_pre):
        """
        Cross-entropy loss, summed over the batch.

        Binary case: L = sum(-y_i * log(p_i) - (1 - y_i) * log(1 - p_i))
          y_i: label of sample i, 0 or 1
          p_i: predicted probability that sample i is positive

        NEAR_0 keeps log() away from zero; np.nan_to_num replaces NaN
        with 0 and infinities with large finite numbers.

        :param y: labels (0 or 1)
        :param y_pre: predicted probabilities of the positive class
        :return: total loss
        """
        pos_term = -y * np.log(y_pre + NEAR_0)
        neg_term = -(1 - y) * np.log(1 - y_pre + NEAR_0)
        return np.sum(np.nan_to_num(pos_term + neg_term))

    @staticmethod
    def grad(y, y_pre, x):
        """
        Gradient of the cross-entropy loss w.r.t. the weights, averaged
        over the batch: mean((y_pre - y) * x).
        :param y: labels, shape (n_samples, 1)
        :param y_pre: predictions, shape (n_samples, 1)
        :param x: feature matrix, shape (n_samples, dim)
        :return: gradient vector, shape (dim,)
        """
        residual = y_pre - y
        return np.mean(residual * x, axis=0)


class LrReg:
    """
    Sigmoid-output regression: sigmoid decision function with a
    sum-of-squared-errors loss and its gradient.
    """

    @staticmethod
    def fn_sigmoid(w, x):
        """
        Sigmoid decision function: y = 1 / (1 + e^{-x.w}).
        :param w: weight vector
        :param x: feature matrix, one sample per row
        :return: predictions, shape (n_samples, 1)
        """
        return 1.0 / (1.0 + np.exp(-np.dot(x, w))).reshape(x.shape[0], 1)

    @staticmethod
    def loss(y, y_pre):
        """
        Sum-of-squared-errors loss: L = sum((y_pre - y)^2) / 2.
        :param y: training labels
        :param y_pre: predictions
        :return: loss value
        """
        return np.sum(np.nan_to_num(np.power(y_pre - y, 2))) / 2

    @staticmethod
    def grad(y, y_pre, x):
        """
        Gradient of the squared-error loss w.r.t. the weights, averaged
        over the batch; includes the sigmoid derivative y_pre * (1 - y_pre).

        BUG FIX: the previous `np.dot(grand_x, x)` multiplied an (n, 1)
        array by an (n, dim) matrix and raised a shape mismatch for any
        batch with n > 1. The per-sample factor must be broadcast
        element-wise over x — i.e. (y_pre - y) * (1 - y_pre) * y_pre * x —
        and then averaged over the batch axis.

        :param y: training labels, shape (n_samples, 1)
        :param y_pre: predictions, shape (n_samples, 1)
        :param x: feature matrix, shape (n_samples, dim)
        :return: gradient vector, shape (dim,)
        """
        delta = (y_pre - y) * (1 - y_pre) * y_pre
        return np.mean(delta * x, axis=0)


class Ttrl:
    """
    FTRL-Proximal online learner.

    Maintains per-coordinate accumulators z (adjusted gradient sums) and
    n (squared-gradient sums) and derives the weights lazily from them on
    every update.
    """
    def __init__(self, dim, l1, l2, alpha, beta=1, decisionfunc=LrCls):
        """
        :param dim: number of features
        :param l1: L1 regularization strength on w (lambda_1)
        :param l2: L2 regularization strength on w (lambda_2)
        :param alpha: per-coordinate learning-rate scale
        :param beta: learning-rate smoothing term
        :param decisionfunc: class providing fn_sigmoid / loss / grad
        """
        self.dim = dim
        self.l1 = l1
        self.l2 = l2
        self.decisionfunc = decisionfunc
        self.alpha = alpha
        self.beta = beta
        # FTRL state: z accumulates gradients minus sigma * w, n accumulates
        # squared gradients; w is recomputed from (z, n) on every update.
        self.z_value = np.zeros(dim)
        self.n_value = np.zeros(dim)
        self.w_value = np.zeros(dim)

    def predict(self, x):
        """
        :param x: feature matrix
        :return: predictions from the decision function
        """
        return self.decisionfunc.fn_sigmoid(self.w_value, x)

    def update(self, x, y):
        """
        One FTRL step on a mini-batch: recompute the weights from (z, n),
        then accumulate the batch gradient into (z, n).

        :param x: mini-batch feature matrix
        :param y: mini-batch labels
        :return: loss on the batch (evaluated with the recomputed weights)
        """
        # Closed-form per-coordinate weight: 0 when |z_i| <= l1 (this is
        # what induces sparsity), otherwise the proximal solution.
        self.w_value = np.array(
            [0 if np.abs(self.z_value[i]) <= self.l1
             else (np.sign(self.z_value[i]) * self.l1 - self.z_value[i]) /
                  (self.l2 +
                   (self.beta + np.sqrt(self.n_value[i])) / self.alpha)
             for i in range(self.dim)])
        y_pre = self.predict(x)
        grad_value = self.decisionfunc.grad(y, y_pre, x)
        # sigma = (sqrt(n + g^2) - sqrt(n)) / alpha: the per-coordinate
        # learning-rate decrement for this step.
        sigma = (np.sqrt(self.n_value + grad_value * grad_value) -
                 np.sqrt(self.n_value)) / self.alpha
        self.z_value += grad_value - sigma * self.w_value
        self.n_value += grad_value * grad_value
        return self.decisionfunc.loss(y, y_pre)

    def train(self, corpus_generator, verbos=False, epochs=100, batch=64):
        """
        Mini-batch training loop.

        BUG FIX: the trailing partial batch is now flushed only when it is
        non-empty. Previously, when the dataset size was an exact multiple
        of `batch`, empty arrays were passed to update()/predict() (crash
        on np.dot with an empty operand) and the verbose branch divided by
        len([]) == 0.

        :param corpus_generator: iterable of (x, y) pairs; must be
            re-iterable across epochs (e.g. a list)
        :param verbos: if True, write per-batch loss to stderr
        :param epochs: number of passes over the data
        :param batch: mini-batch size
        :return: None
        """
        total = 0
        for itr in range(epochs):
            if verbos:
                sys.stderr.write('=' * 100 + '\n')
                sys.stderr.write('Epoch = {}\n'.format(itr))
            mini_batch_x = []
            mini_batch_y = []
            num = 0
            for train_x, train_y in corpus_generator:
                num += 1
                mini_batch_x.append(train_x)
                mini_batch_y.append(train_y)
                if len(mini_batch_x) >= batch:
                    self._flush_batch(mini_batch_x, mini_batch_y,
                                      verbos, num, total)
                    mini_batch_x = []
                    mini_batch_y = []
            # Flush the remainder only if anything was left over.
            if mini_batch_x:
                self._flush_batch(mini_batch_x, mini_batch_y,
                                  verbos, num, total)
            if total == 0:
                total = num

    def _flush_batch(self, batch_x, batch_y, verbos, num, total):
        """Run one update on the accumulated mini-batch; optionally log loss."""
        x_arr = np.array(batch_x)
        y_arr = np.array(batch_y)
        self.update(x=x_arr, y=y_arr)
        if verbos:
            y_pre = self.predict(x=x_arr)
            train_loss = (self.decisionfunc.loss(y=y_arr, y_pre=y_pre)
                          / len(batch_x))
            sys.stderr.write('{}/{} train loss: {}\n'.
                             format(num, total, train_loss))


def lr_reg(train_data, test_x):
    """
    Train an FTRL model with the squared-error (regression) objective and
    print the learned weights plus predictions for test_x.

    Fixes the docstring, which previously documented nonexistent
    parameters `x` and `y`.

    :param train_data: iterable of (x, y) pairs; x is a feature vector
    :param test_x: feature matrix to predict on after training
    :return: None
    """
    ftrl = Ttrl(dim=len(train_data[0][0]), l1=0.0, l2=0.0, alpha=0.01,
                decisionfunc=LrReg)
    ftrl.train(corpus_generator=train_data)
    print('w: {}'.format(ftrl.w_value))
    pre = ftrl.predict(x=test_x)
    print('pre: {}'.format(pre))


def load_data(size=1000, weight=None, b=13):
    """
    Build a synthetic linear-regression dataset, min-max scaled to [0, 1].

    :param size: number of samples (feature values are also drawn in [0, size))
    :param weight: column vector of true weights for the two features;
        defaults to [[3], [-25]]
    :param b: true intercept
    :return: (features, labels) arrays of shape (size, 2) and (size, 1)
    """
    # Avoid the mutable-default-argument anti-pattern; the previous
    # `weight=[[3], [-25]]` default object was shared across calls.
    if weight is None:
        weight = [[3], [-25]]

    train_x = [[np.random.randint(low=0, high=size),
                np.random.randint(low=0, high=size)]
               for _ in range(size)]

    # BUG FIX: draw one noise value per sample. `np.random.normal()`
    # without a size returns a single scalar, so every label received the
    # exact same offset instead of independent noise.
    train_y = np.dot(train_x, weight) + b + np.random.normal(size=(size, 1))

    train_data = []
    for idx, mid_arr in enumerate(train_x):
        # Append the scalar label so rows stay homogeneous (appending the
        # 1-element ndarray made the matrix ragged on modern NumPy).
        mid_arr.append(train_y[idx, 0])
        train_data.append(mid_arr)

    # Scale every column (features and label) to [0, 1].
    min_max_scaler = MinMaxScaler()
    train_data = min_max_scaler.fit_transform(train_data)

    return np.array(train_data)[:, :2], np.array(train_data)[:, 2:]
    # return train_x, train_y


def sklearn_lr(x, y):
    """
    Fit an ordinary least-squares baseline with scikit-learn, print its
    coefficients and a couple of sample predictions.

    :param x: feature matrix
    :param y: target values
    :return: predictions for all of x
    """
    model = LinearRegression()
    model.fit(x, y)

    print('lr.coef_[0]: {}'.format(model.coef_[0]))
    print('lr.intercept_[0]: {}'.format(model.intercept_[0]))

    sample_pre = model.predict(x[:2])
    print('y: {}, \ny_pre: {}'.format(y[:2], sample_pre))

    return model.predict(x)


def scores(y, y_pre):
    """
    Sum-of-squared-errors score between targets and predictions.

    :param y: true values
    :param y_pre: predicted values
    :return: sum((y - y_pre)^2)
    """
    return np.square(y - y_pre).sum()


def tf_kera_nn(x, y):
    """
    Fit a single fully-connected (linear) layer with Keras as a baseline.

    :param x: feature matrix
    :param y: target values
    :return: predictions for all of x
    """
    import tensorflow as tf

    # One dense layer; the first (linear) layer must declare the input
    # dimension. L1/L2 regularization could be added here via
    # kernel_regularizer=tf.keras.regularizers.l2(0.01).
    net = tf.keras.Sequential()
    net.add(tf.keras.layers.Dense(units=len(y[0]), input_dim=len(x[0])))

    # The optimizer and loss function are swappable here.
    net.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.05),
                loss=tf.keras.losses.mean_squared_error)

    net.fit(np.array(x), np.array(y), epochs=100, batch_size=32)

    # Learned parameters.
    print('-' * 60)
    print('weight: {}'.format(net.weights[0].numpy()))
    print('weight: {}'.format(net.get_weights()))

    sample_pre = net.predict(x[:2])
    print('y: {}, \ny_pre: {}'.format(y[:2], sample_pre))

    # Model check: predictions over the full input.
    return net.predict(x)


def run():
    """
    Build a synthetic dataset and score the sklearn and Keras linear
    baselines on the training split.
    """
    x, y = load_data()

    train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.8,
                                                        random_state=0)

    # scikit-learn linear-regression baseline
    lr_pre = sklearn_lr(train_x, train_y)
    sklearn_scores = scores(train_y, lr_pre)
    print('sklearn_scores: {}'.format(sklearn_scores))

    # Keras single-layer baseline
    tf_pre = tf_kera_nn(train_x, train_y)
    tf_scores = scores(train_y, tf_pre)
    print('tf_scores: {}'.format(tf_scores))

    # Disabled demo of the book's FTRL regressor:
    # test_x = x[0]
    # train_data = [[x[idx], num] for idx, num in enumerate(y)]
    # lr_reg(train_data, test_x=test_x)
    # print('test: {}, {}'.format(test_x, train_data[0][1]))


# Entry point: run the baseline comparison when executed as a script.
if __name__ == '__main__':
    run()
