import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from utils.time_print import printbar


class LrModel(object):
    """Minimal linear-regression model: y = x @ w + b."""

    def __init__(self, w0, b0):
        # Weights start from a random draw shaped like w0; bias starts at zero.
        self.w = tf.Variable(tf.random.normal(w0.shape))
        self.b = tf.Variable(tf.zeros_like(b0, dtype=tf.float32))

    def __call__(self, x):
        # Forward pass: affine transform of the inputs.
        return tf.matmul(x, self.w) + self.b

    @staticmethod
    def loss_func(y_true, y_pre):
        # Half mean-squared-error (the 1/2 cancels the factor 2 in the gradient).
        squared_residual = (y_true - y_pre) ** 2
        return tf.reduce_mean(squared_residual / 2)


class DnnModel(tf.Module):
    """DNN binary-classification model (2 -> 4 -> 8 -> 1, sigmoid output)."""

    def __init__(self, name=None):
        super(DnnModel, self).__init__(name=name)
        # Numerical floor keeping log() away from 0 in the loss.
        self.eps = 1e-7

        # Hidden layer 1: 2 inputs -> 4 units.
        self.w1 = tf.Variable(tf.random.truncated_normal([2, 4]), dtype=tf.float32)
        self.b1 = tf.Variable(tf.zeros([1, 4]), dtype=tf.float32)

        # Hidden layer 2: 4 -> 8 units.
        self.w2 = tf.Variable(tf.random.truncated_normal([4, 8]), dtype=tf.float32)
        self.b2 = tf.Variable(tf.zeros([1, 8]), dtype=tf.float32)

        # Output layer: 8 -> 1 probability (sigmoid applied in __call__).
        self.w3 = tf.Variable(tf.random.truncated_normal([8, 1]), dtype=tf.float32)
        self.b3 = tf.Variable(tf.zeros([1, 1]), dtype=tf.float32)

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 2], dtype=tf.float32)])
    def __call__(self, x):
        """Forward pass: two ReLU hidden layers, sigmoid output in (0, 1)."""
        hidden1 = tf.nn.relu(tf.matmul(x, self.w1) + self.b1)
        hidden2 = tf.nn.relu(tf.matmul(hidden1, self.w2) + self.b2)
        return tf.nn.sigmoid(tf.matmul(hidden2, self.w3) + self.b3)

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float32),
                                  tf.TensorSpec(shape=[None, 1], dtype=tf.float32)])
    def loss_func(self, y_true, y_pre):
        """Mean binary cross-entropy over the batch.

        Predictions are clipped to (eps, 1 - eps) so the logs stay finite.
        """
        clipped = tf.clip_by_value(y_pre, self.eps, 1.0 - self.eps)
        bce = -y_true * tf.math.log(clipped) - (1 - y_true) * tf.math.log(1 - clipped)
        return tf.reduce_mean(bce)

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float32),
                                  tf.TensorSpec(shape=[None, 1], dtype=tf.float32)])
    def metric_func(self, y_true, y_pre):
        """Evaluation metric: accuracy.

        Thresholds predictions at 0.5, then averages agreement with labels.
        :param y_true: ground-truth labels in {0, 1}, shape [None, 1]
        :param y_pre: predicted probabilities, shape [None, 1]
        :return: scalar accuracy in [0, 1]
        """
        hard = tf.where(y_pre > 0.5,
                        tf.ones_like(y_pre, dtype=tf.float32),
                        tf.zeros_like(y_pre, dtype=tf.float32))
        return tf.reduce_mean(1 - tf.abs(y_true - hard))


class LR(object):
    """Linear-regression demo.

    Synthesizes data y = x @ w0 + b0 + noise, then fits an LrModel to it
    with hand-rolled mini-batch gradient descent.
    """
    def __init__(self):
        # Number of samples to synthesize.
        self.N = 400
        # Ground-truth parameters used to generate the data.
        self.w0 = tf.constant([[2.0], [-3.0]])
        self.b0 = tf.constant([[3.0]])
        self.x, self.y = self.load_dataset()
        self.model = LrModel(w0=self.w0, b0=self.b0)

    def load_dataset(self):
        """Generate the synthetic dataset.

        :return: (x, y) — features uniform in [-10, 10), targets are the
            linear model output plus Gaussian noise (stddev=2.0).
        """
        x = tf.random.uniform([self.N, 2], minval=-10, maxval=10)
        y = x @ self.w0 + self.b0 + tf.random.normal([self.N, 1], mean=0.0, stddev=2.0)
        return x, y

    def explore(self):
        """Visual data exploration: scatter each feature against the target.

        :return: None (shows a matplotlib figure)
        """
        plt.figure(figsize=(12, 5))

        ax1 = plt.subplot(121)
        ax1.scatter(self.x[:, 0], self.y[:, 0], c='b')
        plt.xlabel('x1')
        plt.ylabel('y', rotation=0)

        ax2 = plt.subplot(122)
        ax2.scatter(self.x[:, 1], self.y[:, 0], c='g')
        plt.xlabel('x2')
        plt.ylabel('y', rotation=0)
        plt.show()

    @staticmethod
    def data_iter(features, labels, batch_size=8):
        """Mini-batch iterator over a shuffled copy of the dataset.

        :param features: feature tensor, first axis indexes examples
        :param labels: label tensor aligned with features
        :param batch_size: examples per batch (last batch may be smaller)
        :return: yields (feature_batch, label_batch) pairs
        """
        num_examples = len(features)
        indices = list(range(num_examples))
        np.random.shuffle(indices)
        for i in range(0, num_examples, batch_size):
            batch_indices = indices[i: min(i + batch_size, num_examples)]
            # tf.gather: params is the indexed tensor, indices is a 1-D index list.
            yield tf.gather(params=features, indices=batch_indices), tf.gather(params=labels, indices=batch_indices)

    @tf.function
    def train_step(self, features, labels, learning_rate=0.001):
        """One gradient-descent step on a single mini-batch.

        :param features: batch of inputs
        :param labels: batch of targets
        :param learning_rate: step size (fixed spelling of the old
            ``leanring_rate`` parameter)
        :return: scalar loss for this batch (computed before the update)
        """
        with tf.GradientTape() as tape:
            predictions = self.model(x=features)
            loss = self.model.loss_func(y_true=labels, y_pre=predictions)
        # Backward pass: gradients of the loss w.r.t. the parameters.
        dloss_dw, dloss_db = tape.gradient(loss, [self.model.w, self.model.b])
        # Gradient-descent update, in place.
        self.model.w.assign_sub(learning_rate * dloss_dw)
        self.model.b.assign_sub(learning_rate * dloss_db)
        return loss

    def train_model(self, epochs):
        """Train the model, logging progress every 50 epochs.

        :param epochs: total number of epochs to run
        :return: None
        """
        loss = tf.constant(0.0)  # defensive default in case the dataset is empty
        for epoch in tf.range(1, epochs + 1):
            for features, labels in self.data_iter(self.x, self.y, batch_size=10):
                loss = self.train_step(features=features, labels=labels)

            if epoch % 50 == 0:
                printbar()
                tf.print('epoch = ', epoch, ';loss = ', loss)
                tf.print('w = ', self.model.w, ';b = ', self.model.b)

    def explore_rs(self):
        """Visualize the fitted model against the samples, one panel per feature.

        :return: None (shows a matplotlib figure)
        """
        plt.figure(figsize=(12, 5))

        ax1 = plt.subplot(121)
        ax1.scatter(self.x[:, 0], self.y[:, 0], c='b', label='samples')
        ax1.plot(self.x[:, 0], self.model.w[0] * self.x[:, 0] + self.model.b[0], '-r', label='model')
        plt.xlabel('x1')
        plt.ylabel('y', rotation=0)

        ax2 = plt.subplot(122)
        ax2.scatter(self.x[:, 1], self.y[:, 0], c='g')
        ax2.plot(self.x[:, 1], self.model.w[1] * self.x[:, 1] + self.model.b[0], '-r', label='model')
        plt.xlabel('x2')
        plt.ylabel('y', rotation=0)
        plt.show()

    def main(self):
        """Train for 200 epochs, then plot the fitted lines."""
        self.train_model(epochs=200)
        self.explore_rs()


class ClassDnn(object):
    """DNN binary-classification demo.

    Positive samples lie on a small ring, negative samples on a large ring;
    a DnnModel is trained to separate them with mini-batch gradient descent.
    """
    def __init__(self):
        self.n_positive = 2000
        self.n_negative = 2000
        # Mean radii of the positive (inner) and negative (outer) rings.
        self.r_p = 5.0
        self.r_n = 8.0
        self.load_dataset()
        self.model = DnnModel()

    def load_dataset(self):
        """Generate the two-ring dataset.

        Sets self.x_p / self.y_p (positives, label 1), self.x_n / self.y_n
        (negatives, label 0) and the concatenated self.x / self.y.
        :return: None
        """
        # Positive samples: small ring, radius ~ N(r_p, 1).
        r_p = self.r_p + tf.random.normal(shape=[self.n_positive, 1], mean=0, stddev=1)
        theta_p = tf.random.uniform(shape=[self.n_positive, 1], minval=0, maxval=2 * np.pi)
        self.x_p = tf.concat([r_p * tf.cos(theta_p), r_p * tf.sin(theta_p)], axis=1)
        self.y_p = tf.ones_like(r_p)

        # Negative samples: large ring, radius ~ N(r_n, 1).
        # BUGFIX: theta_n's shape previously used n_positive (only worked
        # because both counts are 2000), and the negative labels were ones —
        # identical to the positive labels — so there was nothing to learn.
        r_n = self.r_n + tf.random.normal(shape=[self.n_negative, 1], mean=0, stddev=1)
        theta_n = tf.random.uniform(shape=[self.n_negative, 1], minval=0, maxval=2 * np.pi)
        self.x_n = tf.concat([r_n * tf.cos(theta_n), r_n * tf.sin(theta_n)], axis=1)
        self.y_n = tf.zeros_like(r_n)

        self.x = tf.concat([self.x_p, self.x_n], axis=0)
        self.y = tf.concat([self.y_p, self.y_n], axis=0)

    def explore(self):
        """Visual data exploration: scatter both rings.

        :return: None (shows a matplotlib figure)
        """
        plt.figure(figsize=(6, 6))
        plt.scatter(self.x_p[:, 0].numpy(), self.x_p[:, 1].numpy(), c='r')
        plt.scatter(self.x_n[:, 0].numpy(), self.x_n[:, 1].numpy(), c='g')
        plt.legend(['positive', 'negative'])
        plt.show()

    @staticmethod
    def data_iter(features, labels, batch_size=8):
        """Mini-batch iterator over a shuffled copy of the dataset.

        :param features: feature tensor, first axis indexes examples
        :param labels: label tensor aligned with features
        :param batch_size: examples per batch (last batch may be smaller)
        :return: yields (feature_batch, label_batch) pairs
        """
        num_examples = len(features)
        indices = list(range(num_examples))
        np.random.shuffle(indices)
        for i in range(0, num_examples, batch_size):
            batch_indices = indices[i: min(i + batch_size, num_examples)]
            # tf.gather: params is the indexed tensor, indices is a 1-D index list.
            yield tf.gather(params=features, indices=batch_indices), tf.gather(params=labels, indices=batch_indices)

    @tf.function
    def train_step(self, features, labels, learning_rate=0.001):
        """One gradient-descent step on a single mini-batch.

        :param features: batch of inputs
        :param labels: batch of targets in {0, 1}
        :param learning_rate: step size
        :return: (loss, accuracy) for this batch, computed before the update
        """
        # Forward pass and loss.
        with tf.GradientTape() as tape:
            predictions = self.model(x=features)
            loss = self.model.loss_func(y_true=labels, y_pre=predictions)
        # Backward pass.
        grads = tape.gradient(loss, self.model.trainable_variables)
        # Gradient-descent update for every trainable variable.
        for param, dloss_dparam in zip(self.model.trainable_variables, grads):
            param.assign_sub(learning_rate * dloss_dparam)
        # Evaluation metric on this batch.
        metric = self.model.metric_func(y_true=labels, y_pre=predictions)

        return loss, metric

    # Backward-compatible alias for the old misspelled method name.
    tain_step = train_step

    def train_model(self, epochs):
        """Train the model, logging progress every 100 epochs.

        :param epochs: total number of epochs to run
        :return: None
        """
        for epoch in tf.range(1, epochs + 1):
            for features, labels in self.data_iter(features=self.x, labels=self.y, batch_size=100):
                loss, metric = self.train_step(features=features, labels=labels)

            if epoch % 100 == 0:
                printbar()
                tf.print('epoch = ', epoch, ' ;loss = ', loss, ' ;accuracy = ', metric)

    def main(self):
        """Train the classifier for 600 epochs."""
        self.train_model(epochs=600)


def run():
    """Entry point: run the DNN binary-classification demo.

    (The linear-regression demo can be run instead with ``LR().main()``.)
    """
    classifier = ClassDnn()
    classifier.main()


# Script entry point.
if __name__ == '__main__':
    run()
