import tensorflow as tf
from tensorflow.keras import layers, losses, optimizers, metrics
import numpy as np
from utils.time_print import printbar


class LR(object):
    """
    Linear-regression demo.

    Synthesizes y = x @ w0 + b0 + Gaussian noise, then fits a single
    Dense(1) layer with mini-batch SGD fed through a tf.data pipeline.
    """
    def __init__(self):
        # Number of samples
        N = 400
        buffer_size = 100
        batch_size = 10
        # Ground-truth parameters used to synthesize the data.
        self.w0 = tf.constant([[2.0], [-3.0]])
        self.b0 = tf.constant([[3.0]])
        self.x = tf.random.uniform([N, 2], minval=-10, maxval=10)
        # Linear response plus Gaussian noise (stddev=2.0).
        self.y = tf.matmul(self.x, self.w0) + self.b0 + tf.random.normal([N, 1], mean=0.0, stddev=2.0)
        # Build the input pipeline: shuffle -> batch -> prefetch.
        self.ds = tf.data.Dataset.from_tensor_slices((self.x, self.y)).shuffle(buffer_size=buffer_size).\
            batch(batch_size=batch_size).prefetch(tf.data.experimental.AUTOTUNE)
        self.model = self.tf_model()

    @staticmethod
    def tf_model(input_shape=(2, ), learning_rate=0.001):
        """
        Build a single Dense(1) layer and attach a loss function and an
        SGD optimizer to it as attributes (the layer itself is the model).

        :param input_shape: feature shape used to build the layer's weights
        :param learning_rate: SGD learning rate
        :return: the built Dense layer with `loss_func` and `optimizer` attached
        """
        model = layers.Dense(units=1)
        model.build(input_shape=input_shape)
        model.loss_func = losses.mean_squared_error
        model.optimizer = optimizers.SGD(learning_rate=learning_rate)
        return model

    @tf.function
    def train_step(self, features, labels):
        """
        Run one gradient-descent step on a single batch.

        :param features: batch of inputs, shape (batch, 2)
        :param labels: batch of targets, shape (batch, 1)
        :return: scalar MSE loss for this batch
        """
        with tf.GradientTape() as tape:
            predictions = self.model(features)
            loss = self.model.loss_func(y_true=tf.reshape(labels, [-1]), y_pred=tf.reshape(predictions, [-1]))

        # Back-propagate and apply the update.
        # Use trainable_variables (consistent with ClassDnn.train_step);
        # for this plain Dense layer it equals `variables`.
        grads = tape.gradient(loss, self.model.trainable_variables)
        self.model.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        return loss

    def train_model(self, epochs):
        """
        Train the model, logging loss and learned parameters every 50 epochs.

        :param epochs: number of passes over the dataset
        :return: None
        """
        for epoch in tf.range(1, epochs + 1):
            loss = tf.constant(0.0)
            for features, labels in self.ds:
                loss = self.train_step(features=features, labels=labels)

            if epoch % 50 == 0:
                printbar()
                tf.print('epoch = ', epoch, ';loss = ', loss)
                tf.print('w = ', self.model.variables[0], ';b = ', self.model.variables[1])

    def main(self):
        """
        Entry point: train the linear model for 200 epochs.
        """
        self.train_model(epochs=200)


class DnnModel(tf.Module):
    """Fully-connected binary classifier: 2 -> 4 -> 8 -> 1 (sigmoid output)."""

    def __init__(self, name=None):
        super(DnnModel, self).__init__(name=name)
        # Two ReLU hidden layers, then a sigmoid unit producing a probability.
        self.dense1 = layers.Dense(4, activation='relu')
        self.dense2 = layers.Dense(8, activation='relu')
        self.dense3 = layers.Dense(1, activation='sigmoid')

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 2], dtype=tf.float32)])
    def __call__(self, x):
        """Forward pass: (batch, 2) float32 -> (batch, 1) probability."""
        hidden = self.dense2(self.dense1(x))
        return self.dense3(hidden)


class ClassDnn(object):
    """
    DNN binary classification.

    Positive samples lie on a small noisy ring (mean radius r_p), negative
    samples on a larger one (mean radius r_n); a small DNN separates them.
    """
    def __init__(self):
        buffer_size = 4000
        batch_size = 100
        self.n_positive = 2000
        self.n_negative = 2000
        # Mean radii of the positive / negative rings.
        self.r_p = 5.0
        self.r_n = 8.0
        self.load_dataset()
        # Input pipeline: shuffle -> batch -> prefetch.
        self.ds = tf.data.Dataset.from_tensor_slices((self.x, self.y)).shuffle(buffer_size=buffer_size).\
            batch(batch_size=batch_size).prefetch(tf.data.experimental.AUTOTUNE)
        self.model = DnnModel()
        self.tf_model()

    def load_dataset(self):
        """
        Generate the synthetic two-ring dataset in polar coordinates.
        Sets self.x (features, shape (n_p + n_n, 2)) and self.y (labels,
        1.0 for positive, 0.0 for negative).
        :return: None
        """
        # Positive samples: small ring (radius ~ N(r_p, 1)).
        r_p = self.r_p + tf.random.normal(shape=[self.n_positive, 1], mean=0, stddev=1)
        theta_p = tf.random.uniform(shape=[self.n_positive, 1], minval=0, maxval=2 * np.pi)
        self.x_p = tf.concat([r_p * tf.cos(theta_p), r_p * tf.sin(theta_p)], axis=1)
        self.y_p = tf.ones_like(r_p)

        # Negative samples: large ring (radius ~ N(r_n, 1)).
        r_n = self.r_n + tf.random.normal(shape=[self.n_negative, 1], mean=0, stddev=1)
        # Fix: shape must use n_negative (was n_positive; only worked because
        # both counts happen to be equal).
        theta_n = tf.random.uniform(shape=[self.n_negative, 1], minval=0, maxval=2 * np.pi)
        self.x_n = tf.concat([r_n * tf.cos(theta_n), r_n * tf.sin(theta_n)], axis=1)
        # Fix: negative class must be labeled 0 (was tf.ones_like, which made
        # every label 1 and the classification task degenerate).
        self.y_n = tf.zeros_like(r_n)

        self.x = tf.concat([self.x_p, self.x_n], axis=0)
        self.y = tf.concat([self.y_p, self.y_n], axis=0)

    def tf_model(self, learning_rate=0.001):
        """
        Attach loss, metric and optimizer to the model.
        :param learning_rate: Adam learning rate
        :return: None
        """
        self.model.loss_func = losses.binary_crossentropy
        self.model.metric_func = metrics.binary_accuracy
        self.model.optimizer = optimizers.Adam(learning_rate=learning_rate)

    @tf.function
    def train_step(self, features, labels):
        """
        Run one optimization step on a single batch.

        :param features: batch of inputs, shape (batch, 2)
        :param labels: batch of 0/1 targets, shape (batch, 1)
        :return: (loss, metric) for this batch
        """
        with tf.GradientTape() as tape:
            predict = self.model(features)
            loss = self.model.loss_func(tf.reshape(labels, [-1]), tf.reshape(predict, [-1]))
        grads = tape.gradient(loss, self.model.trainable_variables)
        self.model.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))

        metric = self.model.metric_func(tf.reshape(labels, [-1]), tf.reshape(predict, [-1]))

        return loss, metric

    def train_model(self, epochs):
        """
        Train the model, logging loss and accuracy every 10 epochs.

        :param epochs: number of passes over the dataset
        :return: None
        """
        for epoch in tf.range(1, epochs + 1):
            loss, metric = tf.constant(0.0), tf.constant(0.0)
            for features, labels in self.ds:
                loss, metric = self.train_step(features=features, labels=labels)

            if epoch % 10 == 0:
                printbar()
                tf.print('epoch = ', epoch, ' ;loss = ', loss, ' ;accuracy = ', metric)

    def main(self):
        """
        Entry point: train the classifier for 60 epochs.
        """
        self.train_model(epochs=60)


def run():
    """Script entry point: train the DNN classifier (LR demo kept for reference)."""
    # lr = LR()
    # lr.main()
    classifier = ClassDnn()
    classifier.main()


# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    run()
