import tensorflow as tf
from tensorflow.keras import models, layers, losses, optimizers, metrics
import numpy as np
from utils.time_print import printbar


class LRSequential(object):
    """Linear regression fitted with a Keras Sequential model.

    Synthetic data is generated as y = x @ w0 + b0 + Gaussian noise, and a
    single Dense(1) layer (i.e. a linear model) is trained to recover w0/b0.
    """

    def __init__(self):
        # Number of samples in the synthetic dataset.
        self.N = 400
        self.batch_size = 10
        self.epochs = 200
        # Ground-truth weights and bias used to generate the data.
        self.w0 = tf.constant([[2.0], [-3.0]])
        self.b0 = tf.constant([[3.0]])
        self.x, self.y = self.load_dataset()
        self.model = self.tf_model()

    def load_dataset(self):
        """Generate the synthetic regression dataset.

        :return: (x, y) tensors of shape (N, 2) and (N, 1).
        """
        x = tf.random.uniform([self.N, 2], minval=-10, maxval=10)

        # Gaussian noise (stddev=2) keeps the fit non-trivial.
        y = x @ self.w0 + self.b0 + tf.random.normal([self.N, 1], mean=0.0, stddev=2.0)
        return x, y

    @staticmethod
    def tf_model():
        """Build the model: a single Dense layer, i.e. linear regression.

        :return: an uncompiled keras Sequential model.
        """
        tf.keras.backend.clear_session()

        model = models.Sequential()
        model.add(layers.Dense(1, input_shape=(2, )))
        return model

    def train_model(self):
        """Compile and fit the model, then print the learned parameters."""
        self.model.compile(optimizer='adam', loss='mse', metrics=['mae'])
        self.model.fit(self.x, self.y, batch_size=self.batch_size, epochs=self.epochs)

        tf.print('w = ', self.model.layers[0].kernel)
        tf.print('b = ', self.model.layers[0].bias)

    def main(self):
        # BUGFIX: model.summary() prints the table itself and returns None,
        # so wrapping it in tf.print() printed an extra "None" line.
        self.model.summary()
        self.train_model()


# Reset Keras' global graph/naming state before defining the model below.
tf.keras.backend.clear_session()
class DnnModel(models.Model):
    """A small fully-connected network for binary classification.

    Layers are created lazily in build(); the forward pass is traced as a
    graph with a fixed (None, 2) float32 input signature.
    """

    def __init__(self):
        super().__init__()

    def build(self, input_shape):
        """Create the three Dense layers once the input shape is known.

        :param input_shape: shape of the inputs this model will receive.
        :return: None
        """
        self.dense1 = layers.Dense(4, activation='relu', name='dense1')
        self.dense2 = layers.Dense(8, activation='relu', name='dense2')
        self.dense3 = layers.Dense(1, activation='sigmoid', name='dense3')
        super().build(input_shape)

    # Forward pass.
    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 2], dtype=tf.float32)])
    def call(self, x):
        hidden = self.dense2(self.dense1(x))
        return self.dense3(hidden)


class ClassDnn(object):
    """Train the DNN binary classifier on two concentric noisy rings.

    Positive samples lie on a ring of mean radius r_p, negative samples on a
    larger ring of mean radius r_n; the model learns to separate the two.
    """

    def __init__(self):
        self.buffer_size = 1000
        self.batch_size = 10
        self.optimizer = optimizers.Adam(learning_rate=0.01)
        self.loss_func = tf.keras.losses.BinaryCrossentropy()
        self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        self.train_metric = tf.keras.metrics.BinaryAccuracy(name='train_accuracy')
        self.valid_loss = tf.keras.metrics.Mean(name='valid_loss')
        self.valid_metric = tf.keras.metrics.BinaryAccuracy(name='valid_accuracy')

        self.n_positive = 2000
        self.n_negative = 2000
        self.r_p = 5.0  # mean radius of the positive ring
        self.r_n = 8.0  # mean radius of the negative ring
        self.x, self.y = self.load_dataset()

        # BUGFIX: split on the TOTAL sample count. load_dataset() shuffles
        # positives and negatives together, so the original slice at
        # n_positive * 3 // 4 only covered 1500 of the 4000 rows for training.
        n_total = self.n_positive + self.n_negative
        split = n_total * 3 // 4
        # BUGFIX: cache() the raw slices BEFORE shuffle(); caching after
        # shuffle would freeze the first epoch's order for all later epochs.
        self.ds_train = tf.data.Dataset.from_tensor_slices(
            (self.x[: split, :], self.y[: split, :])).cache().\
            shuffle(buffer_size=self.buffer_size).batch(batch_size=self.batch_size).\
            prefetch(tf.data.experimental.AUTOTUNE)
        self.ds_valid = tf.data.Dataset.from_tensor_slices(
            (self.x[split:, :], self.y[split:, :])).cache().\
            batch(batch_size=self.batch_size).prefetch(tf.data.experimental.AUTOTUNE)
        self.model = DnnModel()
        self.model.build(input_shape=(None, 2))

    def load_dataset(self):
        """Generate the two-ring dataset.

        :return: (features, labels) — features of shape (n, 2) and labels of
            shape (n, 1) with 1.0 for positives and 0.0 for negatives, jointly
            shuffled.
        """
        # Positive samples: small noisy ring.
        r_p = self.r_p + tf.random.normal(shape=[self.n_positive, 1], mean=0, stddev=1)
        theta_p = tf.random.uniform(shape=[self.n_positive, 1], minval=0, maxval=2 * np.pi)
        x_p = tf.concat([r_p * tf.cos(theta_p), r_p * tf.sin(theta_p)], axis=1)
        y_p = tf.ones_like(r_p)

        # Negative samples: large noisy ring.
        r_n = self.r_n + tf.random.normal(shape=[self.n_negative, 1], mean=0, stddev=1)
        # BUGFIX: use n_negative (not n_positive) for the negative angles.
        theta_n = tf.random.uniform(shape=[self.n_negative, 1], minval=0, maxval=2 * np.pi)
        x_n = tf.concat([r_n * tf.cos(theta_n), r_n * tf.sin(theta_n)], axis=1)
        # BUGFIX: negative labels must be 0. The original used ones_like,
        # which labeled every sample positive and made the binary task
        # degenerate (the model could score 100% by always predicting 1).
        y_n = tf.zeros_like(r_n)

        x = tf.concat([x_p, x_n], axis=0)
        y = tf.concat([y_p, y_n], axis=0)

        # Shuffle features and labels together by concatenating columns.
        data = tf.concat([x, y], axis=1)
        data = tf.random.shuffle(data)
        return data[:, :2], data[:, 2:]

    @tf.function
    def train_step(self, features, labels):
        """Run one optimization step on a single batch.

        :param features: batch of inputs, shape (batch, 2).
        :param labels: batch of targets, shape (batch, 1).
        :return: None (updates model weights and training metrics in place).
        """
        # Forward pass under the tape to record gradients of the loss.
        with tf.GradientTape() as tape:
            predictions = self.model(features)
            loss = self.loss_func(labels, predictions)
        # Backward pass: apply gradients to the trainable variables.
        grads = tape.gradient(loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))

        self.train_loss.update_state(loss)
        self.train_metric.update_state(y_true=labels, y_pred=predictions)

    # Backward-compatible alias for the old (misspelled) method name.
    tain_step = train_step

    @tf.function
    def valid_step(self, features, labels):
        """Evaluate one validation batch and update validation metrics.

        :param features: batch of inputs, shape (batch, 2).
        :param labels: batch of targets, shape (batch, 1).
        :return: None
        """
        predictions = self.model(features)
        batch_loss = self.loss_func(labels, predictions)

        self.valid_loss.update_state(batch_loss)
        self.valid_metric.update_state(labels, predictions)

    @tf.function
    def train_model(self, epochs):
        """Run the full training loop.

        :param epochs: number of passes over the training data.
        :return: None
        """
        for epoch in tf.range(1, epochs + 1):
            for features, labels in self.ds_train:
                self.train_step(features=features, labels=labels)

            for features, labels in self.ds_valid:
                self.valid_step(features=features, labels=labels)

            info = 'Epoch = {}, Loss: {}, Accuracy: {}, Valid Loss: {}, Valid Accuracy: {}'

            if epoch % 100 == 0:
                printbar()
                tf.print(tf.strings.format(info, (epoch, self.train_loss.result(), self.train_metric.result(),
                                                  self.valid_loss.result(), self.valid_metric.result())))

            # BUGFIX: reset the running metrics so each epoch reports its own
            # loss/accuracy instead of a cumulative average over all epochs.
            self.train_loss.reset_states()
            self.train_metric.reset_states()
            self.valid_loss.reset_states()
            self.valid_metric.reset_states()

    def main(self):
        self.train_model(epochs=1000)


def run():
    """Build the DNN binary classifier and run its training loop."""
    dnn_classifier = ClassDnn()
    dnn_classifier.main()


# Script entry point.
if __name__ == '__main__':
    run()
