# -*- coding: utf-8 -*-
# !/usr/bin/python3
"""
Author :      wu
Description :
"""

import os

import tensorflow as tf
from tensorflow.keras import layers, models, metrics, losses, optimizers
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split

# Allow duplicate OpenMP runtimes (common matplotlib/TF conflict on some installs).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Ground-truth weights (2 features -> 1 output) and bias used to synthesize
# the linear-regression dataset in generate_dataset_lr().
W0 = tf.constant([[2.0], [-3.0]])
B0 = tf.constant([[3.0]])


# 生成数据集
def generate_dataset_lr(n=400, noise_stddev=2.0):
    """Synthesize a linear-regression dataset y = x @ W0 + B0 + Gaussian noise.

    Args:
        n: number of samples to draw (default 400, the original fixed size).
        noise_stddev: standard deviation of the additive noise (default 2.0).

    Returns:
        Tuple (x, y) of tensors with shapes (n, 2) and (n, 1).
    """
    # W0 / B0 are module-level constants; reading them needs no `global`
    # statement (the original's `global W0, B0` was a no-op).
    x = tf.random.uniform([n, 2], minval=-10, maxval=10)
    y = x @ W0 + B0 + tf.random.normal([n, 1], stddev=noise_stddev, mean=0)

    # Visualize each input feature against the target.
    ax1 = plt.subplot(121)
    ax1.scatter(x[:, 0], y[:, 0], c="b")
    plt.xlabel("x1")
    plt.ylabel("y", rotation=0)

    ax2 = plt.subplot(122)
    ax2.scatter(x[:, 1], y[:, 0], c="g")
    plt.xlabel("x2")
    plt.ylabel("y", rotation=0)

    plt.show()
    plt.close()

    return x, y


def build_model_lr():
    """Build and compile a single-Dense-layer linear regression model.

    Returns:
        A compiled tf.keras Sequential model mapping (batch, 2) -> (batch, 1),
        using Adam / MSE loss / MAE metric.
    """
    tf.keras.backend.clear_session()

    model = models.Sequential()
    # Each sample is a flat vector of 2 features. The original
    # input_shape=(None, 2) declared a 3-D input (batch, None, 2), which does
    # not match the (n, 2) data produced by generate_dataset_lr().
    model.add(layers.Dense(1, input_shape=(2,)))
    model.summary()

    model.compile(optimizer="adam", loss="mse", metrics=["mae"])

    return model


def train_lr():
    """Fit the linear model on synthetic data, then plot samples vs. the fit."""
    features, targets = generate_dataset_lr()
    lr_model = build_model_lr()
    lr_model.fit(features, targets, epochs=200, batch_size=32)

    weight, bias = lr_model.variables

    # One subplot per input feature: scatter the samples and overlay the
    # learned line w[i] * x_i + b for that feature.
    for idx, (subplot_pos, color) in enumerate([(121, "b"), (122, "g")]):
        axis = plt.subplot(subplot_pos)
        axis.scatter(features[:, idx], targets[:, 0], c=color, label="samples")
        axis.plot(features[:, idx],
                  weight[idx] * features[:, idx] + bias[0],
                  "-r", linewidth=5, label="model")
        axis.legend()
        plt.xlabel("x%d" % (idx + 1))
        plt.ylabel("y", rotation=0)

    plt.show()


def generate_dataset_dnn():
    """Generate a 2-D binary-classification dataset of two concentric rings.

    Positive samples lie on a ring of radius ~5, negative samples on a ring
    of radius ~8 (both with truncated-normal radial jitter).

    Returns:
        Tuple (x, y): x has shape (4000, 2), y has shape (4000, 1) with
        1.0 for positives and 0.0 for negatives.
    """
    # Class sizes.
    n_positive, n_negative = 2000, 2000

    # Positive class: small ring, sampled in polar coordinates.
    radius_pos = tf.random.truncated_normal([n_positive, 1], 0.0, 1.0) + 5.0
    angle_pos = tf.random.uniform([n_positive, 1], 0.0, 2 * np.pi)
    xp = tf.concat([radius_pos * tf.cos(angle_pos),
                    radius_pos * tf.sin(angle_pos)], axis=1)
    yp = tf.ones_like(radius_pos)

    # Negative class: large ring.
    radius_neg = tf.random.truncated_normal([n_negative, 1], 0.0, 1.0) + 8.0
    angle_neg = tf.random.uniform([n_negative, 1], 0.0, 2 * np.pi)
    xn = tf.concat([radius_neg * tf.cos(angle_neg),
                    radius_neg * tf.sin(angle_neg)], axis=1)
    yn = tf.zeros_like(radius_neg)

    # Stack both classes into one dataset (positives first).
    x = tf.concat([xp, xn], axis=0)
    y = tf.concat([yp, yn], axis=0)

    # Visualize the two rings.
    plt.scatter(xp[:, 0].numpy(), xp[:, 1].numpy(), c="r")
    plt.scatter(xn[:, 0].numpy(), xn[:, 1].numpy(), c="g")
    plt.legend(["positive", "negative"])
    plt.show()
    plt.close()

    return x, y


class DNNNModel(models.Model):
    """Three-layer fully connected binary classifier with a custom train loop.

    The model owns its loss, optimizer, and train/valid metric objects so an
    outer epoch loop can drive `train_step` / `valid_step` directly and read
    or reset the metrics between epochs.
    """

    def __init__(self):
        super(DNNNModel, self).__init__()
        # Objective and optimizer for the custom loop.
        self.loss_func = losses.BinaryCrossentropy()
        self.optimizer = optimizers.Adam(learning_rate=0.01)
        # Running metrics; the caller resets these once per epoch.
        self.train_loss = metrics.Mean(name="train loss")
        self.train_metric = metrics.BinaryAccuracy(name="train accuracy")
        self.valid_loss = metrics.Mean(name="valid loss")
        self.valid_metric = metrics.BinaryAccuracy(name="valid accuracy")

    def build(self, input_shape):
        """Create the dense layers, then delegate to Model.build."""
        self.dense_1 = layers.Dense(4, activation="relu", name="dense_1")
        self.dense_2 = layers.Dense(8, activation="relu", name="dense_2")
        self.dense_3 = layers.Dense(1, activation="sigmoid", name="dense_3")

        super(DNNNModel, self).build(input_shape)

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 2], dtype=tf.float32)])
    def call(self, x):
        """Forward pass: (batch, 2) float32 -> (batch, 1) sigmoid probability."""
        hidden = self.dense_1(x)
        hidden = self.dense_2(hidden)
        return self.dense_3(hidden)

    @tf.function
    def train_step(self, feature, label):
        """Run one optimizer step on a batch and update the training metrics."""
        with tf.GradientTape() as tape:
            y_pred = self.call(feature)
            batch_loss = self.loss_func(label, y_pred)

        gradients = tape.gradient(batch_loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))

        self.train_loss.update_state(batch_loss)
        self.train_metric.update_state(label, y_pred)

    @tf.function
    def valid_step(self, feature, label):
        """Evaluate one batch (no weight update) and update the valid metrics."""
        y_pred = self.call(feature)
        batch_loss = self.loss_func(label, y_pred)

        self.valid_loss.update_state(batch_loss)
        self.valid_metric.update_state(label, y_pred)


def train_dnn():
    """Train DNNNModel on the two-rings dataset and plot its predictions.

    Runs a manual 100-epoch loop over tf.data pipelines, logging metrics
    every 10 epochs, then scatter-plots the points colored by the model's
    0.5-threshold prediction.
    """
    x, y = generate_dataset_dnn()
    train_x, valid_x, train_y, valid_y = train_test_split(x.numpy(), y.numpy(),
                                                          test_size=0.3, shuffle=True)

    # Pipeline order fixed: cache the raw elements first (so shuffling is
    # redone each epoch instead of one frozen order being cached), and put
    # prefetch LAST — the original `.prefetch(...).cache()` gained nothing
    # from prefetching.
    ds_train = tf.data.Dataset.from_tensor_slices((train_x, train_y)).cache()\
        .shuffle(1000).batch(16).prefetch(tf.data.experimental.AUTOTUNE)

    # Validation metrics are order-independent, so no shuffle is needed.
    ds_valid = tf.data.Dataset.from_tensor_slices((valid_x, valid_y))\
        .batch(16).cache().prefetch(tf.data.experimental.AUTOTUNE)

    tf.keras.backend.clear_session()
    model = DNNNModel()
    model.build(input_shape=(None, 2))
    model.summary()

    epochs = 100
    for epoch in tf.range(1, epochs + 1):
        for feature, label in ds_train:
            model.train_step(feature, label)

        for feature, label in ds_valid:
            model.valid_step(feature, label)

        logs = "epoch={}, loss:{}, accuracy:{}, valid_loss:{}, valid_accuracy:{}"
        if epoch % 10 == 0:
            tf.print(
                tf.strings.format(logs,
                                  (epoch, model.train_loss.result(), model.train_metric.result(),
                                   model.valid_loss.result(), model.valid_metric.result()
                                   )))
        # Reset the running metrics so each epoch's numbers are independent.
        model.train_loss.reset_state()
        model.train_metric.reset_state()
        model.valid_loss.reset_state()
        model.valid_metric.reset_state()

    # Run inference ONCE (the original called model(x) separately for each
    # mask) and split the points at the 0.5 decision threshold.
    probs = tf.squeeze(model(x))
    xp = tf.boolean_mask(x, probs >= 0.5, axis=0)
    xn = tf.boolean_mask(x, probs < 0.5, axis=0)

    plt.scatter(xp[:, 0].numpy(), xp[:, 1].numpy(), label="positive", c="r")
    plt.scatter(xn[:, 0].numpy(), xn[:, 1].numpy(), label="negative", c="g")
    plt.legend()
    plt.title("y_pre")
    plt.show()
    plt.close()


def main():
    """Script entry point: run the DNN classification demo."""
    # train_lr()  # linear-regression demo, disabled by default
    train_dnn()


if __name__ == "__main__":
    main()
