# -*- coding: utf-8 -*-
# !/usr/bin/python3
"""
Author :      wu
Description :
"""

from datetime import datetime
import os

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import layers, losses, metrics, optimizers

# Allow duplicate OpenMP runtimes (common workaround for MKL/libomp clashes).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Ground-truth weights and bias used to synthesize the linear-regression data.
W0 = tf.constant([[2.0], [-3.0]])
B0 = tf.constant([[3.0]])


# Dataset generation
def generate_dataset_lr():
    """Generate a noisy linear-regression dataset y = x @ W0 + B0 + noise.

    Returns:
        (x, y): x is a [400, 2] tensor sampled uniformly from [-10, 10);
        y is the ground-truth linear response plus Gaussian noise
        (stddev=2.0). Also shows a scatter plot of each feature vs. y.
    """
    # W0/B0 are module-level constants that are only read here,
    # so the original `global W0, B0` declaration was unnecessary.
    n = 400
    x = tf.random.uniform([n, 2], minval=-10, maxval=10)
    y = x @ W0 + B0 + tf.random.normal([n, 1], stddev=2.0, mean=0)

    # Visualize each feature against the target.
    ax1 = plt.subplot(121)
    ax1.scatter(x[:, 0], y[:, 0], c="b")
    plt.xlabel("x1")
    plt.ylabel("y", rotation=0)

    ax2 = plt.subplot(122)
    ax2.scatter(x[:, 1], y[:, 0], c="g")
    plt.xlabel("x2")
    plt.ylabel("y", rotation=0)

    plt.show()
    plt.close()

    return x, y


def generate_dataset_dnn():
    """Build a 2-D binary-classification dataset of two concentric rings.

    Returns:
        (x, y): x is a [4000, 2] tensor of points (positives first, then
        negatives); y is a [4000, 1] tensor of labels — 1.0 for the inner
        ring (radius ~5), 0.0 for the outer ring (radius ~8).
    """
    # Number of positive / negative samples
    num_pos, num_neg = 2000, 2000

    # Positive class: points scattered around a ring of radius ~5.
    radius_pos = tf.random.truncated_normal([num_pos, 1], 0.0, 1.0) + 5.0
    angle_pos = tf.random.uniform([num_pos, 1], 0.0, 2 * np.pi)
    x_pos = tf.concat([radius_pos * tf.cos(angle_pos), radius_pos * tf.sin(angle_pos)], axis=1)
    y_pos = tf.ones_like(radius_pos)

    # Negative class: points scattered around a ring of radius ~8.
    radius_neg = tf.random.truncated_normal([num_neg, 1], 0.0, 1.0) + 8.0
    angle_neg = tf.random.uniform([num_neg, 1], 0.0, 2 * np.pi)
    x_neg = tf.concat([radius_neg * tf.cos(angle_neg), radius_neg * tf.sin(angle_neg)], axis=1)
    y_neg = tf.zeros_like(radius_neg)

    # Stack both classes: positives first, then negatives.
    x = tf.concat([x_pos, x_neg], axis=0)
    y = tf.concat([y_pos, y_neg], axis=0)

    # Visualization (display disabled; the figure is built then closed).
    plt.scatter(x_pos[:, 0].numpy(), x_pos[:, 1].numpy(), c="r")
    plt.scatter(x_neg[:, 0].numpy(), x_neg[:, 1].numpy(), c="g")
    plt.legend(["positive", "negative"])
    # plt.show()
    plt.close()

    return x, y


def build_lr_model():
    """Create a single-unit linear layer wired for 2 input features.

    The MSE loss function and an SGD optimizer are stashed as attributes
    on the layer so the training loop can reach them via the model object.
    """
    linear = layers.Dense(units=1)
    linear.build(input_shape=(2, ))
    linear.loss_func = losses.mean_squared_error
    linear.optimizer = optimizers.SGD(learning_rate=0.01)

    return linear


@tf.function
def train_steps(model, feature, label):
    """Run one gradient-descent step on `model`; return the batch loss."""
    with tf.GradientTape() as tape:
        y_pred = model(feature)
        loss = model.loss_func(tf.reshape(label, [-1]), tf.reshape(y_pred, [-1]))

    # Differentiate the loss and apply the update with the attached optimizer.
    gradients = tape.gradient(loss, model.variables)
    model.optimizer.apply_gradients(zip(gradients, model.variables))

    return loss


def train_lr():
    """Train the linear-regression model and plot the fitted lines.

    Generates the dataset, trains for 10 epochs with mini-batches of 8,
    logs the loss every 2 epochs, then plots the learned per-feature
    lines against the samples.
    """
    x, y = generate_dataset_lr()
    ds = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(buffer_size=10)\
        .batch(8).prefetch(tf.data.experimental.AUTOTUNE)
    model = build_lr_model()
    epochs = 10
    for epoch in tf.range(1, epochs + 1):
        for feature, label in ds:
            loss = train_steps(model, feature, label)
        if epoch % 2 == 0:
            # Pass tensors directly so tf.print renders their values;
            # str.format on a Tensor would embed its repr instead.
            tf.print("epoch =", epoch, ", loss =", loss)

    # Learned kernel (2x1) and bias (1,).
    w, b = model.variables
    ax1 = plt.subplot(121)
    ax1.scatter(x[:, 0], y[:, 0], c="b", label="samples")
    ax1.plot(x[:, 0], w[0] * x[:, 0] + b[0], "-r", label="model", linewidth=5)
    plt.xlabel("x1")
    plt.ylabel("y", rotation=0)
    plt.legend()

    ax2 = plt.subplot(122)
    # Fixed typo in legend label (was "smaples").
    ax2.scatter(x[:, 1], y[:, 0], c="g", label="samples")
    ax2.plot(x[:, 1], w[1] * x[:, 1] + b[0], "-r", label="model", linewidth=5)
    plt.xlabel("x2")
    plt.ylabel("y", rotation=0)
    plt.legend()

    plt.show()


class DNNModel(tf.Module):
    """Three-layer MLP binary classifier with a built-in training step."""
    def __init__(self, name=None):
        super(DNNModel, self).__init__(name=name)
        # Two hidden ReLU layers feeding a single sigmoid output unit.
        self.dense_1 = layers.Dense(4, activation="relu")
        self.dense_2 = layers.Dense(8, activation="relu")
        self.dense_3 = layers.Dense(1, activation="sigmoid")
        self.loss_func = losses.binary_crossentropy
        self.optimizer = optimizers.Adam(learning_rate=0.01)
        self.metric_func = metrics.binary_accuracy

    # Forward pass
    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 2], dtype=tf.float32)])
    def __call__(self, x):
        # Fixed signature: batches of 2-D float32 points -> sigmoid probability.
        x = self.dense_1(x)
        x = self.dense_2(x)
        y = self.dense_3(x)

        return y

    @tf.function
    def train_steps(self, feature, label):
        """Run one Adam update; return (loss, accuracy) for the batch."""
        with tf.GradientTape() as tape:
            prediction = self.__call__(feature)
            loss = self.loss_func(tf.reshape(label, [-1]), tf.reshape(prediction, [-1]))

        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))

        metric = self.metric_func(tf.reshape(label, [-1]), tf.reshape(prediction, [-1]))

        return loss, metric


def train_dnn():
    """Train the DNN ring classifier and plot its predicted classes.

    Trains for 18 epochs with mini-batches of 32, logs loss/accuracy
    every 2 epochs, then scatters the points colored by the model's
    predicted class (threshold 0.5).
    """
    x, y = generate_dataset_dnn()
    ds = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(buffer_size=4000)\
        .batch(batch_size=32).prefetch(tf.data.experimental.AUTOTUNE)

    model = DNNModel()
    epochs = 18
    for epoch in tf.range(1, epochs + 1):
        for feature, label in ds:
            loss, acc = model.train_steps(feature, label)
        if epoch % 2 == 0:
            # Pass tensors directly so tf.print renders their values;
            # str.format on a Tensor would embed its repr instead.
            tf.print("epoch:", epoch, ", loss:", loss, ", accuracy:", acc)

    tf.print("train done")

    # Run inference once and reuse it for both masks
    # (the original called model(x) twice).
    y_prob = tf.squeeze(model(x))
    xp_pre = tf.boolean_mask(x, y_prob >= 0.5, axis=0)
    xn_pre = tf.boolean_mask(x, y_prob < 0.5, axis=0)
    plt.scatter(xp_pre[:, 0].numpy(), xp_pre[:, 1].numpy(), label="positive", c="r")
    plt.scatter(xn_pre[:, 0].numpy(), xn_pre[:, 1].numpy(), label="negative", c="g")
    plt.legend()
    plt.title("y_pred")
    plt.show()


def main():
    """Entry point: run the DNN demo (linear-regression demo disabled)."""
    # train_lr()
    train_dnn()


if __name__ == "__main__":
    main()
