# -*- coding: utf-8 -*-
#!/usr/bin/python3
"""
Author :      wu
Description :
"""

import os
from datetime import datetime

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Ground-truth weight matrix and bias used to synthesize the regression data.
W0 = tf.constant([[2.0], [-3.0]])
B0 = tf.constant([[3.0]])


# Generate the synthetic linear-regression dataset.
def generate_dataset_lr(n=400, noise_stddev=2.0):
    """Draw ``n`` samples from y = x @ W0 + B0 plus Gaussian noise.

    Args:
        n: number of samples to generate.
        noise_stddev: standard deviation of the additive Gaussian noise.

    Returns:
        Tuple ``(x, y)`` of float tensors with shapes (n, 2) and (n, 1).
    """
    # W0/B0 are module-level constants that are only read here,
    # so no `global` declaration is needed.
    x = tf.random.uniform([n, 2], minval=-10, maxval=10)
    y = x @ W0 + B0 + tf.random.normal([n, 1], stddev=noise_stddev, mean=0)

    # Visualize each input feature against the target.
    ax1 = plt.subplot(121)
    ax1.scatter(x[:, 0], y[:, 0], c="b")
    plt.xlabel("x1")
    plt.ylabel("y", rotation=0)

    ax2 = plt.subplot(122)
    ax2.scatter(x[:, 1], y[:, 0], c="g")
    plt.xlabel("x2")
    plt.ylabel("y", rotation=0)

    # plt.show()

    return x, y


def generate_dataset_dnn(n_positive=2000, n_negative=2000):
    """Generate a 2-D binary-classification dataset of two concentric rings.

    Positive samples lie on a ring of radius ~5 and negative samples on a
    ring of radius ~8, each with truncated-normal radial noise.

    Args:
        n_positive: number of positive (inner-ring) samples.
        n_negative: number of negative (outer-ring) samples.

    Returns:
        Tuple ``(x, y)``: ``x`` has shape (n_positive + n_negative, 2) and
        ``y`` holds the matching (n, 1) 1.0/0.0 labels.
    """
    # Positive samples: small ring (radius ~ 5).
    r_p = tf.random.truncated_normal([n_positive, 1], 0.0, 1.0) + 5.0
    theta_p = tf.random.uniform([n_positive, 1], 0.0, 2 * np.pi)
    xp = tf.concat([r_p * tf.cos(theta_p), r_p * tf.sin(theta_p)], axis=1)
    yp = tf.ones_like(r_p)

    # Negative samples: large ring (radius ~ 8).
    r_n = tf.random.truncated_normal([n_negative, 1], 0.0, 1.0) + 8.0
    theta_n = tf.random.uniform([n_negative, 1], 0.0, 2 * np.pi)
    xn = tf.concat([r_n * tf.cos(theta_n), r_n * tf.sin(theta_n)], axis=1)
    yn = tf.zeros_like(r_n)

    # Stack positives and negatives into one dataset.
    x = tf.concat([xp, xn], axis=0)
    y = tf.concat([yp, yn], axis=0)

    # Visualize the two classes.
    plt.scatter(xp[:, 0].numpy(), xp[:, 1].numpy(), c="r")
    plt.scatter(xn[:, 0].numpy(), xn[:, 1].numpy(), c="g")
    plt.legend(["positive", "negative"])
    # plt.show()

    return x, y


# Build a mini-batch data-pipeline iterator.
def data_iter(features, labels, batch_size=8):
    """Yield shuffled mini-batches of ``(features, labels)``.

    Args:
        features: tensor of samples, indexed along axis 0.
        labels: tensor of targets with the same leading dimension.
        batch_size: maximum samples per batch; the final batch may be
            smaller when the dataset size is not a multiple of it.

    Yields:
        ``(batch_features, batch_labels)`` tensor pairs.
    """
    num_examples = len(features)
    indices = np.random.permutation(num_examples)
    for start in range(0, num_examples, batch_size):
        # Python slicing clamps at the sequence end, so no min() is needed.
        batch_idx = indices[start:start + batch_size]
        yield tf.gather(features, batch_idx), tf.gather(labels, batch_idx)


# Linear-regression model trained with hand-rolled gradient descent.
class LinearRegression:
    """y = x @ w + b, fitted by minimizing half mean squared error."""

    def __init__(self, learning_rate=0.001):
        # Random initial weights; bias starts at zero with B0's shape (1, 1).
        self.w = tf.Variable(tf.random.normal(W0.shape))
        self.b = tf.Variable(tf.zeros_like(B0, dtype=tf.float32))
        # SGD step size, previously hard-coded as 0.001 in two places.
        self.learning_rate = learning_rate

    def __call__(self, x):
        """Forward pass: (n, 2) features -> (n, 1) predictions."""
        return x @ self.w + self.b

    def loss_func(self, y_true, y_pre):
        """Half mean squared error between targets and predictions."""
        return tf.reduce_mean((y_true - y_pre) ** 2 / 2)

    @tf.function
    def train_step(self, features, labels):
        """Run one SGD step on a mini-batch and return the batch loss."""
        with tf.GradientTape() as tape:
            predictions = self.__call__(features)
            loss = self.loss_func(labels, predictions)

        dloss_dw, dloss_db = tape.gradient(loss, [self.w, self.b])
        self.w.assign(self.w - self.learning_rate * dloss_dw)
        self.b.assign(self.b - self.learning_rate * dloss_db)

        return loss


def lr_train():
    """Fit LinearRegression on the synthetic dataset, logging every 2 epochs."""

    x, y = generate_dataset_lr()

    model = LinearRegression()
    n_epochs = 10
    for epoch in tf.range(1, n_epochs + 1):
        for batch_x, batch_y in data_iter(x, y, 8):
            loss = model.train_step(batch_x, batch_y)
        if epoch % 2 == 0:
            sep = "=" * 10
            tf.print(tf.strings.join([sep, datetime.now().strftime("%H:%M:%S"), sep]))
            tf.print("epoch:{}, loss:{}".format(epoch, loss))
            tf.print("w = {}\n b = {}".format(model.w.numpy(), model.b.numpy()))

    tf.print("=" * 10 + "train done" + "=" * 10)


class DNNModel(tf.Module):
    """Three-layer fully-connected binary classifier (2 -> 4 -> 8 -> 1)."""

    def __init__(self, name=None, learning_rate=0.001):
        """Create the layer variables.

        Args:
            name: optional tf.Module name scope.
            learning_rate: SGD step size (was previously hard-coded).
        """
        super(DNNModel, self).__init__(name=name)
        self.learning_rate = learning_rate
        self.w1 = tf.Variable(tf.random.truncated_normal([2, 4], dtype=tf.float32))
        self.b1 = tf.Variable(tf.zeros([1, 4], dtype=tf.float32))
        self.w2 = tf.Variable(tf.random.truncated_normal([4, 8], dtype=tf.float32))
        self.b2 = tf.Variable(tf.zeros([1, 8], dtype=tf.float32))
        self.w3 = tf.Variable(tf.random.truncated_normal([8, 1], dtype=tf.float32))
        # BUG FIX: was `tf.zeros(1, 1)`, which passes 1 as the *dtype*
        # positional and yields shape (1,). Make it a [1, 1] float32 zero
        # like the other biases.
        self.b3 = tf.Variable(tf.zeros([1, 1], dtype=tf.float32))

    # Forward pass.
    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 2], dtype=tf.float32)])
    def __call__(self, x):
        """Map (n, 2) inputs to (n, 1) sigmoid probabilities."""
        x = tf.nn.relu(x @ self.w1 + self.b1)
        x = tf.nn.relu(x @ self.w2 + self.b2)
        y = tf.nn.sigmoid(x @ self.w3 + self.b3)

        return y

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float32),
                                  tf.TensorSpec(shape=[None, 1], dtype=tf.float32)])
    def loss_func(self, y_true, y_pred):
        """Binary cross-entropy between 0/1 labels and predicted probabilities."""
        eps = 1e-7
        # Clip to avoid log(0) when predictions saturate at 0 or 1.
        y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)
        bce = - y_true * tf.math.log(y_pred) - (1 - y_true) * tf.math.log(1 - y_pred)

        return tf.reduce_mean(bce)

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float32),
                                  tf.TensorSpec(shape=[None, 1], dtype=tf.float32)])
    def metric_func(self, y_true, y_pred):
        """Accuracy at a 0.5 decision threshold."""
        y_pred = tf.where(y_pred > 0.5, tf.ones_like(y_pred, dtype=tf.float32),
                          tf.zeros_like(y_pred, dtype=tf.float32))
        # With 0/1 values, 1 - |diff| is 1 for a hit and 0 for a miss.
        acc = tf.reduce_mean(1 - tf.abs(y_true - y_pred))

        return acc

    @tf.function
    def train_steps(self, features, labels):
        """Run one SGD step on a mini-batch; return (loss, accuracy)."""
        # Compute the loss under the tape.
        with tf.GradientTape() as tape:
            prediction = self.__call__(features)
            loss = self.loss_func(labels, prediction)

        # Gradient descent on every trainable variable.
        grads = tape.gradient(loss, self.trainable_variables)
        for p, dloss_dp in zip(self.trainable_variables, grads):
            p.assign(p - self.learning_rate * dloss_dp)

        # Evaluation metric for logging.
        metric = self.metric_func(labels, prediction)

        return loss, metric


def dnn_train():
    """Train DNNModel on the two-rings dataset, logging every 10 epochs."""

    x, y = generate_dataset_dnn()
    model = DNNModel()
    n_epochs = 100
    for epoch in range(1, n_epochs + 1):
        for batch_x, batch_y in data_iter(x, y, 8):
            loss, metric = model.train_steps(batch_x, batch_y)
        if epoch % 10 == 0:
            sep = "=" * 10
            tf.print(tf.strings.join([sep, datetime.now().strftime("%H:%M:%S"), sep]))
            tf.print("epoch:{}, loss:{}, accuracy:{}".format(epoch, loss, metric))

    tf.print("=" * 10 + "train done" + "=" * 10)


def main():
    """Run the linear-regression demo, then the DNN classification demo."""

    lr_train()
    dnn_train()


if __name__ == "__main__":
    main()
