import numpy as np
from mpl_toolkits import mplot3d
from matplotlib import pyplot as plt
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import layers, losses, metrics, optimizers
from python_ai.common.xcommon import *
import os

np.random.seed(777)
tf.random.set_seed(777)

# Sample count and training hyperparameters.
n = 400
batch_size = 64
n_epoch = 10
alpha = 0.01

# Synthetic regression data: Y = X @ w0 + b0 + Gaussian noise.
# NOTE: keep the uniform/normal call order — it fixes the seeded RNG stream.
X = tf.random.uniform([n, 2], minval=-10, maxval=10)
w0 = tf.constant([[2.0], [-3.0]])
b0 = tf.constant([[3.0]])
Y = X @ w0 + b0 + tf.random.normal([n, 1], mean=0.0, stddev=2.0)

# Standardize (z-score) features first, then targets.
mu = tf.reduce_mean(X, axis=0)
sigma = tf.math.reduce_std(X, axis=0)
X = (X - mu) / sigma
mu = tf.reduce_mean(Y, axis=0)
sigma = tf.math.reduce_std(Y, axis=0)
Y = (Y - mu) / sigma

# Visualization scaffolding: subplot grid (rows, cols) and a running index.
fig = plt.figure(figsize=[12, 12])
spr = 1
spc = 1
spn = 0

# 3D scatter of the (standardized) inputs against the target.
spn += 1
ax = fig.add_subplot(spr, spc, spn, projection='3d')
ax.scatter3D(X[:, 0], X[:, 1], Y[:, 0])
ax.set_xlabel('X[:, 0]')
ax.set_ylabel('X[:, 1]')
ax.set_zlabel('Y[:, 0]')
ax.set_title('again')


# Shuffled, batched, prefetching input pipeline over the full (X, Y) set.
data_iter = (
    tf.data.Dataset.from_tensor_slices((X, Y))
    .shuffle(buffer_size=n, seed=777)
    .batch(batch_size=batch_size)
    .prefetch(buffer_size=tf.data.AUTOTUNE)
)

# Sanity-check the pipeline: walk every batch once and report its shapes.
print('Test iterator')
total_batch = int(np.ceil(n / batch_size))
xgene = data_iter.as_numpy_iterator()
for i, (bx, by) in zip(range(total_batch), xgene):
    sep(i)
    print(bx.shape)
    print(by.shape)

# model
# def r2_fn(y_true, y_pred):
#     metric = tfa.metrics.r_square.RSquare()
#     metric.update_state(y_true, y_pred)
#     result = metric.result()
#     return result
def r2_fn(y_true, y_pred):
    """Coefficient of determination: R^2 = 1 - SS_res / SS_tot (mean form)."""
    ss_res = tf.reduce_mean(tf.square(y_true - y_pred))
    ss_tot = tf.reduce_mean(tf.square(y_true - tf.reduce_mean(y_true)))
    return 1 - ss_res / ss_tot


# Single linear unit: y = x @ w + b, so the fitted kernel approximates w0.
model = layers.Dense(units=1)
# Build with an explicit (batch, features) shape. The original passed the
# bare int 2, which only works because TensorShape(2) happens to be read as
# a shape whose last dimension is 2; (None, 2) creates the same (2, 1)
# kernel while stating the intent unambiguously.
model.build(input_shape=(None, 2))
# Attach training utilities directly onto the layer object (used by
# train_step / train_model below).
model.loss_func = losses.mean_squared_error
model.r2 = r2_fn
model.optimizer = optimizers.SGD(learning_rate=alpha)

# train
# @tf.function  # uncomment to trace/compile the step
def train_step(model, x, y):
    """Run one SGD step on a single batch.

    Records only the loss on the tape; the R^2 metric is computed outside
    the tape since it is never differentiated. Gradients are taken over
    trainable_variables (the idiomatic set; identical to .variables for a
    plain Dense layer, but correct if non-trainable weights ever appear).
    Returns (loss, r2) as scalar tensors.
    """
    with tf.GradientTape() as tape:
        pred = model(x)
        loss = model.loss_func(tf.reshape(y, [-1]), tf.reshape(pred, [-1]))
    r2 = model.r2(tf.reshape(y, [-1]), tf.reshape(pred, [-1]))
    grads = tape.gradient(loss, model.trainable_variables)
    model.optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss, r2


def train_model(model, n_epoch):
    """Train for n_epoch passes over data_iter, logging to TensorBoard.

    Side effects: updates model weights in place and writes 'cost' and
    'r2 impl' scalars under ./_log/<script name>. Replaces the original
    hand-maintained `i = -1; i += 1` / `g_step = -1; g_step += 1` counters
    with enumerate plus a post-incremented global step — the logged step
    values and printed lines are identical.
    """
    writer = tf.summary.create_file_writer("./_log/" + os.path.basename(__file__))
    g_step = 0
    for epoch in range(n_epoch):
        for i, (bx, by) in enumerate(data_iter.as_numpy_iterator()):
            loss, r2 = train_step(model, bx, by)
            with writer.as_default():
                tf.summary.scalar('cost', loss, step=g_step)
                tf.summary.scalar('r2 impl', r2, step=g_step)
                writer.flush()
            print(f'#{g_step+1}: epoch#{epoch + 1}: batch#{i + 1}: cost again = {loss}, r2 impl = {r2}')
            g_step += 1


# Run the training loop (mutates model weights; writes TensorBoard logs).
train_model(model, n_epoch)


# Overlay the fitted line between the per-feature (min, min) and
# (max, max) corners of the standardized input range.
corners = np.array([
    [tf.reduce_min(X[:, 0]), tf.reduce_min(X[:, 1])],
    [tf.reduce_max(X[:, 0]), tf.reduce_max(X[:, 1])],
])
xx = tf.constant(corners)
yy = model(xx)
ax.plot3D(xx[:, 0], xx[:, 1], yy[:, 0], color='r')

# finally show all plotting
plt.show()
