import numpy as np
from mpl_toolkits import mplot3d
from matplotlib import pyplot as plt
import tensorflow as tf
from python_ai.common.xcommon import *
import os

np.random.seed(777)
tf.random.set_seed(777)

# hyper-parameters
n = 400          # number of samples
batch_size = 64
n_epoch = 10
alpha = 0.01     # SGD learning rate

# synthetic regression data: Y = X @ w0 + b0 + Gaussian noise
# (@ is matrix multiplication)
X = tf.random.uniform([n, 2], minval=-10, maxval=10)
w0 = tf.constant([[2.0], [-3.0]])
b0 = tf.constant([[3.0]])
Y = X @ w0 + b0 + tf.random.normal([n, 1], mean=0.0, stddev=2.0)

# standardize features and targets to zero mean / unit variance
mu = tf.reduce_mean(X, axis=0)
sigma = tf.math.reduce_std(X, axis=0)
X = (X - mu) / sigma
mu = tf.reduce_mean(Y, axis=0)
sigma = tf.math.reduce_std(Y, axis=0)
Y = (Y - mu) / sigma

# visualization: 3-D scatter of the standardized samples
fig = plt.figure(figsize=[12, 12])
spr = 1   # subplot grid rows
spc = 1   # subplot grid cols
spn = 0   # running subplot index

spn += 1
ax = fig.add_subplot(spr, spc, spn, projection='3d')
ax.scatter3D(X[:, 0], X[:, 1], Y[:, 0])
ax.set_xlabel('X[:, 0]')
ax.set_ylabel('X[:, 1]')
ax.set_zlabel('Y[:, 0]')


# iterator
def data_iter(x, y, batch_size=8):
    """Yield shuffled (x, y) minibatches covering every row once per pass.

    The final batch is smaller when len(x) is not a multiple of batch_size.
    tf.gather is used instead of fancy indexing because indexing an eager
    tensor with a Python index list fails (OverflowError, per the original
    author's note).
    """
    n_rows = len(x)
    order = np.random.permutation(n_rows).tolist()
    n_batches = int(np.ceil(n_rows / batch_size))
    for k in range(n_batches):
        idx = order[k * batch_size:(k + 1) * batch_size]
        yield tf.gather(x, idx), tf.gather(y, idx)


# smoke-test the iterator: print the shape of every batch.
# Iterating the generator directly avoids the StopIteration risk of
# pairing manual next() calls with a separately computed batch count.
for i, (bx, by) in enumerate(data_iter(X, Y, batch_size=batch_size)):
    sep(i)
    print(tf.shape(bx))
    print(tf.shape(by))


# trainable parameters, shaped to match the ground-truth w0 / b0
# (module-level so both the model and train_step can update them)
w = tf.Variable(tf.random.normal(shape=w0.shape))  # weights: random start
b = tf.Variable(tf.zeros(b0.shape))                # bias: zeros (float32)


class LinRegre:
    """Linear model y = x @ w + b using the module-level parameters w, b."""

    def __call__(self, x):
        # forward pass: (batch, 2) @ (2, 1) + (1, 1) -> (batch, 1)
        return x @ w + b

    @staticmethod
    def loss_func(y_true, y_pred):
        """Half mean-squared-error."""
        err = y_true - y_pred
        return tf.reduce_mean(err ** 2) / 2.

    @staticmethod
    def r2(y_true, y_pred):
        """Coefficient of determination: 1 - SS_res / SS_tot (mean-based)."""
        residual = tf.reduce_mean((y_true - y_pred) ** 2)
        total = tf.reduce_mean((y_true - tf.reduce_mean(y_true)) ** 2)
        return 1 - residual / total


model = LinRegre()


# train

@tf.function
def train_step(model, x, y):
    """Run one SGD step on minibatch (x, y); return (loss, r2).

    Updates the module-level tf.Variables w and b in place with a plain
    gradient-descent step of size alpha.
    """
    with tf.GradientTape() as tape:
        # w and b are tf.Variables, so the tape tracks them automatically;
        # the original's explicit tape.watch([w, b]) was redundant.
        pred = model(x)
        loss = model.loss_func(y, pred)
    # r2 is a metric only -- no gradient needed, so compute it outside
    # the tape instead of recording it.
    r2 = model.r2(y, pred)
    dloss_dw, dloss_db = tape.gradient(loss, [w, b])
    w.assign_sub(alpha * dloss_dw)
    b.assign_sub(alpha * dloss_db)
    return loss, r2


def train_model(model, n_epoch):
    """Train for n_epoch passes over (X, Y), logging loss and r2 per step.

    Scalars go to a TensorBoard log directory named after this script, and
    progress is printed. Uses the module-level X, Y, batch_size, data_iter
    and train_step.
    """
    writer = tf.summary.create_file_writer("./_log/" + os.path.basename(__file__))
    g_step = 0  # global step across all epochs (x-axis for the summaries)
    for epoch in range(n_epoch):
        # enumerate replaces the original hand-maintained batch counter
        for i, (bx, by) in enumerate(data_iter(X, Y, batch_size)):
            loss, r2 = train_step(model, bx, by)
            with writer.as_default():
                tf.summary.scalar('cost', loss, step=g_step)
                tf.summary.scalar('r2', r2, step=g_step)
                writer.flush()  # flush each step so TensorBoard updates live
            print(f'#{g_step+1}: epoch#{epoch + 1}: batch#{i + 1}: cost = {loss}, r2 = {r2}')
            g_step += 1


train_model(model, n_epoch)


# visualization: draw the fitted model between the two corners of the
# (standardized) feature range. Column-wise min/max of X give exactly
# the [min x0, min x1] and [max x0, max x1] corner rows.
xx = tf.stack([tf.reduce_min(X, axis=0), tf.reduce_max(X, axis=0)])
yy = model(xx)
ax.plot3D(xx[:, 0], xx[:, 1], yy[:, 0], color='r')

# finally show all plotting
plt.show()
