import numpy as np
from mpl_toolkits import mplot3d
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, losses, metrics, optimizers, models
from python_ai.common.xcommon import *
import os
import pickle
import sys

# Fix seeds for both NumPy and TensorFlow so the sampled data is reproducible.
np.random.seed(777)
tf.random.set_seed(777)
filename = os.path.basename(__file__)  # used to name the save/log directories

is_check_vars = False  # when True: dump variables/gradients once, then exit
is_use_cache = True    # when True: reload a previously exported SavedModel
ver = 'v1.6'           # version tag appended to the save/log paths
alpha = 0.01           # Adam learning rate
batch_size = 128
n_epoch = 3
L1 = 100               # units in the first hidden layer
L2 = 200               # units in the second hidden layer
# Number of positive / negative samples to generate.
n_positive,n_negative = 2000,2000

if is_check_vars:
    # Inspecting variables requires a freshly built (trainable) model,
    # so the SavedModel cache must be bypassed.
    is_use_cache = False

# Positive samples: points on a small ring (radius ~ truncated N(5, 1)).
# NOTE: the seeded RNG calls below must stay in this exact order for the
# generated data to be reproducible — do not reorder these statements.
r_p = 5.0 + tf.random.truncated_normal([n_positive,1],0.0,1.0)
theta_p = tf.random.uniform([n_positive,1],0.0,2*np.pi)
Xp = tf.concat([r_p*tf.cos(theta_p),r_p*tf.sin(theta_p)],axis = 1)  # polar -> cartesian, shape (n_positive, 2)
Yp = tf.ones_like(r_p)  # label 1

# Negative samples: points on a larger ring (radius ~ truncated N(8, 1)).
r_n = 8.0 + tf.random.truncated_normal([n_negative,1],0.0,1.0)
theta_n = tf.random.uniform([n_negative,1],0.0,2*np.pi)
Xn = tf.concat([r_n*tf.cos(theta_n),r_n*tf.sin(theta_n)],axis = 1)
Yn = tf.zeros_like(r_n)  # label 0

# Stack positives and negatives into a single dataset.
X = tf.concat([Xp,Xn],axis = 0)
Y = tf.concat([Yp,Yn],axis = 0)
m, n = tf.shape(X)  # m = sample count, n = feature count (2)
m = m.numpy()
n = n.numpy()
total_batch = int(np.ceil(m / batch_size))
group = int(np.ceil(total_batch / 10))  # print progress roughly 10x per epoch

# Visualize the raw samples: positive small ring (red), negative large ring (green).
plt.figure(figsize=(6, 6))
plt.scatter(Xp[:, 0].numpy(), Xp[:, 1].numpy(), c="r")
plt.scatter(Xn[:, 0].numpy(), Xn[:, 1].numpy(), c="g")
plt.legend(["positive", "negative"])
plt.show()

# Input pipeline: shuffle over the whole dataset, batch, and prefetch.
ds = (
    tf.data.Dataset.from_tensor_slices((X, Y))
    .shuffle(buffer_size=m)
    .batch(batch_size)
    .prefetch(buffer_size=tf.data.AUTOTUNE)  # ATTENTION
)

# model
class DNNModel(tf.Module):
    """Three-layer fully connected binary classifier (2 -> L1 -> L2 -> 1)."""

    def __init__(self, name=None):
        super().__init__(name=name)
        # Hidden layers use ReLU; the output layer emits a sigmoid probability.
        # Attribute names are kept stable so SavedModel variable paths match.
        self.dense1 = layers.Dense(units=L1, activation=tf.nn.relu)
        self.dense2 = layers.Dense(units=L2, activation=tf.nn.relu)
        self.dense3 = layers.Dense(units=1, activation=tf.nn.sigmoid)

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 2], dtype=tf.float32)])
    def __call__(self, x):
        """Forward pass: (batch, 2) float32 -> (batch, 1) probability."""
        for layer in (self.dense1, self.dense2, self.dense3):
            x = layer(x)
        return x


savepath = './_save/' + filename + '_' + ver
if is_use_cache and os.path.exists(savepath):

    # Earlier persistence attempts, kept for reference:
    # with open(savepath, 'br') as f:
    #     model = pickle.load(f)  # AttributeError: Can't pickle local object 'make_gradient_clipnorm_fn.<locals>.<lambda>'

    # model = models.load_model(savepath)  # AttributeError: 'DNNModel' object has no attribute 'outputs'

    # SavedModel restores the module with its traced __call__ signature,
    # so inference at the bottom of the script works on the loaded object.
    model = tf.saved_model.load(savepath)

    print('LOADED')
else:
    model = DNNModel()
    # Attach loss/metric/optimizer onto the module instance so train_step
    # can reach everything through the single `model` argument.
    model.loss_func = losses.binary_crossentropy
    model.metric_func = metrics.binary_accuracy
    model.optimizer = optimizers.Adam(learning_rate=alpha)


    @tf.function
    def train_step(model, x, y):
        """Run one optimization step; return (loss, accuracy) for the batch."""
        with tf.GradientTape() as tape:
            h = model(x)
            # Flatten labels and predictions to rank 1 so shapes match.
            yr = tf.reshape(y, [-1])
            hr = tf.reshape(h, [-1])
            loss = model.loss_func(yr, hr)

        if not hasattr(model, 't_vars'):  # ATTENTION api: hasattr
            """Note: this method uses reflection to find variables on the current instance and submodules. For performance 
            reasons you may wish to cache the result of calling this method if you don't expect the return value to change. """
            # Cache trainable_variables once; this branch and the print only
            # execute at trace time, not on every step.
            model.t_vars = model.trainable_variables
            print(model.t_vars)

        grads = tape.gradient(loss, model.t_vars)

        if is_check_vars:
            # Trace-time debug dump of the gradient tensors.
            print(grads)

        model.optimizer.apply_gradients(zip(grads, model.t_vars))
        # NOTE(review): accuracy is computed from the pre-update predictions `hr`.
        acc = model.metric_func(yr, hr)
        return loss, acc


    def train_model(n_epoch):
        """Train `model` for n_epoch passes over `ds`, logging to TensorBoard."""
        writer = tf.summary.create_file_writer('./_log/' + filename + '_' + ver)

        g_step = -1
        for epoch in range(n_epoch):
            gene = ds.as_numpy_iterator()
            i = -1
            for bx, by in gene:
                g_step += 1
                i += 1
                if 0 == g_step:
                    # Start tracing on the very first step so the traced graph
                    # can be exported right after it runs.
                    tf.summary.trace_on()
                loss, acc = train_step(model, bx, by)

                if is_check_vars:
                    # Abort after the first step when only inspecting variables.
                    sys.exit(0)

                with writer.as_default():
                    if 0 == g_step:
                        tf.summary.trace_export('autograph_logistic', 0)
                    tf.summary.scalar('loss', loss, g_step)
                    tf.summary.scalar('acc_mid_logistic', acc, g_step)
                    writer.flush()
                if i % group == 0:
                    print(f'g_step#{g_step + 1}: epoch#{epoch + 1}: batch#{i + 1}: loss = {loss}, acc = {acc} [my mid api: logistic]')
            # Print a closing line for the epoch unless the last batch just did.
            if i % group != 0:
                print(
                    f'g_step#{g_step + 1}: epoch#{epoch + 1}: batch#{i + 1}: loss = {loss}, acc = {acc} [my mid api: logistic]')


    train_model(n_epoch)

    # pickle / models.save_model both fail for this tf.Module (see notes above);
    # SavedModel export is the approach that works.
    # with open(savepath, 'bw') as f:
    #     pickle.dump(model, f)    # AttributeError: Can't pickle local object 'make_gradient_clipnorm_fn.<locals>.<lambda>'

    # models.save_model(model, savepath)  # AttributeError: 'DNNModel' object has no attribute 'outputs'

    tf.saved_model.save(model, savepath)

    print('SAVED')

# Visualize ground truth vs. model predictions side by side.
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
ax1.scatter(Xp[:, 0].numpy(), Xp[:, 1].numpy(), c="r")
ax1.scatter(Xn[:, 0].numpy(), Xn[:, 1].numpy(), c="g")
ax1.legend(["positive", "negative"])
ax1.set_title("y_true")

# Run the forward pass once and derive both masks from it (the original
# called model(X) twice, doubling inference cost for identical results).
prob = tf.squeeze(model(X))
Xp_pred = tf.boolean_mask(X, prob >= 0.5, axis=0)
Xn_pred = tf.boolean_mask(X, prob < 0.5, axis=0)

ax2.scatter(Xp_pred[:, 0].numpy(), Xp_pred[:, 1].numpy(), c="r")
ax2.scatter(Xn_pred[:, 0].numpy(), Xn_pred[:, 1].numpy(), c="g")
ax2.legend(["positive", "negative"])
ax2.set_title("y_pred")
plt.show()