import tensorflow as tf
import numpy as np
import json
from tensorflow.keras import layers, Sequential, losses, optimizers
import os
import matplotlib.pyplot as plt
import glob
from tqdm import tqdm
from dataset_self import *
from specNet import MySpecNet


# Restrict TensorFlow to the first GPU only (must be set before GPUs are initialised).
os.environ["CUDA_VISIBLE_DEVICES"] = '0'


def my_loss(y_true, y_pred):
    """Per-sample mean-squared error between true and predicted spectra."""
    return losses.mean_squared_error(y_true, y_pred)


def my_error(y_true, y_pred):
    """Mean squared relative error between true and predicted values.

    Bug fix: the original branched on the rank of ``y_true`` and applied
    ``axis=1`` in the rank-1 branch, which is invalid for a rank-1 tensor
    and raised at runtime. Reducing over all axes returns exactly the same
    scalar the original produced for rank > 1 inputs, and makes the rank-1
    case work as well, so the branch is no longer needed.
    """
    return tf.reduce_mean(tf.square((y_true - y_pred) / y_true))


# Enable on-demand GPU memory growth (helper defined in dataset_self).
Set_GPU_Memory_Growth()

# --- Build the datasets ---
log_dir = './log/log_conv/'  # TensorBoard summary directory
sum_w = tf.summary.create_file_writer(log_dir)
# NOTE(review): Windows-style path separator; consider os.path.join for portability.
path = '.\\result_duplicate\\all_para'
# Split the simulation results 80/20 into train/test; `lam` is presumably the
# wavelength axis used for plotting -- confirm against dataset_self.
x_train, y_train, x_test, y_test, lam = res_to_data(get_R(path), 0.2)  # build dataset
norm = NormPara(x_train, y_train)  # normalisation statistics from the training split
train_da = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_da = tf.data.Dataset.from_tensor_slices((x_test, y_test))
# NOTE(review): train uses norm.preprocess while test uses norm.process --
# verify in dataset_self that both apply the same normalisation.
train_db = train_da.map(norm.preprocess).shuffle(10000).batch(3000)  # train set: normalise, shuffle, batch
test_db = test_da.map(norm.process).shuffle(10000).batch(1000)  # test set: normalise, shuffle, batch

# net.load_weights('./net_weights/weight5.ckpt')
# net.build(input_shape=(None, 6))
# Piecewise-constant learning rate: 5e-5, then 1e-5 after step 1000, 1e-6 after step 2000.
lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay([1000, 2000], [5e-5, 1e-5, 1e-6])
opti = optimizers.Adam(learning_rate=lr)  # optimizer
epochs = 4000  # number of training epochs
la = 0.5e-6  # L2 regularisation coefficient

net = MySpecNet()

loss_arr = []  # per-epoch training-loss history (last batch of each epoch)
loss_t_arr = []  # periodic test-loss history
ep_t = []  # epoch indices at which the test loss was evaluated
loss_t = tf.constant(0)  # placeholder so the progress bar can show a test loss before the first eval

# --- Training loop ---
with tqdm(total=epochs, desc='epochs') as t:
    for ep in range(epochs):
        for i, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                y_hat = net(x)
                var = net.trainable_variables
                loss_mse = tf.reduce_mean(my_loss(y, y_hat))
                # BUG FIX: the original computed tf.nn.l2_loss(v.numpy()),
                # which turns every variable into a constant NumPy array, so
                # the L2 term contributed no gradient and the regulariser was
                # inert. Apply l2_loss to the variables themselves so the
                # penalty actually shrinks the weights.
                loss_l2 = tf.add_n([tf.nn.l2_loss(v) for v in var])
                # Total objective: data term + weighted L2 penalty.
                loss = loss_mse + la * loss_l2

            grad = tape.gradient(loss, var)
            opti.apply_gradients(zip(grad, var))
            t.set_postfix({'train loss': loss.numpy(), 'test loss': loss_t.numpy()})
        loss_arr.append(loss)  # records the *last batch* loss of this epoch

        # Evaluate on the test set every 50 epochs (at ep = 0, 50, 100, ...).
        if (ep + 1) % 50 == 1:
            for j, (x_t, y_t) in enumerate(test_db):
                y_p = net(x_t)
                # NOTE(review): the L2 term from the last training batch is
                # added so the test loss is comparable to the train objective.
                loss_t = tf.reduce_mean(my_loss(y_t, y_p)) + la * loss_l2
                loss_t_arr.append(loss_t)
                ep_t.append(ep)
        t.update(1)
        with sum_w.as_default():
            tf.summary.scalar('train loss', float(loss), step=ep)
            tf.summary.scalar('test loss', float(loss_t), step=ep)
        # Checkpoint every epoch (overwrites the same weight file).
        net.save_weights('./net_weights/weight10')

# --- Plot training/test loss curves ---
fig_loss, ax_loss = plt.subplots()
ax_loss.plot(range(1, epochs + 1), loss_arr)
ax_loss.plot(ep_t, loss_t_arr)
ax_loss.legend(['loss_train', 'loss_test'])
ax_loss.set_xlabel('epochs')
ax_loss.set_ylabel('loss')
plt.show()

# --- Predict and plot a random sample of test spectra ---
# (removed dead locals from the original: dis_n was only appended to,
#  y_ppp and loss_test were never read)
disp_num = 25  # number of test spectra to display
len_p = int(np.ceil(np.sqrt(disp_num)))  # side length of the subplot grid
x_t, y_t = norm.process(x_test, y_test)  # normalise the full test set
y_pall = net(x_t)  # predictions for every test sample
plt.figure(figsize=(13, 10))
for py in range(len_p):
    for px in range(len_p):
        n = px + py * len_p + 1  # 1-based subplot index
        plt.subplot(len_p, len_p, n)
        # Pick a random test sample (indices may repeat between subplots).
        dis_temp = np.random.randint(0, x_test.shape[0])
        plt.plot(lam, y_pall[dis_temp, :], label='predict')
        plt.plot(lam, y_test[dis_temp, :], label='true')
        plt.ylim(0, 0.8)
plt.legend()  # NOTE: attaches to the last subplot only, as in the original
plt.show()

# plt.figure()
# plt.hist(loss_test.numpy(), bins=40, density=True, edgecolor='black', color='red')
# plt.xlabel('MSE loss')
# plt.ylabel('Number')
# plt.show()