# %%
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from specNet import MySpecNet, SpecVAE
from dataset_self import *
from tensorflow.keras import optimizers, losses
import colour
from colour.utilities import first_item
from sampleFDTD import ModPack, sim_result
import multiprocessing as mp

Set_GPU_Memory_Growth()  # enable on-demand GPU memory allocation (helper from dataset_self)

# %% Pipeline: color target points -> structure -> predicted reflectance spectrum -> compare color point positions
preNN = MySpecNet()  # forward prediction network (structure -> spectrum); load pre-trained weights
preNN.load_weights('./net_weights/weight_v2')
VAE = SpecVAE()  # inverse-retrieval VAE; load pre-trained weights
VAE.load_weights('./net_weights/VAE_weights_v2')
retrNN = VAE.decoder  # only the decoder is used for inverse retrieval


# %%
# 将xyz数据转为xyY数据在CIE1931中绘图
# %%
# Convert XYZ tristimulus data to xyY chromaticity and plot it on the CIE 1931 diagram
class XYZinCIE:
    """Scatter XYZ color points as (x, y) chromaticity coordinates on a CIE 1931 diagram."""

    def __init__(self):
        # Draw the chromaticity-diagram background once and keep its handles.
        diagram = colour.plotting.plot_chromaticity_diagram_CIE1931(
            show_spectral_locus=True, standalone=False)
        self.fig, self.ax = diagram
        plt.xlim([-0.1, 0.9])
        plt.ylim([-0.1, 0.9])

    def plotColorInCIE(self, xyz, marker=None, color=None, label=None):
        """Normalize each XYZ row by its sum and scatter the resulting (x, y) points.

        Returns the matplotlib scatter artist.
        """
        totals = np.reshape(np.sum(xyz, axis=1), (-1, 1))
        chroma = xyz / totals  # rows become (x, y, z) chromaticity triples
        return self.ax.scatter(chroma[:, 0], chroma[:, 1],
                               marker=marker, color=color, label=label)

    def show(self):
        """Add the legend and display the current figure."""
        plt.legend()
        plt.show()


# %%
# Retrieval and prediction targets ("farthest point" experiment).
# Alternative 24-point target set, kept for reference:
# p = tf.constant([[0.12908332826840088, 0.04971056950100272],
#                  [0.08328985595332158, 0.16082465203914176],
#                  [0.04810405397192, 0.282488056889321],
#                  [0.023522032456087028, 0.41117656354464227],
#                  [0.007775846350209642, 0.5451319516197652],
#                  [0.007960615085394723, 0.712532425697441],
#                  [0.034551753479608566, 0.804103810855163],
#                  [0.06990676472375856, 0.8357439980550658],
#                  [0.1581737069227496, 0.8055936303409712],
#                  [0.23228930894061864, 0.754329058530359],
#                  [0.29933313073603596, 0.6960316051783868],
#                  [0.38048550416337434, 0.6200819303470491],
#                  [0.4457536011669604, 0.5529778155959398],
#                  [0.5127876982921048, 0.48586981097672144],
#                  [0.5815858506047528, 0.41699580623594457],
#                  [0.6398016167264328, 0.3604799124779674],
#                  [0.7332945967300795, 0.26511979578192413],
#                  [0.6484838023460766, 0.22654008387528102],
#                  [0.5530692274965051, 0.18093527016349598],
#                  [0.48062821369962916, 0.14937677019388562],
#                  [0.39758341943718456, 0.11079316841913323],
#                  [0.3057047346988391, 0.06870479547802821],
#                  [0.22089394031483614, 0.030125083571385103],
#                  [0.17141676290038285, 0.003802346076703378]], dtype=tf.float64)

# p = tf.constant([[0.33, 0.33],
#                  [0.33, 0.33],
#                  [0.33, 0.33]], dtype=tf.float64)

# Target (x, y) chromaticity coordinates to be matched by the optimized structures.
p = tf.constant([[0.2095112600073421, 0.4821320470773577],
                 [0.2289781996144149, 0.4450847630694126],
                 [0.259054806076976, 0.39567930798909867],
                 [0.2873712386615871, 0.34099138633142323],
                 [0.3174420102936136, 0.2968722877154938],
                 [0.35637977939478216, 0.2192534820566805],
                 [0.4162256912450798, 0.3988573456869663],
                 [0.365042557795212, 0.37077625126725233],
                 [0.24324630521512292, 0.31818108882800145],
                 [0.17441280939796705, 0.28132829917121094]], dtype=tf.float64)
# point = p
point = tf.repeat(p, 5, axis=0)  # 5 copies of each target -> 5 random restarts per point
z = np.reshape(1 - np.sum(point, axis=1), (-1, 1))  # z = 1 - x - y completes each chromaticity triple
point = tf.concat([point, z], axis=1)

# Latent code for the VAE decoder (used only by the commented-out retrieval loop below).
z_VAE = tf.Variable(tf.random.normal((z.shape[0], 4), dtype=tf.float64))

steps = 500  # number of gradient-descent iterations
# lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay([500], [1e-2, 1e-3])
lr = 1e-2
opti = optimizers.Adam(learning_rate=lr)
# Earlier approach (disabled): optimize the VAE latent code instead of the
# structure parameters directly.
# for step in range(steps):
#     with tf.GradientTape() as tape:
#         tape.watch(z_VAE)
#         z_in = tf.concat([z_VAE, point], axis=1)
#         pre_struc = retrNN(z_in)
#         spec_pre = preNN(pre_struc)  # structure retrieved by the inverse network (normalized)
#         xyz_pred = my_spec_to_xyz(spec_pre)
#         loss = tf.reduce_mean(losses.mean_squared_error(point, xyz_pred))
#         # g = tape.gradient(spec_pre, z_VAE)
#         grad = tape.gradient(loss, z_VAE)
#         opti.apply_gradients(zip([grad], [z_VAE]))
#     print('\r' + 'step: %d/%d  train loss: %.5f ' % (step, steps, loss), end='')

# Optimize the structure parameters directly by gradient descent

# pxy = tf.repeat(tf.constant([[0.4]], dtype=tf.float64), point.shape[0], axis=0)
#
# para_v = tf.Variable(tf.random.normal((point.shape[0], 6), dtype=tf.float64))
# Free (unbounded) optimization variables; the sigmoid in the loop below maps
# them into the 7 normalized structure parameters fed to preNN.
para_v = tf.Variable(tf.random.normal((point.shape[0], 7), dtype=tf.float64))

# para_norm = tf.nn.sigmoid(para)
# z_in = tf.concat([z_VAE, point], axis=1)
# para_v = tf.Variable(retrNN(z_in))
# spec_pre = preNN(para)
# xyz_pred = my_spec_to_xyz(spec_pre)
loss_arr = []  # per-step training-loss history
for step in range(steps):
    with tf.GradientTape() as tape:
        tape.watch(para_v)
        para_v2 = tf.nn.sigmoid(para_v)  # squash free variables into (0, 1) normalized structure space
        # para_norm = tf.nn.relu(1 - tf.nn.relu(-para + 1))
        # para_norm = tf.concat([pxy, para_v2], axis=1)

        spec_pre = preNN(para_v2)  # predicted reflectance spectra for candidate structures
        xyz_pred = my_spec_to_xyz(spec_pre)  # spectra -> XYZ color coordinates (helper from dataset_self)
        loss = tf.reduce_mean(losses.mean_squared_error(point, xyz_pred))
        # NOTE(review): the gradient/update happens inside the tape context.
        # It works, but is conventionally done after exiting the `with` block.
        grad = tape.gradient(loss, para_v)
        opti.apply_gradients(zip([grad], [para_v]))
        loss_arr.append(loss.numpy())

    print('\r' + 'step: %d/%d  train loss: %.5f ' % (step, steps, loss), end='')
    # Snapshot the predicted XYZ positions at step 0 and then every 50 steps.
    if step == 0:
        pos_arr = tf.reshape(xyz_pred, (1, -1, 3))
    elif (step + 1) % 50 == 0:
        pos_arr = tf.concat([pos_arr, tf.reshape(xyz_pred, (1, -1, 3))], axis=0)

scalar_norm = NormPara([0], [0])  # de-normalization helper; its min/max are overwritten just below
# Hard-coded normalization bounds for the 7 structure parameters (values look
# like meters; presumably taken from the training-set statistics — TODO confirm
# they match the bounds the networks were trained with).
scalar_norm.min = tf.constant([3.00000011e-07, 5.99999979e-08,
                               5.99999979e-08, 5.00000006e-08,
                               5.00000006e-08, 1.00000001e-07, 1.00000001e-07], dtype=tf.float64)
scalar_norm.max = tf.constant([3.98999987e-07, 3.36999989e-07,
                               3.38000007e-07, 2.27000001e-07,
                               2.20000004e-07, 4.99000009e-07, 4.99000009e-07], dtype=tf.float64)

# sim_struc, _ = scalar_norm.back_para(pre_struc, 0)  # predicted physical structure parameters
# para_name = ['p', 'lx1', 'lx2', 'ly1', 'ly2', 'h1', 'h2']

# %%
# lam = np.linspace(380, 780, 201)
# fdtd_sim = ModPack('TiO2-gen.fsp', hide=False)
# R_sim_interp = np.zeros_like(spec_pre)
# for i in range(sim_struc.shape[0]):
#     fdtd_sim.set_my_para(dict(zip(para_name, sim_struc.numpy()[i, :].tolist())))
#     fdtd_sim.run_my_mod()
#     R_sim = np.flip(fdtd_sim.Rdata, axis=0)
#     R_sim_interp[i, :] = np.interp(lam, R_sim[:, 0]*1e9, R_sim[:, 1])


# %% Compute xyz coordinates and compare predicted vs. target colors on the CIE diagram
# NOTE(review): the FDTD simulation section above is commented out, so spec_pre
# is passed twice and xyz_sim is just a duplicate of xyz_pred.
xyz_pred, xyz_sim = get_xyz_pos([spec_pre, spec_pre])
plt.figure()  # fresh figure for the chromaticity diagram drawn by XYZinCIE
cie_plot = XYZinCIE()

cie_plot.plotColorInCIE(xyz_pred, marker='^', label='predict')
# cie_plot.plotColorInCIE(xyz_sim, marker='o', label='simulation')

# Hollow red circles around the target points. `facecolors='none'` is the
# documented way to draw unfilled markers; the previous `color=''` is rejected
# as an invalid color by current matplotlib.
cie_plot.ax.scatter(point[:, 0], point[:, 1], marker='o', edgecolors='red',
                    facecolors='none', label='target', s=200)

# cie_plot.plotColorInCIE(point, marker='s', label='target')

plt.legend()
plt.show()

# %% Recover physical structure parameters and save them for FDTD simulation
# Bug fixes: `para_norm` was never defined in the live code path (it only
# appears in a commented-out line in the loop above) — the optimized,
# sigmoid-normalized parameters live in `para_v2`. Also, np.save was called
# without the array to save, which raises TypeError.
sim_struc, _ = scalar_norm.back_para(para_v2, 0)  # de-normalize to physical structure parameters
sim_struc = sim_struc.numpy()
np.save('sim_struc_p340', sim_struc)  # np.save appends the .npy extension automatically
# sim_arr = np.split(sim_struc, 10, axis=0)
#
# pool = mp.Pool(processes=10)
# result = []
# for ii in range(10):
#     result.append(pool.apply_async(sim_result, args=(sim_arr[ii])))
#
# pool.join()

# %%
# Chromaticity coordinates (x, y) measured from the sample scan.
pp_arr = np.array([[0.171423951227093, 0.310707731681007],
                   [0.165114336500506, 0.310237453503340],
                   [0.163947407445979, 0.314453860021822],
                   [0.170705027988705, 0.323057256586300],
                   [0.177155861518061, 0.325524531431194],
                   [0.175358304847850, 0.336501972175607],
                   [0.159182859270046, 0.397608092383987],
                   [0.127411796314506, 0.504371432083569],
                   [0.120377843366226, 0.502673094206040],
                   [0.146234538211459, 0.449813713798098],
                   [0.172322877863391, 0.463410231212986]])
# Complete each row to a full (x, y, z) chromaticity triple with z = 1 - x - y.
pz = 1.0 - pp_arr.sum(axis=1, keepdims=True)
pp_arr = np.hstack([pp_arr, pz])

# Single-pillar structure parameters for comparison/validation.
# Columns presumably follow [p, lx1, lx2, ly1, ly2, h1, h2] in nm — TODO confirm.
struc_1p = np.array([[340, 140, 0, 140, 0, 200, 1],
                     [340, 160, 0, 120, 0, 350, 1],
                     [340, 170, 0, 120, 0, 350, 1],
                     [340, 180, 0, 100, 0, 300, 1],
                     [340, 180, 0, 130, 0, 300, 1],
                     [340, 190, 0, 130, 0, 300, 1],
                     [340, 200, 0, 140, 0, 300, 1],
                     [340, 220, 0, 150, 0, 300, 1],
                     [340, 270, 0, 160, 0, 300, 1],
                     [340, 280, 0, 170, 0, 300, 1]])
np.save('./struc_contract_1.npy', struc_1p)

# Double-pillar chromaticity coordinates (x, y, z) used for comparison below.
pos_3 = np.array([[0.188153266883297, 0.121199722695897, 0.690647010420806],
                  [0.131449852816188, 0.231450690474476, 0.637099456709336],
                  [0.124850616523987, 0.361588310875670, 0.513561072600343],
                  [0.118984089307329, 0.532709952142410, 0.348305958550261],
                  [0.181615978001334, 0.612317020525307, 0.206067001473359],
                  [0.220728784888071, 0.614479155887261, 0.164792059224668],
                  [0.281168367980574, 0.589321487389686, 0.129510144629740],
                  [0.371620232896111, 0.518852689042583, 0.109527078061306],
                  [0.492408440733740, 0.392227680956429, 0.115363878309831],
                  [0.510020533338762, 0.399585041244314, 0.090394425416924]])

# FDTD-simulate the single-pillar structures (nm -> m before the call).
R = sim_result(struc_1p * 1e-9, False)
R = np.flip(R, axis=1)  # presumably reorders the wavelength axis to ascending — TODO confirm
lam = np.linspace(380, 780, 201)  # wavelength grid in nm
R_interp = np.zeros((10, 201))
for num in range(10):
    # assumes R[num] rows are (wavelength_m, reflectance) pairs — TODO confirm
    R_interp[num, :] = np.interp(lam, R[num, :, 0] * 1e9, R[num, :, 1])

# Compare simulated single-pillar colors against the double-pillar reference set.
pos_1, _ = get_xyz_pos([R_interp, R_interp])
plt.figure()
cie_plot = XYZinCIE()

cie_plot.plotColorInCIE(pos_1, marker='s', color='red')
cie_plot.plotColorInCIE(pos_3, marker='o', color='black')
plt.show()
