import os
import json

# TF_CPP_MIN_LOG_LEVEL must be set BEFORE tensorflow is imported for the
# native-log filtering to take effect ('2' hides INFO and WARNING output).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import numpy as np
import tensorflow as tf

from tensorflow.keras.models import load_model, model_from_json
from tensorflow.keras.layers import Input, Dense, Flatten, LeakyReLU
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.regularizers import L2
from tensorflow.keras.models import Model as tfModel

# Weight regularizer / initializer shared by every Dense layer in _get_nn.
regularizer = L2(2.5e-8)
initializer = RandomNormal(stddev=0.1)

def _get_nn(in_sh=(20, 6), out_put_size=1, feature_num=10):
    """Build the inverse network ("discriminator").

    Maps a signal of shape (in_sh[0], in_sh[1]) to `out_put_size` values
    squashed into (-1, 1).  Two gated stages: per-time-step features are
    gated and mean-pooled over time, then a pooled feature vector is
    gated again before the output head.
    """
    # Raw signal input: (time steps, channels).
    x_in = Input(shape=(in_sh[0], in_sh[1]))

    # Stage 1, branch A: bias-free linear projection of each time step.
    branch_a = Dense(
        feature_num * max(1, round(16 / in_sh[1])),
        use_bias=False,
        kernel_initializer=initializer,
        kernel_regularizer=regularizer,
    )(x_in)

    # Stage 1, branch B: relu -> sigmoid gate, matching branch A's width.
    gate_hidden = Dense(
        feature_num * max(1, round(64 / in_sh[1])),
        use_bias=True,
        kernel_initializer=initializer,
        kernel_regularizer=regularizer,
        bias_initializer=initializer,
        bias_regularizer=regularizer,
        activation="relu",
    )(x_in)
    gate = Dense(
        feature_num * max(1, round(16 / in_sh[1])),
        use_bias=True,
        kernel_initializer=initializer,
        kernel_regularizer=regularizer,
        bias_initializer=initializer,
        bias_regularizer=regularizer,
        activation="sigmoid",
    )(gate_hidden)

    # Gated features, averaged across the time axis.
    pooled = tf.reduce_mean(branch_a * gate, axis=-2)

    # Stage 2: same gating scheme on the pooled feature vector.
    feat_lin = Dense(
        feature_num,
        use_bias=False,
        kernel_initializer=initializer,
        kernel_regularizer=regularizer,
    )(pooled)
    feat_gate_hidden = Dense(
        feature_num * 4,
        use_bias=True,
        kernel_initializer=initializer,
        kernel_regularizer=regularizer,
        bias_initializer=initializer,
        bias_regularizer=regularizer,
        activation="sigmoid",
    )(pooled)
    feat_gate = Dense(
        feature_num,
        use_bias=True,
        kernel_initializer=initializer,
        kernel_regularizer=regularizer,
        bias_initializer=initializer,
        bias_regularizer=regularizer,
        activation="sigmoid",
    )(feat_gate_hidden)
    gated_feat = feat_lin * feat_gate

    # Output head: relu expansion, then a sigmoid rescaled from (0, 1)
    # to (-1, 1).
    head = Dense(
        out_put_size * 4,
        use_bias=False,
        kernel_initializer=initializer,
        kernel_regularizer=regularizer,
        activation="relu",
    )(gated_feat)
    y_out = (Dense(
        out_put_size,
        use_bias=False,
        kernel_initializer=initializer,
        kernel_regularizer=regularizer,
        activation="sigmoid",
    )(head) - 0.5) * 2

    return tfModel(inputs=x_in, outputs=y_out, name="discriminator")

class InverseModel:
    """Learns an approximate inverse of a black-box function.

    Given ``func`` mapping x (length ``func_in_num``, components in
    [0, 1]) to some output array y, trains a network that maps y back
    to x.  Generated (x, y) samples are cached as JSON at ``data_path``;
    a fitted model can be saved to / reloaded from ``mdl_path``.
    """

    def __init__(self, func, func_in_num=1,
                lr=2e-7, stop_loss=1e-6, 
                 epoches=300, batch_size=64, n_add=100, model=None,
                 data_path=r"../data/inverse_data.json",
                 mdl_path=r"../model/inverse_mdl.h5",
                 feature_num=200, show_step=False):
        """Build (or reload) the inverse network and immediately fit it.

        Args:
            func: forward function to invert, called as ``func(x_vec)``.
            func_in_num: dimensionality of func's input vector.
            lr: Adam learning rate.
            stop_loss: early-stop threshold on the mean epoch loss.
            epoches: maximum training epochs (0 skips training entirely).
            batch_size: mini-batch size.
            n_add: samples generated per input dimension.
            model: optional pre-built Keras model to train; takes
                precedence over any model saved at ``mdl_path``.
            data_path: JSON cache for generated samples.
            mdl_path: path of the saved Keras model (HDF5).
            feature_num: width parameter forwarded to ``_get_nn``.
            show_step: falsy for silent training; otherwise the loss is
                printed every ``int(show_step)`` epochs.
        """
        self.func = func
        self.func_in_num = func_in_num
        self.epoches = epoches
        self.lr = lr
        self._n_add = n_add
        self.batch_size = batch_size
        self.feature_num = feature_num
        self._opt = tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.75, beta_2=0.8)
        self._stop_loss = stop_loss
        self._model = model
        self._data = None
        self._data_path = data_path
        self._mdl_path = mdl_path
        self.load_data()
        self.load()
        self.fit(show_step=show_step)

    def _train_step(self, in_y, out_x):
        """Run one optimizer step: predict x from y and minimise RMSE."""
        with tf.GradientTape() as tape:
            # relu clamps predictions to the valid x range [0, inf).
            pred = tf.nn.relu(self._model(in_y, training=True))
            # Root-mean-square error between true and predicted inputs.
            loss = tf.reduce_mean(tf.abs(out_x - pred) ** 2) ** 0.5
        # Gradient computation / application must not be recorded by the
        # tape, so both happen outside the context manager.
        grads = tape.gradient(loss, self._model.trainable_variables)
        self._opt.apply_gradients(zip(grads, self._model.trainable_variables))
        self._cumloss += float(loss.numpy())

    def save_data(self):
        """Write the cached (x, y) sample dict to ``data_path`` as JSON."""
        with open(self._data_path, "w", encoding="gbk") as f:
            json.dump(self._data, f)

    def load_data(self):
        """Load cached samples from ``data_path``; None if absent."""
        if os.path.exists(self._data_path):
            with open(self._data_path, "r", encoding="gbk") as f:
                self._data = json.load(f)
        else:
            self._data = None

    def fit(self, show_step=True):
        """Generate/reload training data, build the model if needed, train.

        Stops early once the mean epoch loss drops below ``stop_loss``.
        """
        if self._data is None:
            # Sample each input dimension on a 1-D sweep, plus a block of
            # uniformly random joint samples.
            n_add = self._n_add
            add_part = round((self.func_in_num**0.75)*n_add)
            print("To generate samples with the scale of : ", self.func_in_num*n_add+add_part)
            x = np.zeros([
                self.func_in_num*n_add+add_part,
                self.func_in_num
                ])
            for xid in range(self.func_in_num):
                x[n_add*xid:n_add*(xid+1), xid] = np.linspace(0, 1, n_add)
            x[-add_part:, :] = np.random.uniform(0, 1, [add_part, self.func_in_num])
            y = np.array([self.func(xitm) for xitm in x], dtype="float32")
            # Drop samples where the forward function produced NaN/inf.
            inds = ((~np.isnan(y))&(~np.isinf(y))).all(axis=1).all(axis=1)
            x = x[inds,:]; y = y[inds, ...]
            self._data = {"x": x.tolist(), "y": y.tolist()}
            self.save_data()
        else:
            x = np.array(self._data["x"], dtype="float32")
            y = np.array(self._data["y"], dtype="float32")
        if self._model is None:
            self._model = _get_nn(np.shape(y[0]),
                                  out_put_size=self.func_in_num,
                                  feature_num=self.feature_num,)
        # Each epoch sweeps the data 5 times in a shuffled order
        # (permutation of 5*len(y) indices folded back with the modulo).
        batch_num = int(len(y)*5/self.batch_size)
        for eid in range(self.epoches):
            self._cumloss = 0
            sid = np.random.permutation(len(y)*5)%len(y)
            for n in range(batch_num):
                self._train_step(
                    y[sid[n*self.batch_size:(n+1)*self.batch_size]],
                    x[sid[n*self.batch_size:(n+1)*self.batch_size]]
                    )
            if show_step and ((eid+1) % int(show_step) == 0):
                print(f"\t[Epoch{eid+1}]:\tLoss = {round(self._cumloss/batch_num,9)}")
            if self._cumloss/batch_num < self._stop_loss:
                break

    def predict(self, in_y):
        """Return the predicted x (clamped to >= 0) for outputs ``in_y``."""
        return tf.nn.relu(self._model(
            np.array(in_y, dtype="float32"),
            training=False
            )).numpy().astype("float32")

    def save(self):
        """Persist the current model to ``mdl_path``."""
        self._model.save(self._mdl_path)

    def load(self):
        """Load a previously saved model from disk, if one exists.

        A model passed to ``__init__`` is kept, and a missing checkpoint
        (the normal first-run case) no longer raises FileNotFoundError —
        ``fit`` then builds a fresh network via ``_get_nn``.
        """
        if self._model is None and os.path.exists(self._mdl_path):
            self._model = load_model(self._mdl_path)

def get_inverse_func(func, func_in_num=1, n_add=1000,
                     lr=1e-7, stop_loss=1e-6, 
                     epoches=0, batch_size=512,
                     feature_num=64, show_step=False, model=None,
                     return_mdl=False):
    """Train an InverseModel for ``func`` and return its predict function.

    With ``return_mdl=True`` the model object is returned as well, so
    the caller can save or keep training it.
    """
    inverse_mdl = InverseModel(
        func,
        func_in_num=max(abs(func_in_num), 1),  # force at least one input dim
        n_add=n_add,
        epoches=epoches,
        lr=lr,
        batch_size=batch_size,
        stop_loss=stop_loss,
        feature_num=feature_num,
        model=model,
        show_step=show_step,
    )
    return (inverse_mdl.predict, inverse_mdl) if return_mdl else inverse_mdl.predict
        
if __name__ == "__main__":
    from fmu_func import simulator

    # Baseline response; the network is trained on deviations from it.
    ref = simulator([0, 0, 0, 0, 0])
    func = lambda itm: simulator(itm) - ref
    inverse_func, mdl = get_inverse_func(func, func_in_num=5,
                                         show_step=True, return_mdl=True)
    mdl.save()

    # json is already imported at module level; read-only access suffices.
    with open(r"../data/data_1.json", "r", encoding="gbk") as f:
        res = json.load(f)
    x = np.array(res["params"], dtype="float32")
    pred_x = inverse_func(np.array(res["signals"], dtype="float32") - ref)
    # RMSE between true and recovered parameters.
    print(float(np.mean(np.abs(x - pred_x) ** 2) ** 0.5))

    # One subplot per parameter: prediction (solid) vs truth (dashed).
    import matplotlib.pyplot as plt
    n = len(x[0])
    fig = plt.figure()
    for i in range(n):
        ax = fig.add_subplot(n, 1, i + 1)
        ax.plot(pred_x[:, i])
        ax.plot(x[:, i], "--")
    plt.show()
