from fmpy import dump, simulate_fmu
import numpy as np
import os, time, warnings

from tensorflow.keras.models import load_model

# Locate the pre-trained inverse (surrogate) model. Check the model FILE
# itself, not just its parent directory: the original test
# `os.path.exists(r'../model/')` passed for an empty directory and then
# crashed inside load_model. Prefer the sibling layout (script run from a
# subfolder), fall back to the repo-root layout.
_INIT_MDL_PATH = r"../model/inverse_mdl.h5"
if not os.path.exists(_INIT_MDL_PATH):
    _INIT_MDL_PATH = r"./model/inverse_mdl.h5"
INIT_MDL = load_model(_INIT_MDL_PATH)

# Locate the FMU next to the script, else under ./lib.
if os.path.exists(r"./DishWasher_v2.fmu"):
    FMU = r"./DishWasher_v2.fmu"
else:
    FMU = r"./lib/DishWasher_v2.fmu"
    
## IMPORT THE FOLLOWING JUST FOR DATA GENERATION
from multiprocessing import Process, Queue

# Print the FMU's model description (variables, inputs, outputs) to stdout
# at import time, as a sanity check that the FMU was found and is readable.
dump(FMU)
# The door opens linearly from 0 to TARGET_ANGLE (90 degrees) over TS
# seconds, sampled every SAMPLE_DT seconds (inclusive of t = 0).
TARGET_ANGLE = np.pi/2
SAMPLE_DT = 0.05
TS = 10
# Round the time stamps so floating-point noise is stripped from the grid.
# NOTE(review): np.log is the natural log (here 3 decimals for dt=0.05);
# log10 may have been intended, but the rounding result is unaffected.
_N_SAMPLES = round(TS/SAMPLE_DT) + 1
_N_DECIMALS = int(-np.log(SAMPLE_DT)) + 1
TIMEREF = np.round(np.arange(_N_SAMPLES)*SAMPLE_DT, _N_DECIMALS)
DOOR_ANGLE = TIMEREF * TARGET_ANGLE / TS
# Structured dtype of the FMU input table: time stamp, door angle, and the
# five damage parameters.
DTYPE = [('time', "float32"), ('Door_Ang (rad)', "float32"), ('JL_Damage', "float32"), ('LS_Damage', "float32"),
         ('MFT_Damage', "float32"), ('SK_Damage', "float32"), ('TH_Damage', "float32")]

def simulator(theta):
    """Run one FMU simulation for the 5-dim damage-parameter vector ``theta``.

    Builds the structured input table (time stamp, door angle, five damage
    values), simulates over [0, TS] with fixed step SAMPLE_DT, and returns
    the recorded channels with the time column dropped, as float32.
    """
    rows = [(t_, ang_, *theta) for t_, ang_ in zip(TIMEREF, DOOR_ANGLE)]
    signal_in = np.array(rows, dtype=DTYPE)
    raw = simulate_fmu(FMU, record_events=False, validate=False,
                       input=signal_in,
                       stop_time=TS, step_size=SAMPLE_DT).tolist()
    return np.array(raw, dtype="float32")[:, 1:]

def _random_gene(x, n, q):
    """Worker entry point: simulate one parameter sample and enqueue the result.

    Parameters
    ----------
    x : array-like or None
        Damage-parameter vector; when None a random vector in [0, 1)^5 is drawn.
    n : int
        Slot index of this sample in the caller's result lists.
    q : multiprocessing.Queue
        Receives ``[params, signals, n]``, or ``[None, None, n]`` on failure.
    """
    try:
        if x is None:
            x_ = np.random.uniform(0, 1, 5).astype("float32")
        else:
            x_ = np.array(x, dtype="float32")
        y_ = simulator(x_)
    except Exception as e:
        # Bug fix: previously execution fell through to q.put with x_/y_
        # possibly unbound, crashing the worker with a NameError. Report the
        # failure (warn wants a str/Warning, not a bare exception object) and
        # enqueue a sentinel so the caller's slot bookkeeping stays intact.
        warnings.warn(str(e))
        q.put([None, None, n])
        return
    q.put([x_.tolist(), y_.tolist(), n])

class dishWasher:
    """Gym-like environment for inverse estimation of door damage parameters.

    The state is the normalized residual between the signals simulated from
    the current 5-dim parameter estimate and the observed signals; an action
    is an increment applied to the parameter vector (clipped to [0, 1]).
    Also provides multiprocessing batch data generation and physics-based
    (finite-difference + line-search) action helpers.
    """

    def __init__(self, x=None, theta=None, eps=1e-7,
                 beta=1e-6, thres=1e-5, N_iter=1000,
                 pb_alpha=1e-5, pb_beta=1e-12):
        """Build the environment and simulate the healthy baseline once.

        x / theta: observed signals and initial parameter guess; when both
        are given the environment is reset immediately.
        eps: finite-difference step used in ``pb_action``.
        beta: weight of the convergence bonus in ``cal_reward``.
        thres: convergence threshold (also guards divisions).
        N_iter: truncation horizon for an episode.
        pb_alpha / pb_beta: initial step sizes for the line searches.
        """
        self._eps = eps
        self._beta = beta
        # Healthy baseline (all damage parameters zero), used to normalize
        # residuals throughout.
        self._ref = simulator([0,0,0,0,0])
        # Per-channel norms of the baseline; not read elsewhere in this file —
        # presumably kept for external consumers (TODO confirm).
        self._norm = np.linalg.norm(self._ref, axis=0)
        self._thres = thres
        self.iter_max = int(N_iter)
        if not (x is None or theta is None):
            self.reset(x, theta)

        self._pb_alpha0 = pb_alpha
        self._pb_beta0 = pb_beta
        self._pd_set()

    def _pd_set(self):
        # Reset the physics-based action state: line-search step sizes,
        # coordinate-probing index/mode, and the internal iteration counter.
        self._pb_alpha = self._pb_alpha0
        self._pb_beta = self._pb_beta0
        self._act_ind = 0
        self._coordinate_mode = False
        self._iter = 0

    def _set_pd_coordinate_mode(self):
        # Force coordinate-wise probing in pb_action (one index at a time).
        self._coordinate_mode = True

    def batch_generate(self, n=500, x=None, worker_num=10, verbose=False, filter_na=True):
        """Generate up to ``n`` (params, signals) samples with worker processes.

        x: optional (n, 5) array of parameter vectors; random vectors are
        drawn in the workers when None. worker_num: number of concurrent
        processes. filter_na: drop slots whose worker produced no result.
        Returns ``(params_list, signals_list)``.
        """
        if x is None:
            x = np.random.uniform(0,1, (n,5))
        n = min(n, len(x))
        # Results are written by slot index so out-of-order completion is fine.
        xs = [None for _ in range(n)]; ys = [None for _ in range(n)]
        
        q = Queue(); n_in = 0
        workers_on = 0
        Workers = [None for _ in range(worker_num)]
        
        try:
            # Dispatch loop: keep up to worker_num processes busy until all
            # n samples have been launched and every worker has finished.
            while n_in < n or workers_on > 0:
                for w in range(worker_num):
                    if Workers[w] is None:
                        if n_in < n:
                            Workers[w] = Process(target=_random_gene, args=(None if x is None or n_in >= len(x) else x[n_in], n_in, q,))
                            n_in += 1
                            # NOTE(review): n_in was already incremented, so
                            # this reports n_in+1 — off by one vs. samples
                            # actually dispatched; confirm intended.
                            if verbose and (n_in + 1) % 10 == 0:
                                print(f"Samples {n_in+1}/{n}")
                            # NOTE(review): a Process instance is always
                            # truthy, so this guard never skips.
                            if Workers[w]:
                                Workers[w].start()
                                workers_on += 1

                    elif not Workers[w].is_alive():
                        # Finished worker: reap it and, if work remains,
                        # immediately reuse the slot for the next sample.
                        if n_in < n:
                            Workers[w].join()
                            workers_on -= 1
                            Workers[w] = Process(target=_random_gene, args=(None if x is None or n_in >= len(x) else x[n_in], n_in, q,))
                            n_in += 1
                            if verbose and (n_in + 1) % 10 == 0:
                                print(f"Samples {n_in+1}/{n}")
                            if Workers[w]:
                                Workers[w].start()
                                workers_on += 1
                        else:
                            Workers[w].join()
                            workers_on -= 1
                # Drain any results queued so far into their slots.
                while not q.empty():
                    x_, y_, n_in_ = q.get_nowait()
                    xs[n_in_] = x_
                    ys[n_in_] = y_
                time.sleep(0.1)
            # Final drain after all workers have exited.
            while not q.empty():
                x_, y_, n_in_ = q.get_nowait()
                xs[n_in_] = x_
                ys[n_in_] = y_
            if verbose:
                print("[Success] Data is already Generated")
        except Exception as e:
            raise Exception("[Error]",e)
        if filter_na:
            return [itm for itm in xs if not itm is None][:n], [itm for itm in ys if not itm is None][:n]
        else:
            #print([1 if _it is None else 0 for _it in ys])
            return xs, ys
        
    def reset(self, x=None, theta=None, true_theta=None):
        """Start a new episode; returns ``(initial_state, terminated)``.

        x: observed signals (simulated from a random/given ``true_theta``
        when None). theta: initial parameter guess (predicted by the
        surrogate INIT_MDL from the residual when None).
        """
        if x is None:
            if true_theta is None:
                self._true_theta = np.random.uniform(0,1,5)
            else:
                self._true_theta = np.array(true_theta, dtype="float32")
            x = simulator(self._true_theta)
        if theta is None:
            theta = np.minimum(1, np.maximum(0, INIT_MDL.predict(x[None,...] - self._ref)[0]))
        _x = np.array(x, dtype="float32")
        ## record the actual observed signals
        self._x = _x
        ## the previous residual starts at the healthy state; the parameters
        ## predicted by the surrogate model become the initial position taken
        ## by the first step's action
        self._x_simu = (self._ref - _x)/np.mean(self._ref**2, axis=0)**0.5
        ## record the initial parameter position
        self._theta = np.array(theta, dtype="float32")
        self._theta_pre = np.array(theta, dtype="float32")
        ## reset the iteration counter
        self._iter = 0
        ## move the parameters to the initial position
        res = self.step(theta)
        self._pd_set()
        # res is the 5-tuple from step(); return (state, terminated flag).
        return res[0], res[2]

    def pb_action(self, theta=None, x=None, theta_pre=None, n_armijo=2, s_armijo=0.2):
        """Physics-based action: finite-difference gradient + two-sided
        Armijo-style line search on the simulated residual.

        Returns the parameter increment ``theta_ - theta`` (not the new
        parameters). Warnings from the simulator are suppressed.
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            if theta is None:
                theta = self._theta[:]
            if theta_pre is None:
                theta_pre = self._theta_pre[:]
            if x is None:
                # Denormalize the stored residual back to signal units.
                x = self._x_simu[:, :]*np.mean(self._ref**2, axis=0)**0.5
            # Central-difference probes: theta +/- eps along each coordinate,
            # clipped to the valid box [0, 1].
            _x = np.tile(theta, (len(theta)*2,1))
            _x[:len(theta)] += np.eye(len(theta), dtype="float32") * self._eps
            _x[len(theta):] -= np.eye(len(theta), dtype="float32") * self._eps
            _x = np.minimum(np.maximum(_x, 0), 1)

            _y = [simulator(x_itm).tolist() for x_itm in _x]
            #_, _y = self.batch_generate(x=_x, worker_num=min(5, len(_x)), verbose=False, filter_na=False)
            #_y = [simulator(x_itm).tolist() if y_itm is None else y_itm for x_itm, y_itm in zip(_x, _y)]

            # Normalized RMS residual per probe, then central differences.
            _residu = np.mean(np.mean((np.array(_y, dtype="float32") - x)**2, axis=1)**0.5/np.mean(self._ref**2, axis=0)**0.5, axis=1)
            _gradient = np.array([_residu[i] - _residu[i+len(theta)]  for i in range(len(theta))], dtype="float32")/self._eps/2
            if np.linalg.norm(_gradient) == 0:
                # Degenerate gradient: fall back to the previous displacement.
                _gradient = (theta - theta_pre)/self._pb_alpha
            _grad_n = np.max(np.abs(_gradient))
            if _grad_n > 1:
                # Normalize by the max component to bound the step.
                _gradient /= _grad_n
            elif _grad_n ==0 or self._coordinate_mode:
                # Still flat (or coordinate mode forced): nudge one coordinate
                # at a time, cycling through indices.
                _gradient[self._act_ind] = 0.1
                self._act_ind = (self._act_ind + 1) % len(_gradient)
            ## two-sided Armijo: geometric ladder of step sizes, both signs
            _alphas = [s_*a_ for a_ in self._pb_alpha*s_armijo**np.arange(-n_armijo, n_armijo+1) for s_ in [-1,1]]
            
            _y = [simulator(np.minimum(1, np.maximum(0,theta - _gradient*_alpha))).tolist() for _alpha in _alphas]
            #_x1 = np.minimum(1, np.maximum(0,theta - _gradient*_alphas[:,None]))
            #_, _y = self.batch_generate(x=_x1, worker_num=min(5, n_armijo*2+1), verbose=False, filter_na=False)
            #_y = [simulator(x_itm).tolist() if y_itm is None else y_itm for x_itm, y_itm in zip(_x1, _y)]
            
            # Pick the step with the smallest residual and remember it as the
            # next search's base step size.
            _residu = np.mean(np.mean((np.array(_y, dtype="float32") - x)**2, axis=1)**0.5/np.mean(self._ref**2, axis=0)**0.5, axis=1)
            _alpha_ind = np.argmin(_residu)
            self._pb_alpha = _alphas[_alpha_ind]
            theta_ = np.minimum(1, np.maximum(0,theta - _gradient*self._pb_alpha))
            
        return theta_ - theta

    def correct_action(self, x, state, state_b, s_tep=0.2, n_step=2):
        """Line search along the displacement ``state - state_b``.

        Probes fractional steps (both signs) from ``state_b`` along the last
        displacement, keeps the one with the smallest normalized residual
        against the observed signals ``x``, and returns the increment
        relative to ``state``.
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ## two-sided Armijo: geometric ladder of fractions, both signs
            _alphas = [_s*_a for _a in s_tep**np.arange(1,n_step+1) for _s in [-1,1]]
            _direc = state - state_b
            _y = [simulator(np.minimum(1, np.maximum(0,state_b + _alpha*_direc))).tolist() for _alpha in _alphas]
            #_x1 = np.minimum(1, np.maximum(0,theta - _gradient*_alphas[:,None]))
            #_, _y = self.batch_generate(x=_x1, worker_num=min(5, n_armijo*2+1), verbose=False, filter_na=False)
            #_y = [simulator(x_itm).tolist() if y_itm is None else y_itm for x_itm, y_itm in zip(_x1, _y)]
            
            _residu = np.mean(np.mean((np.array(_y, dtype="float32") - x)**2, axis=1)**0.5/np.mean(self._ref**2, axis=0)**0.5, axis=1)
            _alpha_ind = np.argmin(_residu)
            _best_alpha = _alphas[_alpha_ind]
            theta_ = np.minimum(1, np.maximum(0,state_b + _best_alpha*_direc))
            
        return theta_ - state

    def cal_reward(self, _x_sim_pre, _x_simu, return_err=False, sature_thres=100):
        """Reward for moving from residual ``_x_sim_pre`` to ``_x_simu``.

        Base reward is the relative RMS-error reduction (values saturated at
        +/- sature_thres); when ``beta`` is nonzero a bounded element-wise
        convergence-rate bonus is added. The result is squashed by tanh.
        With return_err=True also returns the new RMS error.
        """
        if self._beta != 0:
            # Element-wise relative change, with a thres-guarded denominator
            # (sign-preserving) and clipping to keep the bonus bounded.
            x_diff_sign = np.sign(_x_sim_pre)
            x_diff_sign = np.where(x_diff_sign!=0, x_diff_sign,1)
            _x_sim_diff = (_x_sim_pre - _x_simu) / np.where(np.abs(_x_sim_pre)>self._thres,
                                                            _x_sim_pre,
                                                            x_diff_sign*self._thres)
            _x_sim_diff = np.minimum(np.maximum(_x_sim_diff, -(1/self._beta)**0.2), (1/self._beta)**0.2)

        err_pre = np.mean(np.mean(np.minimum(np.maximum(_x_sim_pre, -sature_thres), sature_thres)**2, axis=0)**0.5)
        err = np.mean(np.mean(np.minimum(np.maximum(_x_simu, -sature_thres), sature_thres)**2, axis=0)**0.5)
        reward = ((err_pre - err)/err_pre)
        
        if self._beta != 0:
            reward += self._beta*np.mean(_x_sim_diff)

        if return_err:
            return np.tanh(reward), err
        else:
            return np.tanh(reward)

    def step(self, action):
        """Apply ``action`` as a parameter increment and re-simulate.

        Returns ``(state, reward, terminated, truncated, realized_action)``
        where realized_action is the increment after clipping to [0, 1].
        """
        ## increment the iteration counter
        self._iter += 1
        ## parameter update (clipped to the valid box [0, 1])
        self._theta_pre, self._theta = self._theta, np.minimum(1, np.maximum(0, self._theta + action))
        ## normalized fitting residual before / after the action
        self._x_sim_pre, self._x_simu = self._x_simu, (simulator(self._theta) - self._x)/np.mean(self._ref**2, axis=0)**0.5
        ## reward = residual improvement + beta * relative convergence bonus
        reward, err = self.cal_reward(self._x_sim_pre, self._x_simu, return_err=True)
        return self._x_simu, reward,  err < self._thres, self._iter > self.iter_max, self._theta - self._theta_pre
        

if __name__ == "__main__":
    #"""
    # Visual smoke test: simulate the undamaged machine and plot every
    # recorded force channel over time.
    healthy = simulator([0, 0, 0, 0, 0])
    import matplotlib.pyplot as plt
    for channel in healthy.T:
        plt.plot(channel)
    plt.legend(["Door_Forcex", "JL_Fz", "JL_Fy",
                "MFT_Force", "SK_Force", "TH_Force"]) #, "Door_Angle"])
    plt.show()
    #"""

    """
    dw = dishWasher()
    state_pre = dw.reset(simulator([0.1,0.1,0.1,0.1,0.1]), [0.1,0.3,0.5,0.1,0.])
    state, reward, terminate, truncated = dw.step([0.05,0.07,0.03,0.01,0])
    """
    
    """
    import json
    for i in range(2):
        dw = dishWasher()
        X, Y = dw.batch_generate(n=500, verbose=True)
        
        with open(f"data_{i+1}.json", "w+", encoding="gbk") as f:
            json.dump({"params": X, "signals": Y}, f)
    """
    """
    dw = dishWasher()
    X, Y = dw.batch_generate(n=500, x=np.minimum(np.maximum(np.cumsum(np.random.uniform(-0.1,1,(500, 5))/750, axis=0),0),1), verbose=True)
    import matplotlib.pyplot as plt
    for x in np.array(X, dtype="float32").T:
        plt.plot(x)
    plt.show()
    import json
    with open(f"data_test.json", "w+", encoding="gbk") as f:
        json.dump({"params": X, "signals": Y}, f)
    """