import os, time
from threading import Thread
import numpy as np
from tensorflow.keras.models import load_model
if os.path.exists(r'./lib'):
    from lib.particular_filtering import ParticularFiltering
    from lib.fmu_func import simulator, dishWasher
    from lib.ddpg_module import DDPG
else:
    from .lib.particular_filtering import ParticularFiltering
    from .lib.fmu_func import simulator, dishWasher
    from .lib.ddpg_module import DDPG

DIR_PATH = r"./model/"
ENVIRONMENT = "dishWasher"

class StateMonitor:
    """Fits simulator states to measured signals and diagnoses faults.

    Each call to ``step`` starts from an inverse-model guess and refines it
    with a mix of a DDPG actor (reinforcement learning) and gradient descent
    until the normalized simulation residual drops below ``approx_err`` or
    the iteration budget is exhausted; the best state found is fed into a
    particle-filtering fault diagnoser.
    """

    def __init__(self,
                 approx_err=1e-3,  # fitting-error tolerance of the RL loop
                 iter_max=100,     # max number of accepted improvement steps
                 noise=None,       # per-parameter noise; defaults to 5 x 1e-20
                 alpha=0.3, beta=0.6, gamma=0.75, naive_gc=False,
                 thres=1e-3, bad_rl_max=3):
        # Avoid a mutable-list default argument; None means the original
        # default of five 1e-20 entries.
        if noise is None:
            noise = [1e-20, 1e-20, 1e-20, 1e-20, 1e-20]
        self._bad_rl_max = max(1, int(bad_rl_max))

        # Remember the filter configuration so restart() rebuilds the filter
        # with the *same* parameters (previously restart() silently fell back
        # to the ParticularFiltering defaults).
        self._pf_kwargs = dict(noise=noise, alpha=alpha, beta=beta,
                               gamma=gamma, thres=thres)
        self._fault_diag = ParticularFiltering(**self._pf_kwargs)

        # Inverse model: maps a measurement residual to an initial state
        # guess.  Loaded via DIR_PATH for consistency with the DDPG weights
        # below (resolves to the same "./model/inverse_mdl.h5" file).
        self._init_predict_mdl = load_model(rf"{DIR_PATH}inverse_mdl.h5")

        num_actions = len(noise)
        # Reference response of the all-zero state; its shape defines the
        # observation size handed to the DDPG model.
        self._ref = simulator([0 for _ in range(num_actions)])
        num_states = self._ref.shape

        self._naive_gc = naive_gc

        # Gradient-descent helper built around the dish-washer model.
        self._dw = dishWasher()

        # Load the pre-trained DDPG actor/critic weights.
        self._rl_model = DDPG(num_states, num_actions)
        self._rl_model.load_actor_weights(rf"{DIR_PATH}actor_ddpg-{ENVIRONMENT}.h5")
        self._rl_model.load_critic_weights(rf"{DIR_PATH}critic_ddpg-{ENVIRONMENT}.h5")

        self._iter_max = int(iter_max)
        self._approx_err = max(approx_err, 1e-9)

        self._rl_thread = None  # background learner; started by restart()
        self.restart()

    def restart(self):
        """Reset the diagnoser and RL model; ensure the learner thread runs.

        The learner thread is a daemon running an infinite loop, so it can
        never be joined to completion -- the original ``join()`` would block
        forever on any restart after the first.  The thread is therefore
        created once and reused across restarts.
        """
        self._fault_diag = ParticularFiltering(**self._pf_kwargs)
        self._rl_model.reset()
        if self._rl_thread is None or not self._rl_thread.is_alive():
            self._rl_thread = Thread(target=self.rl_thread, daemon=True)
            self._rl_thread.start()

    def step(self, data, verbose=True):
        """Estimate the hidden state for one measurement frame.

        Parameters
        ----------
        data : array-like
            One frame of measured signals.
        verbose : bool
            If True, print per-iteration convergence info and wall-clock time.

        Returns
        -------
        The prediction of the particle-filtering fault diagnoser after it has
        consumed the best state found for this frame.
        """
        ti = time.time()
        x = np.array(data, dtype="float32")[None, ...]
        # Initial guess from the inverse model, clipped to [0, 1].
        state = np.minimum(1, np.maximum(0, self._init_predict_mdl.predict(x - self._ref)[0]))
        state_pre = np.array(state, dtype="float32")
        n_iter = 0
        # Simulation residual normalized by the RMS of the reference response.
        _x_simu = (simulator(state) - x) / np.mean(self._dw._ref ** 2, axis=0) ** 0.5
        _x_err = np.mean(np.linalg.norm(_x_simu, axis=0))
        _x_err_pre = _x_err
        err = np.max(np.linalg.norm(_x_simu, axis=0))
        if verbose:
            print(f"[INFO] Convergence at STEP[0] with residu {err:.10f}")

        # Initialize the gradient-descent helper.
        self._dw._pd_set()

        state_best = np.array(state, dtype="float32")
        err_best = err
        quit_rl_count = 0
        n = 0
        while err > self._approx_err and n_iter < self._iter_max:
            if n > self._iter_max * 2.5:
                break  # hard budget on total attempts (incl. rejected steps)
            elif n > self._iter_max * 1.5:
                # Fall back to coordinate-wise descent when progress stalls.
                self._dw._set_pd_coordinate_mode()
            n += 1
            if self._naive_gc or quit_rl_count >= self._bad_rl_max:
                # Gradient-descent branch (RL disabled or repeatedly failing).
                if quit_rl_count == self._bad_rl_max:
                    # First GD step after giving up on RL: restart the search
                    # from the best state seen so far.
                    state = state_best
                    quit_rl_count += 1
                d_state = self._dw.pb_action(state, x, state_pre)
                # dtype fixed from "float" (float64) to "float32" for
                # consistency with every other cast in this class.
                state_pre = np.array(state, dtype="float32")
                if n_iter % (self._bad_rl_max * 2) == 0:
                    # Periodically re-evaluate the RL action against GD and
                    # switch back to RL if it is clearly competitive.
                    d_state_ = self._rl_model.policy(_x_simu)
                    errA = np.mean(np.linalg.norm((simulator(state + d_state) - x) / np.mean(self._dw._ref ** 2, axis=0) ** 0.5, axis=0))
                    errB = np.mean(np.linalg.norm((simulator(state + d_state_) - x) / np.mean(self._dw._ref ** 2, axis=0) ** 0.5, axis=0))
                    # NOTE(review): the second disjunct compares (err - errB)
                    # against (err - errB)/1.5 rather than (err - errA)/1.5 --
                    # looks like a typo, but intent is unclear; kept as-is
                    # pending confirmation.
                    if (err > errB and (err - errA) / 1.5 < err - errB) or (err <= errB and err - errB < (err - errB) / 1.5):
                        quit_rl_count = round(quit_rl_count / 2)
                        d_state = d_state_
            else:
                # Reinforcement-learning branch: the DDPG actor picks the
                # state update from the current residual.
                d_state = self._rl_model.policy(_x_simu)

            state = np.minimum(1, np.maximum(0, state + d_state))

            # Normalized residuals before/after the action.
            _x_sim_pre, _x_simu = _x_simu, (simulator(state) - x) / np.mean(self._dw._ref ** 2, axis=0) ** 0.5

            # Reward = residual + alpha * relative convergence gain.
            reward = self._dw.cal_reward(_x_sim_pre, _x_simu)

            # Record the transition for the background learner thread.
            self._rl_model.buffer.record((_x_sim_pre, d_state, reward, _x_simu))

            err = np.mean(np.linalg.norm(_x_simu, axis=0))
            if err < err_best:
                err_best = err
                state_best = np.array(state, dtype="float32")
                n_iter += 1
                if verbose:
                    print(f"[INFO] Convergence at STEP[{n_iter}] with residu {err_best:.10f}")
            else:
                # The step made things worse: pull back towards the best
                # state and count it as a failed (RL) attempt.
                d_state = self._dw.correct_action(x, state, state_best)
                state = np.minimum(1, np.maximum(0, state + d_state))
                n += 1
                quit_rl_count += 1

        self._fault_diag.step(state_best)
        if verbose:
            # Wall-clock time of this step; now honors the verbose flag.
            print(time.time() - ti)
        return self._fault_diag.predict()

    def rl_thread(self):
        """Background learner: train the DDPG model every 30 s once the
        replay buffer holds enough samples (daemon thread, never exits)."""
        while True:
            if self._rl_model.buffer.length > self._rl_model.buffer.batch_size * 100:
                self._rl_model.learn()
            time.sleep(30)

if __name__ == "__main__":
    import json
    import matplotlib.pyplot as plt

    sm = StateMonitor()
    # Read-only access: mode "r" instead of the original "r+" -- the file is
    # never written, and "r+" fails on read-only files/media.
    with open(r"data/data_test.json", "r", encoding="gbk") as f:
        data = json.load(f)

    x = []
    for itmId, itm in enumerate(data["signals"][:50]):
        print(f"[[ STEP {itmId + 1} ]]")
        info = sm.step(itm)
        x.append(sm._fault_diag.x)
        print(info)

    x = np.array(x, dtype="float32")
    x_ = np.array(data["params"], dtype="float32")
    plt.figure()
    for i in range(5):
        # Label only the first pair so the legend shows one entry per series
        # instead of five duplicates ("_nolegend_" hides a line from legend).
        plt.plot(x_[:50, i], color="green",
                 label="gnd" if i == 0 else "_nolegend_")
        plt.plot(x[:50, i], color="red",
                 label="pred" if i == 0 else "_nolegend_")
    plt.legend()
    plt.show()
