from utils import *
import os
import sys
import numpy as np
from threading import Lock
import queue
from models.base_model import BaseModel, ModelConfig, LstmNet
import torch
import numpy as np
from config import config_model_singleton, config_singleton
from utils import sys_logger

# Make the package's parent directory importable when this file is run
# directly.  The variable __file__ must be used here: the previous
# os.path.dirname("__file__") took the dirname of a literal string, which is
# always "", so ".." was resolved against the current working directory
# instead of this file's location.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))


class Hydraulic_Servo1D_AnomalyDetector(BaseModel):
    """
    LSTM-based anomaly detector for a 1D hydraulic servo.

    Frames arrive table-by-table through ``update_input_data``; once every
    configured table has reported, the assembled frame is queued.  ``run``
    (intended to execute on a dedicated worker thread) consumes queued
    frames, performs one-step LSTM inference, derives per-channel anomaly
    flags and pushes ``[timestamp, result_vector]`` onto the output queue.

    Example usage:
        model = Hydraulic_Servo1D_AnomalyDetector(config=ModelConfig())
        model.train(data)
        model.run()   # on a worker thread
    """

    def __init__(self, config: ModelConfig):
        super().__init__(config)

        # Number of inference steps performed so far.
        self.run_cnt = 0

        # Input-variable bookkeeping.  For every source table keep
        # [updated_flag, offset]: whether a fresh value set has been received
        # and where that table's variables start inside self.input_data.
        self.input_dim = 0
        self.input_update_flag = dict()
        for k, v in config.inputs.items():
            # (flag, offset)
            self.input_update_flag[k] = [False, self.input_dim]
            self.input_dim += len(v)

        self.output_dim = len(config.outputs["var_names"])

        # Per-channel normalization statistics (mean and std).
        self.mean = np.array(config.mean)
        self.std = np.array(config.std)
        self.need_preprocess = self._config.inputs_need_preprocess

        # dim0 is the batch size; holds the frame currently being assembled.
        self.input_data = np.zeros((1, self.input_dim))

        self.output_data_queue = queue.Queue(maxsize=config.input_output_queue_size)
        self.input_data_queue = queue.Queue(maxsize=config.input_output_queue_size)
        # Use CUDA only when both requested by config and actually available.
        self.device = torch.device("cuda:0" if config_singleton.use_cuda and torch.cuda.is_available() else "cpu")

        self.model = LstmNet(input_size=config_model_singleton.lstm_config_for_servo1d["input_size"],
                             hidden_size=config_model_singleton.lstm_config_for_servo1d["hidden_size"],
                             lstm_layers=config_model_singleton.lstm_config_for_servo1d["lstm_layers"],
                             dropout_rate=config_model_singleton.lstm_config_for_servo1d["dropout_rate"],
                             output_size=config_model_singleton.lstm_config_for_servo1d["output_size"])

        self.model.to(self.device)
        # Load pretrained weights and switch to inference mode.
        self.load_weights(self._config.model_weigths_file)
        self.model.eval()
        # LSTM hidden state starts as None (zero state on the first step).
        self.hidden_pred = None

        # Protects input_data / input_update_flag shared with the feeder thread.
        self.lock = Lock()
        self.thread_stop = False
        self.output_table_num = self._config.outputs["table_num"]
        # Anomaly threshold set at 4 standard deviations.  Channel 0 of the
        # statistics is the timestamp channel and is excluded.
        self.threshold = 4 * self.std[1:]
        self.lower_bound = self.mean[1:] - self.threshold
        self.upper_bound = self.mean[1:] + self.threshold

        # Trigger conditions for starting and stopping detection.
        self.start_od_conditions = self._config.start_conditions
        self.stop_od_conditions = self._config.stop_conditions
        self.od_start = False

        # Two-deep history of per-channel anomaly flags used for filtering.
        self.filter_array = np.zeros((2, self.input_dim - 1))

    def start_od_confirm(self, msgbean: BaseBean):
        """
        Confirm the condition to start detection.

        Trigger conditions differ per product, so every configured condition
        is matched explicitly against the message's table number,
        sub-function number and code list.

        Returns True (and latches ``od_start``) on a match, False otherwise.
        """
        for condition in self.start_od_conditions:
            if condition["table_num"] == msgbean.table_num:
                if condition["sub_function_no"] == msgbean.sub_function_no:
                    for item in msgbean.codes:
                        # flag == 10 appears to mark the code as active --
                        # TODO confirm the meaning of this magic value.
                        if item.code == condition["code"] and item.flag == 10:
                            self.od_start = True
                            return True

        return False

    def stop_od_confirm(self, msgbean: BaseBean):
        """
        Confirm the condition to stop detection.

        Returns True (and clears ``od_start``) when a configured stop
        condition matches the incoming message, False otherwise.
        """
        for condition in self.stop_od_conditions:
            if condition["table_num"] == msgbean.table_num:
                if condition["sub_function_no"] == msgbean.sub_function_no:
                    for item in msgbean.codes:
                        if item.code == condition["code"] and item.flag == 10:
                            self.od_start = False
                            return True

        return False

    def update_input_data(self, msgbean: BaseBean):
        """
        Update the input frame from one incoming message.

        Variables are written at the offset registered for the message's
        table number; once every configured table has reported, the
        assembled frame is queued for inference.
        """
        table_num = msgbean.table_num

        # Table 1999 carries only the start/stop trigger messages.
        if table_num == 1999:
            self.start_od_confirm(msgbean)
            self.stop_od_confirm(msgbean)
            return

        # Ignore data frames until detection has been started.
        if not self.od_start:
            return

        # Offset of this table's variables inside input_data.
        offset = self.input_update_flag[table_num][1]

        with self.lock:
            # Each configured index selects one variable from the message.
            for index in self._config.inputs[table_num]:
                self.input_data[0][offset] = msgbean.vars[index]
                offset += 1
            # Mark this table as updated for the current frame.
            self.input_update_flag[table_num][0] = True

        if all(flag for [flag, _] in self.input_update_flag.values()):
            # TODO consider carrying the timestamp through inference so the
            # result can be matched back to its source frame.
            if self.input_data_queue.full():
                sys_logger.error(f"{self._config.name} Input data queue is full and cannot accept new data.")
                return

            # NOTE(review): the copy below runs outside the lock; if several
            # feeder threads call this method concurrently it may race with a
            # concurrent write -- confirm the single-feeder assumption.
            self.input_data_queue.put([msgbean.vars[1], self.input_data.copy()])
            # The frame is queued, so the per-table flags can be cleared
            # before the next frame starts arriving.
            with self.lock:
                for k, v in self.input_update_flag.items():
                    v[0] = False

    def train(self, data):
        """Delegate training to the base-class implementation."""
        return super().train(data)

    def run(self):
        """
        Model-inference loop (intended to run on its own thread; a separate
        thread is responsible for feeding data via ``update_input_data``).

        Blocks on the input queue; for every frame performs one LSTM step
        and pushes ``[timestamp, result_vector]`` onto the output queue.
        With n = input_dim - 1 monitored channels the result layout is:
        [0:n] raw inputs, [n:2n] predictions, [2n:3n] upper limits,
        [3n:4n] lower limits, [4n:5n] anomaly flags.
        """
        # Number of monitored channels (channel 0 of a frame is the timestamp).
        n = self.input_dim - 1
        while not self.thread_stop:
            # Blocking wait for the next frame; stop() enqueues [None, None]
            # to wake this loop for shutdown.
            t, frame = self.input_data_queue.get()

            if frame is None or t is None:
                continue
            # Normalize when configured; otherwise feed the raw frame.
            # (The raw path previously left the tensor source as None and
            # crashed in torch.from_numpy.)
            if self.need_preprocess:
                net_input = (frame - self.mean) / self.std
            else:
                net_input = frame
            # Single inference step, carrying the LSTM hidden state forward.
            test_x = torch.from_numpy(net_input).float().to(self.device)
            pred_x, self.hidden_pred = self.model(test_x, self.hidden_pred)
            pred_x = pred_x.detach().cpu().numpy()

            # De-normalize; the prediction has one channel fewer than the
            # input (no timestamp channel).
            if self.need_preprocess:
                pred_x = pred_x * self.std[1:] + self.mean[1:]

            # Result vector: raw values, predictions, limits, anomaly flags.
            vals = np.zeros(self.output_dim, np.float32)
            vals[0:n] = frame[0, 1:self.input_dim]   # raw input values
            vals[n:2 * n] = pred_x[0]                # predicted values
            # Prediction-centered acceptance band.
            up_limit = pred_x[0] + self.threshold
            down_limit = pred_x[0] - self.threshold
            vals[2 * n:3 * n] = up_limit
            vals[3 * n:4 * n] = down_limit
            # Record this step's out-of-band flags in the 2-deep filter.
            self.filter_array[self.run_cnt % 2] = np.logical_or(
                frame[0, 1:] < down_limit, frame[0, 1:] > up_limit).astype(np.float32)
            # Average the filter then threshold it: only two consecutive
            # anomalous steps (mean 1.0 > 0.8) produce an anomaly output.
            anomaly_flag = np.mean(self.filter_array, axis=0).astype(np.float32)
            anomaly_flag[anomaly_flag > 0.8] = 1.0
            anomaly_flag[anomaly_flag < 0.8] = 0.0

            vals[4 * n:5 * n] = anomaly_flag

            # Queue the result together with its timestamp.
            self.output_data_queue.put([t, vals])
            self.run_cnt += 1

        sys_logger.info(f"{self._config.name} Model inference thread stopped. Total predict data num {self.run_cnt}.")

    def load_weights(self, weights_path):
        """
        Load a PyTorch .pth state-dict file into the model.

        In debug mode the weights live next to the source tree; in a frozen
        build they live next to the executable.

        Raises FileNotFoundError when the resolved path does not exist.
        """
        if config_singleton.Debug:
            real_path = os.path.join(os.path.dirname(__file__), '..', 'model_files', weights_path)
        else:
            real_path = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), 'model_files', weights_path)

        if not os.path.exists(real_path):
            raise FileNotFoundError("Hydraulic Servo Model file not found, check file path or name.")

        if self.model.__class__.__name__ == 'LstmNet':
            # weights_only=True restricts torch.load to tensor data, which is
            # safer than unpickling arbitrary objects.
            self.model.load_state_dict(torch.load(
                real_path, map_location=self.device, weights_only=True))

    def stop(self):
        """Request thread shutdown and unblock both queue consumers."""
        self.thread_stop = True
        self.input_data_queue.put([None, None])
        self.output_data_queue.put([None, None])

    def get_output_varnames(self):
        """Return the configured output variable names."""
        return self._config.outputs["var_names"]

    def get_output_data(self):
        """
        Fetch one anomaly-detection result with its timestamp.

        Non-blocking: returns [None, None] immediately when no result is
        available.
        """
        if self.output_data_queue.empty():
            vals = [None, None]
        else:
            vals = self.output_data_queue.get_nowait()

        return vals

    def get_model_name(self):
        """Return the configured model name."""
        return self._config.name

# Example usage
if __name__ == "__main__":
    # Placeholder: construct a ModelConfig and exercise the detector here.
    pass
