import json
import pickle
import time
import traceback
import logging
import pandas as pd
import redis
import torch.multiprocessing as tmp
from hd.evaluation import mse
from hd.operate import predict
from flask_app import _get_config
from flask_app.websocket.redis_pub_sub import RedisPub
from flask_app.util.log  import ModelLogger
from collections import deque
from loguru import logger
from celery.signals import worker_process_init
from . import celery_app
import random
# Redis endpoints — one host/port, several logical databases:
ip = _get_config.REDIS_HOST
port = _get_config.REDIS_PORT
# db 0: real-time tag data keyed '{ts}@{tag}' (decoded to str on read)
r = redis.Redis(host=ip, port=port, db=0, decode_responses=True)
# db 1: pickled model handlers plus prediction/evaluation results — kept as
# raw bytes because values include pickle payloads
result = redis.Redis(host=ip, port=port, db=1)
# db 3: flushed every 10 min by flush_db(); presumably the Celery result
# backend — TODO confirm against the Celery config
back_end = redis.Redis(host=ip, port=port, db=3, decode_responses=True)
# publisher that pushes prediction/detection payloads to websocket clients
websocket_pub = RedisPub(r)

@worker_process_init.connect
def init_function(*args, **kwargs):
    # Reset the global running-model gauge when a worker process starts, so
    # a stale count left by a previous process cannot block new predictions
    # (predict_celery_task refuses to run once "RModels" reaches the
    # configured concurrency limit).
    r.set("RModels", 0)

# model_dict = {}

@celery_app.task
def add(a, b):
    """Smoke-test task: return the sum of *a* and *b* as a string."""
    total = a + b
    return str(total)


@celery_app.task
def test_celery_task(model_category, model_mid, model_version, yname, running_flag_key, cycle_time=600, ex_time=7200):
    """Periodically score a running prediction model against observed targets.

    Every ``cycle_time`` seconds, pulls the last ``cycle_time`` per-second
    prediction records from the result store, computes the MSE between
    predicted and observed values, and prepends it to the model's rolling
    evaluation list (capped at ``ex_time / cycle_time`` entries, expiring
    after ``ex_time`` seconds).

    Args:
        model_category: model type; 'detection' models have no scalar
            target, so the task returns immediately for them.
        model_mid: model id, used in the evaluation result key.
        model_version: model version, used in the evaluation result key.
        yname: name of the predicted target variable (part of record keys).
        running_flag_key: Redis key; a missing value or 'stop' ends the loop.
        cycle_time: evaluation period in seconds (also the number of
            per-second records fetched each cycle).
        ex_time: expiry of the stored evaluation list, in seconds.
    """
    if model_category == 'detection':
        return

    while True:
        begin = time.time()
        try:
            running_flag = result.get(running_flag_key)
            if running_flag is None or running_flag.decode() == 'stop':
                return

            # Timestamp of the freshest real-time data point.
            ts = int(r.get('latest'))
            start_time = ts - cycle_time

            # Fetch one prediction record per second of the last cycle in a
            # single round trip.
            with result.pipeline() as p:
                for i in range(cycle_time):
                    p.get('{}-{}-{}'.format(yname, model_category, start_time + i))
                res_list = p.execute()

            pred_vals, target_vals = [], []
            for res in res_list:
                if res:
                    record = json.loads(res.decode())
                    pred_vals.append(record['pred']['value'])
                    target_vals.append(record['current'])

            # Score only when we actually got paired data this cycle.
            if pred_vals and len(pred_vals) == len(target_vals):
                eval_res = mse(target_vals, pred_vals)

                key = 'M{}V{}_realtime_eval'.format(model_mid, model_version)
                new_eval = {
                    'mse': eval_res,
                    'ts': ts
                }
                existing = result.get(key)
                if existing:
                    # Prepend the newest result; once the list exceeds
                    # ex_time / cycle_time entries, drop the oldest.
                    eval_results = json.loads(existing.decode())
                    eval_results.insert(0, new_eval)
                    if len(eval_results) > ex_time // cycle_time:
                        eval_results.pop()
                    result.set(key, json.dumps(eval_results), ex=ex_time)
                else:
                    result.set(key, json.dumps([new_eval]), ex=ex_time)

                print(key, new_eval)
        except Exception:
            traceback.print_exc()

        # Always pace the loop to one pass per cycle_time seconds. The
        # previous version slept only on the fully-successful path, so a
        # data mismatch or a persistent error (e.g. 'latest' missing)
        # busy-looped and hammered Redis.
        end = time.time()
        if end < begin + cycle_time:
            time.sleep(cycle_time + begin - end)


@celery_app.task()
def predict_celery_task(model_key, model_category, model_mid, model_version, yname, sub_system, window,
                        running_flag_key, unit):
    """Run a trained model in a loop, publishing one prediction per cycle.

    Loads a pickled model handler from the result store, then every
    ``wait_time`` seconds assembles a sliding window of real-time tag values
    from Redis, calls ``predict``, and publishes the payload over the
    websocket channel plus two result keys (one per timestamp, one
    ``latest``). Stops when ``running_flag_key`` is missing or set to 'stop'.

    Args:
        model_key: Redis key (result store) holding the pickled model handler.
        model_category: 'detection' publishes subsystem anomaly scores with
            per-tag contributions; any other value publishes a scalar
            prediction for ``yname``.
        model_mid: model id, embedded in payloads and result keys.
        model_version: model version, embedded in payloads and result keys.
        yname: predicted target tag name (non-detection models).
        sub_system: subsystem id (detection models).
        window: number of samples in the model's input window.
        running_flag_key: Redis key; a missing value or 'stop' ends the loop.
        unit: routing value passed to the websocket publisher — presumably a
            plant unit id; confirm against RedisPub.publish_data.

    Raises:
        AttributeError: if ``model_key`` is absent from the result store.
    """
    model_logger = ModelLogger().get(model_mid, model_version)

    model_handler = result.get(model_key)
    if not model_handler:
        raise AttributeError('{} not found!'.format(model_key))
    # NOTE(security): unpickling data from Redis executes arbitrary code if
    # the store is ever writable by an untrusted party — keep this db trusted.
    model_handler = pickle.loads(model_handler)

    input_data = deque()      # (timestamp, [tag values]) pairs, oldest first
    last_update_time = None   # timestamp of the newest entry in input_data

    refresh_time = _get_config.REALTIME_DATA_STEP  # real-time data refresh interval (s)
    wait_time = _get_config.MODEL_RUNNNING_TIME    # prediction cycle length (s)

    while True:
        begin = time.time()
        counted = False  # whether this iteration took an "RModels" slot
        try:
            running_flag = result.get(running_flag_key)
            if (not running_flag) or (running_flag.decode() == 'stop'):
                return

            # Global concurrency gate: at most CELERYD_CONCURRENCY models may
            # run a prediction step simultaneously.
            total_running_models = r.get("RModels")
            if total_running_models is None or int(total_running_models) < _get_config.CELERYD_CONCURRENCY:
                r.incr("RModels")
                counted = True
            else:
                logger.warning(f"总运行模型个数超出:{_get_config.CELERYD_CONCURRENCY}")
                # Randomized back-off to desynchronize competing tasks.
                time.sleep(random.random() * wait_time * 0.9)
                continue

            # Timestamp of the freshest real-time data point.
            ts = int(r.get('latest'))
            start_time = ts - (window - 1) * model_handler.sample_step
            if last_update_time is None:
                last_update_time = start_time - 1

            if last_update_time <= (ts - refresh_time):
                # Fetch only the seconds that arrived since the last cycle,
                # one mget (all tags for one second) per pipelined command.
                with r.pipeline() as p:
                    for t in range(last_update_time + 1, ts + 1):
                        p.mget(*['{}@{}'.format(t, point_name) for point_name in model_handler.use_cols])
                    time_res = p.execute()
                for offset, t in enumerate(range(last_update_time + 1, ts + 1)):
                    row = [float(tag_val) if tag_val else None for tag_val in time_res[offset]]
                    input_data.append((t, row))
                # Drop entries that fell out of the model window.
                while input_data and input_data[0][0] < start_time:
                    input_data.popleft()
            last_update_time = ts

            data = pd.DataFrame([vals for _, vals in input_data], columns=model_handler.use_cols)

            # A column with no readings at all cannot be filled — fail loudly
            # (caught and logged below) instead of predicting on garbage.
            # Raising explicitly replaces the old `assert`, which vanishes
            # under `python -O`.
            cols_all_nan = [col for col in data.columns if data[col].isna().all()]
            if cols_all_nan:
                raise ValueError('{} not found'.format(cols_all_nan))

            # Fill gaps forward then backward, and down-sample to the model's
            # sampling step.
            data = data.ffill().bfill()
            data = data.loc[range(0, data.shape[0], model_handler.sample_step)]

            pred = predict(model_handler, data)

            if model_category == 'detection':
                value = {
                    'ts': ts,
                    'value': float(pred[0][0]),
                    'subsystem': sub_system,
                    'mid': model_mid,
                    'version': model_version,
                    # per-tag contribution scores to the anomaly value
                    'contribution': {item[0]: float(item[1]) for item in pred[1]},
                }
                key = 'Subsystem' + str(sub_system) + '-' + str(ts)
                websocket_pub.publish_data('detection', unit, value)
                latest_key = f'M{model_mid}V{model_version}_{sub_system}'
            else:
                current_value = r.get('{}@{}'.format(ts, yname))
                value = {
                    'ts': ts,
                    'current': float(current_value) if current_value else None,
                    'yname': yname,
                    'pred': {
                        'mid': model_mid,
                        'version': model_version,
                        'value': float(pred[0])
                    },
                    'category': model_category
                }
                websocket_pub.publish_data('predict', unit, value)
                key = yname + '-' + model_category + '-' + str(ts)
                latest_key = yname + '-' + model_category + '-' + 'latest'

            value = json.dumps(value)
            result.set(key, value, ex=_get_config.MODEL_RES_EXPIRE_TIME)
            result.set(latest_key, value, ex=_get_config.MODEL_RES_EXPIRE_TIME)
            model_logger.info(f"success : {model_category} model predict for {key}:{value}, total cost : { time.time() - begin} seconds")
        except Exception:
            traceback.print_exc()
            model_logger.error(traceback.format_exc())
        finally:
            # Release the slot exactly when this iteration took one. The
            # previous version decremented on every failure — even those that
            # raised before the increment — letting the gauge drift negative.
            if counted:
                r.decr("RModels")
            end = time.time()
            if end < begin + wait_time:
                time.sleep(wait_time + begin - end)


@celery_app.task
def flush_db():
    """Flush Redis db 3 and return a success message (in Chinese).

    The positional ``True`` requests an asynchronous flush (redis-py
    ``flushdb(asynchronous=...)``), so the server clears the db in the
    background instead of blocking.
    """
    back_end.flushdb(True)
    return '删除成功'


@celery_app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    # Flush Redis db 3 every 10 minutes (600 s).
    sender.add_periodic_task(600.0, flush_db.s(), name='flush every 10min')
