# -*- coding: utf-8 -*- 
"""
@Author : Chan ZiWen
@Date : 2022/7/19 10:20
File Description:

"""

import sys
import time
import json
import psutil
import requests
import pandas as pd
from pyhive import hive

from datetime import datetime, timedelta
from tsmoothie import ConvolutionSmoother
from concurrent.futures import ThreadPoolExecutor, as_completed
from flask import jsonify, request, Blueprint, current_app
from common.feishu import FeishuMsgHandler
from threading import Thread

sleepWake = Blueprint('sleepWake', __name__)

# NOTE(review): 1,814,400 s = 21 days, although the name hints at "15";
# the constant is not referenced anywhere in this file -- confirm before use.
interval_15 = 1814400
# nearness threshold (Bluetooth distance) used by wake(): the device must have
# come closer than this at least once, otherwise no wake time is reported
amin = 10

# cache of already-requested row ranges per date: {date: ['<mode><start>,<end>', ...]}
dict_requests = {}

# preset analysis windows (appended verbatim to the "YYYY-MM-DD" date string)
UpStartTime = " 03:00:00"
UpEndTime = " 10:00:00"
DownStartTime = " 20:00:00"
DownEndTime = " 02:00:00"


def asyncf(f):
    """Decorator: run the wrapped callable in a background (non-daemon) thread.

    The decorated call returns immediately. The started ``Thread`` object is
    returned so callers may ``join()`` it if they need to wait -- the original
    wrapper returned ``None``, so returning the thread is backward compatible.
    """
    from functools import wraps

    @wraps(f)  # preserve f's __name__/__doc__ for logging and introspection
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr

    return wrapper


def steady(df):
    """Damp sample-to-sample jumps in a numeric sequence.

    For each element after the first, the raw step from the previous ORIGINAL
    value is shrunk by a factor that depends on the step size itself:

        step    = df[i] - df[i-1]
        damping = (1 - step / 40) * 0.5
        out[i]  = df[i] - step * damping

    The input is not modified; a damped copy is returned.

    :param df: indexable numeric sequence supporting .copy() (list / Series)
    :return: same-type copy with damped values
    """
    smoothed = df.copy()
    scale = 40

    for idx in range(1, len(df)):
        step = df[idx] - df[idx - 1]
        damping = (1 - step / scale) * 0.5
        smoothed[idx] = df[idx] - step * damping
    return smoothed


def is_number(s):
    """Parse *s* as a float, returning None for anything unparsable.

    The literal string "nan" (any case) is treated as missing and maps to None.
    Non-string numerics are converted via float(); the original version raised
    AttributeError on them (and on None) because it called s.lower()
    unconditionally -- now every unparsable input yields None.

    :param s: value to parse (typically a distance string from Hive)
    :return: float value, or None if missing/invalid
    """
    try:
        if isinstance(s, str) and s.lower() == "nan":
            return None
        return float(s)
    except (ValueError, TypeError):
        return None


def sleep(log_time, distance):
    """Detect the fall-asleep moment from one device's Bluetooth-distance series.

    Bluetooth data only exists while the screen is off:
        already asleep before screen-off: the series stays flat -> no sleep time
        falls asleep right after screen-off: sleep time detected
        falls asleep a while after screen-off: sleep time detected

    :param log_time: sequence of millisecond-timestamp strings (sorted by caller)
    :param distance: sequence of distance strings, possibly "nan"/invalid
    :return: the log_time entry at the detected change point, or None
    """
    res = {}
    flag = False
    for t, d in zip(log_time, distance):
        t = int(t)
        dis = is_number(d)
        if dis is not None:
            res[t] = dis if dis < 40 else 40  # clip distances at 40
        else:
            # NOTE(review): `flag` is never set to True in this loop, so this
            # branch is dead and invalid samples are always dropped; the
            # original comment ("the first key-value must be valid") suggests
            # flag was meant to flip after the first valid sample -- confirm.
            if flag:
                res[t] = None
                # reduce outlier influence; alternatively delete them or look
                # for interfering factors, depending on the real situation

    ############ parameter
    window_sizes = 40

    keys = list(res.keys())
    # build the working frame, indexed by timestamp
    df = pd.DataFrame(
        {'BluetoothDistance': list(res.values())},
        index=keys
    )

    # a, interpolation (a no-op unless None values were stored above)
    df = df.interpolate(method='linear', limit_direction='forward')

    # a, smoothing
    steady_df = list(df["BluetoothDistance"])  # pre-damping via steady() disabled here
    smoother = ConvolutionSmoother(window_len=30, window_type='ones')
    smoother.smooth([steady_df])
    smooth_data = smoother.smooth_data[0]
    smooth_data = pd.Series(smooth_data)

    # b, first difference, then a rolling window over it
    diff = smooth_data.diff(periods=1).rolling(window=window_sizes)

    # c, window statistics
    # sum
    # NOTE(review): Rolling.sum() has no ddof parameter (likely copied from
    # std(ddof=1) below); depending on the pandas version it is ignored or
    # rejected -- confirm against the deployed pandas.
    rwsum = diff.sum(ddof=1)
    thres = rwsum.max() / 20.
    # var
    rwvar = diff.std(ddof=1)

    # BUGFIX: was `flag = 0`, which made the `if flag is None` check at the end
    # unreachable -- a series with no detected change point returned
    # log_time[0] instead of None. wake() uses None for the same sentinel.
    flag = None
    n = len(rwsum)
    # assert n > 150, Warning(f"Data that it's length is '{n}' is too small ")
    if n <= 150:
        return None
    # iterative filtering: relax threshold `a` once if nothing was found
    a, b = 2, 1.3
    iteration = 0
    while not flag and iteration < 2:
        # look one window ahead for a larger fluctuation than the current one
        for i, (s, v) in enumerate(zip(rwsum, rwvar)):
            if i >= n - window_sizes:
                break
            if i >= window_sizes:
                post_sum, post_var = rwsum[i + window_sizes], rwvar[i + window_sizes]
                # when var is large, sum may tend to 0 -- abs() handles the sign;
                # condition: when sum and var both show a gap there is a long
                # edge in between
                if abs(v) > abs(post_var) * b:  # variance is larger
                    if (abs(s) > abs(post_sum) * a) or (abs(s) < thres):  # fluctuation large, or tends to zero
                        flag = i
                        break
            else:
                pass
        a -= 0.1  # relax the threshold for the next pass
        iteration += 1

    if flag is None:
        analysisRes = None
    else:
        # NOTE(review): flag indexes the rolling series built from res rows;
        # duplicate timestamps collapse in res, so this may not align with raw
        # log_time positions -- confirm.
        analysisRes = log_time[flag]
    return analysisRes


def wake(log_time, distance):
    """Detect the wake-up moment from one device's Bluetooth-distance series.

    Pipeline: parse/clip samples -> interpolate -> steady() damping +
    convolution smoothing -> scan rolling-window sum/variance for the point
    where fluctuation picks up (the person starts moving).

    :param log_time: sequence of millisecond-timestamp strings (sorted by caller)
    :param distance: sequence of distance strings, possibly "nan"/invalid
    :return: the log_time entry at the detected change point, or None
    """
    res = {}
    n = 0
    for t, d in zip(log_time, distance):
        t = int(t)
        dis = is_number(d)
        if dis is not None:
            res[t] = dis if dis < 40 else 40
            n += 1
            # reduce outlier influence; alternatively delete them or look for
            # interfering factors, depending on the real situation

    # NOTE(review): duplicate timestamps overwrite res entries, so len(res) can
    # be smaller than n, yet n is used below as the scan bound -- confirm.
    # assert n > 140, Warning(f"Data that it's length is '{n}' is too small ")
    if n < 190:
        return None

    ############ parameter
    window_sizes = 40

    keys = list(res.keys())
    # build the working frame, indexed by timestamp
    df = pd.DataFrame(
        {'BluetoothDistance': list(res.values())},
        index=keys
    )

    # a, interpolation
    df = df.interpolate(method='linear', limit_direction='forward')

    # a, drop any remaining NaN rows
    df = df.dropna(axis=0, how='any')

    # a, require the closest observed distance to be under the module-level
    # threshold `amin` (the device must actually have come near)
    if amin < df['BluetoothDistance'].min():
        return None

    # a, smoothing (steady() damping, then convolution)
    steady_df = steady(list(df["BluetoothDistance"]))
    smoother = ConvolutionSmoother(window_len=30, window_type='ones')
    smoother.smooth([steady_df])
    smooth_data = smoother.smooth_data[0]
    smooth_data = pd.Series(smooth_data)

    # b, first difference, then a rolling window over it
    diff = smooth_data.diff(periods=1).rolling(window=window_sizes)

    # c, window statistics
    # sum
    # NOTE(review): Rolling.sum() has no ddof parameter (likely copied from
    # std(ddof=1) below); depending on the pandas version it is ignored or
    # rejected -- confirm against the deployed pandas.
    rwsum = diff.sum(ddof=1)
    min_sum = rwsum.min() / 3
    # var
    rwvar = diff.std(ddof=1)

    # index of the detected change point; None = not found
    flag = None

    """ 循环过滤 """
    # iterative filtering: relax threshold `a` once if nothing was found
    a, b = 2, 1.3
    iteration = 0
    while not flag and iteration < 2:
        # NOTE(review): best_sum/best_var reset every pass, but a flag found in
        # an earlier pass already ends the loop via the while condition
        best_sum, best_var = 0, 0
        for i, (s, v) in enumerate(zip(rwsum, rwvar)):
            if i >= n - window_sizes:
                break
            if i >= window_sizes:
                post_sum, post_var = rwsum[i + window_sizes // 2], rwvar[i + window_sizes // 2]
                # when var is large, sum may tend to 0 -- abs() handles the sign;
                # condition: when sum and var both show a gap there is a long
                # edge in between
                # if s < post_sum/a and abs(v) < abs(post_var)/b:
                if abs(v) < abs(post_var) / b:  # the later variance is larger
                    if abs(s) < abs(post_sum) / a:  # the later fluctuation is larger
                        if best_sum < (abs(post_sum) - abs(s)) and best_var < (abs(post_var) - abs(v)):
                            # exclude leaving-home cases: require post_sum above
                            # min_sum (largest negative sum / 3)
                            if post_sum > min_sum:
                                best_var = (abs(post_var) - abs(v))
                                best_sum = (abs(post_sum) - abs(s))
                                flag = i
            else:
                pass
        a -= 0.1  # relax the threshold for the next pass
        iteration += 1
    if flag is None:
        analysisRes = None
    else:
        # NOTE(review): flag indexes the rolling series; dropped/duplicate
        # samples mean this may not align with raw log_time positions -- confirm.
        analysisRes = log_time[flag]  # .replace(":", "：")
    return analysisRes


def Connect(configs):
    """Open a Hive connection with LDAP authentication.

    :param configs: mapping with 'host', 'port', 'user', 'password', 'database'
    :return: a live pyhive ``hive.Connection``
    """
    return hive.Connection(
        host=configs['host'],
        port=configs['port'],
        auth='LDAP',
        username=configs['user'],
        password=configs['password'],
        database=configs['database'],
    )


def Batch_select(date: str = None, mode: str = 'wake', a: int = None, b: int = None, config_hive: dict = None):
    """
        Get all the pair of (active_id, mac) (mobile devices) according day and log_time (20:00-02:00 or 03:00-10:00)
    by "group by"

    :param date: analysis day, "YYYY-MM-DD" (format-validated by the route handler)
    :param mode: 'wake' queries the 03:00-10:00 window; anything else queries 20:00-02:00(+1d)
    :param a: first row number of the paging window (inclusive)
    :param b: last row number of the paging window (inclusive)
    :param config_hive: hive connection settings plus the 'table' to query
    :return: fetched rows (tv_mac, active_id, mac, log_times, distances, count, row_number)
    """
    con = Connect(config_hive)

    start = time.time()
    cursor = con.cursor()
    if mode == 'wake':
        # build millisecond timestamps for the morning window
        startTime = date + UpStartTime
        endTime = date + UpEndTime
        stamp = int(time.mktime(time.strptime(startTime, "%Y-%m-%d %H:%M:%S")) * 1000)
        etamp = int(time.mktime(time.strptime(endTime, "%Y-%m-%d %H:%M:%S")) * 1000)
        # run the query
        """,collect_set(rssi),collect_set(distance)"""
        # NOTE(review): SQL is assembled via f-strings; `date` is format-checked
        # upstream and a/b are ints, but parameterized queries would be safer.
        sql = f'select * from (select t.tv_mac, t.active_id, t.mac, collect_list(t.log_time), collect_list(t.distance), count(*), row_number() over() as rowsa ' \
            f'from (SELECT * FROM {config_hive["table"]} WHERE `day` = "{date}") as t ' \
            f'where t.log_time>={stamp} AND t.log_time<={etamp} group by t.tv_mac, t.active_id, t.mac having count(*) > 200) f ' \
            f'where f.rowsa between {a} and {b}'
        print(sql)
        cursor.execute(sql)
    else:
        # day string of the following day (the sleep window crosses midnight)
        date_next = str(datetime.fromtimestamp(time.mktime(time.strptime(date, "%Y-%m-%d")) + 86400)).split(' ')[0]

        # build millisecond timestamps for the evening window
        startTime = date + DownStartTime
        endTime = date + DownEndTime
        stamp = int(time.mktime(time.strptime(startTime, "%Y-%m-%d %H:%M:%S")) * 1000)
        etamp = int((time.mktime(time.strptime(endTime, "%Y-%m-%d %H:%M:%S")) + 86400) * 1000)    # plus one day of seconds: end is 02:00 the next day
        # run the query

        sql = f'select * from (select t.tv_mac, t.active_id, t.mac, collect_list(t.log_time), collect_list(t.distance), count(*), row_number() over() as rowsa  ' \
            f'from (SELECT * FROM {config_hive["table"]} WHERE `day` in ("{date}", "{date_next}")) as t  ' \
            f'where t.log_time>={stamp} AND t.log_time<={etamp} group by t.tv_mac, t.active_id, t.mac having count(*) > 200) f ' \
            f'where f.rowsa between {a} and {b}'
        print(sql)
        cursor.execute(sql)
    y = cursor.fetchall()
    cursor.close()
    con.close()
    print("Duration(get all source data) time: ", time.time() - start)
    print(f"All device's data length: {len(y)} ({mode})")
    return y


def single_fn(id_mac_time, tv_mac: str = None, active_id: str = None, mac: str = None, log_time: str = None, distance: str = None,
              mode: str = 'wake', date: str = None):
    """Analyse one device's samples and append the result (if any) to id_mac_time.

    log_time and distance arrive as stringified lists, e.g. "[165..., 165...]".
    On success a record of this shape is appended:
        {"mode": 1|2, "tvMac": ..., "activeId": ..., "mac": ...,
         "oneDayTime": <epoch seconds>, "analysisDatetime": date}
    """
    # parse "[a,b,...]" strings into lists of strings
    times = log_time.strip(']').strip('[').split(',')
    dists = distance.strip(']').strip('[').split(',')

    # lexicographic sort by (timestamp string, distance string)
    times, dists = zip(*sorted(zip(times, dists), key=lambda pair: (pair[0], pair[1])))

    if mode == 'sleep':
        mode_code = 2
        result = sleep(times, dists)
    else:   # wake
        mode_code = 1
        result = wake(times, dists)

    if result is None:
        return

    id_mac_time.append(
        {
            "mode": mode_code,
            "tvMac": tv_mac,
            "activeId": active_id,
            "mac": mac,
            "oneDayTime": int(result) // 1000,  # milliseconds -> seconds
            "analysisDatetime": date
        })


def batch_analysis(date: str = None, mode: str = None, a: int = None, b: int = None, config_hive: dict = None,
                   wCKUrl: str = None):
    """ multi-thread processing
    procedure:
        1, creating threads
        2, ordering data by log_time
        3, calling single mac analysis function

    :param date: analysis day, "YYYY-MM-DD"
    :param mode: 'sleep' or 'wake'
    :param a: first row number of the paging window
    :param b: last row number of the paging window
    :param config_hive: hive connection settings
    :param wCKUrl: ClickHouse write-API endpoint for the results
    """
    # shared result list; worker threads append via single_fn (relies on
    # CPython's GIL making list.append effectively atomic)
    id_mac_time = []
    # paged processing: fetch rows a..b in one query
    start = time.time()
    res_list = Batch_select(date, mode, a, b, config_hive)

    executor_r = ThreadPoolExecutor(40)  # thread-pool size
    print('Created a threadPool which max_worker is 40.')
    features = []
    for res in res_list:
        tv_mac, active_id, mac, log_time, distance = res[0], res[1], res[2], res[3], res[4]
        features.append(executor_r.submit(single_fn, id_mac_time, tv_mac, active_id, mac, log_time, distance, mode, date))

    print('Waiting all threads shutdown')
    # NOTE(review): single_fn always returns None, so this branch never fires
    # for normal completions; if a task raised, fi.result() re-raises the
    # exception right here instead -- confirm the intent.
    for fi in as_completed(features):
        if fi.result() is not None:
            FeishuMsgHandler().sendFeiShu(f"{fi}")
    # NOTE(review): `del` only drops the references; executor_r.shutdown()
    # (or a with-block) would terminate the pool explicitly.
    del executor_r, features

    print(f"{mode} duration(read & analysis data({a} - {b})) time: {(time.time() - start) / 60}(m)")
    # persist the accumulated results to ClickHouse through the write API
    save2ck(id_mac_time, date, wCKUrl)
    print(f"{date}(lifetime)The length of data saving to the Clickhouse : {len(id_mac_time)} ")
    del id_mac_time


def post(date, datas, url, headers, timeout=None):
    """POST *datas* as JSON to *url* and verify the service reply.

    :param date: analysis day, used only in the error message
    :param datas: JSON-serialisable payload (list of result records)
    :param url: write-API endpoint
    :param headers: HTTP headers (caller sets Content-Type: application/json)
    :param timeout: seconds before requests gives up; None (the default,
        matching the original behaviour) waits indefinitely -- pass a value
        to avoid hanging on a stuck server
    :raises RuntimeError: when the service responds with code != 1000
    """
    payload = json.dumps(datas)
    resp = requests.post(url, data=payload, headers=headers, timeout=timeout)
    response = json.loads(resp.text)
    if response['code'] != 1000:
        raise RuntimeError(f"{date}({response})")


def save2ck(id_mac_time, date, wCKUrl):
    """Persist analysis results to ClickHouse via the write API, in batches.

    Records are posted in chunks of at most 10000 to keep each request small;
    an empty result list sends nothing.

    :param id_mac_time: list of result records produced by single_fn
    :param date: analysis day (forwarded for error reporting)
    :param wCKUrl: write-API endpoint
    """
    headers = {"Content-Type": "application/json"}
    batch = 10000
    total = len(id_mac_time)
    if total == 0:
        return

    if total <= batch:
        post(date, id_mac_time, wCKUrl, headers)
        return

    # more than one batch: slice and post sequentially
    for offset in range(0, total, batch):
        post(date, id_mac_time[offset: offset + batch], wCKUrl, headers)


def validate(date_text):
    """Check that *date_text* matches YYYY-MM-DD.

    Returns 0 when valid and 1 when invalid, so callers can use the result
    directly as a truthy "is invalid" flag.
    """
    try:
        datetime.strptime(date_text, '%Y-%m-%d')
    except ValueError:
        return 1
    return 0


@asyncf
def run(date, mode, startRow, endRow, config_hive, wCKUrl):
    """Kick off batch analysis for one day/row-range in a background thread
    (the @asyncf decorator makes this call return immediately)."""
    print(f"Begin computing of the ({date})")
    batch_analysis(date, mode, startRow, endRow, config_hive, wCKUrl)


@sleepWake.route('/res', methods=['GET', 'POST'])
def res():
    """Start asynchronous sleep/wake analysis for one day and row range.

    Query params: date=YYYY-MM-DD, mode=sleep|wake, s=<start row>, e=<end row>.

    http://172.20.148.89:8688/analyze/sleepWake/res?date=2022-08-21&mode=sleep&s=1&e=50000
    http://172.20.148.94:8688/analyze/sleepWake/res?date=2022-07-30&mode=sleep&s=50001&e=100000
    http://172.20.148.94:8688/analyze/sleepWake/res?date=2022-07-29&mode=wake&s=1&e=50000
    :return: JSON status; 404 on invalid input or duplicated request
    """
    config_hive = {
        'host': current_app.config['HIVE_HOST'],
        'port': current_app.config['HIVE_PORT'],
        'user': current_app.config['HIVE_USER'],
        'password': current_app.config['HIVE_PASSWORD'],
        'database': "coocaa_gdl",
        'table': "gdl_zdfw_sc_bluetooth_device"
    }
    wCKUrl = current_app.config['BATCH_SAVE_LITE_TIME']
    register_dict = request.args
    date = register_dict['date']
    # NOTE(review): 404 is returned for bad input; 400 Bad Request would be
    # more conventional -- confirm no client depends on 404 before changing.
    if validate(date):
        return jsonify({"status": "Incorrect data format, should be YYYY-MM-DD"}), 404
    mode = register_dict['mode']
    if mode not in ['sleep', 'wake']:
        return jsonify({"status": "Invalid data 'mode', should be in {'sleep', 'wake'}"}), 404
    startRow = int(register_dict['s'])
    endRow = int(register_dict['e'])

    # de-duplicate repeated requests: only ONE date is tracked at a time --
    # requesting a new date clears the history of every previous date
    if date not in dict_requests:
        dict_requests.clear()
        dict_requests[date] = [f"{mode}{startRow},{endRow}"]
    else:
        tmp = f"{mode}{startRow},{endRow}"
        if tmp in dict_requests[date]:
            return jsonify({"status": "Warning: has been requested"}), 404
        else:
            dict_requests[date].append(tmp)

    # run() is decorated with @asyncf, so this returns immediately
    run(date, mode, startRow, endRow, config_hive, wCKUrl)
    return jsonify({"status": "success"}), 200


@sleepWake.route('/counts', methods=['GET'])
def countS():
    """Count (tv_mac, active_id, mac) groups with >200 samples in the window.

    Example:
        http://172.20.148.89:8688/analyze/sleepWake/counts?date=2022-08-22&mode=sleep

    Query params: date=YYYY-MM-DD, mode=sleep|wake.
    Sends a Feishu alert when no data is found for the requested day.
    :return: JSON {"Counts": <int>}; 404 on invalid input
    """
    config_hive = {
        'host': current_app.config['HIVE_HOST'],
        'port': current_app.config['HIVE_PORT'],
        'user': current_app.config['HIVE_USER'],
        'password': current_app.config['HIVE_PASSWORD'],
        'database': "coocaa_gdl",
        'table': "gdl_zdfw_sc_bluetooth_device"
    }

    register_dict = request.args
    date = register_dict['date']
    if validate(date):
        return jsonify({"status": "Incorrect data format, should be YYYY-MM-DD"}), 404
    mode = register_dict['mode']
    if mode not in ['sleep', 'wake']:
        return jsonify({"status": "Invalid data 'mode', should be in {'sleep', 'wake'}"}), 404

    con = Connect(config_hive)
    cursor = con.cursor()
    # build the millisecond window for the requested mode
    if mode == 'wake':
        startTime = date + UpStartTime
        endTime = date + UpEndTime
        etamp = int(time.mktime(time.strptime(endTime, "%Y-%m-%d %H:%M:%S")) * 1000)
    else:
        startTime = date + DownStartTime
        endTime = date + DownEndTime
        etamp = int((time.mktime(time.strptime(endTime, "%Y-%m-%d %H:%M:%S")) + 86400) * 1000)  # end is 02:00 the next day
    stamp = int(time.mktime(time.strptime(startTime, "%Y-%m-%d %H:%M:%S")) * 1000)

    # day string of the following day (the sleep window crosses midnight)
    date_next = str(datetime.fromtimestamp(time.mktime(time.strptime(date, "%Y-%m-%d")) + 86400)).split(' ')[0]

    # NOTE(review): for mode == 'wake' the extra `date_next` partition is
    # harmless (the log_time bounds exclude it) but could be dropped.
    sql = f'select t.tv_mac from (SELECT * FROM {config_hive["table"]} WHERE `day` in ("{date}", "{date_next}")) as t ' \
        f'where t.log_time>={stamp} AND t.log_time<={etamp} group by t.tv_mac, t.active_id, t.mac having count(*) > 200 '
    print(sql)
    cursor.execute(sql)
    count = len(cursor.fetchall())  # len([]) is already 0, no branch needed
    cursor.close()
    con.close()
    if count == 0:
        # BUGFIX: was `FeishuMsgHandler.sendFeiShu("...{0}...", date)` -- an
        # unbound call that passed the message string as `self` and never
        # formatted the date in. Instantiate and format the message, matching
        # the FeishuMsgHandler().sendFeiShu(...) usage in batch_analysis().
        FeishuMsgHandler().sendFeiShu("移动mac识别分析，hive缺少{0}的数据，赶紧排查".format(date))
    return jsonify({"Counts": count}), 200

