# -*- coding: utf-8 -*- 
"""
@Author : Chan ZiWen
@Date : 2022/8/1 12:58
File Description:

输入数据：时间段
06:00:00 - 12:00:00
"""
import time
import json
import math
import pandas as pd

from datetime import datetime, timedelta, date
from tsmoothie import ConvolutionSmoother
from flask import request, Blueprint, jsonify
from common.feishu import FeishuMsgHandler

wake = Blueprint('wake', __name__)

# Preset analysis window (leading space: these are appended to a date string)
# NOTE(review): neither constant is referenced in this file — presumably used
# elsewhere; confirm before removing.
UpStartTime = " 03:00:00"
UpEndTime = " 10:00:00"
amin = 10  # minimum-distance threshold; hard-coded for now


def steady(df):
    """Damp sample-to-sample jumps in a distance series.

    Each point (except the first) is pulled back toward its predecessor by a
    weight that shrinks as the jump grows relative to ``fraction`` (40):

        weight = 0.5 * (1 - step / 40)
        out[i] = df[i] - step * weight

    Steps are always measured on the *original* series, never on already
    smoothed values.

    :param df: indexable sequence of numbers (list or pandas Series)
    :return: a same-type copy with damped values
    """
    damping_span = 40
    out = df.copy()
    for idx in range(1, len(df)):
        step = df[idx] - df[idx - 1]
        weight = 0.5 * (1 - step / damping_span)
        out[idx] = df[idx] - step * weight
    return out


def wake_compute(log_time, res):
    """Estimate the wake-up minute from a per-minute bluetooth-distance series.

    Pipeline: interpolate gaps -> drop remaining NaNs -> early-out if the
    device never came within ``amin`` -> damp + convolution-smooth -> first
    difference -> rolling sum/std -> scan for the window where both jump.

    :param log_time: list of 'HHMM' keys, parallel to ``res``
    :param res: dict mapping 'HHMM' -> distance (None for missing minutes)
    :return: the 'HHMM' string at the detected change point, or None
    """
    ############ parameter
    window_sizes = 40  # rolling-window length, in minutes

    keys = list(res.keys())
    ## build the series as a one-column DataFrame indexed by 'HHMM'
    df = pd.DataFrame(
        {'BluetoothDistance': list(res.values())},
        index=keys
    )

    # a, interpolation
    df = df.interpolate(method='linear', limit_direction='forward')

    # a, drop rows that are still NaN (leading gap before the first sample)
    df = df.dropna(axis=0, how='any')

    # a, bail out unless the closest observed distance gets below the threshold
    """
    10      0.26932668329177056  -----  51.85286783042394
    without 0.26848249027237353  -----  53.40466926070039
    """
    if amin < float(df['BluetoothDistance'].min()):     # min of the distance column
        return None

    # a, smoothing: damp jumps first, then convolution-smooth with a flat kernel
    steady_df = steady(list(df["BluetoothDistance"]))
    smoother = ConvolutionSmoother(window_len=30, window_type='ones')
    smoother.smooth([steady_df])
    smooth_data = smoother.smooth_data[0]
    smooth_data = pd.Series(smooth_data)

    # b, difference
    diff = smooth_data.diff(periods=1).rolling(window=window_sizes)

    # c, window's variance
    # sum
    # NOTE(review): pandas Rolling.sum takes no ddof kwarg — this raises on
    # current pandas; confirm the pinned pandas version actually accepts it.
    rwsum = diff.sum(ddof=1)
    min_sum = rwsum.min() / 3  # most-negative window sum, scaled; "leaving home" guard below
    # var
    rwvar = diff.std(ddof=1)

    flag = None  # index of the detected change window, once found
    n = len(rwsum)

    """ 循环过滤 """
    a, b = 2, 1.3  # sum / variance jump factors; a is relaxed on the retry pass
    iteration = 0
    while not flag and iteration < 2:
        best_sum, best_var = 0, 0
        for i, (s, v) in enumerate(zip(rwsum, rwvar)):
            if i >= n - window_sizes:
                break
            if i >= window_sizes:
                post_sum, post_var = rwsum[i + window_sizes // 2], rwvar[i + window_sizes // 2]
                # when variance is large the sum may hover near 0; abs() copes with that
                # condition: when sum and var both jump there is a long edge between the
                # two windows; the earlier (pre) values tell whether a change happened
                # if s < post_sum/a and abs(v) < abs(post_var)/b:
                if abs(v) < abs(post_var) / b:  # later half-window is markedly more volatile
                    if abs(s) < abs(post_sum) / a:  # and its swing is large (or the earlier sum is near zero)
                        if best_sum < (abs(post_sum) - abs(s)) and best_var < (abs(post_var) - abs(v)):
                            # exclude "leaving home": require the later sum to beat a
                            # fraction of the most-negative window sum
                            if post_sum > min_sum:
                                best_var = (abs(post_var) - abs(v))
                                best_sum = (abs(post_sum) - abs(s))
                                flag = i
            else:
                pass
        a -= 0.1
        iteration += 1
    if flag is None:
        analysisRes = None
    else:
        # NOTE(review): flag indexes the smoothed/differenced series, but log_time
        # is the *full* key list; dropna above may shorten the series, so these
        # can be misaligned — confirm inputs never have leading NaNs.
        analysisRes = log_time[flag]  # .replace(":", "：")

    return analysisRes


def f1(x):
    """Map a signal magnitude to a distance-like value: 10 ** ((|x| - 45) / 32.5)."""
    exponent = (abs(x) - 45) / 32.5
    return 10.0 ** exponent


def gen_seq(s=3, e=10):
    """Build a dict of per-minute 'HHMM' keys for the analysis window.

    Keys cover every minute from hour ``s`` (inclusive) to hour ``e``
    (exclusive), zero-padded ('0300', '0301', ... '0959'); every value is a
    None placeholder to be filled with a distance later.

    :param s: first hour, inclusive
    :param e: last hour, exclusive
    :return: dict mapping 'HHMM' -> None, in chronological insertion order
    """
    slots = {}
    for hour in range(s, e):
        for minute in range(60):
            # was: `s = f"..."` — the loop rebound the `s` parameter (shadowing)
            slots[f"{hour:0>2}{minute:0>2}"] = None
    return slots


def run(date_q, date_distance_dict=None):
    """Analyse every MAC's distance series for one day and return wake times.

    For each 12-char MAC key: fill the preset minute grid (``gen_seq``) with
    converted distances, skip MACs with fewer than 100 usable samples, run
    ``wake_compute``, and convert the detected 'HHMM' into a unix timestamp.

    :param date_q: date string, '%Y-%m-%d'
    :param date_distance_dict: mapping MAC -> {'HHMM': raw_signal}; None/empty
        yields an empty result (previously crashed on ``None.items()``)
    :return: dict mapping MAC -> wake-up unix timestamp (int)
    """
    id_mac_time = {}
    if not date_distance_dict:
        return id_mac_time
    for k_mac, data in date_distance_dict.items():
        if len(k_mac) != 12:  # expect a bare MAC with no separators
            continue

        # fresh minute grid of None placeholders for this MAC
        D = gen_seq()
        num_ad = 0  # usable-sample count; below 100 the MAC is skipped

        try:
            for K, V in data.items():
                # only minutes inside the analysis window (placeholder is None, not 0)
                if D.get(K, 0) != 0:
                    V = f1(V)
                    num_ad += 1
                    D[K] = V if V < 40 else 40  # clamp converted distance at 40
        except AttributeError:
            # `data` was not a dict — log and move on to the next MAC
            print(f'AttributeError, {k_mac}, {data}')
            continue
        if num_ad < 100:
            print(f'Number Error , {k_mac}  {num_ad},  ')
            continue
        log_time = list(D.keys())
        ans = wake_compute(log_time, D)
        if ans is None:
            continue
        # `ans` is 'HHMM'; combine with the date and convert to a unix timestamp
        id_mac_time[k_mac] = int(datetime.strptime(f'{date_q} {ans}', '%Y-%m-%d %H%M').timestamp())
    return id_mac_time


@wake.route('/res', methods=['POST'])
def res():
    """POST /analyze/wake/res — run the wake-time analysis for one day.

    Expected JSON body: {'name': ..., 'date': 'YYYY-MM-DD', <MAC>: {...}, ...}.
    'name' and 'date' are stripped off; the remaining keys are treated as the
    per-MAC distance payload passed to ``run``.

    :return: (json dict of MAC -> timestamp, 200) or (error text, 404)
    """
    if request.json is None:
        # body was sent without an application/json content type
        payload = json.loads(request.data)
    else:
        payload = request.json
    # was: payload.pop('name') — a missing 'name' key 500'd the endpoint
    payload.pop('name', None)
    date_q = payload.pop('date')

    try:
        result = run(date_q, payload)
        return jsonify(result), 200
    except RuntimeError as e:
        return f'{e}', 404