# -*- coding: utf-8 -*- 
"""
@Author : Chan ZiWen
@Date : 2022/7/13 16:57
File Description:

"""
import json
import math
import sys
import time
from datetime import datetime, timedelta

import pandas as pd
import psutil
import requests
import vthread
from clickhouse_driver import Client
from pyhive import hive
from tsmoothie import ConvolutionSmoother


# Hive source configuration (raw Bluetooth device logs used for analysis)
# NOTE(review): credentials are hard-coded in source — consider env vars / a secrets store.
config_hive = {
    'host': "192.168.1.88",
    'port': 10000,
    'user': "lifeservice",
    'password': "HnJs8OiOQX",
    'database': "coocaa_gdl",
    'table': "gdl_zdfw_sc_bluetooth_device"}

# ClickHouse configuration (target for the merged 15-day results)
config_ck = {
    'host': "172.20.151.175",
    'url': "http://172.20.151.175:8123",
    'port': 9000,
    'user': "life_assistant_data",
    'password': "qZmtqgmqq",
    'database': "proactive_service_data",
    'table': "life_time"}

# ClickHouse batch-write HTTP API endpoint
wCKUrl = "http://beta-life-assistant.skysrt.com/conf/life/time/saveBatch"

# NOTE(review): 1_814_400 s = 21 days, although the name suggests 15 — confirm intent.
interval_15 = 1814400

# Derive the worker-thread count from available memory.
# NOTE(review): 1024**2 bytes is 1 MiB, not 1 GiB — the name is misleading;
# effectively thread_n = available MiB // 50.
pc_mem = psutil.virtual_memory()
div_gb_factor = (1024**2)
thread_n = int((pc_mem.available / div_gb_factor) // 50)
# worker pool for per-device analysis (queue/group 1)
pool1 = vthread.pool(thread_n, gqueue=1)
# worker pool for the 15-day merge step (queue/group 2)
pool2 = vthread.pool(thread_n, gqueue=2)

# Shared accumulators filled concurrently by the analysis threads.
sleep_id_mac_time = []      # List[dict]  # sleep-time records pending upload
wake_id_mac_time = []       # List[dict]  # wake-time records pending upload
# records carrying the final merged 15-day times
final_time_info = []


def steady(df):
    """
    Dampen point-to-point jumps in a numeric sequence.

    Every element after the first is pulled back toward its predecessor:
    the step ``diff = df[i] - df[i-1]`` is reduced by a data-dependent
    ratio ``(1 - diff / 40) * 0.5``, so larger positive jumps are damped
    proportionally less than small ones.  Differences are always taken
    from the ORIGINAL sequence, not the smoothed one.

    :param df: indexable numeric sequence supporting ``.copy()`` (list/Series).
    :return: a damped copy of ``df``; the input is left untouched.
    """
    damped = df.copy()
    base = 40  # scale constant controlling how strongly a step is damped

    for idx in range(1, len(df)):
        step = df[idx] - df[idx - 1]
        ratio = (1 - step / base) * 0.5
        damped[idx] = df[idx] - step * ratio
    return damped


def is_number(s):
    """
    Parse *s* as a float, returning None for anything non-numeric.

    NaN inputs (in any spelling/whitespace, e.g. "nan", " NaN ") also yield
    None so that missing readings are treated as gaps instead of values.
    Non-string inputs that float() rejects (e.g. None) yield None as well.

    :param s: value to parse, typically a distance string from Hive.
    :return: the parsed float, or None when *s* is not a usable number.
    """
    try:
        value = float(s)
    except (TypeError, ValueError):
        return None
    # float(" nan ") parses successfully; without this check NaN would slip
    # through and later be clipped to 40 as if it were a real reading.
    if math.isnan(value):
        return None
    return value


def sleep(log_time, distance):
    """
    Estimate the fall-asleep timestamp from Bluetooth distance readings.

    Bluetooth data only exists while the screen is off:
        - already asleep when the screen turns off: the signal stays flat -> no sleep time
        - falls asleep immediately / after a while: a sleep time is detected

    :param log_time: sequence of millisecond-timestamp strings, sorted ascending.
    :param distance: sequence of distance strings ("nan" marks a missing reading).
    :return: the element of ``log_time`` at which the signal settles, or None
             when the series is too short or no transition is found.
    """
    res = {}
    seen_valid = False  # the first recorded key-value must be a valid reading
    for t, d in zip(log_time, distance):
        t = int(t)
        dis = is_number(d)
        if dis is not None:
            res[t] = dis if dis < 40 else 40  # clip distances at 40
            seen_valid = True
        elif seen_valid:
            # Missing reading after data has started: keep a None placeholder
            # so it can be linearly interpolated below (reduces outlier impact).
            # FIX: the original guard flag was never set True, so these gaps
            # were silently dropped and the interpolation step was dead code.
            res[t] = None

    # ---- parameters
    window_sizes = 40

    keys = list(res.keys())
    df = pd.DataFrame(
        {'BluetoothDistance': list(res.values())},
        index=keys
    )

    # a, interpolate the None placeholders
    df = df.interpolate(method='linear', limit_direction='forward')

    # b, smoothing
    steady_df = list(df["BluetoothDistance"])
    smoother = ConvolutionSmoother(window_len=30, window_type='ones')
    smoother.smooth([steady_df])
    smooth_data = pd.Series(smoother.smooth_data[0])

    # c, first difference, then rolling window statistics
    diff = smooth_data.diff(periods=1).rolling(window=window_sizes)

    # rolling sum / std of the differences
    # FIX: Rolling.sum() takes no ddof argument — it was dropped here.
    rwsum = diff.sum()
    thres = rwsum.max() / 20.
    rwvar = diff.std(ddof=1)

    # FIX: use None as "not found" sentinel (was 0, which made the final
    # ``flag is None`` check unreachable and returned log_time[0] by accident);
    # this now matches wake().
    flag = None
    n = len(rwsum)
    if n <= 150:  # series too short to analyse reliably
        return None

    # Iteratively relax the amplitude factor ``a`` until a transition is found.
    a, b = 2, 1.3
    iteration = 0
    while not flag and iteration < 2:
        # Look for a point whose window is clearly noisier than the window
        # ``window_sizes`` steps ahead of it (activity followed by calm).
        for i, (s, v) in enumerate(zip(rwsum, rwvar)):
            if i >= n - window_sizes:
                break
            if i >= window_sizes:
                post_sum, post_var = rwsum[i + window_sizes], rwvar[i + window_sizes]
                # When variance is large the sum may tend toward zero; abs()
                # handles the sign.  Condition: variance clearly larger than
                # the later window, and the swing either large or near zero.
                if abs(v) > abs(post_var) * b:
                    if (abs(s) > abs(post_sum) * a) or (abs(s) < thres):
                        flag = i
                        break
        a -= 0.1
        iteration += 1

    if flag is None:
        return None
    return log_time[flag]


def wake(log_time, distance):
    """
    Estimate the wake-up timestamp from Bluetooth distance readings.

    Mirrors sleep(): looks for the point where a calm (sleeping) signal is
    followed by a clearly noisier window (the user getting up), while
    excluding the leaving-home pattern via ``min_sum``.

    :param log_time: sequence of millisecond-timestamp strings, sorted ascending.
    :param distance: sequence of distance strings ("nan" marks a missing reading).
    :return: the element of ``log_time`` where activity starts, or None when
             the series is too short or no transition is found.
    """
    res = {}
    seen_valid = False  # the first recorded key-value must be a valid reading
    for t, d in zip(log_time, distance):
        t = int(t)
        dis = is_number(d)
        if dis is not None:
            res[t] = dis if dis < 40 else 40  # clip distances at 40
            seen_valid = True
        elif seen_valid:
            # Missing reading after data has started: keep a None placeholder
            # for interpolation below (reduces outlier impact).
            # FIX: the original guard flag was never set True, so these gaps
            # were silently dropped and the interpolation step was dead code.
            res[t] = None

    # ---- parameters
    window_sizes = 30

    keys = list(res.keys())
    df = pd.DataFrame(
        {'BluetoothDistance': list(res.values())},
        index=keys
    )

    # a, interpolate the None placeholders
    df = df.interpolate(method='linear', limit_direction='forward')

    # b, smoothing (extra damping pass via steady(), unlike sleep())
    steady_df = steady(list(df["BluetoothDistance"]))
    smoother = ConvolutionSmoother(window_len=30, window_type='ones')
    smoother.smooth([steady_df])
    smooth_data = pd.Series(smoother.smooth_data[0])

    # c, first difference, then rolling window statistics
    diff = smooth_data.diff(periods=1).rolling(window=window_sizes)

    # rolling sum / std of the differences
    # FIX: Rolling.sum() takes no ddof argument — it was dropped here.
    rwsum = diff.sum()
    min_sum = rwsum.min() / 3
    rwvar = diff.std(ddof=1)

    flag = None  # index of the detected transition (None = not found)
    n = len(rwsum)
    if n <= 150:  # series too short to analyse reliably
        return None

    # Iteratively relax the amplitude factor ``a`` until a transition is found.
    a, b = 2, 1.3
    iteration = 0
    while not flag and iteration < 2:
        best_sum, best_var = 0, 0
        for i, (s, v) in enumerate(zip(rwsum, rwvar)):
            if i >= n - window_sizes:
                break
            if i >= window_sizes:
                post_sum, post_var = rwsum[i + window_sizes // 2], rwvar[i + window_sizes // 2]
                # When variance is large the sum may tend toward zero; abs()
                # handles the sign.  Keep the candidate with the largest calm ->
                # active contrast in both sum and variance.
                if abs(v) < abs(post_var) / b:  # later window clearly noisier
                    if abs(s) < abs(post_sum) / a:  # later swing clearly larger
                        if best_sum < (abs(post_sum) - abs(s)) and best_var < (abs(post_var) - abs(v)):
                            # Exclude the leaving-home pattern: compare against
                            # a third of the most negative rolling sum.
                            if post_sum > min_sum:
                                best_var = (abs(post_var) - abs(v))
                                best_sum = (abs(post_sum) - abs(s))
                                flag = i
        a -= 0.1
        iteration += 1

    if flag is None:
        return None
    return log_time[flag]


def Connect(configs):
    """
    Open a Hive connection (LDAP auth) from a config mapping.

    :param configs: dict with 'host', 'port', 'user', 'password', 'database'.
    :return: a live ``pyhive.hive.Connection``; the caller must close it.
    """
    return hive.Connection(
        host=configs['host'],
        port=configs['port'],
        auth='LDAP',
        username=configs['user'],
        password=configs['password'],
        database=configs['database'],
    )


def Batch_select(date: str = None, mode: str = 'wake', a: int = None, b: int = None):
    """
    Fetch one page of grouped device readings from Hive.

    Groups rows by (tv_mac, active_id, bluetooth_name, mac) within the
    analysis window for *date* — 03:00-10:00 for 'wake', 20:00 to 02:00 of
    the next day otherwise — keeping only groups with more than 200 rows,
    and returns rows whose row_number falls in [a, b] (1-based, inclusive).

    :param date: analysis day, "YYYY-MM-DD".
    :param mode: 'wake' for the morning window; anything else selects the
                 overnight sleep window.
    :param a: first row number of the page.
    :param b: last row number of the page.
    :return: list of tuples
             (tv_mac, active_id, mac, log_times, distances, count, rowsa).
    """
    con = Connect(config_hive)
    # Pick the analysis window; the sleep window crosses midnight, so one
    # extra day (86400 s) is added to its end timestamp.
    if mode == 'wake':
        start_clock, end_clock, extra_secs = " 03:00:00", " 10:00:00", 0
    else:
        start_clock, end_clock, extra_secs = " 20:00:00", " 02:00:00", 86400

    # Millisecond epoch bounds matching the log_time column.
    stamp = int(time.mktime(time.strptime(date + start_clock, "%Y-%m-%d %H:%M:%S")) * 1000)
    etamp = int((time.mktime(time.strptime(date + end_clock, "%Y-%m-%d %H:%M:%S")) + extra_secs) * 1000)

    start = time.time()
    cursor = con.cursor()
    cursor.execute(
        f'select * from (select tv_mac, active_id, mac, collect_list(log_time), collect_list(distance), count(*), row_number() over() as rowsa '
        f'from {config_hive["table"]} '
        f'where log_time>="{stamp}" AND log_time<="{etamp}" group by tv_mac, active_id, bluetooth_name, mac having count(*) > 200) t '
        f'where t.rowsa between {a} and {b}')
    y = cursor.fetchall()
    cursor.close()
    con.close()
    print("Duration(get all source data) time: ", time.time() - start)
    print(f"All device's data length: {len(y)} ({mode})")
    return y


@pool1
def single_fn(tv_mac: str = None, active_id: str = None, mac: str = None, log_time: str = None, distance: str = None,
              mode: str = 'wake', date: str = None):
    """
    Analyse one device's readings and append the result record to the
    matching shared accumulator (runs on the pool1 worker threads).

    :param tv_mac: TV MAC address of the group.
    :param active_id: device/user active id.
    :param mac: Bluetooth MAC address.
    :param log_time: "[t1,t2,...]" string of millisecond timestamps, shape (n,).
    :param distance: "[d1,d2,...]" string of distances, shape (n,).
    :param mode: 'sleep' or 'wake'.
    :param date: analysis day, "YYYY-MM-DD".
    :raises Warning: when *mode* is neither 'sleep' nor 'wake'.

    Appended record shape:
    {"mode":1,"activeId":"34","mac":"3243","tvMac":"23423","oneDayTime":15013452,"final15dTime":"12:00","analysisDatetime":"2022-02-12"}
    """
    # Hive collect_list arrives as a bracketed string — convert back to lists.
    log_time = log_time.strip(']').strip('[').split(',')
    distance = distance.strip(']').strip('[').split(',')

    # Keep (time, distance) pairs aligned while sorting by timestamp string
    # (ms timestamps have equal length, so string order == numeric order).
    log_time, distance = zip(*sorted(zip(log_time, distance), key=lambda s: (s[0], s[1])))

    if mode == 'sleep':
        ans = sleep(log_time, distance)
        code, sink = 2, sleep_id_mac_time
    elif mode == 'wake':
        ans = wake(log_time, distance)
        code, sink = 1, wake_id_mac_time
    else:
        raise Warning(f"Can't find this mode ({mode}), there are some choices ('sleep', 'wake')")

    if ans is not None:
        sink.append(
            {
                "mode": code,
                "tvMac": tv_mac,
                "activeId": active_id,
                "mac": mac,
                "oneDayTime": int(ans) // 1000,  # ms -> s
                "final15dTime": "",
                "analysisDatetime": date
            })


def batch_analysis(date: str = None, mode: str = None, rows: int = 50000):
    """
    Page through the Hive result set and analyse it with the thread pool.

    Procedure per page of ``rows`` groups:
        1. fetch the page from Hive,
        2. dispatch one single_fn() task per group (pool queue 1),
        3. wait for the pool, upload the accumulated records, clear them.

    :param date: analysis day, "YYYY-MM-DD".
    :param mode: 'sleep' or 'wake', forwarded to the analysis functions.
    :param rows: page size (row_number range per Hive query).
    """
    page = 0
    while True:
        tick = time.time()
        lo, hi = rows * page + 1, rows * (page + 1)
        batch = Batch_select(date, mode, lo, hi)
        page += 1

        for row in batch:
            # row: (tv_mac, active_id, mac, log_times, distances, ...)
            single_fn(row[0], row[1], row[2], row[3], row[4], mode, date)

        vthread.pool.wait(gqueue=1)
        print(f"{mode} duration(read & analysis data({lo} - {hi})) time: {(time.time() - tick) / 60}(m)")
        # Flush the accumulated records to ClickHouse via the HTTP API.
        save2ck(date, mode)
        print(f"The length of data saving to the Clickhouse : {len(wake_id_mac_time) + len(sleep_id_mac_time)} ")
        wake_id_mac_time.clear()
        sleep_id_mac_time.clear()

        # A short page means the final page was just processed.
        if len(batch) < rows:
            break


def post(date, datas, url, headers, mode='sleep'):
    """
    POST *datas* as a JSON body to *url* and verify the service reply.

    :param date: analysis day, used only in the error message.
    :param datas: JSON-serialisable payload (list of record dicts).
    :param url: target endpoint.
    :param headers: HTTP headers (must include the JSON content type).
    :param mode: label for the error message ('sleep' or 'wake').
    :raises RuntimeError: when the service reply's 'code' is not 1000.
    """
    body = json.dumps(datas)
    reply = requests.post(url, data=body, headers=headers)
    response = json.loads(reply.text)
    if response['code'] != 1000:
        raise RuntimeError(f"[{mode}] {date}({response})")


def save2ck(date, mode):
    """
    Upload the accumulated records for *mode* to ClickHouse in batches.

    Posts at most 10,000 records per request.  Record shape:
    {"mode":1,"activeId":"34","mac":"3243","tvMac":"23423","oneDayTime":15013452,"final15dTime":"12:00","analysisDatetime":"2022-02-12"}

    :param date: analysis day, forwarded to post() for error reporting.
    :param mode: 'wake' uploads wake_id_mac_time; anything else uploads
                 sleep_id_mac_time.
    """
    headers = {"Content-Type": "application/json"}
    batch_size = 10000
    if mode == 'wake':
        datas, tag = wake_id_mac_time, 'wake'
    else:
        datas, tag = sleep_id_mac_time, 'sleep'

    # Stride-based slicing replaces the former duplicated "full batches +
    # remainder" logic in each branch: it produces the same chunks, also
    # covers the <= batch_size case, and posts nothing when the list is empty.
    for i in range(0, len(datas), batch_size):
        post(date, datas[i:i + batch_size], wCKUrl, headers, tag)


@pool2
def util_15d(df: pd.DataFrame = None, Info: list = None):
    """
    Merge the current day's time with the device's historical times
    (runs on the pool2 worker threads) and append the merged record to
    ``final_time_info``.

    Merge rule: no history -> current time; one historic value -> midpoint;
    more -> drop the min and max, average the rest (current value included).

    :param df: history frame with columns 'mode', 'active_id', 'mac',
               'current_time'.
    :param Info: record whose positions 0/2/4/5 hold mode, active_id, mac
                 and the current time — assumed layout, TODO confirm against
                 the caller (not visible in this file).  Two values are
                 appended in place: the merged time and a log timestamp.
    """
    mode = Info[0]
    active_id = Info[2]
    mac = Info[4]
    cur_time = Info[5]
    if len(df.values) == 0:
        final_time = cur_time
    else:
        # FIX: element-wise masks need '&' with parentheses; the original
        # 'and' raises "truth value of a Series is ambiguous".
        mask = (df['mode'] == mode) & (df['active_id'] == active_id) & (df['mac'] == mac)
        ans = df.loc[mask]['current_time']  # the 'current_time' column only
        res_days = ans.values

        # Combine history with today's value.
        if len(res_days) <= 0:
            final_time = cur_time
        elif len(res_days) == 1:
            final_time = (res_days[0] + cur_time) // 2
        else:
            # FIX: Series.append was removed in pandas 2.0 (and was being fed
            # a list of dicts); concatenate the scalar current time instead.
            ans = pd.concat([ans, pd.Series([cur_time])], ignore_index=True)
            # remove maximum and minimum, average the rest
            ans = ans.sort_values()
            final_time = ans.iloc[1:-1].mean()
    log_time = int(time.time())
    Info.append(final_time)
    Info.append(log_time)
    final_time_info.append(Info)


def main(date):
    """
    Run the full pipeline for *date*: sleep analysis, then wake analysis,
    then a ClickHouse OPTIMIZE pass to deduplicate merged parts.

    :param date: analysis day, "YYYY-MM-DD".
    """
    start = time.time()
    times = []
    for mode in ('sleep', 'wake'):
        mode_start = time.time()
        batch_analysis(date, mode)
        times.append(time.time() - mode_start)

    # Deduplicate / merge parts on the ClickHouse side.
    optimizer()


def Parsers():
    """
    Read an optional "YYYY-MM-DD" date from the command line.

    The last argv token that parses as a valid date wins; anything else is
    ignored.  (The original accepted ANY token containing '-', including
    flags and garbage.)  Defaults to yesterday's date.

    :return: the analysis date as a "YYYY-MM-DD" string.
    """
    date = str(datetime.now().date() - timedelta(days=1))
    for arg in sys.argv[1:]:
        arg = arg.strip(' ')
        if '-' in arg:
            try:
                datetime.strptime(arg, "%Y-%m-%d")
            except ValueError:
                continue  # not a date — skip instead of trusting it
            date = arg
    return date


def optimizer():
    """
    Run OPTIMIZE TABLE on the ClickHouse target table so freshly written
    parts are merged (deduplicating the day's records).
    """
    client = Client(
        user=config_ck['user'],
        password=config_ck['password'],
        host=config_ck['host'],
        port=config_ck['port'],
        database=config_ck['database'],
    )
    client.execute(f"optimize table {config_ck['table']}")


if __name__ == '__main__':
    # Resolve the analysis date (defaults to yesterday) and run the pipeline.
    target_date = Parsers()
    print(f"We will set {thread_n} threads in the threading pool.")
    print(f"Begin computing of the ({target_date})")
    print(target_date)
    main(target_date)

