# -*- coding: utf-8 -*-

"""
分析通过视频检测开关门状态算法的准确性
"""

import argparse
import os
from datetime import datetime, timedelta, timezone

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymysql
from PIL import Image
from PIL import ImageDraw


def get_begin_and_end_timestamp(date):
    """Return (start, end) Unix timestamps covering the given day in UTC+8.

    `date` must be a 'YYYY-MM-DD' string; the returned range spans
    00:00:00 through 23:59:59 of that day.
    """
    assert isinstance(date, str)
    tz_cst = timezone(timedelta(hours=8))
    year, month, day = map(int, date.split('-'))
    day_start = datetime(year, month, day, tzinfo=tz_cst)
    day_end = day_start + timedelta(hours=23, minutes=59, seconds=59)
    return int(day_start.timestamp()), int(day_end.timestamp())


def get_date_time_from_timestamp(timestamp):
    """Convert a Unix timestamp to a timezone-aware datetime in UTC+8."""
    tz_cst = timezone(timedelta(hours=8))
    return datetime.fromtimestamp(timestamp, tz=tz_cst)


# 提取数据
def get_ground_truth_segments(date, dev_id, worker_id):
    segments = list()
    connection = pymysql.connect(host='10.20.0.20',
                                 user='test',
                                 password='xzy123',
                                 db='uits2',
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    try:
        with connection.cursor() as cursor:
            sql = ('SELECT cnt_begin_time, cnt_end_time FROM `uits_cnt_log_t_{}` '
                   'WHERE dev_id = {} AND cnt_worker_id = {}').format(date, dev_id, worker_id)
            cursor.execute(sql)
            results = cursor.fetchall()
            for result in results:
                segments.append([result['cnt_begin_time'], result['cnt_end_time']])
    finally:
        connection.close()
    if len(segments) <= 1:
        return segments

    # 目前终端机制每隔750ms上传一条数据，导致一次开关门可能上传多条客流数据
    # 如果两条连续的数据，第一条结束时间等于第二条开始时间，则需要将这两条数据合并
    merged_segments = list()
    merged_segment = segments[0]
    index = 1
    while index < len(segments):
        if segments[index][0] == merged_segment[1]:
            merged_segment[1] = segments[index][1]
        else:
            merged_segments.append(merged_segment)
            merged_segment = segments[index]
        index += 1
    merged_segments.append(merged_segment)
    return merged_segments


def get_predict_segments(date, dev_id, worker_id):
    """Fetch the video algorithm's predicted door segments for one day.

    door_status_log_t stores predictions for ALL dates, so rows are filtered
    to the [begin, end] timestamp range of `date` after fetching.

    Args:
        date: query date formatted as 'YYYY-MM-DD'.
        dev_id: device id.
        worker_id: door index (0 or 1).

    Returns:
        list of [begin_time, end_time] predicted segments within the day.
    """
    begin_timestamp, end_timestamp = get_begin_and_end_timestamp(date)

    segments = list()
    connection = pymysql.connect(host='10.20.0.20',
                                 user='test',
                                 password='xzy123',
                                 db='uits2',
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    try:
        with connection.cursor() as cursor:
            # Bind dev_id / worker_id as parameters (%s) rather than
            # str.format interpolation, to prevent SQL injection.
            sql = ('SELECT cnt_begin_time, cnt_end_time FROM door_status_log_t '
                   'WHERE dev_id = %s AND cnt_worker_id = %s')
            cursor.execute(sql, (dev_id, worker_id))
            for result in cursor.fetchall():
                begin_time = result['cnt_begin_time'] - 1  # the terminal adds 1 on upload; undo it here
                end_time = result['cnt_end_time']
                # NOTE(review): strict < excludes segments touching the exact
                # day boundaries — confirm this is intentional.
                if begin_timestamp < begin_time < end_timestamp and begin_timestamp < end_time < end_timestamp:
                    segments.append([begin_time, end_time])
    finally:
        connection.close()
    return segments


# 分析数据
def draw_image(predict_segments, ground_truth_segments):
    predict_segments = np.array(predict_segments)
    ground_truth_segments = np.array(ground_truth_segments)

    ground_truth_start = np.min(ground_truth_segments[:, 0])
    ground_truth_end = np.max(ground_truth_segments[:, 1])

    image_width = ((ground_truth_end - ground_truth_start) // 10000 + 1) * 1000
    image = Image.new('RGB', (image_width, 800), (255, 255, 255))
    draw = ImageDraw.Draw(image)

    for segment in ground_truth_segments:
        x1 = int((segment[0] - ground_truth_start) / 10)
        x2 = int((segment[1] - ground_truth_start) / 10)
        draw.line(xy=((x1, 200), (x2, 200)), fill=(0, 0, 255), width=15)
    for segment in predict_segments:
        x1 = int((segment[0] - ground_truth_start) / 10)
        x2 = int((segment[1] - ground_truth_start) / 10)
        if x2 == x1:
            continue
        draw.line(xy=((x1, 220), (x2, 220)), fill=(0, 255, 0), width=15)
    plt.imshow(np.asarray(image), origin='lower')
    plt.show()


def predict_overlap_ratio(predict_segments, ground_truth_segments):
    """Return the fraction of ground-truth open time covered by predictions.

    Both inputs are lists of [begin, end] integer timestamp pairs. Time is
    rasterized to one flag per time unit over the ground-truth span, and the
    ratio is sum(predicted AND ground truth) / sum(ground truth).

    Args:
        predict_segments: predicted [begin, end] segments (may be empty).
        ground_truth_segments: sensor [begin, end] segments (must be non-empty).

    Returns:
        float overlap ratio in [0, 1].
    """
    predict_segments = np.array(predict_segments)
    ground_truth_segments = np.array(ground_truth_segments)

    ground_truth_start = np.min(ground_truth_segments[:, 0])
    ground_truth_end = np.max(ground_truth_segments[:, 1])

    length = ground_truth_end - ground_truth_start + 1
    ground_truth_flags = np.zeros(shape=[length, ], dtype=np.int32)
    predict_flags = np.zeros(shape=[length, ], dtype=np.int32)
    for segment in ground_truth_segments:
        start = segment[0] - ground_truth_start
        end = segment[1] - ground_truth_start
        ground_truth_flags[start: end] = 1
    for segment in predict_segments:
        # Clamp BOTH endpoints at 0: a predict segment lying entirely before
        # the ground-truth span used to produce a negative `end`, which slice
        # semantics interpret as counting from the array's tail and wrongly
        # marked almost the whole timeline as predicted. Ends beyond the
        # array are harmlessly truncated by slicing.
        start = max(segment[0] - ground_truth_start, 0)
        end = max(segment[1] - ground_truth_start, 0)
        predict_flags[start: end] = 1
    overlap = predict_flags & ground_truth_flags
    return np.sum(overlap) / np.sum(ground_truth_flags)


def recall_and_precision(predict_segments, ground_truth_segments, show_image=False):
    """Compute segment-level recall and precision via greedy IoU matching.

    A prediction counts as a true positive when its temporal IoU with some
    ground-truth segment exceeds 0.3, with each side matched at most once.

    Args:
        predict_segments: list of [begin, end] segments from the video algorithm.
        ground_truth_segments: list of [begin, end] segments from the door sensor.
        show_image: when True, first render both segment sets with draw_image.

    Returns:
        (recall, precision). By convention (0, 1) when there are no
        predictions and (1, 0) when there is no ground truth, so empty
        inputs never divide by zero.
    """
    if show_image:
        draw_image(predict_segments, ground_truth_segments)

    predict_segments = np.array(predict_segments)
    ground_truth_segments = np.array(ground_truth_segments)

    # m predictions, n ground-truth segments.
    m, n = predict_segments.shape[0], ground_truth_segments.shape[0]

    if m == 0:
        recall = 0
        precision = 1
        return recall, precision
    if n == 0:
        recall = 1
        precision = 0
        return recall, precision

    # Build an m x n matrix of temporal IoU values between each prediction
    # and each ground-truth segment.
    dist_mat = list()
    for predict_segment in predict_segments:
        query = np.array(predict_segment)
        query = np.expand_dims(query, axis=0)
        query = np.repeat(query, repeats=n, axis=0)
        # Elementwise min/max against all ground-truth rows, then split into
        # start (col 0) and end (col 1) components.
        min_start = np.minimum(query, ground_truth_segments)[:, 0]
        max_start = np.maximum(query, ground_truth_segments)[:, 0]
        min_end = np.minimum(query, ground_truth_segments)[:, 1]
        max_end = np.maximum(query, ground_truth_segments)[:, 1]
        # intersection / union; clip keeps non-overlapping pairs at 0.
        dist = np.clip((min_end - max_start), a_min=0, a_max=None) / (max_end - min_start)
        dist_mat.append(dist)
    dist_mat = np.array(dist_mat)  # shape is m x n

    flags = np.zeros(shape=dist_mat.shape, dtype=np.uint32)
    # Each predict segment may match at most one ground-truth segment:
    # greedily flag the FIRST ground truth whose IoU exceeds 0.3.
    # NOTE(review): this takes the first above-threshold match, not the best
    # one — confirm that is acceptable for this data.
    for i in range(dist_mat.shape[0]):
        for j in range(dist_mat.shape[1]):
            if dist_mat[i, j] > 0.3:
                flags[i, j] = 1
                break
    # Each ground-truth segment may also match at most one predict segment:
    # within every column, keep only the lowest-index flagged prediction and
    # clear all flags below it.
    for j in range(dist_mat.shape[1]):
        for i in range(dist_mat.shape[0]):
            if flags[i, j] == 1 and i < dist_mat.shape[0] - 1:
                flags[i+1:, j] = 0

    # sum(flags) is the number of matched pairs.
    recall = np.sum(flags) / n
    precision = np.sum(flags) / m
    return recall, precision


# 输出结果
def color_negative_red(val):
    """
    Takes a scalar and returns a string with
    the css property `'color: red'` for negative
    strings, black otherwise.
    """
    v = float(val[:val.find('%')])
    return 'background-color: red' if 0 < v < 50 else ''


def export_result(date, statistic_data):
    """Export per-device accuracy statistics to a styled .xlsx file.

    Args:
        date: query date string; used as the output file name.
        statistic_data: dict mapping dev_id -> [front predict count,
            front ground-truth count, front recall, front precision,
            rear predict count, rear ground-truth count, rear recall,
            rear precision].

    Writes ./analysis_result/<date>.xlsx, highlighting rate cells in
    (0%, 50%) with a red background (see color_negative_red).
    """
    result = pd.DataFrame.from_dict(statistic_data).transpose()
    columns = ['前门视频检测数', '前门门磁检测数', '前门召回率', '前门准确率',
               '后门视频检测数', '后门门磁检测数', '后门召回率', '后门准确率']
    result.columns = columns

    # Columns 0/1/4/5 are counts, 2/3/6/7 are rates.
    count_column_indices = [0, 1, 4, 5]
    rate_column_indices = [2, 3, 6, 7]
    for idx in count_column_indices:
        result.iloc[:, idx] = result.iloc[:, idx].apply(lambda x: '{:>4d}'.format(int(x)))
    for idx in rate_column_indices:
        result.iloc[:, idx] = result.iloc[:, idx].apply(lambda x: '{:.2%}'.format(x))

    output_dir = './analysis_result'
    os.makedirs(output_dir, exist_ok=True)  # previously crashed if the directory was missing
    excel_file_path = '{}/{}.xlsx'.format(output_dir, date)
    # NOTE(review): Styler.applymap is deprecated in pandas >= 2.1 in favor
    # of Styler.map; kept for compatibility with the pandas version in use.
    result.style.applymap(
        color_negative_red,
        subset=pd.IndexSlice[:, [columns[idx] for idx in rate_column_indices]]).\
        to_excel(excel_file_path, engine='openpyxl')


def main():
    """Entry point: evaluate door-detection accuracy for --date and export to Excel."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--date', type=str, help='query date, must be format as: "2018-04-27"')
    date = parser.parse_args().date

    dev_ids = [517, 539, 549, 575, 593,
               603, 618, 639, 1020, 1457]

    statistic_data = dict()
    for dev_id in dev_ids:
        row = []
        # worker_id 0/1 — presumably front and rear door; confirm with schema.
        for worker_id in (0, 1):
            truth_segments = get_ground_truth_segments(date, dev_id, worker_id)
            predicted_segments = get_predict_segments(date, dev_id, worker_id)
            recall, precision = recall_and_precision(predicted_segments, truth_segments, show_image=False)
            row += [len(predicted_segments), len(truth_segments), recall, precision]
        statistic_data[dev_id] = row
        print('{} process finished'.format(dev_id))

    export_result(date, statistic_data)


# Run the analysis only when executed as a script, not on import.
if __name__ == "__main__":
    main()
