import math
import os
from ctypes import *

import numpy as np
import pandas as pd
from tqdm import tqdm

from config.config import config
from graph.JsonWR import write_groups_to_json
from group import Group


# def perf_func_time(func):
#     def inner(*args, **kwargs):
#         old_time = time.time()
#         result = func(*args, **kwargs)
#         func_name = str(func).split(' ')[1]
#         print('{} use time: {}s'.format(func_name, time.time() - old_time))
#         return result
#
#     return inner


def partition_interval(conditions=None):
    """
    Build left-closed, right-open interval labels over a value range.

    :param conditions: optional ``[minVal, maxVal, interval]`` triple; when
        omitted, falls back to ``config["handler"]["conditions"]`` (the
        original behavior).
    :return: list of "left_right" keys, e.g. ``["0.000_0.050", "0.050_0.100", ...]``.
    """
    if conditions is None:
        conditions = config["handler"]["conditions"]
    minVal = conditions[0]
    maxVal = conditions[1]
    interval = conditions[2]
    # round() instead of int(): (maxVal - minVal) / interval can come out as
    # e.g. 2.9999999999999996 in floating point, and int() truncation would
    # then silently drop the last interval.
    groupCount = round((maxVal - minVal) / interval)
    ans = []
    for i in range(groupCount):
        leftMargin = round(minVal + interval * i, 3)
        rightMargin = round(leftMargin + interval, 3)
        key = "%.3f_%.3f" % (leftMargin, rightMargin)
        ans.append(key)
    return ans


def read_pkl(path):
    """
    Load a pickled object from disk.

    :param path: relative (or absolute) path to the .pkl file
    :return: the unpickled object (typically a DataFrame)
    """
    import pickle

    absolutePath = os.path.abspath(path)
    with open(absolutePath, 'rb') as fh:
        return pickle.load(fh)


def init_group():
    """
    Create one Group per (query-interval, gallery-interval) pair.

    Keys have the form "<queryLeft>:<galleryLeft>", where each side is the
    left margin of a blur interval produced by partition_interval().

    :return: dict mapping "left:left" keys to fresh Group objects
    """
    labels = partition_interval()
    lefts = [label.split("_")[0] for label in labels]
    return {
        a + ":" + b: Group(a, b)
        for a in lefts
        for b in lefts
    }


def blocks(originalArray, blockSize):
    """
    Split an array-like into consecutive chunks of at most blockSize items.

    :param originalArray: sequence to split (list, ndarray, Series, ...)
    :param blockSize: maximum number of items per chunk (> 0)
    :return: list of numpy sub-arrays; [] for empty input
    """
    # Guard: np.array_split raises ValueError on 0 sections, so handle
    # an empty input explicitly instead of crashing.
    if len(originalArray) == 0:
        return []
    blockNum = math.ceil(len(originalArray) / blockSize)
    return np.array_split(originalArray, blockNum)


# Number of feature vectors currently loaded in the native gallery;
# rebound by T_or_F() / TF() before each scoring pass.
capacity = 0
# Number of query features scored per compute() call.
queryNum = 100
# Dimensionality of one feature vector.
dim = 384
# Native library doing the batched query-vs-gallery score computation.
# NOTE(review): assumes libmatrix_multiplier.so is on the dynamic loader
# path (e.g. LD_LIBRARY_PATH) — confirm deployment setup.
matrix_multiplier = cdll.LoadLibrary('libmatrix_multiplier.so')
# Pre-size the gallery for up to 200,000 features of `dim` floats each.
matrix_multiplier.init(200000, dim)


def insert_gallery(matrix_multiplier, features):
    """
    Copy each feature vector into the native gallery, one vector at a time.

    :param matrix_multiplier: handle exposing insert_gallery(ptr, count)
    :param features: sized iterable of contiguous float32 numpy vectors
    """
    for vec in features:
        ptr = vec.ctypes.data_as(POINTER(c_float))
        matrix_multiplier.insert_gallery(ptr, 1)
    print("insert %d features" % len(features))


def T_or_F(data, groups, groupId):
    """
    Accumulate cross-person similarity statistics WITHIN one mask group
    (all-masked or all-unmasked).

    Loads every feature of `data` into the native gallery, then queries the
    same features against it in blocks of `queryNum`, folding the scores into
    `groups` via set_value(). Clears the gallery afterwards.

    :param data: DataFrame with "feature", "gt_person_id", "blur_interval"
        columns. NOTE(review): each "feature" entry is assumed to be a
        contiguous float32 vector of length `dim` — confirm upstream.
    :param groups: dict of Group keyed "queryLeft:galleryLeft" (see init_group)
    :param groupId: 1-based index of the current data partition (logging only)
    """
    features = data["feature"]
    persons = data["gt_person_id"]
    blurIntervals = data["blur_interval"]

    # Load features into the gallery; `capacity` is a module global also
    # read by set_value() to index the flat score buffer.
    global capacity
    capacity = len(features)
    # print(features.head(2))
    # print(type(features))
    insert_gallery(matrix_multiplier, features)
    print("after insert gallery size:" + str(matrix_multiplier.size()))
    # Compute similarities, block by block of queryNum queries.
    queries = blocks(features, queryNum)
    queryPersons = blocks(persons, queryNum)
    blurs = blocks(blurIntervals, queryNum)
    # print(type(queries), type(blurs))
    print("T_or_F:%d" % groupId)
    for i in tqdm(range(len(queries))):
        curQuery = queries[i]
        # Pack this block into one flat buffer; the last block may be short,
        # in which case the tail stays zero (set_value only walks the real
        # rows via blurs[i], so the padded rows are never read).
        query = np.zeros(queryNum * dim, dtype=np.float32)
        start = 0
        for item in curQuery:
            query[start:start + dim] = item
            start += dim
        # if bar.count == 0:
        #     print(len(query))
        #     print(type(query))
        # Flat row-major [query, gallery] score buffer filled by the C side.
        scores = np.zeros(queryNum * capacity, dtype=np.float32).ctypes.data_as(POINTER(c_float))
        query_data = query.ctypes.data_as(POINTER(c_float))
        matrix_multiplier.compute(query_data, queryNum, scores)

        # for j in range(queryNum):
        #     print("%d, score: %f" % (j, scores[j * int(capacity / queryNum) + j * capacity]))
        #     # print("%d, score: %f" % (j, scores[j]))

        # Record the results into the group statistics.
        set_value(groups, blurs[i], blurIntervals, queryPersons[i], persons, scores)
    # Clear the gallery for the next caller.
    matrix_multiplier.clear()
    print("after clear gallery size:" + str(matrix_multiplier.size()))


def TF(dataT, dataF, groupsTF, groupId):
    """
    Accumulate cross-person similarity statistics BETWEEN the two mask
    groups: dataF features form the gallery, dataT features are the queries.

    TODO(review): this is a near-duplicate of T_or_F() differing only in
    which frame feeds the gallery vs the queries — consider factoring out.

    :param dataT: masked subset (queries); needs "feature", "gt_person_id",
        "blur_interval" columns
    :param dataF: unmasked subset (gallery); same columns
    :param groupsTF: dict of Group keyed "queryLeft:galleryLeft"
    :param groupId: 1-based index of the current data partition (logging only)
    """
    featuresT = dataT["feature"]
    personsT = dataT["gt_person_id"]
    blurIntervalsT = dataT["blur_interval"]
    featuresF = dataF["feature"]
    personsF = dataF["gt_person_id"]
    blurIntervalsF = dataF["blur_interval"]

    # featuresF becomes the gallery; `capacity` is a module global also
    # read by set_value() to index the flat score buffer.
    global capacity
    capacity = len(featuresF)
    # Load gallery features.
    insert_gallery(matrix_multiplier, featuresF)
    print("after insert gallery size:" + str(matrix_multiplier.size()))
    # Compute similarities with featuresT as the queries, block by block.
    queries = blocks(featuresT, queryNum)
    queryPersons = blocks(personsT, queryNum)
    blurs = blocks(blurIntervalsT, queryNum)
    # print(type(queries), type(blurs))
    print("TF:%d" % groupId)
    for i in tqdm(range(len(queries))):
        curQuery = queries[i]
        # Pack this block into one flat buffer; a short last block leaves
        # zero padding that set_value never reads (it walks blurs[i] only).
        query = np.zeros(queryNum * dim, dtype=np.float32)
        start = 0
        for item in curQuery:
            query[start:start + dim] = item
            start += dim
        # if bar.count == 0:
        #     print(len(query))
        #     print(type(query))
        # Flat row-major [query, gallery] score buffer filled by the C side.
        scores = np.zeros(queryNum * capacity, dtype=np.float32).ctypes.data_as(POINTER(c_float))
        query_data = query.ctypes.data_as(POINTER(c_float))
        matrix_multiplier.compute(query_data, queryNum, scores)

        # for j in range(queryNum):
        #     print("%d, score: %f" % (j, scores[j * int(capacity / queryNum) + j * capacity]))
        #     # print("%d, score: %f" % (j, scores[j]))

        # Record the results into the group statistics.
        set_value(groupsTF, blurs[i], blurIntervalsF, queryPersons[i], personsF, scores)
    # Clear the gallery for the next caller.
    matrix_multiplier.clear()
    print("after clear gallery size:" + str(matrix_multiplier.size()))


# @perf_func_time
def set_value(groups, queryBlurs, featureBlurs, queryPersons, persons, scores):
    """
    Fold one block of raw similarity scores into the group statistics.

    Same-person pairs are skipped, as are pairs whose mapped similarity is
    within 0.001 of 1.0. Raw scores are mapped from [-1, 1] to [0, 1] and
    binned into 0.0001-wide, left-closed right-open buckets.

    Reads the module global `capacity` (gallery size) to index `scores`.

    :param groups: dict of Group keyed "queryLeft:galleryLeft"
    :param queryBlurs: blur-interval labels of the query block
    :param featureBlurs: blur-interval labels of the whole gallery
    :param queryPersons: person ids of the query block (pandas Series)
    :param persons: person ids of the whole gallery (pandas Series)
    :param scores: flat row-major [query, gallery] float buffer
    """
    queryPersons = queryPersons.reset_index(drop=True)
    persons = persons.reset_index(drop=True)
    for i, queryBlur in enumerate(queryBlurs):
        for j, featureBlur in enumerate(featureBlurs):
            # Only cross-person similarities are counted.
            if queryPersons.get(i) == persons.get(j):
                continue
            # Map raw score from [-1, 1] into [0, 1].
            simi = scores[i * capacity + j] * 0.5 + 0.5
            # Skip near-perfect matches.
            if 1.0 - simi <= 0.001:
                continue
            key = queryBlur.split("_")[0] + ":" + featureBlur.split("_")[0]
            group = groups.get(key)
            bucket = int(simi / 0.0001)  # simi bucket: left-closed, right-open
            group.simiSumArray[bucket] += simi
            group.countArray[bucket] += 1


def data_group(data, groupNum=5):
    """
    Partition rows into `groupNum` person-disjoint DataFrames.

    All rows sharing a gt_person_id land in the same output frame, so later
    cross-frame comparisons never compare a person with themselves.

    :param data: DataFrame with a "gt_person_id" column
    :param groupNum: number of partitions (default 5, the original behavior)
    :return: list of DataFrames
    """
    personIds = np.array(list(data["gt_person_id"].drop_duplicates()))
    personGroups = np.array_split(personIds, groupNum)
    dfGroups = []
    print("=====start data group=====")
    with tqdm(total=len(personIds)) as bar:
        for personGroup in personGroups:
            # DataFrame.append was removed in pandas 2.0; collect the
            # per-person slices and concatenate once per group instead
            # (also avoids quadratic re-copying).
            frames = []
            for person in personGroup:
                frames.append(data[data["gt_person_id"] == person])
                bar.update(1)
            dfGroups.append(pd.concat(frames) if frames else pd.DataFrame())
    print("=====end data group=====")
    return dfGroups


def main_multi():
    """
    End-to-end driver: load the selected dataset, split it into
    person-disjoint partitions, accumulate masked/unmasked/cross similarity
    statistics per blur-interval pair, and write the results to JSON.
    """
    # 1. Load the data
    projectPath = config["project_path"]
    dataDirPath = "%s/%s" % (projectPath, config["cluster"]["attrs_select_result_dir"])
    gtDataFileName = os.path.split(config["input"]["gt_data_file_path"])[1].split(".")[0]
    selectMode = config["cluster"]["select_mode"]
    dataPath = r"%s/%s_select_%s_%s_%d.pkl" % (dataDirPath, gtDataFileName, selectMode, config["handler"]["attr"], 100000)
    data = read_pkl(dataPath)
    # print(data.info())
    # print(data[["blur", "blur_interval"]])
    data = data.dropna(axis=0)
    # Group the data: split the ~100k rows into person-disjoint partitions
    # (~10k rows each) by gt_person_id.
    dfGroups = data_group(data)
    print(len(dfGroups), type(dfGroups[0]), dfGroups[0].shape)

    # print(type(list(data["feature"])[0][0]))
    # Accumulators for the statistics results
    groupsT = init_group()  # both faces masked
    groupsF = init_group()  # both faces unmasked
    groupsTF = init_group()  # one masked, one unmasked
    count = 0
    print("不同人统计.....")
    for df in dfGroups:
        count += 1
        print("==================================group %d==================================" % count)
        # df = data
        # Split the partition into masked (mask == 2) and unmasked (mask == 1)
        dataF = df[df["mask"] == 1]
        dataT = df[df["mask"] == 2]

        print(df.shape, dataF.shape, dataT.shape)  # (459834, 8) (438121, 8) (21713, 8)
        assert df.shape[0] == dataF.shape[0] + dataT.shape[0]
        # T_or_F(dataT, groupsT, 0)
        # Accumulate statistics for each pairing of mask states
        T_or_F(dataT, groupsT, count)
        # write_groups_to_json("data/group_%s_%s_%d.json" % ("multi", "T", count), groupsT)
        T_or_F(dataF, groupsF, count)
        # write_groups_to_json("data/group_%s_%s_%d.json" % ("multi", "F", count), groupsF)
        TF(dataT, dataF, groupsTF, count)
        # write_groups_to_json("data/group_%s_%s_%d.json" % ("multi", "TF", count), groupsTF)

    print("统计结果存储目录:%s/%s" % (config["project_path"], config["cluster"]["attrs_select_result_dir"]))
    write_groups_to_json("data/group_%s_%s.json" % ("multi", "T"), groupsT)
    write_groups_to_json("data/group_%s_%s.json" % ("multi", "F"), groupsF)
    write_groups_to_json("data/group_%s_%s.json" % ("multi", "TF"), groupsTF)


# Script entry point: run the full multi-group statistics pipeline.
if __name__ == '__main__':
    main_multi()
