"""
Top Down Greedy 匿名化算法
@Time       :   2024/06/07
@Author     :   Li Kuangyuan
@Version    :   1.0
@Contact    :   1767958859@qq.com
@Software   :   VsCode
"""

import copy
import os
import operator
import random
import sys
import time
from functools import cmp_to_key
from util.data import *
from util.types import *
from util.utility import *

ROUNDS = 3          # number of refinement rounds used when searching for a far-apart record pair

class Partition(object):
    """
    A group of records produced during top-down partitioning.

    Attributes:
        flag: whether this partition may still be split further
        record: the records held by this partition
        gen_result: the generalized (anonymized) representation of the group
    """

    def __init__(self, data, gen_result):
        """Initialize from a record list and its generalization result.

        Both inputs are shallow-copied so later mutation of the caller's
        lists does not affect this partition.
        """
        self.flag = True
        self.record = list(data)
        self.gen_result = list(gen_result)

    def __len__(self):
        """Return the number of records in this partition."""
        return len(self.record)

# 辅助函数
def get_middle_of_record(record_a, record_b):
    """
    Build the generalized ("middle") record that covers both input
    records, attribute by attribute over the quasi-identifier columns.
    """
    res = []
    for i in range(QUASI_INDEX_LEN):
        if not CATEGORY_FLAG_LIST[i]:
            # Numerical attribute: merge the two value ranges.
            splits = [
                get_num_list_from_str(record_a[i]),
                get_num_list_from_str(record_b[i])
            ]
            # NOTE(review): `key=cmp_to_key` passes functools.cmp_to_key
            # itself as the key function rather than cmp_to_key(comparator);
            # each element gets wrapped as if it were a comparator, which
            # raises as soon as two distinct elements are compared.  Likely
            # intended: splits.sort() or sort(key=cmp_to_key(some_cmp)) —
            # confirm against util.utility.get_num_list_from_str.
            splits.sort(key=cmp_to_key)

            if splits[0] != splits[-1]:
                # Distinct endpoints: generalize to the interval "low,high".
                res.append(str(splits[0]) + ',' + str(splits[-1]))
            else: 
                # Identical values: keep the single value as-is.
                res.append(splits[0])
        else:
            # Categorical attribute: generalize to the lowest common
            # ancestor in the attribute's taxonomy tree.
            res.append(calculate_LCA(i,record_a[i], record_b[i]))
    return res

def calculate_LCA(index, value1, value2):
    """Return the lowest common ancestor (LCA) of two categorical values
    in the attribute's taxonomy tree.

    Args:
        index (int): quasi-identifier index (the index-th attribute)
        value1 (str): categorical value, e.g. "Female"
        value2 (str): categorical value, e.g. "Male"

    Returns:
        str: value of the lowest common ancestor
    """
    # Identical values are their own LCA.
    if value1 == value2:
        return value1
    # Cache lookup.  A tuple key avoids the collisions a plain string
    # concatenation would cause (e.g. "ab"+"c" == "a"+"bc").
    cache_key = (value1, value2)
    if cache_key in CACHE_LCA[index]:
        return CACHE_LCA[index][cache_key]
    g_tree = TAXONOMY_TREES[index]
    # Ancestor chains from each node up to the root, the node itself first.
    p1, p2 = list(g_tree[value1].parent), list(g_tree[value2].parent)
    p1.insert(0, g_tree[value1])
    p2.insert(0, g_tree[value2])
    min_len = min(len(p1), len(p2))
    if min_len == 0:
        # No ancestor information available: fall back to full suppression.
        return '*'
    # Walk both chains from the root downward; the last node at which the
    # two chains still agree is the lowest common ancestor.
    last = p1[-1]
    for i in range(1, min_len + 1):
        if p1[-i].value == p2[-i].value:
            last = p1[-i]
        else:
            break
    CACHE_LCA[index][cache_key] = last.value
    return last.value

def get_middle_of_group(group):
    """
    Fold an entire group of records into one generalized ("middle") record.
    """
    # Start from the first record and merge in the rest one by one.
    mid = group[0]
    for rec in group[1:]:
        mid = get_middle_of_record(mid, rec)
    return mid

def get_pair_with_max_dis(partition):
    """
    Find a pair of records in the partition that are (approximately)
    furthest apart under the NCP distance.

    Pick `a` at random, scan in O(n) for the record `b` whose pairwise
    generalization with `a` has maximal NCP, then restart from `b`.
    After ROUNDS iterations the distance between `a` and `b` is close
    to the true maximum.

    Returns:
        tuple: (a, b) record indices within the partition
    """
    partition_len = len(partition)
    # Explicit init so the function is well-defined even if ROUNDS == 0.
    a = b = 0
    for round_no in range(ROUNDS):
        if round_no == 0:
            # First round: start from a random record.
            a = random.randrange(partition_len)
        else:
            # Later rounds: restart from the furthest record found so far.
            a = b
        max_ncp, index = -1, 0  # best distance and its record index
        # Distinct inner loop variable: the original shadowed the outer
        # round counter, which is fragile.
        for j in range(partition_len):
            if j != a:
                # NCP of the generalization covering records j and a.
                gen_res = get_middle_of_record(partition.record[j], partition.record[a])
                ncp = calculate_NCP(gen_res)
                if ncp > max_ncp:
                    max_ncp = ncp
                    index = j
        b = index
    return a, b

def distribute_record(a, b, partition):
    """
    Split a partition into two groups seeded by records a and b.

    Each remaining record is assigned to whichever seed it is closer to
    under the NCP distance; ties go to a's group.

    Args:
        a (int): index of the first seed record
        b (int): index of the second seed record
        partition (Partition): the partition being split

    Returns:
        list: two new Partition objects [a's group, b's group]
    """
    record_a = partition.record[a][:]
    record_b = partition.record[b][:]
    a_p, b_p = [partition.record[a][:]], [partition.record[b][:]]
    # All records except the two seeds.  Build the exclusion set once,
    # not once per record as the original did.
    seeds = {a, b}
    remain_records = [value for index, value in enumerate(partition.record)
                      if index not in seeds]
    # Assign each remaining record to the closer group.
    for record in remain_records:
        res_a, res_b = get_ncp_dis(record_a, record), get_ncp_dis(record_b, record)
        if res_a[0] <= res_b[0]:
            a_p.append(record)
        else:
            b_p.append(record)
    # Wrap both groups together with their generalization results.
    return [
        Partition(a_p, get_middle_of_group(a_p)),
        Partition(b_p, get_middle_of_group(b_p)),
    ]

def balance(sub_p_list, index):
    """
    Rebalance a sub-partition that holds fewer than TDG_K records.

    Two candidate strategies are costed and the cheaper (smaller total
    NCP) one is applied greedily:
      0. merge the small group with the other group outright;
      1. move the closest records over from the other group.
    """
    # less_p: the under-sized partition; more_p: the other partition.
    less_p, more_p = sub_p_list.pop(index), sub_p_list.pop()
    # Records still needed to reach TDG_K, and the combined size.
    require_len, total_len = TDG_K - len(less_p), len(less_p) + len(more_p)

    # Strategy 0: merge the two groups, cost = NCP of the union * size.
    ncp_in_alg0, mid_in_alg0 = get_ncp_dis(less_p.gen_result, more_p.gen_result)
    ncp_in_alg0 *= total_len

    # Strategy 1: move over the require_len records of more_p that are
    # closest (by NCP) to less_p's generalization.
    dist = {}
    for i, record in enumerate(more_p.record):
        dist[i], _ = get_ncp_dis(less_p.gen_result, record)
    sorted_dist = sorted(dist.items(), key=operator.itemgetter(1))
    # Indices of the closest records; a set gives O(1) membership tests
    # (the original rebuilt the set on every loop iteration).
    closest_index_set = {t[0] for t in sorted_dist[:require_len]}

    # Split more_p's records into the ones to move and the ones to keep.
    record_to_add, record_remain = [], []
    for i, t in enumerate(more_p.record):
        if i in closest_index_set:
            record_to_add.append(t)
        else:
            record_remain.append(t)
    # Generalization of what would remain in more_p.
    remain_middle = get_middle_of_group(record_remain)
    # Cost of less_p after absorbing the moved records ...
    ncp_in_alg1, mid_in_alg1 = merge_ncp_dis(less_p, record_to_add)
    # ... plus the cost of the records left behind in more_p.
    ncp_in_alg1 += len(record_remain) * calculate_NCP(remain_middle)

    # Greedy choice: keep whichever strategy costs less.
    if ncp_in_alg1 > ncp_in_alg0:
        # Merge: the combined group may no longer be split.
        less_p.flag = False
        less_p.record, less_p.gen_result = merge_list(less_p.record, more_p.record), mid_in_alg0
    else:
        more_p.record, more_p.gen_result = record_remain, remain_middle
        less_p.record, less_p.gen_result = merge_list(less_p.record, record_to_add), mid_in_alg1
        # more_p survives only under the record-moving strategy.
        sub_p_list.append(more_p)

    # Put the updated less_p back into the sub-partition list.
    sub_p_list.append(less_p)

def merge_list(list_a, list_b):
    """Return a new list with the items of list_a followed by list_b.

    Neither input list is modified.
    """
    return [*list_a, *list_b]

def check_splitable(partition):
    """
    Return whether the partition may still be split: it must be flagged
    as splittable and hold at least 2*K records (so both halves can
    satisfy k-anonymity).
    """
    if not partition.flag:
        return False
    return len(partition) >= 2 * TDG_K


def calculate_NCP(record):
    """Compute the Normalized Certainty Penalty (NCP) of a generalized record.

        NCP_num = generalized interval width / global range of the attribute
        NCP_cat = subtree size of the LCA / size of the full taxonomy

    Args:
        record (list): generalized record (quasi-identifier values only)

    Returns:
        float: sum of the per-attribute NCP values
    """
    ncp_key = ";".join(record)

    # Serve repeated generalizations straight from the cache.
    if ncp_key in CACHE_NCP:
        return CACHE_NCP[ncp_key]

    ncp_record = 0.0
    for i in range(QUASI_INDEX_LEN):
        width = 0.0  # a single value generalizes nothing: width (NCP) is 0
        if not CATEGORY_FLAG_LIST[i]:
            # Numerical attribute: an interval "low,high" has width high - low.
            if "," in record[i]:
                nums = record[i].split(",")
                width = float(nums[1]) - float(nums[0])
        else:
            # Categorical attribute: width is the subtree size of the value.
            width = float(len(TAXONOMY_TREES[i][record[i]]))
        ncp_record += width / QUASI_INDEX_RANGE[i]

    CACHE_NCP[ncp_key] = ncp_record
    return ncp_record


def get_ncp_dis(record1, record2):
    """
    NCP distance between two records: the NCP of the generalization
    that covers both.

    Returns:
        tuple: (ncp value, generalized record)
    """
    middle = get_middle_of_record(record1, record2)
    return calculate_NCP(middle), middle

def merge_ncp_dis(partition, addition_set):
    """
    Cost of merging addition_set into the given partition.

    Returns:
        tuple: (total NCP of the merged group, its generalization result)
    """
    merged_mid = get_middle_of_record(get_middle_of_group(addition_set),
                                      partition.gen_result)
    merged_size = len(addition_set) + len(partition)
    return merged_size * calculate_NCP(merged_mid), merged_mid


def anonymize(partition):
    """
    TDG anonymization: recursively split the partition top-down.

    Partitions that can no longer be split are appended to the global
    RESULT list.
    """
    # Leaf case: publish the partition as-is.
    if not check_splitable(partition):
        RESULT.append(partition)
        return
    # Pick a far-apart seed pair and redistribute the records around it.
    seed_a, seed_b = get_pair_with_max_dis(partition)
    sub_p_list = distribute_record(seed_a, seed_b, partition)

    # Rebalance if one side ended up smaller than K.
    if len(sub_p_list[0]) < TDG_K:
        balance(sub_p_list, 0)
    elif len(sub_p_list[1]) < TDG_K:
        balance(sub_p_list, 1)

    # Recurse into each resulting sub-partition.
    for sub_p in sub_p_list:
        anonymize(sub_p)

def init(trees, data, k, quasi_index_num, not_quasi_index):
    """
    Initialize all module-level state used by the TDG algorithm.
    """
    global TDG_K, RESULT, QUASI_INDEX_LEN, TAXONOMY_TREES, QUASI_INDEX_RANGE, CATEGORY_FLAG_LIST, NOT_QUASI_INDEX, CACHE_LCA, CACHE_NCP
    TAXONOMY_TREES = trees
    # An attribute is categorical unless its taxonomy is a numerical
    # range (NRange); one LCA cache dict per attribute.
    CATEGORY_FLAG_LIST = [not isinstance(t, NRange) for t in trees]
    CACHE_LCA = [{} for _ in trees]
    CACHE_NCP = {}
    NOT_QUASI_INDEX = not_quasi_index
    QUASI_INDEX_LEN = quasi_index_num
    TDG_K = k
    QUASI_INDEX_RANGE = []
    RESULT = []
    


def tdg(att_trees, data, k, quasi_index_num, not_quasi_index):
    """
    Top-Down Greedy anonymization: a heuristic algorithm for relational
    datasets containing both numerical and categorical attributes.

    Args:
        att_trees: taxonomy trees / numerical ranges, one per QI attribute
        data: records to anonymize
        k: the k of k-anonymity
        quasi_index_num: number of quasi-identifier attributes
        not_quasi_index: indices of non-QI (e.g. sensitive) attributes

    Returns:
        list: anonymized records (generalized QI values + kept attributes)
    """
    # Set up module-level state.
    init(att_trees, data, k, quasi_index_num, not_quasi_index)
    result, gen_result = [], []

    # Build the fully-generalized root record and per-attribute ranges.
    for i in range(QUASI_INDEX_LEN):
        if CATEGORY_FLAG_LIST[i]:
            # Categorical attribute (GTree): the root is the suppression value.
            gen_result.append('*')
            QUASI_INDEX_RANGE.append(len(TAXONOMY_TREES[i]['*']))
        else:
            # Numerical attribute (NRange): the root covers the whole range.
            gen_result.append(TAXONOMY_TREES[i].value)
            QUASI_INDEX_RANGE.append(TAXONOMY_TREES[i].range)

    # Root partition covering all records.
    partition = Partition(data, gen_result)

    # Recursive anonymization, timed.
    start_time = time.time()
    anonymize(partition)
    run_time = time.time() - start_time
    # NOTE(review): run_time is currently unused — surface it to callers
    # or log it if timing matters.

    # Assemble the output: generalized QI values + trailing non-QI values.
    # Non-QI attributes sit at the tail of each record; slicing from
    # len(record) - suffix_len replaces the original quadratic
    # `temp_list = temp_list + [...]` accumulation, and an empty suffix
    # correctly yields an empty slice.
    suffix_len = len(NOT_QUASI_INDEX)
    for sub_p in RESULT:
        for record in sub_p.record:
            begin = len(record) - suffix_len
            result.append(sub_p.gen_result[:] + record[begin:])

    return result

def tdg_get_result_one(att_trees, data, k, qi_index, not_quasi_index):
    """
    Convenience wrapper: reorder columns so the QI attributes come first,
    run TDG on a deep copy of the data, then restore the original column
    order in the anonymized output.
    """
    reordered = column_change(copy.deepcopy(data), qi_index)
    anonymized = tdg(att_trees, reordered, k, len(qi_index), not_quasi_index)
    return column_restore(anonymized, qi_index)