# -*- encoding: utf-8 -*-
"""
@File       :   clustering_based_k_anonymity.py
@Time       :   2024/05/30 09:55:23
@Author     :   Yi Junquan 
@Version    :   1.0
@Contact    :   2696974822@qq.com
@Software   :   VsCode
@Paper      :   An Efficient Clustering Method for k-Anonymization
"""
import operator
import random
import time
import sys
import os
import copy
from functools import cmp_to_key
from util.types import NRange
from util.utility import cmp_num, get_num_list_from_str
from util.data import column_change, column_restore 

MAX_NUM = sys.maxsize

class Cluster(object):
    """A cluster of records together with its running center and
    generalization result.

    Relies on the module-level state (QUASI_INDEX_LEN, CATEGORY_FLAG_LIST,
    ...) that must be set up by ``init`` before any Cluster is created.
    """

    def __init__(self, member, center, information_loss=0.0):
        """Cluster attributes.

        Args:
            member (list): records belonging to the cluster
            center (list): cluster center (one value per quasi-identifier)
            information_loss (float, optional): information loss. Defaults to 0.0.
        """
        self.member = member
        self.information_loss = information_loss
        # Bug fix: copy ``center``.  Previously ``self.center`` and
        # ``self.generalization_result`` aliased the very same list as the
        # seed record (callers pass ``Cluster([record], record)``), so the
        # numeric-mean overwrite below mutated the original data record and
        # the generalization result as a side effect.
        self.center = list(center)
        self.generalization_result = list(center)
        len_member = len(self.member)
        for i in range(QUASI_INDEX_LEN):
            if not CATEGORY_FLAG_LIST[i]:
                # Numeric attribute: the center is the mean of the members.
                nums = [float(t[i]) for t in self.member]
                self.center[i] = str(sum(nums) / len_member)

    def add(self, record):
        """Add a new record to the cluster and update the derived attributes.

        Args:
            record (list): record containing every attribute
        """
        self.member.append(record)
        self.generalization_result = generalize(self.generalization_result, record)
        # Total IL of the cluster = member count * per-record IL.
        self.information_loss = len(self.member) * calculate_IL(self.generalization_result)

        # Update the cluster center incrementally.
        member_num = len(self.member)
        for i in range(QUASI_INDEX_LEN):
            if not CATEGORY_FLAG_LIST[i]:
                # Running mean for numeric attributes.
                self.center[i] = str(
                    (float(self.center[i]) * (member_num - 1) + float(record[i]))
                    / member_num
                )
            else:
                # Categorical center follows the generalization (the LCA).
                self.center[i] = self.generalization_result[i]

    def update_cluster(self):
        """Recompute generalization, center and information loss after the
        membership changed (e.g. records were removed by ``adjust_cluster``).
        """
        # Bug fix: store the recomputed generalization in
        # ``generalization_result`` — that is what __getitem__, __str__,
        # find_closest_cluster and the final output read.  Previously only
        # ``gen_result`` was assigned, leaving ``generalization_result``
        # stale after an adjustment.
        self.generalization_result = cluster_generalize(self.member)
        self.gen_result = self.generalization_result  # kept for backward compatibility
        len_member = len(self.member)
        for i in range(QUASI_INDEX_LEN):
            if not CATEGORY_FLAG_LIST[i]:
                nums = [float(t[i]) for t in self.member]
                self.center[i] = str(sum(nums) / len_member)
            else:
                self.center[i] = self.generalization_result[i]
        self.information_loss = len_member * calculate_IL(self.generalization_result)

    def __getitem__(self, index):
        """Return one attribute of the generalization result."""
        return self.generalization_result[index]

    def __len__(self):
        """Number of records in the cluster."""
        return len(self.member)

    def __str__(self):
        """String form of the cluster's generalization result."""
        return str(self.generalization_result)

def cluster_generalize(records):
    """Fold ``generalize`` over all records of a cluster.

    Args:
        records (list): records of the cluster (must be non-empty)

    Returns:
        list: a single generalized record covering every member
    """
    result = records[0]
    for record in records[1:]:
        result = generalize(result, record)
    return result

def get_record_distance(r1, r2):
    """Distance between two records, measured as the information loss of
    their common generalization.

    Args:
        r1 (list): record
        r2 (list): record

    Returns:
        float: distance (0 for identical records)
    """
    if r1 == r2:
        return 0
    return calculate_IL(generalize(r1, r2))


def calculate_IL(record):
    """Information loss of a (possibly generalized) record.

        IL_num = width of the generalized interval / global attribute range
        IL_cat = size (len) of the value's taxonomy subtree / global taxonomy size

    Results are memoized in the module-level CACHE_IL, keyed by the
    ';'-joined record.

    Args:
        record (list): record

    Returns:
        float: information loss
    """
    cache_key = ";".join(record)
    cached = CACHE_IL.get(cache_key)
    if cached is not None:
        return cached
    total = 0.0
    for i in range(QUASI_INDEX_LEN):
        if CATEGORY_FLAG_LIST[i]:
            # Categorical: width is whatever len() of the subtree reports.
            width = float(len(TAXONOMY_TREES[i][record[i]]))
        elif "," in record[i]:
            # Numeric interval "low,high".
            parts = record[i].split(",")
            width = float(parts[1]) - float(parts[0])
        else:
            width = 0.0  # a single numeric value loses no information
        total += width / QUASI_INDEX_RANGE[i]
    CACHE_IL[cache_key] = total
    return total


def calculate_LCA(index, value1, value2):
    """Return the lowest common ancestor (LCA) of two categorical values in
    the taxonomy tree of the ``index``-th quasi-identifier.

    Args:
        index (int): quasi-identifier position
        value1 (str): categorical value, e.g. "Female"
        value2 (str): categorical value, e.g. "Male"

    Returns:
        str: value of the lowest common ancestor
    """
    if value1 == value2:
        return value1
    # Bug fix: cache on the tuple (value1, value2) instead of the
    # concatenated string, which can collide ("ab" + "c" == "a" + "bc").
    # The cache is module-internal and reset by init(), so this is safe.
    try:
        return CACHE_LCA[index][(value1, value2)]
    except KeyError:
        pass
    g_tree = TAXONOMY_TREES[index]
    # Ancestor chains including the nodes themselves, leaf first.
    p1, p2 = list(g_tree[value1].parent), list(g_tree[value2].parent)
    p1.insert(0, g_tree[value1])
    p2.insert(0, g_tree[value2])
    min_len = min(len(p1), len(p2))
    if min_len == 0:
        return "*"
    # Walk both chains from the root downwards; the last position where
    # they still agree is the LCA.
    last = p1[-1]
    for i in range(1, min_len + 1):
        if p1[-i].value != p2[-i].value:
            break
        last = p1[-i]
    CACHE_LCA[index][(value1, value2)] = last.value
    return last.value

def generalize(record1, record2):
    """Generalize two records attribute by attribute.

    Numeric attributes become the covering interval "min,max"; categorical
    attributes become the lowest common ancestor in the taxonomy tree.

    Args:
        record1 (list): a record
        record2 (list): a record

    Returns:
        list: the generalized result, e.g.
        ['*', '20-39', 'White', 'Never-married', '*', 'United-States', '*', 'Other']
    """
    result = []
    for i in range(QUASI_INDEX_LEN):
        if CATEGORY_FLAG_LIST[i]:
            result.append(calculate_LCA(i, record1[i], record2[i]))
            continue
        # Collect the distinct interval endpoints from both records.
        endpoints = list(set(record1[i].split(",")) | set(record2[i].split(",")))
        if len(endpoints) == 1:
            result.append(endpoints[0])
        else:
            endpoints.sort(key=cmp_to_key(cmp_num))
            result.append(endpoints[0] + "," + endpoints[-1])
    return result


def adjust_cluster(cluster, extra_records, k):
    """Shrink ``cluster`` down to exactly k members.

    The k members closest to the cluster center are kept; every farther
    member is moved into ``extra_records`` for later redistribution.

    Args:
        cluster (Cluster): cluster to adjust (expected to hold > k members)
        extra_records (list): receives the evicted records (mutated in place)
        k (int): the k of k-anonymity
    """
    center = cluster.center
    # Distance of every member to the cluster center.
    distance_dict = {
        i: get_record_distance(center, member)
        for i, member in enumerate(cluster.member)
    }
    by_distance = sorted(distance_dict.items(), key=lambda item: item[1])
    # Bug fix: evict everything beyond the k *closest* members
    # (by_distance[k:]).  The old code used by_distance[-k:], which evicts
    # the k farthest members and keeps len - k — for len < 2k that leaves
    # fewer than k records and breaks the k-anonymity guarantee.
    evict_index = [index for index, _ in by_distance[k:]]
    extra_records.extend(cluster.member[i] for i in evict_index)
    evict_set = set(evict_index)
    cluster.member = [
        t for i, t in enumerate(cluster.member) if i not in evict_set
    ]
    cluster.update_cluster()


def find_closest_cluster(record, clusters):
    """Index of the cluster whose generalization is closest to ``record``.

    Args:
        record (list): a record
        clusters (list): list of Cluster objects

    Returns:
        int: index of the closest cluster; 0 when ``clusters`` is empty
        (matching the previous behaviour). Ties go to the lowest index.
    """
    return min(
        range(len(clusters)),
        key=lambda i: get_record_distance(record, clusters[i].generalization_result),
        default=0,
    )


def clustering_stage(data, k=10):
    """First stage of the OKA algorithm: build the initial clusters.

    Args:
        data (list): all records
        k (int, optional): the k of k-anonymity. Defaults to 10.

    Returns:
        list: the initial clusters
    """
    # Pick floor(len(data) / k) random records as cluster seeds.
    seed_index = random.sample(range(len(data)), int(len(data) / k))
    init_clusters = [Cluster([data[i]], data[i]) for i in seed_index]
    # Records that are not seeds.
    remaining = [data[i] for i in set(range(len(data))) - set(seed_index)]
    # Assign every remaining record to its closest cluster.
    while remaining:
        record = remaining.pop()
        init_clusters[find_closest_cluster(record, init_clusters)].add(record)
    return init_clusters

def adjustment_stage(init_clusters, k=10):
    """Second stage of the OKA algorithm: enforce the cluster-size constraint.

    Clusters larger than k give up their farthest members; clusters smaller
    than k absorb those leftover records until they reach size k.

    Args:
        init_clusters (list): clusters produced by the clustering stage
        k (int, optional): the k of k-anonymity. Defaults to 10.

    Returns:
        list: the adjusted clusters
    """
    extra_records = []
    insufficient_clusters = []
    clusters = []
    for cluster in init_clusters:
        if len(cluster) < k:
            insufficient_clusters.append(cluster)
            continue
        if len(cluster) > k:
            # Trim to k members; the evicted records land in extra_records.
            adjust_cluster(cluster, extra_records, k)
        clusters.append(cluster)

    # Redistribute leftover records: undersized clusters take priority;
    # once none remain undersized, fall back to the regular clusters.
    # NOTE(review): if extra_records runs out while some clusters are still
    # undersized, those clusters (and their records) are silently dropped —
    # pre-existing behaviour, preserved here; verify it is intentional.
    while extra_records:
        record = extra_records.pop()
        if insufficient_clusters:
            index = find_closest_cluster(record, insufficient_clusters)
            insufficient_clusters[index].add(record)
            if len(insufficient_clusters[index]) >= k:
                # The cluster just reached size k: promote it.
                clusters.append(insufficient_clusters.pop(index))
        else:
            index = find_closest_cluster(record, clusters)
            clusters[index].add(record)
    return clusters

def one_pass_k_means(data, k=10):
    """The core OKA (One-pass K-means Algorithm) routine: cluster, then
    adjust cluster sizes.

    Args:
        data (list): records to anonymize
        k (int, optional): the k of k-anonymity. Defaults to 10.

    Returns:
        list: the final clusters
    """
    return adjustment_stage(clustering_stage(data, k), k)



def init(trees, data, nqi_index_list, qi_num=-1):
    """Initialize the module-level state used by the whole algorithm.

    Args:
        trees (list): per-attribute taxonomy trees (or NRange objects for
            numeric attributes)
        data (list): data set
        nqi_index_list (list): indices of the non-quasi-identifier columns
        qi_num (int, optional): number of quasi-identifiers; -1 means every
            column except the last is treated as a quasi-identifier.
    """
    global TAXONOMY_TREES,  LEN_DATA, QUASI_INDEX_RANGE, CATEGORY_FLAG_LIST, QUASI_INDEX_LEN, CACHE_LCA, CACHE_IL, NQI_INDEX
    NQI_INDEX = nqi_index_list
    TAXONOMY_TREES = trees
    CACHE_IL = {}
    CACHE_LCA = []
    QUASI_INDEX_RANGE = []
    CATEGORY_FLAG_LIST = []
    LEN_DATA = len(data)
    QUASI_INDEX_LEN = qi_num if qi_num != -1 else len(data[0]) - 1
    for i in range(QUASI_INDEX_LEN):
        tree = TAXONOMY_TREES[i]
        if isinstance(tree, NRange):
            # Numeric attribute: remember its global value range.
            CATEGORY_FLAG_LIST.append(False)
            QUASI_INDEX_RANGE.append(tree.range)
        else:
            # Categorical attribute: remember the size of the full taxonomy.
            CATEGORY_FLAG_LIST.append(True)
            QUASI_INDEX_RANGE.append(len(tree["*"]))
        # One LCA cache per categorical/numeric attribute slot.
        CACHE_LCA.append(dict())


def clustering_based_k_anonymity(trees, data, k, qi_num, nqi_index_list):
    """Main entry point of the clustering-based k-anonymity method.

    Args:
        trees (list): taxonomy trees
        data (list): data set (QI columns first, non-QI columns at the tail)
        k (int): the k of k-anonymity
        qi_num (int): number of quasi-identifiers
        nqi_index_list (list): indices of the non-quasi-identifier columns

    Returns:
        list: the anonymized records
    """
    init(trees, data, nqi_index_list, qi_num)
    clusters = one_pass_k_means(data, k)
    results = []
    for cluster in clusters:
        for record in cluster.member:
            # QI columns are replaced by the cluster-wide generalization;
            # the non-QI columns (the tail of each record) are kept as-is.
            nqi_tail = list(record[len(record) - len(NQI_INDEX):])
            results.append(cluster.generalization_result + nqi_tail)
    return results

def oka_get_result_one(trees, data, k, qi_index_list, nqi_index_list):
    """Run the clustering-based k-anonymization algorithm once.

    Args:
        trees (list): taxonomy trees
        data (list): data set
        k (int): k
        qi_index_list (list): indices of the quasi-identifier columns
        nqi_index_list (list): indices of the non-quasi-identifier columns

    Returns:
        list: the k-anonymized result, in the original column order
    """
    # Anonymize a deep copy with the QI columns moved to the front.
    reordered = column_change(copy.deepcopy(data), qi_index_list)
    anonymized = clustering_based_k_anonymity(
        trees, reordered, k, len(qi_index_list), nqi_index_list
    )
    # Restore the original column order before returning.
    return column_restore(anonymized, qi_index_list)
