# -*- coding: utf-8 -*-

"""
@Time       :   2024/05/22
@Author     :   Li Kuangyuan, Yi Junquan
@Version    :   1.0
@Contact    :   1767958859@qq.com
@Software   :   VsCode
"""
import argparse
import time
import numpy as np
import pandas as pd

from anon_alg.oka.one_pass_k_means import oka_get_result_one
from anon_alg.mondrian.mondrain import mon_get_result_one
from anon_alg.tdg.top_down_greddy import tdg_get_result_one
from ml.ml_model import create_model, get_model_metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from util.data import *
from util.types import *
from util.utility import * 
from util.metric import NCP, DM, CAVG

def start_exp(args) -> None:
    """Run the anonymization / machine-learning experiment described by ``args``.

    Workflow:
      1. Train and evaluate a baseline model on the original dataset.
      2. For each k in the configured range, anonymize the data with the
         selected algorithm (Mondrian / OKA / TDG), compute anonymity
         metrics (NCP, CAVG, DM), then retrain and re-evaluate the model
         on the anonymized data.
      3. Write the ML metrics and the anonymity metrics to CSV files
         under ``results/``.

    Args:
        args: argparse namespace with fields ``dataset``,
            ``anonymity_method``, ``model``, ``anonymity`` (the k value)
            and ``experiment`` ('Y' sweeps a predefined range of k
            values, anything else uses only ``args.anonymity``).
    """
    np.random.seed(315)

    dataset = args.dataset
    anonymity_method = args.anonymity_method
    model = args.model

    # Experiment mode sweeps a predefined range of k values; otherwise a
    # single k is used.
    # NOTE(review): in experiment mode, k_range stays unbound for any
    # dataset other than MGM/ADULT; the argparse choices currently
    # prevent that case — confirm if more datasets are added.
    if args.experiment == 'Y':
        if dataset == Dataset.MGM: # MGM has too few records for large k values
            k_range = [2,5] + list(range(10, 101, 5))
        elif dataset == Dataset.ADULT:
            k_range = [2, 5, 10, 15, 20, 30, 40, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 600, 700, 800, 900, 1000]
    else:
        k_range = [args.anonymity]

    # Paths.
    # NOTE(review): `os` is not imported explicitly in this file; it is
    # presumably re-exported by one of the `util` wildcard imports — confirm.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    data_path = os.path.join(base_dir, 'datasets', '{}/{}.csv'.format(dataset, dataset))
    generalize_dir = os.path.join(base_dir, 'results', 'generalization', dataset)
    if not os.path.exists(generalize_dir):  
        os.makedirs(generalize_dir)
    statistic_dir = os.path.join(base_dir, "results", 'numeric')
    # Base directory for anonymized datasets; per-dataset/method
    # subdirectories are appended when files are read or written below.
    anon_dir = os.path.join(base_dir, 'results', 'anonymized_dataset')
    if not os.path.exists(anon_dir):  
        os.makedirs(anon_dir) 

    # CSV output path for the machine-learning results.
    res_path = os.path.join(base_dir, "results/ml/", f'{dataset}_{anonymity_method}_{model}_k_{k_range[0]}_{k_range[-1]}.csv')
    # CSV output path for the anonymity metrics.
    res_path2 = os.path.join(base_dir, "results/kanonymity/", f'{dataset}_{anonymity_method}_k_{k_range[0]}_{k_range[-1]}.csv')


    # Load the dataset (semicolon-delimited CSV).
    data = pd.read_csv(data_path, delimiter=';')
    attributes = list(data.columns)

    # Dataset-specific parameters (quasi-identifier indices, target
    # variable, category flags, numeric maxima, ...).
    param_dict = None
    if dataset == Dataset.MGM:
        param_dict = Dataset.MGM.value
    elif dataset == Dataset.ADULT:
        param_dict = Dataset.ADULT.value


    # Quasi-identifier names, category-type flags, and the indices/names
    # of the non-quasi-identifier attributes.
    qi_names = list(np.array(attributes)[param_dict['quasi_index_list']])
    qi_index = param_dict['quasi_index_list']
    cat_flags = [True] * len(param_dict['quasi_index_list'])
    nqi_index = [index for index in range(len(attributes)) if index not in param_dict['quasi_index_list']]
    nqi_names = [attributes[i] for i in nqi_index]

    # Columns to one-hot encode: the categorical quasi-identifier columns.
    one_hot_original = [col for i, col in enumerate(data[qi_names].columns) if param_dict['category_flag_list'][i]]
    one_hot_anon = one_hot_original

    # Baseline experiment on the original (non-anonymized) data.
    # Label-encode the target variable.
    data[param_dict['target_var']] = data[param_dict['target_var']].astype('category').cat.codes
    # One-hot encode the categorical quasi-identifiers.
    new_data = pd.get_dummies(data, columns=one_hot_original, drop_first=True)
    # Build the target vector and drop the non-QI columns (target, ID, ...)
    # from the model inputs.
    output = data[param_dict['target_var']]
    inpuT = new_data.drop(nqi_names, axis=1)
    # Train/test split.
    train_input, test_input, train_output, test_output = train_test_split(inpuT, output, test_size=0.3, random_state=315)
    # Min-max normalization fitted on the training split only.
    scaler = MinMaxScaler()
    scaler.fit(train_input)
    train_input = scaler.transform(train_input)
    test_input = scaler.transform(test_input)
    # Build the model.
    ml_model = create_model(model)
    print(ml_model)
    # Train the model and record the baseline metrics (stored under k=1).
    ml_model.fit(train_input, train_output)
    test_predict = ml_model.predict(test_input)
    ml_result = []
    anonymized_result = []
    accuracy, precision, recall, f1_score = get_model_metrics(np.asarray(test_output), test_predict)
    ml_result.append((1, accuracy, precision, recall, f1_score))
    print(f"baseline: acc={accuracy:.4f}, precision={precision:.4f}, recall={recall:.4f}, f1={f1_score:.4f}")
    print("----------------------------------------------------------------------------------------------------------")

    # Anonymize with different k values and repeat the ML experiment.
    nodes_count = 1
    # Load the raw data in the algorithm-specific representation.
    raw_data, header = read_data(dataset, param_dict['quasi_index_list'], 
                                    cat_flags)
    # Load the generalization hierarchies (taxonomy trees).
    tax_trees = read_tree_data(generalize_dir, statistic_dir, dataset, attributes,
                            param_dict['quasi_index_list'], cat_flags)

    for k in k_range:
        print(f"K = {k}")
        file = os.path.join(anon_dir,  dataset, anonymity_method, "anonymized_" + str(k) + '_0' + ".csv")
        if os.path.exists(file):
            print("匿名化数据已存在, 直接开始机器学习实验")
        else:
            print("匿名化数据不存在, 执行匿名化算法")
            anonymized_data = None
            start_time = time.time()
            if anonymity_method == KAnonymity.MONDRIAN:
                anonymized_data = mon_get_result_one(tax_trees, raw_data, k, param_dict['quasi_index_list'], nqi_index)
            elif anonymity_method == KAnonymity.OKA:
                anonymized_data = oka_get_result_one(tax_trees, raw_data, k,  param_dict['quasi_index_list'], nqi_index)
            elif anonymity_method == KAnonymity.TDG:
                anonymized_data = tdg_get_result_one(tax_trees, raw_data, k, param_dict['quasi_index_list'], nqi_index)
            runtime = time.time() - start_time
            # Write the anonymized data; the return value is the number of
            # node files produced for this k.
            nodes_count = write_anonymized_data(anonymized_data, anonymity_method, header, k,  dataset)

            # Anonymity-metric evaluation.
            # NOTE(review): "CVAG" in the printed/CSV header looks like a
            # typo for "CAVG" — kept as-is to match existing result files.
            temp_data = anonymized_data
            ncp =  NCP(temp_data, qi_index, tax_trees).get_evaluation_score()
            cavg = CAVG(temp_data, qi_index, k).get_evaluation_score()
            dm = DM(temp_data, qi_index, k).get_evaluation_score()

            anonymized_result.append((k, ncp, cavg, dm, runtime))
            print(f"NCP: {ncp:.4f},  CVAG: {cavg:.4f}, DM: {dm}, runtime: {runtime:.4f}s")
            header_anony = ["k", "NCP", "CVAG", "DM", "runtime"]
            write_results(anonymized_result, header_anony, args.anonymity_method, res_path2)

        # NOTE(review): if the anonymized files already exist, nodes_count
        # keeps its previous value (1 on the first iteration) — confirm
        # this matches the number of node files actually on disk.
        for node in range(nodes_count):
            # Read the anonymized data for this k / node.
            anonymized_data = pd.read_csv(os.path.join(anon_dir, 
                        dataset, anonymity_method, "anonymized_" + str(k) + '_' + str(node) + ".csv"), delimiter=';')
            # Sort by ID (the anonymization algorithms may reorder rows).
            anonymized_data = anonymized_data.sort_values(by=['ID'])

            # Label-encode the target variable.
            anonymized_data[param_dict['target_var']] = anonymized_data[param_dict['target_var']].astype('category').cat.codes

            # Build the target vector and drop the non-QI columns from the inputs.
            output = anonymized_data[param_dict['target_var']]
            inpuT = anonymized_data.drop(nqi_names, axis=1)

            # Convert generalized quasi-identifier values back to numbers.
            # cat_iter advances in step with the QI columns of each row —
            # assumes the QI columns appear in the same order as
            # param_dict['category_flag_list'] (TODO confirm).
            for row_index, row in inpuT.iterrows():
                cat_iter = iter(param_dict['category_flag_list'])
                for col_index, column in row.items():
                    if col_index not in qi_names:
                        continue
                    # Only non-categorical (numeric) attributes are rewritten.
                    if next(cat_iter):
                        continue

                    # Replace suppressed values with the attribute's
                    # configured maximum value.
                    if column == '*':
                        newval = param_dict['max_numeric'].get(col_index)
                        inpuT.at[row_index, col_index] = newval
                        continue

                    try:
                        # Check whether this cell holds a generalized range
                        # ("low-high").
                        val = column.split('-')
                        if len(val) < 2:   # not a range
                            continue
                        if val[0] == "" or val[1] == "":    # only one valid value
                            continue

                        # Replace the range with its midpoint.
                        mean_val = (float(val[0]) + float(val[1])) / 2
                        inpuT.at[row_index, col_index] = mean_val
                    except AttributeError:
                        # Already-numeric cells have no .split; leave unchanged.
                        pass

            # Coerce the rewritten numeric columns back to numeric dtype.
            for item in param_dict['max_numeric'].keys():
                inpuT[item] = pd.to_numeric(inpuT[item])

            # One-hot encode the categorical quasi-identifiers.
            inpuT = pd.get_dummies(inpuT, columns=one_hot_anon, drop_first=True)

            # Train/test split.
            train_input, test_input, train_output, test_output = train_test_split(inpuT, output, test_size=0.3, random_state=315)

            # Min-max normalization fitted on the training split only.
            scaler = MinMaxScaler()
            scaler.fit(train_input)
            train_input = scaler.transform(train_input)
            test_input = scaler.transform(test_input)

            # Build the model.
            ml_model = create_model(model)

            # Train the model.
            ml_model.fit(train_input, train_output)

            test_predict = ml_model.predict(test_input)

            # Append accuracy, precision, recall and f1 to the ML results.
            accuracy, precision, recall, f1_score = get_model_metrics(np.asarray(test_output), test_predict)
            ml_result.append((k, accuracy, precision, recall, f1_score))
            print(f"acc={accuracy:.4f}, precision={precision:.4f}, recall={recall:.4f}, f1={f1_score:.4f}")
            print("----------------------------------------------------------------------------------------------------------")

    header_ml = ["k", 'accuracy', 'precision', 'recall', 'f1']
    write_results(ml_result, header_ml, args.anonymity_method, res_path)

if __name__ == "__main__":
    # Command-line interface: dataset, anonymization algorithm, ML model,
    # the k value, and whether to sweep a whole range of k values.
    cli = argparse.ArgumentParser(
        description='使用不同的算法对数据进行匿名化，并分析匿名化对数据的影响')

    # ["mgm", "adult"]
    dataset_choices = [
        Dataset.MGM.value['dataset'],
        Dataset.ADULT.value['dataset'],
    ]
    cli.add_argument(
        '--dataset',
        choices=dataset_choices,
        default=Dataset.MGM.value['dataset'],
        required=False,
        help='训练使用的数据集, 默认值为mgm数据集',
    )
    cli.add_argument(
        '--anonymity_method',
        choices=list(KAnonymity),  # ["mordrian", "oka", "tdg"]
        default=KAnonymity.MONDRIAN,
        required=False,
        help='k匿名方法, 默认为mordrian',
    )
    cli.add_argument(
        '--model',
        choices=list(Model),  # ["lr", "knn", "svm", "nb", "gbt"]
        default=Model.LR,
        required=False,
        help='机器学习模型, 默认值为LR',
    )
    cli.add_argument(
        '--anonymity',
        default=50,
        type=int,
        required=False,
        help='k-匿名性值, 默认值为50',
    )
    cli.add_argument(
        '--experiment',
        choices=['Y', 'N'],
        default='N',
        required=False,
        help='如果开启实验模式, 将会测试多种k值',
    )
    args = cli.parse_args()

    # Echo the parsed configuration, then run the experiment.
    print("----------------------------------------------------------------------------------------------------------")
    print(args)
    print("----------------------------------------------------------------------------------------------------------")
    start_exp(args)