import pandas as pd
import numpy as np
from constants import *
from config import *
import argparse
import os
import math
from matplotlib import pyplot as plt
from multiprocessing import Pool

# The "seaborn" style alias was renamed in matplotlib 3.6 and removed in
# 3.8 (raises OSError there); fall back to the versioned name so the
# script keeps working across matplotlib versions.
try:
    plt.style.use("seaborn")
except OSError:
    plt.style.use("seaborn-v0_8")


def cos_sim(x, y):
    """
    Return the cosine similarity of x and y.

    If either vector has zero norm the pair is treated as not similar
    (neither positively nor negatively) and 0.0 is returned instead of
    dividing by zero.

    :param x: pd.Series (or 1-D array-like)
    :param y: pd.Series (or 1-D array-like)
    :return: cosine similarity as a float
    """
    denominator = np.linalg.norm(x) * np.linalg.norm(y)
    if denominator == 0:
        return 0.
    return np.dot(x, y) / denominator


def min_sum_sim(x, y):
    """
    Min-sum similarity for discrete features: sum the element-wise minimum
    of the two vectors, then squash through tanh (result lies in [0, 1]
    for non-negative inputs).

    :param x: 1-D array-like
    :param y: 1-D array-like
    :return: tanh of the summed element-wise minima, as a float
    """
    overlap = np.minimum(x, y)  # element-wise min, same as the axis-0 reduce
    return math.tanh(sum(overlap))


def ensure_path_exist(path):
    """
    Create directory ``path`` (including any missing parent directories)
    if it does not already exist.

    ``os.makedirs(..., exist_ok=True)`` replaces the original
    exists-check + ``os.mkdir`` pair: it also creates intermediate
    directories, and it is race-free (no window between the existence
    check and the create call).

    :param path: directory path to create
    """
    os.makedirs(path, exist_ok=True)


def sim(x, y, method="cos"):
    """
    Dispatch to a similarity measure.

    :param x: first vector
    :param y: second vector
    :param method: "cos" selects cosine similarity; any other value
                   falls back to the min-sum similarity
    :return: similarity value
    """
    measure = cos_sim if method == "cos" else min_sum_sim
    return measure(x, y)


def fill_relation_async(i):
    """
    Worker task: compute similarities between row ``i`` of the global
    ``relation_data`` matrix and every later row (i+1 .. end), i.e. one
    strictly-upper-triangular row slice; the caller mirrors it to keep
    the full matrix symmetric.

    Relies on module-level globals assigned in ``__main__`` before the
    pool is created — ``relation_data`` (2-D ndarray), ``len_data`` (row
    count) and ``sim_method`` ("cos" or "min").
    NOTE(review): workers only see these globals with the fork start
    method (the Unix default) — confirm if this must run under spawn.

    The original ``global relation_data, relation_mt`` statement was
    dead code: ``relation_mt`` is never referenced here, and reading a
    global never requires a ``global`` declaration.

    :param i: row index, 0 <= i < len_data
    :return: 1-D similarity vector of length ``len_data - 1 - i``, or
             None for the last row (nothing after it to compare with)
    """
    if i == len_data - 1:
        # Last row: its upper-triangle slice is empty.
        return None
    x = relation_data[i, :]
    part = relation_data[i + 1:, :]
    sim_vector = np.apply_along_axis(lambda y: sim(x, y, sim_method), axis=1, arr=part)
    return sim_vector.squeeze()


if __name__ == '__main__':
    # NOTE(review): the original description was copy-pasted from an
    # unrelated GNN benchmark script; corrected to describe this tool.
    parser = argparse.ArgumentParser(description='Build pairwise similarity matrices for each relation')
    parser.add_argument('--source_type', type=str, default="norm", help="数据预处理类型，norm则使用z-score归一化，normal则不作处理")
    parser.add_argument('--limit_size', type=int, default=2000, help="限制处理数量")
    parser.add_argument('--nthread', type=int, default=5, help="处理进程数量")
    parser.add_argument('--data_path', type=str, default=None,
                        help="源数据文件夹，config中的变量，可以覆盖指定，默认None则按照config处理")
    parser.add_argument('--source_file_name', type=str, default=None,
                        help="源数据文件名，config中的变量，可以覆盖指定，默认None则按照config处理")
    parser.add_argument('--processed_data_path', type=str, default=None,
                        help="目标数据文件夹，config中的变量，可以覆盖指定，默认None则按照config处理")
    args, unknown = parser.parse_known_args()

    # CLI values override the defaults star-imported from config.
    if args.data_path is not None:
        data_path = args.data_path
    if args.source_file_name is not None:
        source_file_name = args.source_file_name
    if args.processed_data_path is not None:
        processed_data_path = args.processed_data_path
    nthread = args.nthread
    ensure_path_exist(data_path)
    ensure_path_exist(processed_data_path)

    # np.str was removed from numpy (deprecated 1.20, removed 1.24); the
    # builtin str is the documented replacement in pandas dtype specs.
    data = pd.read_csv(os.path.join(data_path, source_file_name), dtype={"客户ID号": str})

    # Keep only the first occurrence of each customer ID.
    non_duplicate = data['客户ID号'].drop_duplicates()
    data = data.loc[non_duplicate.index, :]
    limit_size = int(args.limit_size)
    if limit_size > 0:
        data = data.iloc[:limit_size, :]
    print(f"len_data {len (data)}")

    not_norm_feature = ['face_visual_gender']  # already a 0/1 variable, skip normalization
    not_norm_feature.extend(relations['other_sim'])  # other_sim features are discrete, handled elsewhere
    if args.source_type == "norm":
        # z-score normalize the feature columns (everything from column 6 on).
        # .copy() avoids pandas' SettingWithCopy ambiguity when assigning below.
        source = data.iloc[:, 6:].copy()
        for feature, col in source.items():
            if feature in not_norm_feature:
                continue
            # Vectorized z-score instead of a per-element map — same values,
            # one C-level pass. NOTE(review): a constant column (std == 0)
            # still yields inf/NaN here, exactly as the original did —
            # confirm the inputs never contain constant feature columns.
            source[feature] = (col - col.mean()) / col.std()
        data.iloc[:, 6:] = source

    # Map each customer ID to a dense integer uuid used as the matrix index.
    data['uuid'] = pd.Categorical(data['客户ID号']).codes
    data.set_index("uuid", drop=True, inplace=True)
    data.sort_index(inplace=True)
    data['客户ID号'].to_json(os.path.join(processed_data_path, "uuid_id.json"))

    uuid = data.index.tolist()
    uuid.sort()
    len_data = len(data)

    for relation, para_names in relations.items():
        print(f"\nrelation:{relation}")
        # np.float was removed from numpy; builtin float (== np.float64)
        # is the equivalent dtype spec.
        relation_mt = np.zeros(shape=(len_data, len_data), dtype=float)
        relation_data = data[para_names].to_numpy()
        sim_method = "min" if relation in ['other_sim'] else "cos"

        # Workers inherit relation_data / len_data / sim_method as globals;
        # each returns one upper-triangle row slice (None for the last row).
        with Pool(nthread) as p:
            result = p.map(fill_relation_async, uuid)

        # Mirror the upper triangle to make the matrix symmetric; the
        # diagonal is self-similarity, fixed at 1.
        for i, row in enumerate(result):
            if row is not None:
                relation_mt[i, i + 1:] = row
                relation_mt[i + 1:, i] = row
            relation_mt[i, i] = 1

        # Histogram of the off-diagonal similarities as a quick sanity check.
        hist_plot_mt_indices = np.triu_indices_from(relation_mt, k=1)
        hist_plot = pd.Series(relation_mt[hist_plot_mt_indices].reshape(-1))
        plt.cla()
        hist_plot.plot(kind="hist", bins=20)
        plt.title(f"{relation}.png")
        plt.savefig(os.path.join(processed_data_path, f"{relation}.png"))
        plt.show()

        np.save(os.path.join(processed_data_path, f"{relation}_{args.source_type}.npy"), relation_mt)
    data.to_csv(os.path.join(processed_data_path, f"all_data_{args.source_type}.csv"))
