#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Project logger is initialised first so later imports/statements can log.
from my_utils import logging_config
logger = logging_config.init_logger()

import pandas as pd
import numpy as np
import csv
import argparse
import os
import sys
# Principal Component Analysis, used below for dimensionality reduction.
from sklearn.decomposition import PCA
import random
# Fix RNG seeds (stdlib and numpy) so runs are reproducible.
seed = 1
random.seed(seed)
np.random.seed(seed)
import copy
import math

# Globally suppress numpy divide-by-zero / invalid-value warnings.
np.seterr(divide='ignore', invalid='ignore')

def devide(pca_result):
    """Split a sequence into two halves.

    Returns a two-element list ``[first_half, second_half]`` (the first half
    gets the smaller share for odd lengths) when the input holds more than
    one element; otherwise a single-element list wrapping the input as-is.
    """
    n = len(pca_result)
    if n <= 1:
        # Nothing to split: hand the sequence back wrapped in a list.
        return [pca_result]
    mid = n // 2
    return [pca_result[:mid], pca_result[mid:]]


def redundancy_delete(data):
    """Detect and drop one "redundant" group of records per host.

    For each host key ``ip`` in ``data`` the function:
      1. transposes ``data[ip]['preprocessed']`` (metrics x time) into
         per-timestep rows and reduces them with PCA (at most 8 components);
      2. recursively bisects the rows along each PCA dimension into groups;
      3. averages each group into a "keypoint" and removes the single group
         whose leave-one-out L2 norm deviates least from the global norm;
      4. writes the surviving values into ``data[ip]['redundancy-removed']``
         and ``data[ip]['redundancy-removed-ts']``, and marks removed rows in
         ``data[ip]['redundancy_flag']``.

    Args:
        data: dict keyed by host id. Each entry is read for the keys
            'preprocessed', 'complete_flag', 'redundancy_flag', 'raw-ts' and
            'cols', and is updated in place (schema inferred from usage —
            confirm against the caller).

    Returns:
        Tuple ``(restore_data_after_redundancy, data)`` where the first item
        is a list of ``"col:host:ts:value|ts"`` strings for every kept,
        non-complete value, and the second is the mutated input dict.
    """
    restore_data_after_redundancy = []
    for ip in data:
        metric_num = len(data[ip]['preprocessed'])
        # One row per timestep, one column per metric (relative time order).
        raw_data = np.asarray(copy.deepcopy(data[ip]['preprocessed'])).T

        # Reduction target: at most 8 components, bounded by both the number
        # of metrics and the number of samples per metric.
        to_dim = min(min(8, metric_num), len(data[ip]['preprocessed'][0]))
        if metric_num <= to_dim:
            # Already at (or below) the target width — skip PCA.
            pca_result = raw_data
        else:
            pca = PCA(n_components=to_dim)
            pca_result = pca.fit_transform(raw_data)

        # Each row becomes: [row_index, pca_0..pca_{to_dim-1}, raw metrics...].
        row_ids = list(range(len(raw_data)))
        pca_result = np.insert(pca_result, 0, values=row_ids, axis=1)
        pca_result = np.concatenate((pca_result, raw_data), axis=1)

        # Recursive bisection: for every PCA dimension, sort each current
        # group by that dimension (descending) and split it in half.
        # NOTE: this replaces the original locals()['pca_result_i'] trick,
        # whose persistence is undefined behavior per the Python docs, and
        # which raised NameError when to_dim == 0.
        groups = [copy.deepcopy(pca_result)]
        for dim in range(to_dim):
            next_groups = []
            for group in groups:
                try:
                    group_sorted = sorted(group, key=lambda row: row[dim + 1], reverse=True)
                except Exception as e:
                    logger.error(f"sorting group on dim {dim} failed: {e}")
                    sys.exit(1)  # was exit(0): a failure must not report success
                next_groups.extend(devide(group_sorted))
            groups = next_groups
        devide_result = copy.deepcopy(groups)

        # Aggregate each group into one keypoint = column-wise mean.
        keypoints = []
        property_num = metric_num + 1 + to_dim  # +1 for the index column

        for group in devide_result:
            num_in_group = len(group)
            if num_in_group == 0:
                # Defensive: an empty group would divide by zero below.
                continue
            if property_num != len(group[0]):
                logger.warning(
                    f"record width {len(group[0])} != expected {property_num}"
                )
            keypoint = [0 for _ in range(property_num)]
            for record in group:
                for col in range(property_num):
                    keypoint[col] += record[col]
            # Column 0 of a keypoint is still the (averaged) row index.
            keypoint = [x / num_in_group for x in keypoint]
            keypoints.append(keypoint)

        # Pick the group whose removal changes the overall norm the least.
        kp_num = len(keypoints)
        if kp_num <= 1:
            group_id = -1  # no comparison possible: remove nothing
        else:
            # Global value: L2 norm of the mean PCA vector over all keypoints.
            metric_sum = [0 for _ in range(to_dim)]
            for keypoint in keypoints:
                for d in range(1, 1 + to_dim):
                    metric_sum[d - 1] += keypoint[d]
            metric_sum = [v / kp_num for v in metric_sum]
            global_var = math.sqrt(sum(v ** 2 for v in metric_sum))

            logger.debug(f"全局数据方差：{global_var}")

            # Leave-one-out norms: same statistic with group i excluded.
            group_vars = []
            for i in range(kp_num):
                partial_metric_sum = [0 for _ in range(to_dim)]
                for j, keypoint in enumerate(keypoints):
                    if i == j:
                        continue
                    for d in range(1, 1 + to_dim):
                        partial_metric_sum[d - 1] += keypoint[d]
                partial_metric_sum = [v / (kp_num - 1) for v in partial_metric_sum]
                group_vars.append(math.sqrt(sum(v ** 2 for v in partial_metric_sum)))

            affects = []
            for i, this_var in enumerate(group_vars):
                affect = math.fabs(this_var - global_var)
                affects.append(affect)
                logger.debug(f"第{i}组数据方差：{this_var}，与全局方差偏离值：{affect}")
            # Smallest deviation => most redundant group.
            group_id = affects.index(min(affects))
            logger.debug(f"最小偏离值 {min(affects)}，组号 {group_id}")

        # Drop group `group_id`; keep every other row as [index, raw metrics...].
        pca_result_devided = copy.deepcopy(devide_result)
        final_data = []
        redundancy_num = 0
        for i, group in enumerate(pca_result_devided):
            if i == group_id:
                for records in group:
                    rec_id = int(records[0])
                    for metric_index, _ in enumerate(records[1 + to_dim:]):
                        data[ip]['redundancy_flag'][metric_index][rec_id] = True
                        if data[ip]['complete_flag'][metric_index][rec_id] == False:
                            redundancy_num += 1
            else:
                for row in group:
                    # Strip the PCA columns, keep [index] + raw metric values.
                    final_data.append([row[0]] + list(row[1 + to_dim:]))
        logger.debug(f"remove redundant {redundancy_num} records in group_{group_id}")

        # Preprocessing keeps record counts; redundancy removal does not, so
        # the timestamp table is rebuilt alongside the values.
        data[ip]['redundancy-removed'] = [[] for _ in range(metric_num)]
        data[ip]['redundancy-removed-ts'] = [[] for _ in range(metric_num)]

        # Restore chronological (index) order before emitting.
        final_data.sort(key=lambda row: row[0])
        for record in final_data:
            rec_id = int(record[0])
            for metric_index, metric_value in enumerate(record[1:]):
                # Only values not already flagged complete are restored.
                if data[ip]['complete_flag'][metric_index][rec_id] == False:
                    ts_ = data[ip]['raw-ts'][metric_index][rec_id]
                    data[ip]['redundancy-removed'][metric_index].append(metric_value)
                    data[ip]['redundancy-removed-ts'][metric_index].append(ts_)
                    restore_data_after_redundancy.append(
                        f"{data[ip]['cols'][metric_index]}:{ip.split('_')[0]}:{ts_}:{metric_value}|{ts_}"
                    )

    # 'redundancy-removed' is metric-major: one row per metric, one column per
    # kept (relative) timestep; 'redundancy-removed-ts' holds the matching
    # timestamps.
    return restore_data_after_redundancy, data
