#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from my_utils import logging_config
logger = logging_config.init_logger('info')

import numpy as np
import json
import copy
import math

from preprocess import preprocess
from redundancy import redundancy_delete
from kafka import KafkaProducer, KafkaConsumer
from kafka.errors import KafkaError

from Crypto.Cipher import AES
from Crypto import Random
import binascii
import argparse
import random

send_num_sum = 0
nan_var = np.nan  # 缺失值

def complete_ip(ip):
    """
    Left-pad every dot-separated segment of *ip* with zeros to 3 characters.

    e.g. "10.8.4.24" -> "010.008.004.024". Segments already 3+ characters
    long are kept unchanged (str.zfill never truncates), matching the
    original manual-padding behavior.
    """
    return ".".join(seg.zfill(3) for seg in ip.split("."))

def to_dict(rows):
    """
    Convert a batch of raw record strings into a per-ip table.

    Expected record shape (colon-separated):
        metric:objtype:ip[:ip2]:timestamp:value|...

    Result layout:
        {
            ip: {
                "raw":    [ [values of metric 1], [values of metric 2], ... ],
                "raw-ts": [ [timestamps matching those values], ... ],
                "missing_flag" / "outlier_flag" / "redundancy_flag" /
                    "complete_flag": boolean lists parallel to "raw",
                "stat":   {},   # statistics, filled by later stages
                "cols":   [ metric names aligned with "raw" ],
            }
        }

    Unparseable rows are skipped. Negative values are treated as anomalies:
    for cpu metrics roughly half (missing_prob = 0.5) are recovered by
    taking the absolute value (scaled into [0, 1] when > 1), the rest
    become NaN. Finally each ip's metric lists are padded with NaN
    (marked via complete_flag) up to that ip's longest metric list, so
    the per-ip data forms a rectangular table.

    Returns:
        (data, cols, data_num_sum, ip_num_sum) — data_num_sum counts
        records BEFORE the NaN padding.
    """
    data = {}
    cols = []          # ordered list of every metric name seen in this batch
    data_num_sum = 0   # number of parseable raw records
    ip_num_sum = 0     # number of distinct ips

    # First pass: collect all metric names (a name may embed an object name).
    for row in rows:
        try:
            metric = str(row.split(":")[0]).lower()
        except Exception:
            continue
        data_num_sum += 1
        if metric not in cols:
            cols.append(metric)

    # Second pass: parse each record and append it to its ip/metric bucket.
    for row in rows:
        try:
            items = row.split(":")
            metric = str(items[0]).lower()
            obj_type = str(items[1]).lower()
            # Normalize embedded IP addresses so identical endpoints
            # always yield identical keys; which fields hold an IP
            # depends on the object type.
            if obj_type == "ma":
                items[2] = complete_ip(items[2])
            elif obj_type == "vm":
                items[2] = complete_ip(items[2])
                items[3] = complete_ip(items[3])
            else:
                items[3] = complete_ip(items[3])

            ip = ':'.join(items[1:4])
            ts = items[4]
            value = items[5].split("|")[0]
        except Exception:
            continue  # malformed record: skip

        try:
            value = float(value)
            if value < 0:
                # Negative values are anomalies; treat half of the cpu
                # ones as recoverable sign errors, the rest as missing.
                missing_prob = 0.5
                if ("cpu" in metric) and (random.random() < (1 - missing_prob)):
                    value = -1 * value
                    if value > 1:
                        value = value / 100  # scale percentage into [0, 1]
                else:
                    value = nan_var
        except Exception:
            value = nan_var  # non-numeric payload counts as missing

        # TODO: support multiple consumers.

        if ip not in data:
            ip_num_sum += 1
            data[ip] = {
                'raw': [[] for _ in cols],
                'raw-ts': [[] for _ in cols],
                'missing_flag': [[] for _ in cols],
                'outlier_flag': [[] for _ in cols],
                'redundancy_flag': [[] for _ in cols],
                'complete_flag': [[] for _ in cols],
                'stat': {},
            }

        # value and timestamp are recorded in two parallel tables.
        idx = cols.index(metric)  # hoisted: one O(n) lookup instead of six
        bucket = data[ip]
        bucket['raw'][idx].append(value)
        bucket['raw-ts'][idx].append(ts)
        bucket['missing_flag'][idx].append(False)
        bucket['outlier_flag'][idx].append(False)
        bucket['redundancy_flag'][idx].append(False)
        bucket['complete_flag'][idx].append(False)

    # Drop metrics that received no value for a given ip, and attach a
    # per-ip metric-name list ('cols') aligned with the remaining rows.
    # (The rebuilt lists are fresh, so no deepcopy is needed.)
    table_keys = ('raw', 'raw-ts', 'missing_flag', 'outlier_flag',
                  'redundancy_flag', 'complete_flag')
    for ip in data:
        kept = [i for i, values in enumerate(data[ip]['raw']) if len(values) > 0]
        for key in table_keys:
            data[ip][key] = [data[ip][key][i] for i in kept]
        data[ip]['cols'] = [cols[i] for i in kept]

    # Pad shorter metric lists with NaN (marked via complete_flag, so the
    # synthetic values can be removed later) up to the ip's longest metric
    # list, making every ip's table rectangular.
    complete_num = 0
    for ip in data:
        lengths = [len(values) for values in data[ip]['raw']]
        max_record_per_metric = max(lengths)
        min_record_per_metric = min(lengths)
        logger.debug(f"单个metric最长记录数量：{max_record_per_metric}")
        logger.debug(f"单个metric最短记录数量：{min_record_per_metric}")

        # Pad everything up to the maximum; never shorten.
        max_upper_num_per_metric = max_record_per_metric

        for metric_index, values in enumerate(data[ip]['raw']):
            this_record_len = len(values)
            if this_record_len > max_upper_num_per_metric:
                # Unreachable: nothing can be longer than the maximum.
                raise Exception
            for _ in range(this_record_len, max_upper_num_per_metric):
                complete_num += 1
                data[ip]['raw'][metric_index].append(nan_var)
                data[ip]['raw-ts'][metric_index].append("000")
                data[ip]['missing_flag'][metric_index].append(False)
                data[ip]['outlier_flag'][metric_index].append(False)
                data[ip]['redundancy_flag'][metric_index].append(False)
                data[ip]['complete_flag'][metric_index].append(True)

    logger.debug(f"metric数量：{len(cols)}")

    logger.debug(f"填充了 {complete_num}")
    logger.info(f"ip num: {ip_num_sum}, record num: {data_num_sum}")
    # data_num_sum is the pre-padding count; NaNs were added only to square
    # off the 2-D table.
    return data, cols, data_num_sum, ip_num_sum

aes_key = b"1865338218031218"
def aes(plain):
    """
    Encrypt *plain* (stringified) with AES-128/ECB and return it as hex text.

    Requires pycryptodome (pip3 install pycryptodome).
    https://www.jianshu.com/p/5b38b4187b54

    NOTE(review): ECB mode leaks patterns across identical blocks and the
    key is hard-coded — kept as-is for compatibility with the consumer.
    """
    block = AES.block_size
    text = str(plain)
    # Space-pad up to a multiple of the block size (a full extra block
    # when the length is already aligned).
    padded = (text + " " * (block - len(text) % block)).encode()
    encrypted = AES.new(aes_key, AES.MODE_ECB).encrypt(padded)
    # print(aes_decode(binascii.unhexlify(data)))
    return binascii.hexlify(encrypted).decode()

def aes_decode(cipher_hex):
    """
    Decrypt raw AES-128/ECB ciphertext bytes produced by aes().

    Undecodable byte sequences are silently dropped; trailing pad spaces
    are NOT stripped — the caller gets the padded plaintext back.
    """
    plain_bytes = AES.new(aes_key, AES.MODE_ECB).decrypt(cipher_hex)
    return plain_bytes.decode('utf-8', 'ignore')

def main(rows, mode="all", need_standard=False):
    '''
    Process one batch of raw records through the full pipeline.

    mode: "all" = full pipeline (all preprocessing + redundancy removal);
          "simple" (missing values only) is not implemented yet.
    need_standard: forwarded to preprocess(); whether to standardize values.

    Returns:
        (final_records, final_records_crypto) — plain-text records and
        AES-encrypted records (every 10th ip is encrypted). Each record
        carries a flag suffix: m=missing, o=outlier, r=redundant, n=none.
    '''
    logger.info(f"============= Start to process data {len(rows)} =================")
    if mode != "all":
        raise NotImplementedError
    data, cols, _, _ = to_dict(rows)
    preprocess_result, data = preprocess(data, need_standard)
    redundancy_result, data = redundancy_delete(data)
    processed_record_sum = len(redundancy_result)

    # Count missing/outlier/redundant values among the original
    # (non-padded) entries; redundancy_num is needed below to cap how
    # many redundant records get re-emitted.
    missing_num = 0
    outlier_num = 0
    redundancy_num = 0
    for ip in data:
        for metric_index, metric in enumerate(data[ip]['cols']):
            for value_index, completed in enumerate(data[ip]['complete_flag'][metric_index]):
                if not completed:
                    # Not a padding value, so it counts toward statistics:
                    if data[ip]['missing_flag'][metric_index][value_index]:
                        missing_num += 1
                    if data[ip]['outlier_flag'][metric_index][value_index]:
                        outlier_num += 1
                    if data[ip]['redundancy_flag'][metric_index][value_index]:
                        redundancy_num += 1

    # Tag every emitted record with its flags:
    final_records = []
    final_records_crypto = []
    include_redundancy_record = 0
    need_crypto_ips = list(data.keys())[::10]  # every 10th ip gets encrypted
    for ip in data:
        for metric_index, metric in enumerate(data[ip]['cols']):
            for value_index, value in enumerate(data[ip]['preprocessed'][metric_index]):
                if data[ip]['complete_flag'][metric_index][value_index]:
                    continue  # synthetic padding value: never emitted
                flags = ""
                if data[ip]['missing_flag'][metric_index][value_index]:
                    flags += 'm'
                if data[ip]['outlier_flag'][metric_index][value_index]:
                    flags += 'o'
                if data[ip]['redundancy_flag'][metric_index][value_index]:
                    flags += 'r'
                    include_redundancy_record += 1
                if flags == "":
                    flags = 'n'
                if 'r' in flags and include_redundancy_record > redundancy_num / 10:
                    # Keep only ~10% of the redundant records.
                    include_redundancy_record -= 1
                else:
                    ts = data[ip]['raw-ts'][metric_index][value_index]
                    if ip in need_crypto_ips:
                        crypto_key = aes(f"{data[ip]['cols'][metric_index]}:{ip}")
                        crypto_val = aes(value)
                        final_records_crypto.append(
                            f"{crypto_key}:{ts}:{crypto_val}:{flags}|{ts}"
                        )
                    else:
                        final_records.append(
                            f"{data[ip]['cols'][metric_index]}:{ip}:{ts}:{value}:{flags}|{ts}"
                        )

    # NOTE(review): since a plain record's ip field itself contains ':',
    # split(":")[-2] is the *value* field, not the timestamp, while the
    # encrypted records' [1] IS their timestamp (hex key has no colons).
    # Kept as-is — confirm which ordering downstream expects.
    final_records.sort(key=(lambda x: x.split(":")[-2]))
    final_records_crypto.sort(key=(lambda x: x.split(":")[1]))

    logger.info(f"missing value num: {missing_num}; outlier value num: {outlier_num}")
    logger.info(f"removed redundancy record num: {redundancy_num}; final normal record num: {processed_record_sum}")
    logger.info(f"send plain record num: {len(final_records)}, including {include_redundancy_record} redundant")
    logger.info(f"send encrypted record num: {len(final_records_crypto)} from {need_crypto_ips}")
    logger.info(f"============= Finish to process data {len(rows)} =================")
    return final_records, final_records_crypto

def sent_kafka(msgs, to_server, to_topic):
    """
    Send every message in *msgs* to Kafka *to_server* under *to_topic*.

    Increments the module-level send_num_sum counter per queued message.
    Note: producer.send() is asynchronous, so a message counted here can
    still fail at flush time; per-message KafkaErrors are logged and
    skipped rather than aborting the batch.
    """
    global send_num_sum

    logger.info(f"============= Start to send data ({len(msgs)}) to {to_server}, topic={to_topic} =================")

    producer = KafkaProducer(bootstrap_servers=to_server,
                             # key_serializer=lambda k: json.dumps(k).encode('utf8'),
                             # value_serializer=lambda v: json.dumps(v).encode('utf8'),
                             retries=3)
    try:
        for i, msg in enumerate(msgs):
            try:
                logger.debug(f"Start send [{msg}] to [{to_server}], topic={to_topic}")
                producer.send(to_topic, value=msg.encode())
                if i % 500 == 0:
                    logger.info(f"Send {i+1}/{len(msgs)}")
                logger.debug(f"Success send [{msg}] to [{to_server}], topic={to_topic}")
                send_num_sum += 1
            except KafkaError as e:
                logger.error(f"Send msg={msg} to kafka {to_server} failed: {e}")

        logger.info(f"Wating for finishing send...")
        producer.flush()
    finally:
        # Fix: always release the producer's network resources, even if
        # flush (or the loop) raises.
        producer.close()
    logger.info(f"============= Finish to send data ({len(msgs)}) to {to_server}, topic={to_topic} =================")
    logger.info(f"============= Sum: {send_num_sum} =================")

def read_kafka(
        from_server,
        from_topic,
        to_server,
        to_topic,
        batch_size=10000,
        epoch=-1,
        is_save=False,
        save_name="",
        need_standard=False
):
    """
    Consume records from Kafka, process them in batches of *batch_size*,
    and forward the results.

    to_topic: pair of destination topics [plain, encrypted].
    epoch: number of batches to process; -1 means run forever.
    is_save: also dump each batch and its results to ./<save_name>.*
             files (overwritten on every batch).
    need_standard: forwarded to main()/preprocess().
    """
    consumer = KafkaConsumer(from_topic, bootstrap_servers=from_server)

    logger.info(f"Start consuming data from server={from_server}, topic={from_topic}")
    msgs = []
    counter = 0          # total records consumed overall
    counter_in_iter = 0  # records consumed in the current batch

    for msg in consumer:
        counter += 1
        if counter > epoch*batch_size and epoch != -1:
            break
        counter_in_iter += 1
        _msg_value = str(msg.value, encoding="utf8").replace('"', '')
        if counter % 500 == 0:
            logger.info(f"{counter_in_iter}/{batch_size} : {msg.topic} {msg.offset} {_msg_value}")
        msgs.append(_msg_value)
        if len(msgs) >= batch_size:
            counter_in_iter = 0
            logger.info(f"Get {len(msgs)} records in this iteration (sum {counter} until now)")
            if is_save:
                with open(f"./{save_name}.data", "w") as f:
                    f.writelines("\n".join(msgs))

            # Fix: main() returns freshly built lists of strings, so the
            # previous copy.deepcopy of the result was pure overhead.
            result, result_crypto = main(msgs, "all", need_standard)

            logger.info(f"Below is first 10 records:")
            for r in result[:10]:
                print(r)
            logger.info(f"Below is first 10 encrypted records:")
            for r in result_crypto[:10]:
                print(r)

            if is_save:
                with open(f"./{save_name}.result", "w") as f:
                    f.writelines("\n".join(result))
                with open(f"./{save_name}.encrypt.result", "w") as f:
                    f.writelines("\n".join(result_crypto))
            msgs = []
            sent_kafka(result, to_server, to_topic[0])  # plain text
            sent_kafka(result_crypto, to_server, to_topic[1])  # cipher text
    # Fix: release the consumer once the epoch budget is spent.
    consumer.close()
    logger.info(f"Execution over: {epoch} epoch(es), {counter} record(s).")

def read_file(from_path="./data/2021-07-27.data", to_path="./data/2021-08-02.result", need_standard=False):
    """
    Process a previously saved batch file instead of a Kafka stream.

    Reads raw records from *from_path* (one per line, optionally quoted),
    runs the full pipeline, and writes the plain-text results to
    *to_path*. Encrypted results are only printed, not saved.
    """
    with open(from_path, 'r') as f:
        msgs = [line.strip("\n").strip("\"") for line in f.readlines()]
    result, result_crypto = main(msgs, "all", need_standard)
    with open(to_path, "w") as f:
        f.writelines("\n".join(result))
    print(result[:10])
    print(result_crypto[:10])



if __name__ == "__main__":

    # file = "../../../../数据demo/物理机能耗数据.txt"

    # Command-line interface: choose the data source and batching options.
    parser = argparse.ArgumentParser()
    parser.add_argument('--source', type=str, default='kafka', help='kafka or file')
    parser.add_argument('--save', action='store_true', help='save batch data to file')
    parser.add_argument('--filename', type=str, default='2021-07-27', help='save file name (instead of path)')
    # NOTE(review): this help text looks copy-pasted from --source; the
    # option is actually the per-batch record count.
    parser.add_argument('--batchsize', type=int, default=10000, help='kafka or file')
    parser.add_argument('--epoch', type=int, default=-1, help='how many batches to process, -1 for infinity.')
    parser.add_argument('--standard', action='store_true', help='need standardization')

    # Parse the arguments
    opt = parser.parse_args()
    need_standard = opt.standard
    # logger.info(f"Need standardization: {need_standard}")
    if str(opt.source).lower() == "kafka":
        # Stream mode: consume from one (hard-coded) Kafka cluster and
        # publish plain/encrypted results to two topics on another.
        read_kafka(
            from_server="10.8.4.24:9092",
            from_topic="s4-ma-phystats",
            to_server=['10.8.2.20:9092', '10.8.2.30:9092', '10.8.3.20:9092'],
            to_topic=['dynamic', 'encDynamic'],
            batch_size=opt.batchsize,
            epoch=opt.epoch,
            is_save=opt.save,
            save_name=opt.filename,
            need_standard=need_standard,
        )
    elif str(opt.source).lower() == "file":
        # File mode: replay a previously saved batch (paths hard-coded).
        read_file(
            from_path="./data/2021-07-27.data",
            to_path="./data/2021-08-02.result",
            need_standard=need_standard,
        )
    # Any other --source value silently does nothing.

    """
    /etc/hosts 中需要配置：
10.8.2.10 node1
10.8.2.20 node2
10.8.2.30 node3
10.8.3.10 node4
10.8.3.20 node5
10.8.3.30 node6
10.8.4.150 node7
    """