#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# author    : Xiangwei Wang
# email     : wangxw-cn@qq.com
# datetime  : 2021/5/6 21:00

"""
"""

import torch
from torch import nn, Tensor, optim
from torch.autograd import Variable
import torch.nn.functional as F
from typing import (
    TypeVar, Type, Union, Optional, Any,
    List, Dict, Tuple, Callable, NamedTuple
)

import numpy as np

import random
import time
import copy
import logging
from concurrent.futures import ThreadPoolExecutor
from concurrent import futures
import itertools
import os
import re


class Sample(NamedTuple):
    """One dataset sample: raw transfer records plus label and origin tag.

    The tag records where the sample came from; e.g. sample 49-15 of the
    training set carries the tag ``train-49-15``.
    """
    data: List[Tuple[float, int]]  # all transfer records of the file: [time, up-or-down flag]
    label: int  # class label, 0-49
    tag: str  # origin marker

    def to_tensor_sample(self) -> Tuple[Tensor, int]:
        """Convert to ``(Tensor, int)``.

        The tensor has (Channel, L) layout: time stamps and the
        up/down flags each occupy one channel.
        :return: (data tensor, label)
        """
        return Tensor(self.data).t(), self.label


def read_data(data_dir: str, read_num=None, max_workers: int = 8):
    """
    Read the raw dataset and load all of it into memory.
    :param data_dir: dataset directory, expected to contain the
        "defence" and "undefence" sub-directories
    :param read_num: number of files to read from each sub-directory,
        None means all of them
    :param max_workers: number of reader threads
    :return: (defence_dataset, undefence_dataset) as lists of Sample
    """
    train_dir = os.path.join(data_dir, "defence")
    test_dir = os.path.join(data_dir, "undefence")
    train_files = os.listdir(train_dir)
    test_files = os.listdir(test_dir)
    # File names look like "<label>-<index>". Greedy groups capture the
    # complete numbers (lazy groups would truncate the second number).
    file_name_pattern = re.compile(r"(\d+)-(\d+)")

    def fn_train_tag(s):
        return f"train-{s}"

    def fn_test_tag(s):
        return f"test-{s}"

    def build_raw_dataset(_dir: str, files: List[str], fn_tag: Callable[[str], str],
                          num_samples: Optional[int] = None) -> List[Sample]:
        # Pre-sized so each worker writes only to its own slot; no lock needed.
        samples = [None] * len(files)

        def f(file_name: str, idx: int):
            _res = file_name_pattern.findall(file_name)
            if not _res:
                # File name does not follow the convention; skip it.
                return
            file_path = os.path.join(_dir, file_name)
            _label = int(_res[0][0])
            _data = list()
            with open(file_path, "r") as fr:
                for line in fr:  # iterate lazily instead of readlines()
                    fields = line.split()  # splits on any whitespace run
                    if not fields:
                        continue  # tolerate blank lines
                    _time_str, _stream_type_str = fields
                    _data.append((float(_time_str), int(_stream_type_str)))
            assert len(_data) > 0  # an empty trace counts as a read failure
            samples[idx] = Sample(_data, _label, fn_tag(file_name))

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            _futures = []
            for i, file_name in enumerate(files):
                if num_samples is not None and i >= num_samples:
                    break
                _futures.append(executor.submit(f, file_name, i))
            futures.wait(_futures, return_when=futures.ALL_COMPLETED)
        # NOTE: worker exceptions are deliberately swallowed here — a failed
        # file simply leaves its slot as None and is dropped below.
        return [ele for ele in samples if ele is not None]

    train_dataset = build_raw_dataset(
        train_dir, train_files, fn_train_tag, read_num)
    test_dataset = build_raw_dataset(
        test_dir, test_files, fn_test_tag, read_num)

    return train_dataset, test_dataset

def extract_features(dataset):
    '''
    Extract hand-crafted traffic features per sample.

    Feature numbering below follows the feature list of the third paper:
    1: number of incoming packets
    3: fraction of incoming packets
    5: number of outgoing packets
    2: fraction of outgoing packets
    10: total number of packets
    4: std of the sequence whose i-th element is the number of packets
       seen before the i-th outgoing packet
    7: mean of that sequence
    12: std of the corresponding incoming-packet sequence
    13: mean of that sequence
    16: packets grouped into chunks of 20; std of per-chunk outgoing counts
    11: mean of that sequence
    64: median of that sequence
    65: max of that sequence
    19: incoming packets among the first 30
    20: outgoing packets among the first 30
    50: incoming packets among the last 30
    55: outgoing packets among the last 30
    44: mean of the packets-per-second sequence
    38: std of that sequence
    117: min of that sequence
    42: max of that sequence
    40-70 (9 features): max/mean/std of the total, incoming and outgoing
        inter-arrival time sequences

    Additional features:
    per-chunk-of-20 incoming counts: std / mean / median / max
    per-chunk-of-50 incoming counts: std / mean / median / max
    per-chunk-of-50 outgoing counts: std / mean / median / max
    incoming/outgoing packets among the first 100
    incoming/outgoing packets among the last 100

    NOTE: in the dataset "1" means outgoing (upstream) and "-1" means
    incoming (downstream).
    :param dataset: [sample, sample, ...] — each element must expose
        .data, .label and .tag (see class Sample)
    :return: list of [[feature1, feature2, ...], label, tag]
    '''
    samples = [None] * len(dataset)
    for index, sample in enumerate(dataset):
        # Each element of dataset is a Sample-like record (data, label, tag).
        incoming_num = 0
        outgoing_num = 0
        outgoing_ordering_list = []
        incoming_ordering_list = []

        chunk_20_outgoing_list = []  # outgoing packets per chunk of 20
        outgoing_num_in_chunk_20 = 0
        chunk_20_incoming_list = []  # incoming packets per chunk of 20
        incoming_num_in_chunk_20 = 0
        chunk_50_outgoing_list = []  # outgoing packets per chunk of 50
        outgoing_num_in_chunk_50 = 0
        chunk_50_incoming_list = []  # incoming packets per chunk of 50
        incoming_num_in_chunk_50 = 0

        incoming_first_30_num = 0
        outgoing_first_30_num = 0
        incoming_last_30_num = 0
        outgoing_last_30_num = 0
        incoming_first_100_num = 0
        outgoing_first_100_num = 0
        incoming_last_100_num = 0
        outgoing_last_100_num = 0

        num_per_second_list = []
        num_this_second = 0
        last_second = 0  # index of the currently open second

        total_time_interval_list = []
        last_total_time = -1
        incoming_time_interval_list = []
        last_incoming_time = -1
        outgoing_time_interval_list = []
        last_outgoing_time = -1

        # Burst accounting: runs of >=2 consecutive packets in one direction.
        accum_incoming = 0
        accum_outgoing = 0
        burst_incoming_list = []
        burst_outgoing_list = []

        for seq, record in enumerate(sample.data):
            if last_total_time == -1:
                last_total_time = record[0]
            else:
                # Intervals longer than one second are treated as idle gaps
                # and excluded from the inter-arrival statistics.
                if record[0] - last_total_time <= 1:
                    total_time_interval_list.append(record[0] - last_total_time)
                last_total_time = record[0]

            if int(record[1]) == -1:
                incoming_num += 1
                incoming_num_in_chunk_20 += 1
                incoming_num_in_chunk_50 += 1
                incoming_ordering_list.append(seq)
                if seq < 30:
                    incoming_first_30_num += 1
                elif seq >= len(sample.data) - 30:
                    incoming_last_30_num += 1
                if seq < 100:
                    incoming_first_100_num += 1
                elif seq >= len(sample.data) - 100:
                    incoming_last_100_num += 1

                if last_incoming_time == -1:
                    last_incoming_time = record[0]
                else:
                    if record[0] - last_incoming_time <= 1:
                        incoming_time_interval_list.append(record[0] - last_incoming_time)
                    last_incoming_time = record[0]

                # Direction flipped to incoming: close any outgoing burst.
                accum_incoming += 1
                if accum_outgoing >= 2:
                    burst_outgoing_list.append(accum_outgoing)
                accum_outgoing = 0

            else:
                outgoing_num += 1
                outgoing_num_in_chunk_20 += 1
                outgoing_num_in_chunk_50 += 1
                outgoing_ordering_list.append(seq)
                if seq < 30:
                    outgoing_first_30_num += 1
                elif seq >= len(sample.data) - 30:
                    outgoing_last_30_num += 1
                if seq < 100:
                    outgoing_first_100_num += 1
                elif seq >= len(sample.data) - 100:
                    outgoing_last_100_num += 1

                if last_outgoing_time == -1:
                    last_outgoing_time = record[0]
                else:
                    if record[0] - last_outgoing_time <= 1:
                        outgoing_time_interval_list.append(record[0] - last_outgoing_time)
                    last_outgoing_time = record[0]

                # Direction flipped to outgoing: close any incoming burst.
                accum_outgoing += 1
                if accum_incoming >= 2:
                    burst_incoming_list.append(accum_incoming)
                accum_incoming = 0

            if seq % 20 == 19:
                chunk_20_outgoing_list.append(outgoing_num_in_chunk_20)
                chunk_20_incoming_list.append(incoming_num_in_chunk_20)
                outgoing_num_in_chunk_20 = 0
                incoming_num_in_chunk_20 = 0
            if seq % 50 == 49:
                chunk_50_outgoing_list.append(outgoing_num_in_chunk_50)
                chunk_50_incoming_list.append(incoming_num_in_chunk_50)
                outgoing_num_in_chunk_50 = 0
                incoming_num_in_chunk_50 = 0

            cur_second = int(record[0])
            if cur_second == last_second:
                num_this_second += 1
            else:
                # Close out the finished second, then pad any completely
                # silent seconds in between with zero counts. (The old code
                # dropped num_this_second whenever the gap exceeded one
                # second, losing the count of the just-finished second.)
                num_per_second_list.append(num_this_second)
                for _ in range(cur_second - last_second - 1):
                    num_per_second_list.append(0)
                num_this_second = 1
                last_second = cur_second

        # Flush the burst still open at the end of the trace.
        if accum_outgoing >= 2:
            burst_outgoing_list.append(accum_outgoing)
        if accum_incoming >= 2:
            burst_incoming_list.append(accum_incoming)
        if len(burst_incoming_list) == 0:
            print(f"burst incoming 0: {sample.tag}")
            burst_incoming_list = [0]
        if len(burst_outgoing_list) == 0:
            print(f"burst outgoing 0: {sample.tag}")
            burst_outgoing_list = [0]

        # Flush the trailing partial chunks, if any.
        if len(sample.data) % 20 != 0:
            chunk_20_outgoing_list.append(outgoing_num_in_chunk_20)
            chunk_20_incoming_list.append(incoming_num_in_chunk_20)
        if len(sample.data) % 50 != 0:
            chunk_50_outgoing_list.append(outgoing_num_in_chunk_50)
            chunk_50_incoming_list.append(incoming_num_in_chunk_50)

        # Flush the last (still open) second.
        num_per_second_list.append(num_this_second)

        # Features 4, 7, 12, 13:
        assert len(outgoing_ordering_list) == outgoing_num
        assert len(incoming_ordering_list) == incoming_num

        if len(outgoing_ordering_list) == 0:
            outgoing_ordering_list = [0]
        outgoing_list_avg = np.mean(outgoing_ordering_list)
        outgoing_list_dev = np.std(outgoing_ordering_list)

        if len(incoming_ordering_list) == 0:
            incoming_ordering_list = [0]
        incoming_list_avg = np.mean(incoming_ordering_list)
        incoming_list_dev = np.std(incoming_ordering_list)

        # Features 16, 11, 64, 65:
        if len(sample.data) % 20 != 0:
            assert len(chunk_20_outgoing_list) == (len(sample.data) // 20) + 1
        else:
            assert len(chunk_20_outgoing_list) == (len(sample.data) // 20)

        if len(chunk_20_outgoing_list) == 0:
            chunk_20_outgoing_list = [0]
        chunk_dev = np.std(chunk_20_outgoing_list)
        chunk_avg = np.mean(chunk_20_outgoing_list)
        chunk_med = np.median(chunk_20_outgoing_list)
        chunk_max = np.max(chunk_20_outgoing_list)

        # Features 44, 38, 117, 42:
        if len(num_per_second_list) == 0:
            num_per_second_list = [0]
        second_avg = np.mean(num_per_second_list)
        second_dev = np.std(num_per_second_list)
        second_min = np.min(num_per_second_list)
        second_max = np.max(num_per_second_list)

        # Features 40-70 (inter-arrival statistics); empty lists are
        # replaced with [0] so numpy reductions stay well-defined.
        if len(total_time_interval_list) == 0:
            total_time_interval_list = [0]
        if len(incoming_time_interval_list) == 0:
            incoming_time_interval_list = [0]
        if len(outgoing_time_interval_list) == 0:
            outgoing_time_interval_list = [0]
        total_interval_max = np.max(total_time_interval_list)
        total_interval_avg = np.mean(total_time_interval_list)
        total_interval_dev = np.std(total_time_interval_list)
        incoming_interval_max = np.max(incoming_time_interval_list)
        incoming_interval_avg = np.mean(incoming_time_interval_list)
        incoming_interval_dev = np.std(incoming_time_interval_list)
        outgoing_interval_max = np.max(outgoing_time_interval_list)
        outgoing_interval_avg = np.mean(outgoing_time_interval_list)
        outgoing_interval_dev = np.std(outgoing_time_interval_list)

        samples[index] = [
            [
                incoming_num, float(incoming_num) / len(sample.data),
                outgoing_num, float(outgoing_num) / len(sample.data),
                len(sample.data),

                outgoing_list_dev, outgoing_list_avg,
                incoming_list_dev, incoming_list_avg,
                # Extra metrics; together about +0.5% (random forest)
                np.median(outgoing_ordering_list),
                np.median(incoming_ordering_list),

                chunk_dev, chunk_avg, chunk_med, chunk_max,
                # Extra metrics; together about +0.5% (random forest)
                np.min(chunk_20_outgoing_list),
                np.min(chunk_20_incoming_list),

                incoming_first_30_num, outgoing_first_30_num,
                incoming_last_30_num, outgoing_last_30_num,
                second_avg, second_dev, second_min, second_max,
                # Extra metric; about +0.5% (random forest)
                np.median(num_per_second_list),

                total_interval_max, total_interval_avg, total_interval_dev,
                incoming_interval_max, incoming_interval_avg, incoming_interval_dev,
                outgoing_interval_max, outgoing_interval_avg, outgoing_interval_dev,

                # With the one-second cap on intervals, adding these min and
                # median metrics gained about +3% (random forest)
                np.min(total_time_interval_list), np.median(total_time_interval_list),
                np.min(incoming_time_interval_list), np.median(incoming_time_interval_list),
                np.min(outgoing_time_interval_list), np.median(outgoing_time_interval_list),

                np.std(chunk_20_incoming_list), np.mean(chunk_20_incoming_list),
                np.median(chunk_20_incoming_list), np.max(chunk_20_incoming_list),
                np.std(chunk_50_outgoing_list), np.mean(chunk_50_outgoing_list),
                np.median(chunk_50_outgoing_list), np.max(chunk_50_outgoing_list),
                np.std(chunk_50_incoming_list), np.mean(chunk_50_incoming_list),
                np.median(chunk_50_incoming_list), np.max(chunk_50_incoming_list),
                incoming_first_100_num, outgoing_first_100_num,
                incoming_last_100_num, outgoing_last_100_num,

                # The 10 burst metrics gained only about +0.7% (random forest)
                np.max(burst_incoming_list), np.mean(burst_incoming_list), np.median(burst_incoming_list),
                np.std(burst_incoming_list), len(burst_incoming_list),
                np.max(burst_outgoing_list), np.mean(burst_outgoing_list), np.median(burst_outgoing_list),
                np.std(burst_outgoing_list), len(burst_outgoing_list),
            ],
            sample.label,
            sample.tag
        ]

    return samples

if __name__ == "__main__":
    data_dir_path = "../../Data"

    print("正在读取数据...")
    # 读取data_dir_path中的defence和undefence数据：
    defence_dataset, undefence_dataset = read_data(data_dir_path, None)

    # 提取特征：
    print("正在提取 defence 数据特征...")
    defence_feature_samples = extract_features(defence_dataset)
    defence_10_samples = []
    defence_90_samples = []


    print("正在提取 undefence 数据特征...")
    undefence_feature_samples = extract_features(undefence_dataset)
    undefence_10_samples = []
    undefence_90_samples = []


    # 对50类样本中每一类提取10%的数据（9/90）作为Open World测试集
    for i in range(50):
        test_idx = []
        # 随机获取10%的索引：
        for j in range(9):
            idx = random.randint(0, 89)  # 随机一个索引
            while idx in test_idx:
                idx = random.randint(0, 89)
            test_idx.append(idx)
        # 按照索引将sample放入对应的数据集：
        cnt = 0
        for sample in defence_feature_samples:
            if sample[1] == i:
                if cnt in test_idx:
                    defence_10_samples.append(sample)
                else:
                    defence_90_samples.append(sample)
                cnt += 1

        cnt = 0
        for sample in undefence_feature_samples:
            if sample[1] == i:
                if cnt in test_idx:
                    undefence_10_samples.append(sample)
                else:
                    undefence_90_samples.append(sample)
                cnt += 1

    '''
    defence_feature_samples 是一个列表，
    列表中每一个元素是一个样本：[[feature1, feature2, ...], label, tag]，
    每个样本的第一项是特征值列表（各特征含义见“extract_features”函数注释），第2、3项含义同class Sample。
    '''
    np.save("./defence_100.npy", defence_feature_samples)
    np.save("./defence_10.npy", defence_10_samples)
    np.save("./defence_90.npy", defence_90_samples)
    np.save("./undefence_100.npy", undefence_feature_samples)
    np.save("./undefence_10.npy", undefence_10_samples)
    np.save("./undefence_90.npy", undefence_90_samples)

    # 读取 npy 文件：
    # defence_feature_samples = np.load("./defence_features.npy", allow_pickle=True)
    # undefence_feature_samples = np.load("./undefence_features.npy", allow_pickle=True)

    print(defence_feature_samples[0])
    print(len(defence_feature_samples[0][0]))