'''
Main module for extracting dataset features.

Host features correlate strongly with flow features, so both are
extracted together here.

One host feature vector corresponds to multiple flows.  The combined
feature vector for each flow is the concatenation of:
    host features + flow features + packet features

i.e. several flows may share the same host feature vector.
'''
'''
Usage:
Currently only a single, pre-split pcap file is handled.
Batch processing will be added later.
'''

from scapy.all import *
from DivestFlowFromPcap import DivestFlow
from HostFeatureExtractor import HostFeatureExtractor
from FlowFeatureExtractor import FlowFeatureExtractor
from PacketFeatureExtractor import PacketFeatureExtractor
import os
import numpy as np

from keras import backend as K
from keras import models
import numpy as np

def Operate_One_Pcap(pcap_file_path, model=None, host_ip="10.152.152.11", chunk_size=5000):
    """Extract host + flow + packet features from one pre-split pcap file.

    The pcap is divided into TCP flows; each flow is processed in chunks
    of at most ``chunk_size`` packets, and one combined feature vector
    (host features + flow features + packet features) is produced per
    chunk.  All vectors are saved to
    ``./processed_dataset/nonTor_feature/<name>_feature.npy``.

    Parameters
    ----------
    pcap_file_path : str
        Path of the pcap file to process.
    model : keras model, optional
        Model handed to ``PacketFeatureExtractor``.  Defaults to the
        module-level ``DFMODEL`` loaded in ``__main__``.
    host_ip : str, optional
        Address of the monitored host.  Remember to change it when
        switching between the Tor and nonTor datasets.
    chunk_size : int, optional
        Maximum number of packets fed to the extractors at once.
    """
    if model is None:
        model = DFMODEL  # fall back to the globally loaded DF model

    # Split the pcap into TCP flows
    divestflow = DivestFlow()
    divestflow.Divest(pcap_file_path, custom_filter="tcp")

    pcap_name = os.path.basename(pcap_file_path)
    hfextarctor = HostFeatureExtractor(pcap_name)

    # Per-flow statistics feeding the host-level features
    flow_packets_count = []
    flow_durations = []

    # One flow/packet extractor pair per chunk
    ffextractor_list = []
    pfextractor_list = []

    for pkt_list in divestflow.flow_data:
        # Flows longer than `chunk_size` packets are split into several
        # chunks; ceil division gives the number of chunks needed.
        n_chunks = -(-len(pkt_list) // chunk_size)
        for i in range(n_chunks):
            chunk = pkt_list[chunk_size * i : chunk_size * (i + 1)]
            ffextractor = FlowFeatureExtractor(chunk)
            pfextractor = PacketFeatureExtractor(chunk, model, hostIP=host_ip)
            ffextractor_list.append(ffextractor)
            pfextractor_list.append(pfextractor)

            # NOTE(review): the *whole* flow's packet count is recorded once
            # per chunk, so long flows weigh more in the host statistics —
            # presumably intentional; confirm against the feature design.
            flow_packets_count.append(len(pkt_list))
            flow_durations.append(ffextractor.flow_duration)

    # Guard: a pcap with no TCP flows would make max()/min() raise.
    if not flow_packets_count:
        print("No TCP flows found in " + pcap_name + ", skipping")
        return

    # Host-level features: flow count plus packet-count/duration statistics
    hfextarctor.flow_counts = len(divestflow.flow_data)
    hfextarctor.packets_counts_max = max(flow_packets_count)
    hfextarctor.packets_counts_min = min(flow_packets_count)
    hfextarctor.packets_counts_mean = np.mean(flow_packets_count)
    hfextarctor.packets_counts_std = np.std(flow_packets_count)

    hfextarctor.flow_duration_max = max(flow_durations)
    hfextarctor.flow_duration_min = min(flow_durations)
    hfextarctor.flow_duration_mean = np.mean(np.array(flow_durations).astype(float))
    hfextarctor.flow_duration_std = np.std(np.array(flow_durations).astype(float))

    # Combine the features and save them, one vector per chunk
    base_name = pcap_name.split('.')[0]
    save_path = f"./processed_dataset/nonTor_feature/{base_name}_feature.npy"

    write_in_buffer = []
    for ff, pf in zip(ffextractor_list, pfextractor_list):
        combined_feature = hfextarctor.Return_Features() + ff.Return_Features() + pf.Return_Features()
        write_in_buffer.append(combined_feature)
    # float64 on purpose: float32 rounds many small values down to 0
    np.save(save_path, np.array(write_in_buffer, dtype=float))

    print(f"Finish processing {pcap_name}")
        
       
if __name__ == '__main__':
    DFMODEL = models.load_model("../df-master/saved_trained_models/ClosedWorld_DF_NoDef.h5")

    # Skip files that were already processed: map every existing
    # "<name>_feature.npy" output back to its "<name>.pcap" input.
    # A set gives O(1) membership tests inside the loop below.
    processed_filenames = set()
    for filename in os.listdir("./processed_dataset/nonTor_feature"):
        # Strip the extension and the trailing "_feature" suffix
        stem = filename.split(".")[0].split("_feature")[0]
        processed_filenames.add(stem + ".pcap")

    # Feed each pcap path to Operate_One_Pcap.  Every input file gets its
    # own .npy output, which makes failures easy to localize and re-run.

    '''
    for filename in os.listdir("./dataset/tor-split/"):
        if filename in processed_filenames:
            continue
        pcap_file_path = "./dataset/tor-split/" + filename
        Operate_One_Pcap(pcap_file_path)
    '''
    # Remember to adjust hostIP before processing nonTor-split
    for filename in os.listdir("./dataset/nonTor-split"):
        if filename in processed_filenames:
            continue
        pcap_file_path = "./dataset/nonTor-split/" + filename
        Operate_One_Pcap(pcap_file_path)
