import jsonlines
import json
import os
from tqdm import tqdm
import numpy as np

# Grouping window: flows whose start timestamps fall within this many
# units of the first flow in a group are bundled into the same output file.
# NOTE(review): presumably seconds, since 'ts' looks like a UNIX timestamp — confirm.
time_period = 60

# Map of testbed IP address -> human-readable host role.
# Flows are kept only when BOTH endpoints appear in this table; the two
# role strings are joined into the "<src> to <dst>" flow/directory name.
# NOTE(review): addresses match the published CIC-IDS2017 topology — confirm.
cic_iot_2017_ip_list = {
    "192.168.10.50": "Victim network-Web server 16 Public",
    "205.174.165.68": "Victim network-Web server 16 Public",
    "192.168.10.51": "Victim network-Ubuntu server 12 Public",
    "205.174.165.66": "Victim network-Ubuntu server 12 Public",
    "192.168.10.19": "Victim network-Ubuntu 14.4, 32B",
    "192.168.10.17": "Victim network-Ubuntu 14.4, 64B",
    "192.168.10.16": "Victim network-Ubuntu 16.4, 32B",
    "192.168.10.12": "Victim network-Ubuntu 16.4, 64B",
    "192.168.10.9": "Victim network-Win 7 Pro, 64B",
    "192.168.10.5": "Victim network-Win 8.1, 64B",
    "192.168.10.8": "Victim network-Win Vista, 64B",
    "192.168.10.14": "Victim network-Win 10, pro 32B",
    "192.168.10.15": "Victim network-Win 10, 64B",
    "192.168.10.25": "Victim network-MAC",
    "205.174.165.80": "Firewall",
    "172.16.0.1": "Firewall",
    "192.168.10.3": "DNS+ DC Server",
    "205.174.165.73": "Attackers network-Kali",
    "205.174.165.69": "Attackers network-Win 1",
    "205.174.165.70": "Attackers network-Win 2",
    "205.174.165.71": "Attackers network-Win 3"
}

# Root directory for the per-flow JSON output files.
output_dir = './dataset/CIC_IOT-dataset-2017'

# Create the output directory; exist_ok avoids the racy check-then-create.
os.makedirs(output_dir, exist_ok=True)

# Read every labeled flowmeter record and bucket it by the
# "<src role> to <dst role>" endpoint-pair name.
ip_logs = {}
with jsonlines.open("dataset/CIC_IOT-dataset-2017/flowmeter_with_label.log", mode='r') as flowmeter_results:
    for flowmeter_row in tqdm(flowmeter_results):
        # Skip short flows (< 10 packets). len() on the parsed JSON list
        # suffices -- no need to build a throwaway numpy array first.
        if len(json.loads(flowmeter_row['packet_timestamp_vector'])) < 10:
            continue
        src_ip = flowmeter_row['id.orig_h']
        des_ip = flowmeter_row['id.resp_h']
        # Keep only flows where BOTH endpoints are known testbed hosts;
        # .get() does one lookup per address instead of two.
        src_name = cic_iot_2017_ip_list.get(src_ip)
        des_name = cic_iot_2017_ip_list.get(des_ip)
        if src_name is None or des_name is None:
            continue
        flow_name = f"{src_name} to {des_name}"
        ip_logs.setdefault(flow_name, []).append(flowmeter_row)
# No explicit close needed: the with-statement already closed the reader
# (the original called .close() redundantly inside the with block).

def _write_group(flow_name, app_version, group):
    """Serialize one burst of flow records to <output_dir>/<flow_name>/<app_version>/<ts>.pcap.json.

    Each record is reduced to uid, start timestamp, signed packet lengths
    (sign encodes direction) and per-packet arrival deltas, written as one
    JSON array.
    """
    # File is named after the earliest 'ts' in the group.
    timestamp = min(log['ts'] for log in group)
    output_file = os.path.join(output_dir, flow_name, app_version, f'{timestamp}.pcap.json')
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    records = []
    for flowmeter_row in group:
        packet_timestamp_vector_np = np.array(json.loads(flowmeter_row['packet_timestamp_vector']))
        # .lower() turns Python-style True/False text into JSON true/false;
        # 2*{0,1}-1 maps direction flags to {-1,+1}.
        packet_direction_vector_np = 2 * np.array(json.loads(flowmeter_row["packet_direction_vector"].lower())) - 1
        # Signed payload sizes: negative = one direction, positive = the other.
        packet_payload_size_np = np.array(json.loads(flowmeter_row['packet_payload_size_vector'])) * packet_direction_vector_np
        # Arrival times relative to the first packet of the flow.
        arrive_time_delta_np = packet_timestamp_vector_np - packet_timestamp_vector_np[0]
        records.append({"uid": flowmeter_row['uid'],
                        'start_timestamp': flowmeter_row['ts'],
                        'packet_length': packet_payload_size_np.tolist(),
                        'arrive_time_delta': arrive_time_delta_np.tolist(),
                        'id.orig_h': flowmeter_row['id.orig_h'],
                        'id.resp_h': flowmeter_row['id.resp_h'],
                        })
    # Join with ',' to reproduce the original compact "[{...},{...}]" layout;
    # the with-statement closes the file (no redundant f.close()).
    with open(output_file, 'w') as f:
        f.write('[')
        f.write(','.join(json.dumps(record) for record in records))
        f.write(']')


for flow_name, logs in ip_logs.items():
    # Chronological order; float() parses 'ts' safely (the original used
    # eval() here, which is unsafe on data and inconsistent with this key).
    logs.sort(key=lambda x: float(x['ts']))
    app_version = '0'
    group = []
    for flowrow in logs:
        # A row joins the current group while it starts within `time_period`
        # of the group's first row and the group holds fewer than 1000 rows.
        if not group or (abs(float(flowrow['ts']) - float(group[0]['ts'])) < time_period
                         and len(group) < 1000):
            group.append(flowrow)
        else:
            _write_group(flow_name, app_version, group)
            # BUGFIX: seed the next group with the row that triggered the
            # flush -- the original reset to [] and silently dropped it.
            group = [flowrow]
    # BUGFIX: flush the trailing group -- the original never wrote out
    # whatever remained after the last row.
    if group:
        _write_group(flow_name, app_version, group)