'''
This file processes a raw pcap dataset, such as CrossPlatform (China android),
into json / npy / tsv datasets.
'''

from data_process import open_dataset_deal, dataset_generation
import os
import shutil
import tqdm
import scapy.all as scapy
import random
import json
import numpy as np
import csv
from sklearn.model_selection import StratifiedShuffleSplit

# fixed seed so the random packet sampling below is reproducible across runs
random.seed(40)

def split_one_cap(pcap_path, pcap_file_path, pcap_label):
    '''
    split a large pcap file into packets and save them in directory "[pcap_path]/splitcap/[label]/"

    input:
        pcap_path: root dir for all pcaps
        pcap_file_path: the path of the .pcap file
        pcap_label: the label (or the parent directory) of the file
    '''
    output_path = os.path.join(pcap_path, "splitcap", pcap_label)
    # makedirs(exist_ok=True) also creates the intermediate "splitcap" dir;
    # the old os.mkdir raised FileNotFoundError when that parent was missing
    os.makedirs(output_path, exist_ok=True)
    # quote both paths so files/directories containing spaces do not break the command
    command = f'SplitCap.exe -r "{pcap_file_path}" -s packets 1 -o "{output_path}"'
    os.system(command)

def split_all_caps(pcap_path):
    '''
    Split every pcap found one level below pcap_path into single packets,
    one label sub-directory at a time (actual splitting is done by split_one_cap).
    '''
    print("split_all_caps...")
    for root, label_dirs, _ in os.walk(pcap_path):
        split_root = os.path.join(pcap_path, "splitcap")
        if not os.path.exists(split_root):
            os.mkdir(split_root)
        for label in tqdm.tqdm(label_dirs):
            # only the files directly inside the label directory are split
            walk_result = next(os.walk(os.path.join(root, label)), None)
            if walk_result is None:
                continue
            sub_root, _, file_names = walk_result
            for name in file_names:
                split_one_cap(pcap_path, os.path.join(sub_root, name), label)
        break
    print("split_all_caps done!")

def convert_pcapng_2_pcap(path):
    '''
    convert one pcapng file into pcap and remove the original file

    The file at `path` is first renamed to *.pcapng, then editcap rewrites it
    back to `path` in pcap format, and the renamed original is removed.
    '''
    tmp_name = path.replace('.pcap', '.pcapng')
    shutil.move(path, tmp_name)
    # quote both paths so paths containing spaces do not break the command
    command = f'editcap.exe -F pcap "{tmp_name}" "{path}"'
    try:
        os.system(command)
    except Exception as e:
        # NOTE: os.system does not raise on command failure; this only guards
        # against unexpected interpreter-level errors
        print(e)
    finally:
        # the renamed original is always removed, even if editcap failed
        os.remove(tmp_name)

def all_into_pcap(pcap_path):
    '''
    Treat every file one level below each label directory as a potential
    pcapng and convert it to a plain pcap file.

    input:
        pcap_path: root path for all pcaps
    '''
    print("all_into_pcap...")
    for root, label_dirs, _ in os.walk(pcap_path):
        for label in tqdm.tqdm(label_dirs):
            walk_result = next(os.walk(os.path.join(root, label)), None)
            if walk_result is None:
                continue
            sub_root, _, file_names = walk_result
            for name in file_names:
                convert_pcapng_2_pcap(os.path.join(sub_root, name))
        break
    print("all_into_pcap done!")

def qualified(p):
    '''
    Decide whether a packet is worth keeping as a sample.
    Returns False for noise protocols (DHCP, ICMP, ARP, DNS, DHCPv6, LLMNR,
    NTP) and for TCP/UDP packets whose IP length field is too small.
    '''
    noise_layers = (scapy.DHCP, scapy.ICMP, scapy.ARP, scapy.DNS,
                    scapy.DHCP6, scapy.LLMNRResponse, scapy.LLMNRQuery,
                    scapy.NTP)
    if any(p.haslayer(layer) for layer in noise_layers):
        # print("unqualified layer")
        return False
    if p.haslayer(scapy.TCP) and p.len < 140:
        # print("TCP too small")
        return False
    if p.haslayer(scapy.UDP) and p.len < 100:
        # print("UDP too small")
        return False
    return True

def split_and_filter_one(pcap_path, file_path, max_sample, label):
    '''
    split one pcap file into at most max_sample qualified packets, each saved
    as a single-packet pcap under "[pcap_path]/splitcap/[label]/"

    input:
        pcap_path: root path for all pcaps
        file_path: the packet path
        max_sample: max number of packets to split into
        label: label of the packet (used as output sub-directory name)
    '''
    output_path = os.path.join(pcap_path, "splitcap", label)
    # makedirs(exist_ok=True) also creates the "splitcap" parent if missing;
    # the old os.mkdir raised FileNotFoundError in that case
    os.makedirs(output_path, exist_ok=True)
    # cap the read at 50000 packets to bound memory usage on huge captures
    pkts = scapy.rdpcap(file_path, count=50000)
    unselected = list(range(len(pkts)))
    sampled = 0
    # rejection-sample random packets until enough qualified ones are written
    while sampled < max_sample and len(unselected) > 0:
        idx = random.randint(0, len(unselected) - 1)
        packet = pkts[unselected[idx]]
        if qualified(packet):
            scapy.wrpcap(os.path.join(output_path, label + '_' + str(sampled + 1) + '.pcap'), packet)
            sampled += 1
        # pop(idx) removes the chosen index directly; the old
        # remove(unselected[idx]) performed an extra O(n) value search
        # for the same (unique) element
        unselected.pop(idx)
        

def split_and_filter(pcap_path, max_sample):
    '''
    Sample at most max_sample qualified packets out of every pcap file.
    Too-small packets and DHCP/ICMP/etc. are rejected (see qualified()).

    input:
        pcap_path: root path for all pcaps
        max_sample: max number of packets sampled from a pcap file
    '''
    print("split and filter...")
    for root, label_dirs, _ in os.walk(pcap_path):
        split_root = os.path.join(pcap_path, "splitcap")
        if not os.path.exists(split_root):
            os.mkdir(split_root)
        for label in tqdm.tqdm(label_dirs):
            walk_result = next(os.walk(os.path.join(root, label)), None)
            if walk_result is None:
                continue
            sub_root, _, file_names = walk_result
            for name in file_names:
                split_and_filter_one(pcap_path, os.path.join(sub_root, name), max_sample, label)
        break
    print("split and filter done!")

def remove_tiny_pcap(pcap_path):
    '''
    Delete every file directly under pcap_path whose size is below 1 MB.

    input:
        pcap_path: root path for all pcaps
    '''
    print("remove_tiny_pcap...")
    walk_result = next(os.walk(pcap_path), None)
    if walk_result is not None:
        top_dir, _, file_names = walk_result
        for name in file_names:
            candidate = os.path.join(top_dir, name)
            size = os.path.getsize(candidate)
            if size < 1000000:
                os.remove(candidate)
                print(f"remove {candidate}, size = {size / 1000}KB")
    print("remove_tiny_pcap done")

def list_packets(pcap_path):
    '''
    count the split packet files for each label so the caller can check
    whether each label meets the requested sample number

    input:
        pcap_path: root path for all pcaps

    return:
        packet_number_dict: { label: number }. A dict with the packet-file
        count for each label directory under "[pcap_path]/splitcap"
    '''
    print("list_packets...")
    packet_number_dict = {}
    for parent, dirs, files in os.walk(os.path.join(pcap_path, 'splitcap')):
        for dir in dirs:
            for parent_, dirs_, files_ in os.walk(os.path.join(parent, dir)):
                # count only the files directly inside the label directory;
                # without this break the old code iterated the whole sub-tree
                # and recorded whichever directory os.walk visited last
                packet_number_dict[dir] = len(files_)
                break
        break
    print("list_packets done!")
    return packet_number_dict

def filter_pcap(pcap_path):
    '''
    Remove split single-packet pcap files that are too small to be useful:
    empty files, TCP samples below 0.14KB and UDP samples below 0.1KB, plus
    any file scapy cannot parse.

    input:
        pcap_path: root path for all pcaps
    '''
    print("filter_pcap...")
    for root, label_dirs, _ in os.walk(os.path.join(pcap_path, 'splitcap')):
        for label in tqdm.tqdm(label_dirs):
            for sub_root, _, file_names in os.walk(os.path.join(root, label)):
                for name in tqdm.tqdm(file_names):
                    pkt_file = os.path.join(sub_root, name)
                    if not os.path.getsize(pkt_file):
                        # zero-byte files are useless, delete immediately
                        os.remove(pkt_file)
                        print("current pcap %s is 0KB and is deleted"%pkt_file)
                        continue
                    pkts = scapy.rdpcap(pkt_file, count=50000)
                    size_kb = float(dataset_generation.size_format(os.path.getsize(pkt_file)))
                    try:
                        layer_repr = str(pkts.res)
                        if 'TCP' in layer_repr:
                            if size_kb < 0.14:
                                os.remove(pkt_file)
                        elif 'UDP' in layer_repr:
                            if size_kb < 0.1:
                                os.remove(pkt_file)
                    except Exception:
                        # unparsable packet files are removed as well
                        print("error in data_generation 611: scapy read pcap and analyse error")
                        os.remove(pkt_file)
                break
        break
    print("filter_pcap done!")

def models_deal(x_payload_train, x_payload_test, x_payload_valid, y_train, y_test, y_valid, tsv_path):
    '''
    Write the train / test / valid payload splits as tsv files under
    tsv_path, using a "label<TAB>text_a" header row.
    '''
    def _write_split(texts, labels, out_dir, split_name):
        # header row followed by one (label, payload text) row per sample
        rows = [["label", "text_a"]]
        rows.extend([labels[i], texts[i]] for i in range(len(labels)))
        with open(os.path.join(out_dir, split_name + "_dataset.tsv"), 'w', newline='') as handle:
            csv.writer(handle, delimiter='\t').writerows(rows)
        return 0

    print("Begin to write tsv...")

    save_dir = tsv_path
    _write_split(x_payload_train, y_train, save_dir, "train")
    _write_split(x_payload_test, y_test, save_dir, "test")
    _write_split(x_payload_valid, y_valid, save_dir, "valid")
    print("finish generating pre-train's datagram dataset.\nPlease check in %s" % save_dir)


def generate_dataset(pcap_path,
               samples,
               dataset_save_path):
    '''
    generate the dataset for the split packets:
      1. collect labels from directory names under "[pcap_path]/splitcap"
      2. randomly pick samples[label] packet files per label and extract
         the payload feature of each one
      3. save dataset.json, the chosen file list and label info
      4. split into train/test/valid (80/10/10, stratified) and save npy + tsv

    input:
        pcap_path: root path for all pcaps
        samples: a dict of sample numbers for every label
        dataset_save_path: the path to save datasets
    '''
    print("generate_dataset_json...")
    pcap_path = os.path.join(pcap_path, 'splitcap')

    dataset = {}

    label_name_list = [] # label id -> label name
    label_id = {} # label name -> label id
    label_pcap_path = {} # directory path for each label

    # only the top level of splitcap is scanned: one directory per label
    for parent, dirs, files in os.walk(pcap_path):
        label_name_list.extend(dirs)
        for dir in dirs:
            label_pcap_path[dir] = os.path.join(parent, dir)
        break

    for idx, name in enumerate(label_name_list):
        label_id[name] = idx

    r_file_record = [] # (label id, sample id, file path) of every chosen file
    print("\nBegin to generate features.")

    for key in tqdm.tqdm(label_pcap_path.keys()):
        # key is a label name
        if label_id[key] not in dataset:
            dataset[label_id[key]] = {
                "samples": 0,
                "payload": {}
            }

        # every file under the label directory (recursively)
        target_all_files = [os.path.join(p, f) for p, _, fs in os.walk(label_pcap_path[key]) for f in fs]
        r_files = random.sample(target_all_files, samples[key])
        for r_f in r_files:
            feature_data = dataset_generation.get_feature_packet(r_f, payload_len=128, header_to_remove=('IP', 'TCP_PORT'))
            if feature_data == -1:
                # unreadable / unusable packet, skip it
                continue
            dataset[label_id[key]]["samples"] += 1
            # sample ids are 1-based strings; the old code special-cased the
            # first sample, but both branches produced the same key
            dataset[label_id[key]]["payload"][str(dataset[label_id[key]]["samples"])] = feature_data[0]
            r_file_record.append((label_id[key], dataset[label_id[key]]["samples"], r_f))

    # report how many samples were actually collected per label
    all_data_number = 0
    for index in range(len(label_name_list)):
        print("%s\t%s\t%d" %
              (label_id[label_name_list[index]], label_name_list[index],
               dataset[label_id[label_name_list[index]]]["samples"]))
        all_data_number += dataset[label_id[label_name_list[index]]]["samples"]
    print("all\t%d" % (all_data_number))

    if not os.path.exists(dataset_save_path):
        os.makedirs(dataset_save_path)
        print(f"create dir: {dataset_save_path}")
    with open(os.path.join(dataset_save_path, "chosen_files.txt"), "w", encoding='utf-8') as p_f:
        for lid, sample_id, file in r_file_record:
            p_f.write(str(lid)+"\t"+str(sample_id)+"\t"+file)
            p_f.write("\n")
    with open(os.path.join(dataset_save_path, "label_info.txt"),
              "w",
              encoding='utf-8') as p_f:
        p_f.write('idx' + '\t' + 'name' + '\t' + 'number' + '\n')
        for idx, (name, num) in enumerate(samples.items()):
            p_f.write(str(idx) + '\t' + name + '\t' + str(num) + '\n')
    with open(os.path.join(dataset_save_path, "dataset.json"), "w") as f:
        json.dump(dataset, fp=f, ensure_ascii=False, indent=4)

    X, Y = dataset_generation.obtain_data(
        pcap_path=None, samples=[samples[label_name_list[lid]] for lid in dataset.keys()], features=["payload"], dataset_save_path=None, json_data=dataset
    )

    print("generate npy...")
    # flatten the nested per-label structure into parallel flat lists
    X_payload = []
    Y_all = []
    for app_label in Y:
        for label in app_label:
            Y_all.append(int(label))

    for index_label in range(len(X[0])):
        for index_sample in range(len(X[0][index_label])):
            X_payload.append(X[0][index_label][index_sample])

    # 80% train; the remaining 20% is halved into test / valid
    split_1 = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=41)
    split_2 = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=42)

    x_payload = np.array(X_payload)
    dataset_label = np.array(Y_all)

    x_payload_train = []
    y_train = []

    x_payload_valid = []
    y_valid = []

    x_payload_test = []
    y_test = []

    for train_index, test_index in split_1.split(x_payload, dataset_label):
        x_payload_train, y_train = \
            x_payload[train_index], \
            dataset_label[train_index]
        x_payload_test, y_test = \
            x_payload[test_index], \
            dataset_label[test_index]
    for test_index, valid_index in split_2.split(x_payload_test, y_test):
        x_payload_valid, y_valid = \
            x_payload_test[valid_index], y_test[valid_index]
        x_payload_test, y_test = \
            x_payload_test[test_index], y_test[test_index]

    # fix: build all npy paths with os.path.join; the old code concatenated
    # dataset_save_path + "npy/", which silently pointed at a sibling
    # "...npy" directory whenever dataset_save_path had no trailing separator
    npy_dir = os.path.join(dataset_save_path, "npy")
    if not os.path.exists(npy_dir):
        os.mkdir(npy_dir)

    output_x_payload_train = os.path.join(npy_dir, 'x_datagram_train.npy')
    output_x_payload_test = os.path.join(npy_dir, 'x_datagram_test.npy')
    output_x_payload_valid = os.path.join(npy_dir, 'x_datagram_valid.npy')

    output_y_train = os.path.join(npy_dir, 'y_train.npy')
    output_y_test = os.path.join(npy_dir, 'y_test.npy')
    output_y_valid = os.path.join(npy_dir, 'y_valid.npy')

    np.save(output_x_payload_train, x_payload_train)
    np.save(output_x_payload_test, x_payload_test)
    np.save(output_x_payload_valid, x_payload_valid)

    np.save(output_y_train, y_train)
    np.save(output_y_test, y_test)
    np.save(output_y_valid, y_valid)

    tsv_dir = os.path.join(dataset_save_path, 'tsv')
    if not os.path.exists(tsv_dir):
        os.mkdir(tsv_dir)
    models_deal(x_payload_train, x_payload_test, x_payload_valid,
                y_train, y_test, y_valid, tsv_path=tsv_dir)

    print("generate_dataset done!")

def clean_one_pcap(source_file):
    '''
    clean one pcap file in place:
      1. rename the source file to *_dirty.pcap
      2. let tshark filter noise protocols / tiny frames back into the original name
      3. remove the dirty intermediate file
    '''
    new_source_file = source_file.replace('.pcap', '_dirty.pcap')
    target_file = source_file
    shutil.move(source_file, new_source_file)
    # keep only "real" traffic: drop ARP/DNS/STUN/DHCP(v6)/ICMP(v6)/LLMNR/NBNS/NTP/IGMP
    # and frames of 80 bytes or less
    clean_protocols = '"not arp and not dns and not stun and not dhcpv6 and not icmpv6 and not icmp and not dhcp and not llmnr and not nbns and not ntp and not igmp and frame.len > 80"'
    # quote the file paths so paths containing spaces do not break the command
    cmd = 'tshark.exe -F pcap -r "%s" -Y %s -w "%s"'
    command = cmd % (new_source_file, clean_protocols, target_file)
    try:
        os.system(command)
    except Exception as e:
        # NOTE: os.system does not raise on command failure; this only guards
        # against unexpected interpreter-level errors
        print(e)
    finally:
        # the dirty copy is always removed, even if tshark failed
        os.remove(new_source_file)

def clean_pcap(pcap_path):
    '''
    Run clean_one_pcap on every file one level below each label directory,
    stripping DHCP/ARP/etc. noise from the captures.

    input:
        pcap_path: root path for all pcaps
    '''
    print("clean_pcap...")
    for root, label_dirs, _ in os.walk(pcap_path):
        for label in tqdm.tqdm(label_dirs):
            walk_result = next(os.walk(os.path.join(root, label)), None)
            if walk_result is None:
                continue
            sub_root, _, file_names = walk_result
            for name in file_names:
                clean_one_pcap(os.path.join(sub_root, name))
        break
    print("clean_pcap done!")

if __name__ == '__main__':
    # # CrossPlatform android china
    # pcap_path = "/home/spa/traffic_cls/resources/CrossPlatform-pcap/CP-android-china/"
    # dataset_save_path = "/home/spa/traffic_cls/resources/CrossPlatform/android/china/"

    # # CrossPlatform ios us
    pcap_path = "/home/spa/traffic_cls/resources/CrossPlatform-pcap/CP-ios-us/"
    dataset_save_path = "/home/spa/traffic_cls/resources/CrossPlatform/ios/us/"

    # # ISCX-Tor
    # pcap_path = "/home/spa/traffic_cls/resources/ISCX-Tor-pcap/"
    # dataset_save_path = "/home/spa/traffic_cls/resources/ISCX-Tor/"

    MAX_SAMPLE = 5000   # upper bound on samples kept per label
    samples = {}        # number of picked samples per label in the dataset
    label_num = None    # number of labels in this dataset

    # pipeline switches: toggle which stages to run
    remove_tiny = False   # drop pcaps smaller than 1MB
    file2dir = False      # move loose pcap files into per-label dirs
    split_filter = False  # split pcaps into qualified single packets
    check_num = True      # count packets per label (needed for correct samples)
    gen_dataset = True    # build the json/npy/tsv dataset

    if remove_tiny:
        remove_tiny_pcap(pcap_path)

    if file2dir:
        open_dataset_deal.dataset_file2dir(pcap_path)

    if split_filter:
        split_and_filter(pcap_path, MAX_SAMPLE)

    if check_num:
        # cap each label at MAX_SAMPLE but never request more packets
        # than a label actually has
        packet_info = list_packets(pcap_path)
        samples = {label: min(MAX_SAMPLE, num) for label, num in packet_info.items()}
        print("sample number for each label:")
        print(json.dumps(samples, sort_keys=False, indent=4))

    if gen_dataset:
        generate_dataset(pcap_path, samples, dataset_save_path)
