import pandas as pd
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
import inspect

# Maximum ratio of attack (DDoS) flows kept per benign flow when downsampling.
mult = 5


def load_file(path, mult=5):
    """Load a CICDDoS2019 CSV and cap the attack/benign class imbalance.

    Parameters
    ----------
    path : str
        Path to a CSV file containing a ' Label' column (note the leading
        space, as emitted by the CICDDoS2019 flow exporter).
    mult : int, optional
        Maximum ratio of attack flows to benign flows kept (default 5).
        Previously a hard-coded module global; now a parameter so callers
        can tune the balance without touching module state.

    Returns
    -------
    tuple(pd.DataFrame, pd.DataFrame)
        ``(benign_flows, attack_flows)`` where the attack frame holds at
        most ``len(benign_flows) * mult`` rows.
    """
    data = pd.read_csv(path, sep=',', low_memory=False)

    # Column name carries a leading space in the original dataset headers.
    is_benign = data[' Label'] == 'BENIGN'
    flows_ok = data[is_benign]
    flows_ddos_full = data[~is_benign]

    # Target size of the final set of anomalous (DDoS) flows.
    size_down_sample = len(flows_ok) * mult

    # Downsample the majority (attack) class only when it exceeds the cap.
    if size_down_sample < len(flows_ddos_full):
        flows_ddos_reduced = resample(flows_ddos_full,
                                      replace=False,            # sample without replacement
                                      n_samples=size_down_sample,  # match capped size
                                      random_state=27)          # reproducible results
    else:
        flows_ddos_reduced = flows_ddos_full

    return flows_ok, flows_ddos_reduced


def load_huge_file(path, mult=5, chunksize=500000):
    """Chunked variant of :func:`load_file` for CSVs too large for memory.

    Parameters
    ----------
    path : str
        Path to a CSV file containing a ' Label' column (leading space).
    mult : int, optional
        Maximum ratio of attack flows to benign flows kept *per chunk*
        (default 5; previously a hard-coded module global).
    chunksize : int, optional
        Number of rows read per chunk (default 500000; previously
        hard-coded).

    Returns
    -------
    tuple(pd.DataFrame, pd.DataFrame)
        ``(benign_flows, attack_flows)`` concatenated over all chunks;
        each chunk's attack rows are capped at ``benign_in_chunk * mult``.
    """
    chunk_list_ok = []    # benign rows from each chunk
    chunk_list_ddos = []  # (possibly downsampled) attack rows from each chunk

    for chunk in pd.read_csv(path, chunksize=chunksize):
        # Split the chunk into benign and attack flows.
        is_benign = chunk[' Label'] == 'BENIGN'
        flows_ok = chunk[is_benign]
        flows_ddos_full = chunk[~is_benign]

        # Target size of the final set of anomalous (DDoS) flows.
        size_down_sample = len(flows_ok) * mult

        # Downsample the majority (attack) class only when over the cap.
        if size_down_sample < len(flows_ddos_full):
            flows_ddos_reduced = resample(flows_ddos_full,
                                          replace=False,            # sample without replacement
                                          n_samples=size_down_sample,  # match capped size
                                          random_state=27)          # reproducible results
        else:
            flows_ddos_reduced = flows_ddos_full

        chunk_list_ok.append(flows_ok)
        chunk_list_ddos.append(flows_ddos_reduced)

    # Stitch the filtered chunks back into full dataframes.
    flows_ok = pd.concat(chunk_list_ok)
    flows_ddos = pd.concat(chunk_list_ddos)

    return flows_ok, flows_ddos


# def string2numeric_hash(text):
#     import hashlib
#     return int(hashlib.md5(text).hexdigest()[:8], 16)


def process_dataset(samples, setType, out_dir='cicddos2019/01-12'):
    """Clean one dataset split and write it to ``<out_dir>/<setType>_set_proc.csv``.

    Cleaning steps: map infinities (the literal string 'Infinity' and real
    float infinities) to 0 in the rate columns, fill missing byte rates
    with 0, encode labels as integers, convert timestamps to POSIX
    seconds, and drop per-flow identifier columns.

    Parameters
    ----------
    samples : pd.DataFrame
        A raw split; must contain the CICDDoS2019 columns referenced below.
    setType : str
        Split name used in the output filename and log line
        (e.g. 'train', 'valid', 'test').
    out_dir : str, optional
        Output directory (default preserves the original hard-coded path).

    Side effects: writes a CSV file and prints a progress line. The input
    frame is not mutated (``replace`` returns a copy that is modified).
    """
    # Flow Packets/s and Flow Bytes/s can hold the string 'Infinity' as
    # well as real float infinities; normalize both to 0.
    samples = samples.replace('Infinity', '0')
    # Bug fix: the original replaced [inf, inf] so negative infinity
    # slipped through to_numeric/astype below.
    samples = samples.replace([float('inf'), float('-inf')], 0)
    samples[' Flow Packets/s'] = pd.to_numeric(samples[' Flow Packets/s'])

    samples['Flow Bytes/s'] = pd.to_numeric(samples['Flow Bytes/s'].fillna(0))

    # Encode class labels as integers: benign -> 0, attack -> 1.
    samples[' Label'] = samples[' Label'].replace({
        'BENIGN': '0',
        'DrDoS_DNS': '1',
    }).astype(int)

    # Timestamps -> POSIX seconds so downstream models get a numeric field.
    samples[' Timestamp'] = pd.to_datetime(samples[' Timestamp'])
    samples[' Timestamp'] = samples[' Timestamp'].apply(lambda x: x.timestamp())

    # Drop per-flow identifiers (flow ID, endpoints, HTTP similarity, and
    # the CSV's unnamed index column): individual flow analysis only.
    for column in (' Source IP', ' Destination IP', 'Flow ID',
                   'SimillarHTTP', 'Unnamed: 0'):
        del samples[column]

    samples.to_csv(f'{out_dir}/{setType}_set_proc.csv', index=None, header=True)
    print(f'{setType} processed')


flows_ok, flows_ddos = load_file('cicddos2019/01-12/DrDoS_DNS.csv')


def _split_80_10_10(frame):
    # 80% train; the remaining 20% is halved into validation and test.
    train_part, rest = train_test_split(frame, test_size=0.2, random_state=42)
    val_part, test_part = train_test_split(rest, test_size=0.5, random_state=42)
    return train_part, val_part, test_part


# Split benign and attack flows independently so each split keeps the ratio.
train_ok, val_ok, test_ok = _split_80_10_10(flows_ok)
train_ddos, val_ddos, test_ddos = _split_80_10_10(flows_ddos)

# Merge the benign and attack partitions, then order each split in time.
train_set = pd.concat([train_ok, train_ddos], ignore_index=True).sort_values(' Timestamp')
val_set = pd.concat([val_ok, val_ddos], ignore_index=True).sort_values(' Timestamp')
test_set = pd.concat([test_ok, test_ddos], ignore_index=True).sort_values(' Timestamp')

print(f"train set's size: {train_set.shape}")
print(f"valid set's size: {val_set.shape}")
print(f"test set's size: {test_set.shape}")

# Persist the raw (pre-processing) splits.
for split_name, split_frame in (('train', train_set),
                                ('valid', val_set),
                                ('test', test_set)):
    split_frame.to_csv(f'cicddos2019/01-12/{split_name}_set.csv', index=None, header=True)

del flows_ddos, flows_ok

process_dataset(train_set, 'train')
process_dataset(val_set, 'valid')
process_dataset(test_set, 'test')

