import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder


def prepare_train_test_data(path="./", target_column="RT", features=None,
                            sparse_features=None, density=0.04,
                            seed=520):
    """Split the raw dataset into train/test CSVs at a given training density.

    Reads ``<path>csv/tp_final.csv`` (tab-separated), label-encodes the
    categorical columns, splits the rows, and writes
    ``<path>preprocessed_data/{train,test}_<target>_<density>_<seed>.csv``.

    Args:
        path: Dataset root directory; must end with a path separator, since
            file names are built by plain string concatenation.
        target_column: Name of the target metric; used only in output names.
        features: Columns to load from the CSV. Defaults to
            ``["User ID", "Service ID"]``.
        sparse_features: Categorical columns to label-encode. Defaults to
            ``["User ID", "Service ID"]``.
        density: Fraction of rows assigned to the training split
            (``train_size`` of ``train_test_split``).
        seed: Random state for the split, and part of the output file names.
    """
    # Avoid mutable default arguments: bind fresh lists per call.
    if features is None:
        features = ["User ID", "Service ID"]
    if sparse_features is None:
        sparse_features = ["User ID", "Service ID"]

    data = pd.read_csv(path + 'csv/tp_final.csv', sep='\t', usecols=features)

    # Label-encode each categorical feature in place.
    # NOTE(review): the original comment also mentioned a transformation for
    # dense features, but none is performed here.
    for feat in sparse_features:
        data[feat] = LabelEncoder().fit_transform(data[feat])

    train, test = train_test_split(data, train_size=density, random_state=seed)

    # Shared file-name suffix for both output splits.
    suffix = f"{target_column}_{density}_{seed}.csv"
    train.to_csv(path + "preprocessed_data/train_" + suffix, sep='\t', index=False)
    test.to_csv(path + "preprocessed_data/test_" + suffix, sep='\t', index=False)


if __name__ == '__main__':
    # Data loading configuration.
    path = './dataset1/'
    target_column = "TP"
    # All categorical columns; the loaded feature set is these columns
    # plus the target value column.
    sparse_features = ["User ID", "Service ID", "AS_x", "AS_y", "Service Provider",
                       "Country_x", "Country_y", "coordinate_x", "coordinate_y", "IP Address_x",
                       "IP Address_y"]
    features = sparse_features + [target_column]
    seed = 520
    # Produce one train/test split per training-set density.
    for density in (0.04, 0.08, 0.12, 0.16, 0.20):
        print(f'当前处理数据训练集数据密度: {density: .2f}')
        prepare_train_test_data(path=path, target_column=target_column,
                                features=features, sparse_features=sparse_features,
                                seed=seed, density=density)



