import numpy as np
import csv
from time import *
from sklearn.preprocessing import StandardScaler

# ----- Load the numerized KDD Cup dataset into a (rows, 42) float matrix -----
begin_time = time()                     # start of file-read timing
# Context manager guarantees the file is closed even if a parse error is
# raised inside the loop below (the original leaked the handle on error).
with open("kddcup_data_numerization.txt") as data_numerization:
    lines = data_numerization.readlines()
line_nums = len(lines)
x_data = np.zeros((line_nums, 42))      # line_nums rows x 42 feature columns
for i, raw_line in enumerate(lines):
    fields = raw_line.strip().split(',')
    x_data[i, :] = fields[0:42]         # first 42 comma-separated fields are the features
print('数据集大小：',x_data.shape)

# Timing report
end_time = time()                      # end of file-read timing
total_time = end_time-begin_time       # elapsed seconds spent reading/parsing
print('读取文件耗时：',total_time,'s')

# # 在CPU上，循环计算
# def Zscore_Normalization(x, n):
#     if np.std(x) == 0:
#         x_data[:, n] = 0
#     else:
#         i = 0
#         while i<len(x):
#             x_data[i][n] = (x[i] - np.mean(x)) / np.std(x)
#             i = i + 1
#     print("The ", n , "feature  is normalizing.")
#
# # # 在CPU上，并利用numpy的ndarray数组的广播机制做矩阵计算
# # def Zscore_Normalization(x, n):
# #     if np.std(x) == 0:
# #         x_data[:, n] = 0
# #     else:
# #         x_data[:, n] = (x - np.mean(x)) / np.std(x)
# #     print("The ", n , "feature  is normalizing.")
#
# begin_time = time()                     # 标准化开始时间
# for i in range(42):
#     Zscore_Normalization(x_data[:, i], i)
#
# # 耗时分析
# end_time = time()                      # 标准化结束时间
# total_time = end_time-begin_time       # 标准化耗时
# print('标准化耗时：',total_time,'s')

# Standardize every feature column to zero mean / unit variance using
# sklearn's StandardScaler (fit and transform in one pass over x_data).
begin_time = time()                               # start of standardization timing
scaler = StandardScaler()
x_data = scaler.fit_transform(x_data)             # overwrite x_data with its standardized form

# Timing report
end_time = time()                                 # end of standardization timing
total_time = end_time-begin_time                  # elapsed seconds spent standardizing
print('标准化耗时：',total_time,'s')


# ----- Persist the standardized matrix to disk as CSV, one row per record -----
# Context manager ensures the output file is flushed and closed even if a
# write fails mid-way (the original only closed on the happy path), and
# writerows() replaces the manual while-loop over row indices.
with open("kddcup_data_StandardScaler.txt", 'w', newline='') as data_normalizing:
    csv_writer = csv.writer(data_normalizing)
    csv_writer.writerows(x_data)        # iterating a 2-D ndarray yields its rows
print('数据标准化done！')

