import time

import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# 1. Report library versions for reproducibility of this preprocessing run.
print(f"numpy version: {np.__version__}")
print(f"pandas version: {pd.__version__}")
# BUG FIX: this line previously printed pandas' version a second time
# (pd.__version__) under the scikit-learn label; report sklearn's own version.
print(f"scikit-learn version: {sklearn.__version__}")

# 2. Load the KDD Cup dataset with pandas and time how long it takes.
start_time = time.time()
df = pd.read_csv("kddcup_data_StandardScaler.txt", header=None)

# 3. Pull out the ten feature columns and the label (last column) as arrays.
selected_columns = [3, 4, 5, 6, 8, 10, 13, 23, 24, 37]
data_feature = df.iloc[:, selected_columns].to_numpy()
data_labels = df.iloc[:, -1].to_numpy()

print(f"数据加载耗时: {time.time() - start_time:.2f}秒")
print(f"数据集特征大小: {data_feature.shape}")
print(f"数据集标签大小: {data_labels.shape}")

# 4. Standardize the features only; labels stay untouched (classification
#    targets must not be scaled).
scaler = StandardScaler()
scaler.fit(data_feature)
data_feature = scaler.transform(data_feature)

# 5. Cast labels to integers for downstream classifiers.
data_labels = data_labels.astype(int)

# 6. Stratified 60/40 train/test split with a fixed seed so runs are
#    reproducible and class proportions match across both subsets.
split = train_test_split(
    data_feature,
    data_labels,
    test_size=0.4,
    random_state=42,
    stratify=data_labels,
)
train_feature, test_feature, train_label, test_label = split

# 7. Report the resulting shapes and per-class counts.
print(f"训练集特征大小: {train_feature.shape}, 训练集标签大小: {train_label.shape}")
print(f"测试集特征大小: {test_feature.shape}, 测试集标签大小: {test_label.shape}")
print(f"类别分布 - 训练集: {np.bincount(train_label)}, 测试集: {np.bincount(test_label)}")