
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
import numpy as np

from base import getEnvPath

path = getEnvPath()
# Path to the raw training dataset.
data_file = path + "data/dataset_train.csv"
# Load the CSV file.
df = pd.read_csv(data_file)
# Timestamp is dropped: it is not a usable numeric feature here.
df = df.drop('Timestamp', axis=1)

# 2. Handle infinite / oversized values:
# replace +/-inf with NaN so they are removed by the NaN-dropping step below.
df.replace([np.inf, -np.inf], np.nan, inplace=True)

# Report how many NaN cells exist before cleaning.
print(f"NaN值数量: {df.isna().sum().sum()}")

# Handle NaN values — here we drop every row that contains any NaN.
df = df.dropna()

# 3. Separate features and target label.
X = df.drop('Label', axis=1)
y = df['Label']

# 4. Encode string class labels as integers.
label_encoder = LabelEncoder()
y_encoded = label_encoder.fit_transform(y)

# Inspect the data range before scaling.
print("数据统计信息:")
print(X.describe())
print(f"最大值: {X.max().max()}")
print(f"最小值: {X.min().min()}")

# 5. Split FIRST, then scale.
# Fitting the scaler on the full dataset before splitting leaks statistics
# from the validation/test sets into training (data leakage). With the same
# random_state and stratify arguments the row partition is identical to a
# split performed on the scaled array, so only the scaling statistics change.
X_train_raw, X_temp_raw, y_train, y_temp = train_test_split(
    X, y_encoded, test_size=0.3, random_state=42, stratify=y_encoded
)
X_val_raw, X_test_raw, y_val, y_test = train_test_split(
    X_temp_raw, y_temp, test_size=0.5, random_state=42, stratify=y_temp
)

# 6. Fit the scaler on the TRAINING partition only.
scaler = StandardScaler()
try:
    X_train = scaler.fit_transform(X_train_raw)
except ValueError as e:
    print(f"标准化错误: {e}")
    # If standard scaling still fails (e.g. remaining extreme values),
    # fall back to the outlier-robust scaler.
    from sklearn.preprocessing import RobustScaler
    print("尝试使用RobustScaler...")
    scaler = RobustScaler()
    X_train = scaler.fit_transform(X_train_raw)

# Apply the train-fitted scaler to the held-out partitions (transform only —
# never fit on validation/test data).
X_val = scaler.transform(X_val_raw)
X_test = scaler.transform(X_test_raw)

# 7. Persist the three splits as CSV files.
feature_columns = X.columns.tolist()

pd.DataFrame(X_train, columns=feature_columns).to_csv(path + 'data/X_train.csv', index=False)
pd.DataFrame(X_val, columns=feature_columns).to_csv(path + 'data/X_val.csv', index=False)
pd.DataFrame(X_test, columns=feature_columns).to_csv(path + 'data/X_test.csv', index=False)

pd.Series(y_train).to_csv(path + 'data/y_train.csv', index=False, header=['Label'])
pd.Series(y_val).to_csv(path + 'data/y_val.csv', index=False, header=['Label'])
pd.Series(y_test).to_csv(path + 'data/y_test.csv', index=False, header=['Label'])

# Persist the fitted preprocessing objects so inference can reuse them.
# NOTE(review): these are written to the current working directory, unlike the
# CSVs above which go under `path` — confirm downstream loaders expect this.
import joblib
joblib.dump(scaler, 'scaler.pkl')
joblib.dump(label_encoder, 'label_encoder.pkl')

print("数据处理完成，文件已保存！")
print(f"训练集大小: {X_train.shape[0]}")
print(f"验证集大小: {X_val.shape[0]}")
print(f"测试集大小: {X_test.shape[0]}")