import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import fetch_openml
# Download the full 70,000-sample MNIST dataset (784 flattened pixels per
# image) from OpenML as plain numpy arrays; labels arrive as strings, so
# cast them to small unsigned ints.
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
X = mnist.data
y = mnist.target.astype(np.uint8)
print("成功从sklearn加载数据集!")

# Split into train/test sets (60000 train + 10000 test).
# Fixes: use an integer test_size so the 10000-sample test set is exact by
# construction (the original float 10000/70000 only hit 10000 via rounding),
# and stratify on the labels so each digit class keeps its overall proportion
# in both splits.
print("划分训练集和测试集...")
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=10000, random_state=42, shuffle=True, stratify=y
)

# Standardize each pixel column to zero mean / unit variance.
# The scaler is fitted on the training split only, then applied to the test
# split, so no test-set statistics leak into preprocessing.
print("标准化数据...")
scaler = StandardScaler()
train_f32 = X_train.astype(np.float32)
test_f32 = X_test.astype(np.float32)
X_train_scaled = scaler.fit_transform(train_f32)
X_test_scaled = scaler.transform(test_f32)

# Write a human-readable summary of the processed dataset to a UTF-8 text
# file (shapes, class count, a sample label, scaled pixel range, and the
# full list of test labels).
summary_lines = [
    "数据集信息:\n",
    f"训练集大小: {X_train_scaled.shape}\n",
    f"测试集大小: {X_test_scaled.shape}\n",
    f"类别数量: {len(np.unique(y))}\n",
    f"样本示例: 标签={y_train[0]}, 像素范围=[{X_train_scaled.min():.2f}, {X_train_scaled.max():.2f}]\n",
    f"\n测试集标签:\n{y_test.tolist()}\n",
]
with open('数据集信息.txt', 'w', encoding='utf-8') as f:
    f.write("".join(summary_lines))

# Persist the standardized arrays in one compressed .npz archive so later
# training scripts can reload them without re-downloading or re-scaling.
arrays = {
    'X_train': X_train_scaled,
    'X_test': X_test_scaled,
    'y_train': y_train,
    'y_test': y_test,
}
np.savez_compressed('mnist_processed.npz', **arrays)
print("处理后的数据集已保存为 'mnist_processed.npz'")