import matplotlib.pyplot as plt
import torch
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler  # 通常使用StandardScaler处理图像数据

# (1) Data preparation.
# 1. Load the scikit-learn Digits dataset (flattened image features + labels).
digits = load_digits()
data, target = digits.data, digits.target

# 2. Standardize the feature values. Not strictly required for image data,
#    but a reasonable preprocessing step.
scaler = StandardScaler()
data = scaler.fit_transform(data)

# 3. Hold out 20% of the samples as a test split (fixed seed for repeatability).
train_x, test_x, train_y, test_y = train_test_split(
    data, target, test_size=0.2, random_state=42
)

# Convert to torch tensors: float32 inputs, int64 class labels
# (CrossEntropyLoss expects integer targets).
train_x = torch.as_tensor(train_x, dtype=torch.float32)
test_x = torch.as_tensor(test_x, dtype=torch.float32)
train_y = torch.as_tensor(train_y, dtype=torch.int64)
test_y = torch.as_tensor(test_y, dtype=torch.int64)

# (2) Build the classifier: a two-layer multilayer perceptron.
# NOTE: the output layer has 10 neurons, one per digit class 0-9.
layers = [
    torch.nn.Linear(64, 128),   # Digits samples have 64 features (8x8 images)
    torch.nn.ReLU(),            # nonlinearity between the two linear layers
    torch.nn.Linear(128, 10),   # hidden -> class logits
]
model = torch.nn.Sequential(*layers)

# Cross-entropy loss consumes raw logits plus integer class labels.
loss_fn = torch.nn.CrossEntropyLoss()
# Adam optimizer with a learning rate of 0.01.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# (3) Model training: full-batch gradient descent for 200 epochs.
loss_list = []
for epoch in range(200):  # adjust the epoch count as training demands
    # Forward pass over the entire training set.
    output = model(train_x)
    loss = loss_fn(output, train_y)
    loss_list.append(loss.item())

    # Clear stale gradients, backpropagate, then take an optimizer step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Report training loss and held-out accuracy every 10 epochs.
    if epoch % 10 == 0:
        print(f'Epoch {epoch}, Loss: {loss.item()}')

        # Accuracy on the test split; no gradients are needed for evaluation.
        with torch.no_grad():
            test_output = model(test_x)
            predicted = torch.argmax(test_output, dim=1)
            accuracy = (predicted == test_y).float().mean().item()
            print(f'Epoch {epoch}, Accuracy: {accuracy}')

# (4) Plot the per-epoch training-loss curve.
plt.plot(loss_list)
plt.title('Training Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
