#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import torch as t
import torch.nn as nn
import torch.utils.data as Data
import torch.nn.functional as F


# Load the Fashion-MNIST training and test splits.
# FIX: the original loaded the 10k "t10k" (test) split as training data and the
# 60k "train" split as test data; file names are now matched to variable names.
train_data = np.load(r"./fashion-mnist/train-images.npy")
train_label = np.load(r"./fashion-mnist/train-labels.npy")
test_data = np.load(r"./fashion-mnist/t10k-images.npy")
test_label = np.load(r"./fashion-mnist/t10k-labels.npy")

# Wrap as torch tensors; labels must be int64 (LongTensor) for CrossEntropyLoss.
train_data = t.from_numpy(train_data)
train_label = t.from_numpy(train_label).type(t.LongTensor)
test_data = t.from_numpy(test_data)
test_label = t.from_numpy(test_label).type(t.LongTensor)

def sclear(x):
    """Standardize each feature (column) of a 2-D batch to zero mean / unit variance.

    Pure-torch replacement for the original per-batch ``StandardScaler``: same
    math (population std, ddof=0; zero-variance columns pass through unscaled)
    without the tensor -> numpy -> tensor round-trip on every training batch.

    NOTE(review): statistics are computed per batch rather than fitted once on
    the training set, so batches are scaled inconsistently — kept as-is to
    preserve existing behavior, but worth revisiting.

    Args:
        x: 2-D tensor of shape (batch, features); any numeric dtype.

    Returns:
        FloatTensor of the same shape, standardized column-wise.
    """
    x = x.to(t.float64)                   # match sklearn's float64 accumulation
    mean = x.mean(dim=0)
    std = x.std(dim=0, unbiased=False)    # population std, like StandardScaler
    std[std == 0] = 1.0                   # constant columns: leave unscaled
    return ((x - mean) / std).type(t.FloatTensor)

# Hyper-parameters.
LR = 0.02
BATCH_SIZE_TRAIN = 200
BATCH_SIZE_TEST = 100
EPOCH = 15

train_set = Data.TensorDataset(train_data, train_label)
test_set = Data.TensorDataset(test_data, test_label)

train_loader = Data.DataLoader(
        dataset = train_set,
        batch_size = BATCH_SIZE_TRAIN,
        shuffle=True,     # reshuffle each epoch so mini-batches vary during training
        num_workers=4,    # NOTE(review): worker processes without a __main__ guard can break on spawn-based platforms (Windows/macOS) — confirm target platform
)
test_loader = Data.DataLoader(
        dataset = test_set,
        batch_size = BATCH_SIZE_TEST,
        shuffle=False,    # FIX: evaluation needs no shuffling (original comment was copy-pasted from the training loader)
        num_workers=4,
)


# Four-layer fully-connected classifier: 784 -> 450 -> 300 -> 150 -> 10,
# with ReLU after each hidden layer (built from the size list; identical
# structure and layer indices to a hand-written Sequential).
_DIMS = [784, 450, 300, 150, 10]
_layers = []
for _idx, (_fan_in, _fan_out) in enumerate(zip(_DIMS, _DIMS[1:])):
    _layers.append(nn.Linear(_fan_in, _fan_out))
    if _idx < len(_DIMS) - 2:   # no activation after the output layer
        _layers.append(nn.ReLU())
Net = nn.Sequential(*_layers)

# Plain SGD with momentum (the original comment claimed Adam; the code is SGD).
optimizer = t.optim.SGD(Net.parameters(), lr=LR, momentum=0.8)
loss_func = nn.CrossEntropyLoss()  # expects raw logits and int64 class targets

# Per-epoch metric histories, appended to by the training loop.
losses, acces = [], []             # training loss / accuracy
eval_losses, eval_acces = [], []   # test loss / accuracy

for epoch in range(EPOCH):
    train_loss = 0.0
    train_acc = 0.0
    Net.train()   # enable training-mode behavior
    for batch_x, batch_y in train_loader:
        batch_x = sclear(batch_x)
        out = Net(batch_x)

        loss = loss_func(out, batch_y)
        optimizer.zero_grad()   # reset gradients accumulated by the previous step
        loss.backward()
        optimizer.step()

        # FIX: `loss.data[0].item()` fails on a 0-dim loss tensor in every
        # PyTorch version; `.item()` is the supported scalar read.
        train_loss += loss.item()
        # FIX: softmax (which also lacked an explicit `dim`) is monotonic, so
        # the argmax of the raw logits gives the same predicted class.
        pred_y = out.argmax(dim=1)
        num_correct = (pred_y == batch_y).sum().item()
        train_acc += num_correct / batch_x.shape[0]

    losses.append(train_loss / len(train_loader))
    acces.append(train_acc / len(train_loader))

    # Evaluate on the held-out set.
    eval_loss = 0.0
    eval_acc = 0.0
    Net.eval()   # switch to inference-mode behavior
    with t.no_grad():   # FIX: no gradients are needed during evaluation
        for batch_x, batch_y in test_loader:
            batch_x = sclear(batch_x)
            out = Net(batch_x)
            loss = loss_func(out, batch_y)
            eval_loss += loss.item()
            pred_y = out.argmax(dim=1)
            num_correct = (pred_y == batch_y).sum().item()
            eval_acc += num_correct / batch_x.shape[0]

    eval_losses.append(eval_loss / len(test_loader))
    eval_acces.append(eval_acc / len(test_loader))

    print('epoch: {}, Train Loss: {:.6f}, Train Acc: {:.6f}, Eval Loss: {:.6f}, Eval Acc: {:.6f}'
          .format(epoch, train_loss / len(train_loader), train_acc / len(train_loader), 
                     eval_loss / len(test_loader), eval_acc / len(test_loader)))


def _show_curve(fig_num, title, values):
    """Plot one metric history as a line + scatter in its own figure window."""
    xs = np.arange(len(values))
    plt.figure(num=fig_num)
    plt.title(title)
    plt.plot(xs, values)
    plt.scatter(xs, values)
    plt.show()


# One figure per metric history, same figure numbers and titles as before.
_show_curve(1, 'train loss', losses)
_show_curve(2, 'train acc', acces)
_show_curve(3, 'test loss', eval_losses)
_show_curve(4, 'test acc', eval_acces)



# Persist the entire trained model object (architecture + weights) via pickle.
# NOTE(review): saving `Net.state_dict()` is the more portable convention, but
# changing the format would break existing loaders — confirm how this file is
# consumed before switching.
t.save(Net,'report_Net.pkl')









