# Softmax regression on Fashion-MNIST, implemented from scratch:
# manual parameters, softmax, cross-entropy, and an SGD update step.
import torch
from IPython import display
from d2l import torch as d2l
from data_generate import load_data_fashion_mnist
import sys
# Make the local training utilities importable.
# NOTE(review): 'E:\AI\DL' only works because \A and \D are not
# recognized escape sequences; a raw string r'E:\AI\DL' would be safer
# (this form raises SyntaxWarning on newer Python versions) -- confirm
# before changing, as the literal is left untouched here.
sys.path.append('E:\AI\DL')
from Train_ import train_func as tf
from torch import nn

# --- Hyperparameters and data ---
lr = 0.1            # learning rate used by the manual SGD step in trainer()
batch_size = 256    # minibatch size; trainer() also divides gradients by it
num_workers = 4     # DataLoader worker processes
# assumes load_data_fashion_mnist returns (train DataLoader, test DataLoader)
# over Fashion-MNIST images -- TODO confirm against data_generate
train_iter,test_iter = load_data_fashion_mnist(batch_size,num_workers)

# --- Model parameters (read as globals by net() and trainer()) ---
num_inputs = 784    # 28*28 pixels flattened into one feature vector
num_outputs = 10    # Fashion-MNIST has 10 classes
# Weights drawn from N(0, 0.01^2), bias zero; both tracked by autograd.
W = torch.normal(0,0.01,size=(num_inputs,num_outputs),requires_grad=True)
b = torch.zeros(num_outputs,requires_grad=True)
params = [W,b]      # updated in place by trainer()

def softmax(X):
    """Row-wise softmax.

    Args:
        X: 2-D tensor of logits, shape (batch, num_classes).

    Returns:
        Tensor of the same shape whose rows are non-negative and sum to 1.
    """
    # Subtract the per-row max before exponentiating: exp() of large
    # logits overflows to inf, which turns the division into nan.
    # Softmax is invariant to a constant shift per row, so the result
    # is mathematically unchanged.
    X_shifted = X - X.max(dim=1, keepdim=True).values
    X_exp = torch.exp(X_shifted)
    partition = X_exp.sum(1, keepdim=True)
    return X_exp / partition

def net(X):
    """Softmax-regression model.

    Flattens X to (batch, num_inputs) using the global weight matrix W
    to infer the feature count, applies the affine layer (W, b), and
    maps the logits to probabilities with softmax.
    """
    flattened = X.reshape((-1, W.shape[0]))
    return softmax(flattened @ W + b)

def cross_entropy(y_hat, y):
    """Cross-entropy loss, one value per sample.

    Args:
        y_hat: (batch, num_classes) tensor of predicted probabilities.
        y: (batch,) tensor of integer class labels.

    Returns:
        1-D tensor of length batch holding -log(p_true_class) for each
        sample (a vector of per-sample losses, not their mean).
    """
    # Fancy indexing picks the predicted probability of the true class
    # for every row in one shot.
    true_class_prob = y_hat[range(len(y_hat)), y]
    return -torch.log(true_class_prob)

def trainer():
    """One minibatch-SGD step over the global `params` (W and b).

    The accumulated gradient is divided by the global `batch_size` to
    average the per-sample gradients, scaled by the global `lr`, and
    subtracted in place; each gradient is then zeroed so the next
    backward pass starts fresh. Runs under no_grad so the update itself
    is not tracked by autograd.
    """
    with torch.no_grad():
        for p in params:
            p.add_(p.grad, alpha=-lr / batch_size)
            p.grad.zero_()

num_epochs = 10
# Run the training loop from Train_.train_func with the scratch model,
# per-sample loss and SGD updater defined above.
# NOTE(review): assumes tf.train invokes the updater with no arguments
# (trainer takes none) and reduces the loss vector itself -- confirm
# against Train_.train_func.
tf.train(net,train_iter,test_iter,cross_entropy,num_epochs,trainer)


