import torch
from torch import nn
from d2l import torch as d2l
import sys
sys.path.append('E:\AI\DL')
from Train_ import train_func as tf
from SoftmaxRegression import data_generate as dg

# Mini-batch size and number of DataLoader worker processes for Fashion-MNIST.
batch_size = 256
num_workers = 4
# Project-local helper (SoftmaxRegression.data_generate) — presumably wraps the
# d2l Fashion-MNIST loader and returns (train, test) DataLoaders; TODO confirm
# signature against data_generate.py.
train_iter,test_iter = dg.load_data_fashion_mnist(batch_size,num_workers)

# One-hidden-layer MLP: 784 (flattened 28x28 image) -> 256 hidden -> 10 classes.
num_inputs, num_hiddens, num_outputs = 784, 256, 10

# Scale the Gaussian init by 0.01: raw randn weights (std=1) produce huge
# pre-activations for layers this wide, saturating cross-entropy and stalling
# training. Scale first, then mark as leaf tensors requiring grad so SGD can
# update them (randn(..., requires_grad=True) * 0.01 would yield non-leaves).
W1 = (torch.randn(num_inputs, num_hiddens) * 0.01).requires_grad_(True)
b1 = torch.zeros(num_hiddens, requires_grad=True)
W2 = (torch.randn(num_hiddens, num_outputs) * 0.01).requires_grad_(True)
b2 = torch.zeros(num_outputs, requires_grad=True)
params = [W1, b1, W2, b2]


def Relu(X):
    """Element-wise ReLU implemented from scratch.

    Clamping below at zero is equivalent to max(X, zeros_like(X)):
    negative entries become 0, everything else passes through unchanged.
    """
    return torch.clamp(X, min=0)

def net(X, is_training=True, dropout_p=0.2):
    """Forward pass of the one-hidden-layer MLP with dropout.

    Args:
        X: input batch; reshaped to (batch, num_inputs) regardless of
            the incoming image shape.
        is_training: when True, apply dropout to the hidden layer;
            at evaluation time the full activations are used.
        dropout_p: drop probability for the hidden layer (defaults to
            the previously hard-coded 0.2, so existing callers are
            unaffected).

    Returns:
        Unnormalized logits of shape (batch, num_outputs); pair with
        nn.CrossEntropyLoss, which applies softmax internally.
    """
    X = X.reshape((-1, num_inputs))
    H = Relu(X @ W1 + b1)
    # Inverted dropout already rescales survivors, so eval needs no scaling.
    H_d = dropout(H, dropout_p) if is_training else H
    return H_d @ W2 + b2

def dropout(X, p):
    """Inverted dropout: zero each element of X independently with
    probability p and scale survivors by 1/(1-p), keeping the
    expectation of the output equal to X.

    Args:
        X: floating-point tensor of any shape.
        p: drop probability, must lie in [0, 1].

    Returns:
        Tensor with the same shape, device, and dtype as X.
    """
    assert 0 <= p <= 1
    if p == 1:
        # Everything is dropped.
        return torch.zeros_like(X)
    if p == 0:
        # Nothing is dropped.
        return X
    # rand_like keeps the mask on X's device/dtype; the previous
    # torch.rand(X.shape) allocated on CPU and would fail for CUDA inputs.
    mask = (torch.rand_like(X) > p).float()
    # Multiplying by a mask is friendlier to the GPU than boolean indexing.
    return mask * X / (1 - p)


# CrossEntropyLoss combines log-softmax and NLL, so `net` must emit raw logits.
loss_func = nn.CrossEntropyLoss()

num_epochs = 10
lr = 0.1
# Plain SGD over the manually created parameter tensors (W1, b1, W2, b2).
trainer = torch.optim.SGD(params,lr=lr)
# Project-local helper (Train_.train_func) — presumably runs the epoch loop
# and test-set evaluation; verify its signature against Train_.py.
tf.train(net,train_iter,test_iter,loss_func,num_epochs,trainer)


