import torch
from torch import nn
from d2l import torch as d2l

# def relu(X):
#     a = torch.zeros_like(X)
#     return torch.max(X,a)
#
# def net(X):
#     X = X.reshape((-1,num_inputs))
#     H = relu(X @ W1 + b1)
#     return (H @ W2 + b2)
#
# loss = nn.CrossEntropyLoss()
#
# batch_size = 256
# train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)
#
# num_inputs, num_outputs ,num_hiddens = 784, 10 ,256
#
# W1 = nn.Parameter(torch.randn(num_inputs, num_hiddens, requires_grad=True))
# b1 = nn.Parameter(torch.zeros(num_hiddens,requires_grad=True))
# W2 = nn.Parameter(torch.randn(num_hiddens,num_outputs,requires_grad=True))
# b2 = nn.Parameter(torch.zeros(num_outputs,requires_grad=True))
#
# params = [W1, b1, W2, b2]
#
# num_epochs,lr = 10,0.01
# updater = torch.optim.SGD(params,lr)
# d2l.train_ch3(net, train_iter,test_iter,loss,num_epochs,updater=updater)


# Concise implementation (high-level API)

# net = nn.Sequential(nn.Flatten(),nn.Linear(784,256),nn.ReLU(),nn.Linear(256,10))

# Weight decay
def init_params():
    """Initialize linear-model parameters.

    Returns a [weight, bias] list: weight is drawn from a standard normal
    with shape (num_inputs, 1) (num_inputs is a module-level global), bias
    starts at zero. Both track gradients.
    """
    weight = torch.normal(mean=0, std=1, size=(num_inputs, 1),
                          requires_grad=True)
    bias = torch.zeros(1, requires_grad=True)
    return [weight, bias]

def l2_penalty(w):
    """Return the L2 penalty of w: half the sum of its squared entries."""
    return (w ** 2).sum() / 2

def train(lambd):
    """Train a linear regression model with an explicit L2 penalty.

    Args:
        lambd: weight-decay coefficient; 0 disables regularization.

    Side effects: plots train/test loss (log scale) via the animator and
    prints the final L2 norm of the weights. Relies on the module-level
    globals train_iter, test_iter and batch_size.
    """
    w, b = init_params()
    net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
    num_epochs, lr = 100, 0.003
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    # The original body stopped here: it built the animator but never ran a
    # training loop, so calling train() had no effect. Completed below.
    for epoch in range(num_epochs):
        for X, y in train_iter:
            # Squared loss plus the weight-decay term (broadcast over batch).
            l = loss(net(X), y) + lambd * l2_penalty(w)
            l.sum().backward()
            d2l.sgd([w, b], lr, batch_size)
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))
    print('L2 norm of w:', torch.norm(w).item())



# Data for the weight-decay experiment: a deliberately tiny training set
# (20 samples) against 200 features, so the model overfits without
# regularization; the larger test set (100 samples) exposes the gap.
n_train, n_test = 20, 100
num_inputs, batch_size = 200, 5
true_w = torch.ones((num_inputs, 1)) * 0.01
true_b = 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size, is_train=False)

# Dropout

def dropout_layer(X, dropout):
    """Apply inverted dropout to X.

    Each element is zeroed independently with probability `dropout`;
    survivors are scaled by 1 / (1 - dropout) so the expectation of the
    output matches the input.

    Args:
        X: input tensor of any shape.
        dropout: drop probability in [0, 1].

    Returns:
        A tensor with the same shape as X.
    """
    assert 0 <= dropout <= 1
    # Original branches were swapped: dropout == 1 must drop EVERYTHING,
    # dropout == 0 must keep X unchanged.
    if dropout == 1:
        return torch.zeros_like(X)
    if dropout == 0:
        return X
    # torch.rand (uniform on [0, 1)) is required here — the original used
    # torch.randn (standard normal), which does not give a keep-probability
    # of 1 - dropout. Also, Tensor.__float__() raises on multi-element
    # tensors; .float() converts the boolean mask elementwise.
    mask = (torch.rand(X.shape) > dropout).float()
    return mask * X / (1.0 - dropout)
