import numpy as np
import torch
from d2l import torch as d2l
from torch import nn
def init_weights4_3_1(m):
    """Initialize the weights of linear layers in-place with N(0, 0.01).

    Intended to be passed to ``nn.Module.apply``; modules that are not
    linear layers are left untouched.

    Args:
        m: a submodule visited by ``nn.Module.apply``.
    """
    # isinstance (rather than the exact ``type(m) == nn.Linear`` check)
    # also covers nn.Linear subclasses, which the original silently skipped.
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, std=0.01)

def mytrain4_3_1(lr=0.1, num_hiddens=256, ac='ReLU', show=1, assertshow=1):
    """Train a one-hidden-layer MLP on Fashion-MNIST and report metrics.

    Builds 784 -> num_hiddens -> 10 with the requested activation
    ('Sigmoid' or 'Tanh'; anything else falls back to ReLU), trains for
    10 epochs with SGD at learning rate ``lr``, and returns the tuple
    ``(train_loss, train_acc, test_acc)`` of the final epoch.

    Args:
        lr: SGD learning rate.
        num_hiddens: width of the hidden layer.
        ac: activation name, one of 'Sigmoid', 'Tanh', 'ReLU' (default).
        show: 1 to draw the live d2l.Animator curves, anything else to skip.
        assertshow: 1 to sanity-check the final metrics with assertions.
    """
    batch_size, num_epochs = 256, 10
    # Activation dispatch table; unknown names fall back to ReLU.
    activations = {'Sigmoid': nn.Sigmoid, 'Tanh': nn.Tanh}
    act_name = ac if ac in activations else 'ReLU'
    net = nn.Sequential(nn.Flatten(), nn.Linear(784, num_hiddens))
    net.add_module(act_name, activations.get(ac, nn.ReLU)())
    net.add_module('Linear', nn.Linear(num_hiddens, 10))
    net.apply(init_weights4_3_1)
    loss = nn.CrossEntropyLoss(reduction='none')
    trainer = torch.optim.SGD(net.parameters(), lr=lr)
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
    animator = None
    if show == 1:
        animator = d2l.Animator(
            xlabel=f'epoch {ac} lr ={float(lr):f} num_hiddens={num_hiddens}',
            ylabel='Y', xlim=[1, num_epochs], ylim=[0.3, 0.9],
            legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = d2l.train_epoch_ch3(net, train_iter, loss, trainer)
        test_acc = d2l.evaluate_accuracy(net, test_iter)
        if animator is not None:
            animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    if assertshow == 1:
        # NOTE: these are the d2l book's sanity bounds on the final metrics.
        assert train_loss < 0.5, train_loss
        assert train_acc <= 1 and train_acc > 0.7, train_acc
        assert test_acc <= 1 and test_acc > 0.7, test_acc
    return train_loss, train_acc, test_acc
# Grid search over hidden-layer width and log-spaced learning rates.
# Each configuration trains silently (show=0, assertshow=0); per-width
# curves are plotted against lr, and every run is recorded in `result`
# as [num_hiddens, lr, test_acc, train_acc, train_loss].
# NOTE: requires `import numpy as np` at the top of the file (np.logspace).
result = []
for num_hiddens in [512, 256, 128, 64, 32, 16]:
    animator2 = d2l.Animator(
        xlabel=f'lr num_hiddens={num_hiddens}', ylabel='Y',
        xlim=[1e-4, 10], xscale='log', ylim=[0.3, 0.9],
        legend=['train loss', 'train acc', 'test acc'])
    # 5 learning rates from 1e-4 to 1e2 (the last two lie past xlim).
    for lr in np.logspace(-4, 2, 5):
        train_loss, train_acc, test_acc = mytrain4_3_1(
            lr=lr, num_hiddens=num_hiddens, show=0, assertshow=0)
        animator2.add(lr, (train_loss, train_acc, test_acc))
        result.append([num_hiddens, lr, test_acc, train_acc, train_loss])
# Best run by test accuracy (index 2): max() is a single O(n) pass,
# no need to sort the whole list just to take the last element.
resultmax = max(result, key=lambda x: x[2])
print(f' max batch_size:{256},num_epochs:{10},num_hiddens:{resultmax[0]},lr:{resultmax[1]},test_acc:{resultmax[2]},train_acc:{resultmax[3]},train_loss:{resultmax[4]}')