import torch
import logging
from torch.nn import CrossEntropyLoss
from torch.autograd import Variable
from torch.optim import Adam
from cat_dog_torch.settings import *
from cat_dog_torch.model import AlexNet
from cat_dog_torch.loaders import train_dataloader

# Configure the root logger so INFO-level training progress is emitted.
logging.basicConfig(level=logging.INFO)

def start_train():
    """Train AlexNet on the cat/dog dataset and save the learned weights.

    Reads hyperparameters from cat_dog_torch.settings: RATE (learning
    rate), EPOCHS (number of epochs) and MODEL_NAME (checkpoint path).
    Iterates train_dataloader, optimizes with Adam against cross-entropy
    loss, logs the loss every 100 steps, and writes the final state_dict
    to MODEL_NAME.
    """
    # Fall back to CPU when CUDA is unavailable so the script still runs
    # on machines without a GPU (original hardcoded .cuda() would crash).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = AlexNet().to(device)
    model.train()
    logging.info('train start')
    # Loss function: cross-entropy for classification.
    criterion = CrossEntropyLoss()
    loss_list = []
    # Adam optimizer.
    optimizer = Adam(model.parameters(), lr=RATE)
    logging.info('Iteration is %s' % len(train_dataloader))

    for epoch in range(EPOCHS):
        # start=1 gives 1-based step numbers, replacing the manual `i += 1`.
        for step, (image, label) in enumerate(train_dataloader, start=1):
            # Variable is deprecated since PyTorch 0.4; tensors track
            # gradients directly, so plain .to(device) suffices.
            images = image.to(device)
            labels = label.to(device)
            predict_labels = model(images)

            loss = criterion(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            loss_list.append(loss.item())

            if step % 100 == 0:
                logging.info("epoch: {}, step: {}, loss: {}".format(epoch + 1, step, loss.item()))

    torch.save(model.state_dict(), MODEL_NAME)
    logging.info('训练完成')


# Script entry point: start training when executed directly.
if __name__ == '__main__':
    start_train()
