def train_model(model, dataloaders, criterion, optimizer, num_epoches, device=None):
    """Train ``model`` and return it with the best-validation-accuracy weights.

    Args:
        model: ``torch.nn.Module`` to train.
        dataloaders: dict with ``'train'`` and ``'valid'`` DataLoaders yielding
            ``(inputs, labels)`` batches.
        criterion: loss function called as ``criterion(outputs, labels)``.
        optimizer: optimizer over ``model``'s parameters.
        num_epoches: number of epochs to run.
        device: target ``torch.device``; if ``None``, inferred from the model's
            parameters (backward-compatible with the old module-global ``device``).

    Returns:
        ``(model, val_acc_history)`` — the model with the best validation
        weights loaded, and a list of per-epoch validation accuracies.
    """
    import copy

    if device is None:
        # Put inputs wherever the model's weights already live.
        device = next(model.parameters()).device

    best_acc = 0.0
    # Bug fix: state_dict() returns references to the live parameter tensors;
    # without deepcopy the saved "best" weights are overwritten by later steps.
    best_model_para = copy.deepcopy(model.state_dict())
    val_acc_history = []

    for epoch in range(num_epoches):
        print('Epoch {}/{}'.format(epoch, num_epoches))
        print('-' * 10)

        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_correct = 0

            # Bug fix: iterate the loader for the CURRENT phase — the original
            # always used dataloaders['train'], so "validation" ran on train data.
            for inputs, labels in dataloaders[phase]:
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()

                # Gradients are only needed during training.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)

                    # Bug fix: original line was a syntax error (' , preds = ...').
                    _, preds = torch.max(outputs, 1)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Bug fix: Tensor.size is a method — size(0), not size[0].
                running_loss += loss.item() * inputs.size(0)
                # .item() keeps the accumulator a plain Python int.
                running_correct += torch.sum(preds == labels.data).item()

            # Bug fix: loss was accumulated per-sample above, so normalize by
            # the number of samples, not the number of batches.
            num_samples = len(dataloaders[phase].dataset)
            epoch_loss = running_loss / num_samples
            epoch_acc = running_correct / num_samples

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            if phase == 'valid':
                val_acc_history.append(epoch_acc)
                if epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_para = copy.deepcopy(model.state_dict())

    print('Best val Acc: {:4f}'.format(best_acc))

    # Restore the weights from the best validation epoch.
    model.load_state_dict(best_model_para)

    return model, val_acc_history