import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
from modelAttention import Encoder, DecoderWithAttention
from datasets import *
from utils import adjust_learning_rate, save_checkpoint_att, AverageMeter,clip_gradient,accuracy
from nltk.translate.bleu_score import corpus_bleu
from modelAttention import Encoder,Encoder_features


# --- Configuration and model/dataloader setup for a COCO captioning validation pass ---

data_folder = './data'  # folder with data files saved by create_input_files.py
data_name = 'coco_5_cap_per_img_5_min_word_freq'  # base name shared by data files

# ImageNet channel statistics — standard normalization for pretrained CNN encoders.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Encoder is used for feature extraction only; keep its pretrained weights frozen.
fine_tune_encoder = False
encoder = Encoder().to(device)
encoder.fine_tune(fine_tune_encoder)

# Keep dataset and loader under distinct names (the original rebinding of
# `val_loader_images` from Dataset to DataLoader made the dataset unreachable
# and the code harder to follow).
val_dataset_images = CaptionDataset_att(
    data_folder, data_name, 'VAL',
    transform=transforms.Compose([normalize]))
val_loader_images = torch.utils.data.DataLoader(
    val_dataset_images, batch_size=50, shuffle=False,
    num_workers=0, pin_memory=True)

val_dataset_features = CaptionDataset_att_features(
    data_folder, data_name, 'VAL',
    transform=transforms.Compose([normalize]))
val_loader_features = torch.utils.data.DataLoader(
    val_dataset_features, batch_size=50, shuffle=False,
    num_workers=0, pin_memory=True)

# Validation pass: encode each image batch and print the resulting feature maps.
# eval() puts BatchNorm/Dropout layers into inference mode — without it a
# ResNet-style encoder would keep updating running statistics during validation.
encoder.eval()
# no_grad() prevents autograd from building (and retaining) a graph for every
# batch — pure inference here, so gradients are never needed.
with torch.no_grad():
    for i, (images, captions, caplens, *other) in enumerate(val_loader_images):
        images = images.to(device)
        captions = captions.to(device)
        caplens = caplens.to(device)
        images = encoder(images)
        print("images:", images)
