import h5py
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
import os
import pickle
import numpy as np
import torchvision.models as models
from utils import *
from datasets import *
import torchvision.transforms as transforms
from torch import nn
from tqdm import tqdm
from modelAttention import Encoder

# Use the GPU when available; the model and all batches below are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class FeatureExtractor(nn.Module):
    """Frozen ResNet-152 backbone that maps images to spatial feature maps.

    The classifier head (global average pool + fc) is removed, so the output
    keeps its spatial layout (2048 channels); all backbone weights are frozen
    because this script only extracts features.
    """

    def __init__(self, pretrained=True):
        """
        :param pretrained: load ImageNet-pretrained weights; defaults to True,
            matching the previously hard-coded behavior.
        """
        super(FeatureExtractor, self).__init__()
        resnet = models.resnet152(pretrained=pretrained)

        # Drop the final average-pool and fully-connected layers so forward()
        # returns a spatial feature map instead of classification logits.
        modules = list(resnet.children())[:-2]
        self.resnet = nn.Sequential(*modules)
        # Freeze every parameter: no gradients are needed for extraction.
        for p in self.resnet.parameters():
            p.requires_grad = False

    def forward(self, images):
        """
        Extract convolutional features.

        :param images: image tensor of shape (batch_size, 3, image_size, image_size)
        :return: encoded feature map with 2048 channels
            (e.g. (batch_size, 2048, 8, 8) for the inputs used in main —
            presumably 256x256 images; verify against the dataset)
        """
        return self.resnet(images)

def main(args):
    """Extract frozen ResNet-152 features for every image in the TRAIN split
    and append them batch-by-batch to an HDF5 file.

    :param args: parsed CLI namespace; ``args.batch_size`` (when provided)
        overrides the default loader batch size of 200. Previously ``args``
        was accepted but silently ignored.
    """
    data_folder = './data'  # folder with data files saved by create_input_files.py
    data_name = 'coco_5_cap_per_img_5_min_word_freq'  # base name shared by data files
    # Honor --batch_size when given; fall back to the old hard-coded 200.
    batch_size = getattr(args, 'batch_size', None) or 200

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    model = FeatureExtractor().to(device)
    model.eval()

    # Load the dataset (images only; no captions needed for feature extraction)
    full_val_dataset = CaptionDataset_att_onlyimg(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize]))
    print("len(full_val_dataset):", len(full_val_dataset))

    # Create DataLoader; shuffle=False so feature order matches dataset order.
    data_loader = DataLoader(full_val_dataset, batch_size=batch_size, shuffle=False, num_workers=0, pin_memory=True)

    # Open an HDF5 file to save features (built from data_folder for consistency;
    # resolves to the same ./data directory as before)
    feature_path = os.path.join(data_folder, "features_test.hdf5")
    with h5py.File(feature_path, 'w') as h5f:
        # Resizable dataset: start empty, grow along axis 0 as batches arrive.
        # Per-image feature maps are (2048, 8, 8) — presumably 256x256 inputs
        # through the headless ResNet-152; confirm against the dataset images.
        h5f.create_dataset('image_features', shape=(0, 2048, 8, 8), maxshape=(None, 2048, 8, 8), dtype='float32')

        # no_grad avoids building autograd graphs during pure inference,
        # cutting memory use even though the backbone is frozen.
        with torch.no_grad(), tqdm(total=len(data_loader), desc="Epoch 1/1") as pbar:
            for images in data_loader:
                images = images.to(device)
                output_np = model(images).cpu().numpy()

                # Grow the dataset and write the new batch at the end.
                n_new = output_np.shape[0]
                h5f['image_features'].resize(h5f['image_features'].shape[0] + n_new, axis=0)
                h5f['image_features'][-n_new:] = output_np
                pbar.update(1)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Extract ResNet-152 features into an HDF5 file.")
    parser.add_argument("--pretrained", type=str, help="vgg/resnet")
    # Default matches the batch size the script has always used, so omitting
    # the flag preserves the original behavior.
    parser.add_argument("--batch_size", type=int, default=200, help="DataLoader batch size (default: 200)")
    args = parser.parse_args()
    main(args)