import torch
import torchvision.transforms as transforms
# from PIL import Image
import os
import numpy as np
from torch.utils.data import Dataset,DataLoader
#my own
from torch_dataset import GameDataset
from sklearn.decomposition import PCA
from tqdm import tqdm

# Spatial size (pixels) every image is resized to before feature extraction.
# (Was assigned twice in the original; the duplicate assignment is removed.)
IMG_WIDTH, IMG_HEIGHT = 128, 128
# Root folder holding the raw dataset images.
IMG_FOLDER = r'./datasets/imgs'


# Preprocessing pipeline applied to every dataset image:
# resize -> tensor -> channel-wise normalize.  The mean/std values are the
# standard ImageNet statistics expected by torchvision pretrained backbones.
dataset_transform = transforms.Compose([
        transforms.Resize((IMG_WIDTH, IMG_HEIGHT)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])


def save_each_img_feature(train_dataset, model, imgs_all_path):
    """Run ``model`` over every batch of ``train_dataset`` and persist each
    image's flattened feature vector next to its source file.

    Args:
        train_dataset: iterable yielding image tensors (in ``__main__`` this
            is a DataLoader with batch_size=1, so each item is one batch).
        model: feature-extraction network (caller is expected to have put it
            in eval mode).
        imgs_all_path: list of source image paths aligned with the iteration
            order of ``train_dataset``; used to derive each output path.
    """
    import PIL.PngImagePlugin
    # Raise PIL's PNG text-chunk memory cap (~100x the default); with many
    # images the default limit aborts loading with a text-chunk memory error.
    PIL.PngImagePlugin.MAX_TEXT_MEMORY = 6710886400
    print("PIL.PngImagePlugin.MAX_TEXT_MEMORY", PIL.PngImagePlugin.MAX_TEXT_MEMORY)

    # Bug fix: the model may live on GPU (it is created with .cuda() in
    # __main__) while DataLoader batches arrive on CPU; move each batch to
    # the model's device before the forward pass.
    try:
        device = next(model.parameters()).device
    except (AttributeError, StopIteration):
        device = None  # parameter-less callable: leave inputs where they are

    # Inference only: no_grad avoids building autograd graphs (saves memory).
    with torch.no_grad():
        for i, img in enumerate(tqdm(train_dataset)):
            if device is not None:
                img = img.to(device)
            encoded = model(img)
            feature = encoded.cpu().detach().numpy()
            feature = feature.flatten()
            if i == 0:
                print("feature.shape", feature.shape)
            save_feature(feature, imgs_all_path[i], outdir_name='features')

def save_feature(feature, img_in_path, outdir_name='features'):
    """Save a feature vector as ``.npy`` alongside its source image.

    The array is written to ``<dirname>/<outdir_name>/<stem>.npy`` where
    ``<stem>`` is the image filename without its extension (``np.save``
    appends the ``.npy`` suffix itself).

    Args:
        feature: numpy array to persist.
        img_in_path: path of the source image the feature was computed from.
        outdir_name: subdirectory (created if missing) receiving the file.
    """
    dirname, filename = os.path.split(img_in_path)
    out_dir = os.path.join(dirname, outdir_name)
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(out_dir, exist_ok=True)
    # Bug fix: str.replace(".jpg", "") removed every ".jpg" occurrence in the
    # name and left other extensions (.png, ...) intact, producing names like
    # "img.png.npy"; splitext strips exactly the final extension, any type.
    stem = os.path.splitext(filename)[0]
    save_path = os.path.join(out_dir, stem)
    print(save_path)
    np.save(save_path, feature)



if __name__=="__main__":

    # SwAV self-supervised ResNet-50 (2x width) used as the feature extractor.
    # NOTE(review): .cuda() assumes a CUDA device is available — fails otherwise.
    rn50w2 = torch.hub.load('facebookresearch/swav', 'resnet50w2').cuda()
    # rn50w4 = torch.hub.load('facebookresearch/swav', 'resnet50w4')
    # rn50w5 = torch.hub.load('facebookresearch/swav', 'resnet50w5')
    # Load the data

    # num_img=-1: presumably "use all images" — verify against GameDataset.
    game_data = GameDataset(IMG_FOLDER,num_img=-1, transform=dataset_transform)
    # Paths aligned with dataset iteration order; consumed by save_each_img_feature.
    imgs_all_path=game_data.img_paths
    # batch_size=1 / shuffle=False keeps batches index-aligned with imgs_all_path.
    train_dataloader = DataLoader(game_data, batch_size=1, shuffle=False)
    # Peek at one batch to sanity-check the tensor shape before the full run.
    train_features = next(iter(train_dataloader))
    print(f"Feature batch shape: {train_features.size()}")
    # Inference
    rn50w2.eval()
    # NOTE(review): the DataLoader yields CPU tensors while the model is on
    # GPU — confirm the device transfer is handled downstream.


    ###################
    save_each_img_feature(train_dataloader,rn50w2,imgs_all_path)
