import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torchvision.transforms.functional import to_pil_image
from tqdm import tqdm



class ImageEncoder(nn.Module):
    """Extract spatial CNN features from images with a pretrained ResNet-152.

    The final average-pool and fully-connected layers are stripped, so the
    encoder outputs the last convolutional feature map reshaped into a
    sequence of per-location feature vectors: B x (H*W) x C.
    For the intended 224x224 input this is B x 49 x 2048.
    """

    def __init__(self):
        super(ImageEncoder, self).__init__()
        # NOTE(review): `pretrained=True` is deprecated in newer torchvision
        # (replaced by `weights=...`); kept for compatibility with the
        # torchvision version this file targets — confirm before upgrading.
        model = torchvision.models.resnet152(pretrained=True)
        # Drop avgpool + fc (last two children), keeping only the conv
        # backbone, whose output is B x 2048 x H x W.
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)

    def forward(self, x):
        """Return features of shape B x (H*W) x C for an image batch x.

        Generalized from the original hard-coded `view(-1, 49, 2048)`:
        `reshape` with runtime sizes produces identical output for 224x224
        inputs but also works for any other input resolution, and is safe
        on the non-contiguous tensor produced by `permute`.
        """
        out = self.model(x)            # B x C x H x W
        out = out.permute(0, 2, 3, 1)  # B x H x W x C
        return out.reshape(out.size(0), -1, out.size(-1))  # B x (H*W) x C



if __name__ == "__main__":

    # Fall back to CPU when CUDA is absent instead of crashing on cuda:0.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = ImageEncoder().to(device)
    # eval() is essential here: in train mode the ResNet BatchNorm layers
    # would normalize with single-image batch statistics instead of the
    # pretrained running statistics, corrupting the extracted features.
    net.eval()

    transform = transforms.Compose(
        [
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ])

    path = "/mnt/ssd/Datasets_/t4sa/data_pro/train_all.npy"
    data = np.load(path, allow_pickle=True).tolist()

    out_dir = "/mnt/ssd/Datasets_/t4sa/data_pro/img_2048_49/train"
    os.makedirs(out_dir, exist_ok=True)

    # Resume offset: this run (re)processes only item 219316 — presumably a
    # previously failed entry; widen the slice to reprocess a larger range.
    START = 219316
    # Inference only: no_grad avoids building autograd graphs and keeps the
    # saved feature tensors free of grad history.
    with torch.no_grad():
        for idx, item in tqdm(enumerate(data[START:START + 1])):
            # item[1] is assumed to be a path relative to the dataset root
            # — TODO confirm against the .npy layout.
            img_path = "/mnt/ssd/Datasets_/t4sa/" + item[1]
            img = Image.open(img_path).convert("RGB")
            img_224 = transform(img).unsqueeze(0).to(device)
            out = net(img_224)
            torch.save(out.to("cpu"), "{}/{}.pth".format(out_dir, idx + START))
        

    