#encoding=utf-8
from os import path
import pandas as pd
from PIL import Image,ImageFile
import numpy as np
from torch import nn
import torch
import random
from torchvision import models, transforms
import torchvision
import pickle as pkl
from transformers import XLNetModel, XLNetTokenizer
from tqdm import tqdm
ImageFile.LOAD_TRUNCATED_IMAGES = True

class DataProcess():
    """Preprocess a Fakeddit-style TSV dataset into model-ready feature arrays.

    For each row of the TSV this extracts:
      * a contextual text embedding from a pretrained XLNet encoder, and
      * a 4096-d image feature vector from VGG19 (classifier truncated to
        its first Linear layer).

    Assumes the TSV has an 'index' column plus (at least) label at column 0,
    image filename at column 1 and text at column 2 — TODO confirm against
    the actual Fakeddit schema.
    """

    def __init__(self, tsv_path, img_path, div_data=1) -> None:
        """Load the TSV and set up the feature extractors.

        Args:
            tsv_path: path to the tab-separated data file (indexed by 'index').
            img_path: directory containing the images referenced by the TSV.
            div_data: keep only the first 1/div_data fraction of rows
                (div_data=1 keeps everything).
        """
        self.df_data = pd.read_csv(tsv_path, sep='\t', header=0, index_col='index')
        # Optionally subsample the data for quicker experiments.
        self.df_data = self.df_data[:int(self.df_data.shape[0] / div_data)]
        self.img_path = img_path

        # Fall back to CPU when no GPU is available instead of crashing.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # VGG19 as an image feature extractor: keep only the first Linear
        # layer of the classifier, yielding a 4096-d vector per image.
        # NOTE: `pretrained=True` is deprecated in newer torchvision; kept
        # for compatibility with the version this project pins.
        self.vgg19 = models.vgg19(pretrained=True)
        self.vgg19.classifier = nn.Sequential(*list(self.vgg19.classifier.children())[:1])
        self.vgg19.eval()  # inference mode: disable dropout for deterministic features

        self.tokenizer = XLNetTokenizer.from_pretrained('xlnet/xlnet-base-cased')
        model = XLNetModel.from_pretrained('xlnet/xlnet-base-cased')
        self.model = model.to(self.device)

        # Standard ImageNet preprocessing matching the VGG19 pretrained weights.
        self.image_transform = torchvision.transforms.Compose(
            [
                torchvision.transforms.Resize(size=(224, 224)),
                torchvision.transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )

    def readImg(self, path):
        """Open an image, force RGB mode, and apply the VGG19 transform.

        Returns a (3, 224, 224) float tensor.
        """
        image = Image.open(path)
        if image.mode != "RGB":
            # Some dataset images are grayscale/RGBA; VGG19 needs 3 channels.
            image = image.convert("RGB")
        return self.image_transform(image)

    def getTextTokenize(self, words):
        """Embed a text string with XLNet.

        Tokenizes to exactly 50 tokens (padding/truncating as needed) and
        returns the last hidden state as a (50, hidden_size) numpy array.
        """
        # `padding='max_length'` replaces the deprecated
        # `pad_to_max_length=True` argument of older transformers releases.
        input_ids = torch.tensor(
            self.tokenizer.encode(words, padding='max_length', truncation=True, max_length=50)
        ).unsqueeze(0)
        input_ids = input_ids.to(self.device)
        with torch.no_grad():
            tempEmbedding = self.model(input_ids)
        return tempEmbedding.last_hidden_state.squeeze().cpu().numpy()

    def getdata(self):
        """Iterate all rows and extract (text, image, label) feature arrays.

        Returns:
            text_data:  (N, 50, hidden_size) XLNet embeddings.
            img_data:   (N, 4096) VGG19 features.
            label_data: (N,) integer labels from column 0 of the TSV.
        """
        text_data, img_data, label_data = [], [], []

        for idx, data in tqdm(self.df_data.iterrows(), total=len(self.df_data)):
            # Column 2 holds the post text — TODO confirm column order.
            text_ori = data.iloc[2]
            text_data.append(self.getTextTokenize(text_ori))

            # Column 1 holds the image filename relative to img_path.
            img_path = path.join(self.img_path, data.iloc[1])
            img_npy = self.readImg(img_path).unsqueeze(0)

            with torch.no_grad():
                img = self.vgg19(img_npy)
            img_data.append(img.squeeze().numpy())

            # Column 0 holds the class label.
            label_data.append(data.iloc[0])

        return np.asarray(text_data), np.asarray(img_data), np.asarray(label_data)

if __name__ == "__main__":
    # Run feature extraction over the test split and report output shapes.
    processor = DataProcess("/root/autodl-tmp/fakeddit/test.tsv", "/root/autodl-tmp/fakeddit/")
    texts, images, labels = processor.getdata()
    print(texts.shape, images.shape, labels.shape)