import torch
import skimage.io as io
import clip
from PIL import Image
import pickle
import json
import os
from tqdm import tqdm
import argparse
import sys
from torch.utils.data import Dataset, DataLoader

from typing import Tuple

from torchinfo import summary
from transformers import GPT2Tokenizer


# def main(clip_model_type: str):
#     # device = torch.device('cuda:0')
#     device = torch.device('mps')
#     clip_model_name = clip_model_type.replace('/', '_')
#     out_path = f"./data/coco/oscar_split_{clip_model_name}_train.pkl"
#     # preprocess: the image-preprocessing function matched to this model; it converts an input image into the format the model expects.
#     clip_model, preprocess = clip.load(clip_model_type, device=device, jit=False)
#     print(summary(clip_model))
#     with open('./data/coco/annotations/train_caption.json', 'r') as f:
#         data = json.load(f)
#     print("%0d captions loaded from json " % len(data))
#     all_embeddings = []
#     all_captions = []
#     for i in tqdm(range(len(data))):
#         d = data[i]
#         img_id = d["image_id"]
#         filename = f"./data/coco/train2014/COCO_train2014_{int(img_id):012d}.jpg"
#         if not os.path.isfile(filename):
#             filename = f"./data/coco/val2014/COCO_val2014_{int(img_id):012d}.jpg"
#         image = io.imread(filename)
#         image = preprocess(Image.fromarray(image)).unsqueeze(0).to(device)
#         with torch.no_grad():
#             prefix = clip_model.encode_image(image).cpu()
#         d["clip_embedding"] = i
#         all_embeddings.append(prefix)
#         all_captions.append(d)
#         if (i + 1) % 10000 == 0:
#             with open(out_path, 'wb') as f:
#                 pickle.dump({"clip_embedding": torch.cat(all_embeddings, dim=0), "captions": all_captions}, f)

#     with open(out_path, 'wb') as f:
#         pickle.dump({"clip_embedding": torch.cat(all_embeddings, dim=0), "captions": all_captions}, f)

#     print('Done')
#     print("%0d embeddings saved " % len(all_embeddings))
#     return 0


# if __name__ == '__main__':
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--clip_model_type', default="ViT-B/32", choices=('RN50', 'RN101', 'RN50x4', 'ViT-B/32'))
#     args = parser.parse_args()
#     exit(main(args.clip_model_type))

def get_image_clip_embed(args):
    """Precompute CLIP image embeddings for the COCO training captions.

    For each caption entry in ``train_caption.json``, loads the referenced
    train2014 image (falling back to val2014), encodes it with CLIP, and
    records the embedding's row index on the caption dict as ``embed_id``.
    The result is pickled to ``./data/coco/oscar_split_<model>_train.pkl``
    as ``{"clip_embedding": Tensor[N, D], "captions": list[dict]}``.

    Args:
        args: argparse Namespace with ``device`` (e.g. 'cpu'/'cuda'/'mps')
            and ``clip_model_type`` (e.g. 'ViT-B/32').
    """
    device = args.device
    clip_model_name = args.clip_model_type.replace('/', '-')
    out_file = f'./data/coco/oscar_split_{clip_model_name}_train.pkl'
    model, preprocess = clip.load(args.clip_model_type, device=device, jit=False)
    with open('./data/coco/annotations/train_caption.json', 'r') as f:
        caption = json.load(f)

    print(f'{len(caption)} data are readed!')
    all_embeds = []
    all_captions = []
    for i in tqdm(range(len(caption))):
        one_caption = caption[i]
        image_id = one_caption['image_id']
        file_name = f'./data/coco/train2014/COCO_train2014_{int(image_id):012d}.jpg'
        if not os.path.isfile(file_name):
            # BUG FIX: the val2014 fallback was dead code behind a bare
            # ``continue``; try it before giving up on this caption.
            file_name = f'./data/coco/val2014/COCO_val2014_{int(image_id):012d}.jpg'
            if not os.path.isfile(file_name):
                continue  # image missing from both splits: skip this caption
        image = io.imread(file_name)
        image = preprocess(Image.fromarray(image)).unsqueeze(0).to(device)
        with torch.no_grad():
            embed = model.encode_image(image).cpu()
        # BUG FIX: ``embed_id`` must be the row index in the concatenated
        # embedding tensor. Using the loop index ``i`` breaks alignment as
        # soon as any caption is skipped, so assign it at append time.
        one_caption['embed_id'] = len(all_embeds)
        all_embeds.append(embed)
        all_captions.append(one_caption)
        # Periodic checkpoint so a crash does not lose all progress. Each
        # dump rewrites the whole file (overwrite-style checkpointing, not
        # incremental appending).
        if (i + 1) % 10000 == 0:
            with open(out_file, 'wb') as f:
                pickle.dump({"clip_embedding": torch.cat(all_embeds, dim=0), "captions": all_captions}, f)
    with open(out_file, 'wb') as f:
        pickle.dump({"clip_embedding": torch.cat(all_embeds, dim=0), "captions": all_captions}, f)
    
    print(f'{len(all_captions)} Embedding done!')



class CocoDs(Dataset):
    """COCO caption dataset: GPT-2 token ids paired with CLIP image embeddings.

    Loads the pickle produced by ``get_image_clip_embed`` and tokenizes every
    caption with a GPT-2 tokenizer. ``__getitem__`` yields
    ``(tokens, mask, prefix)`` where ``tokens`` is a fixed-length int64
    sequence, ``mask`` is a float tensor that is 1.0 over real tokens and
    0.0 over padding, and ``prefix`` is the caption's CLIP embedding.
    """

    def __init__(self, tokenizer_type='gpt2', device='mps',
                 data_path='./data/coco/oscar_split_ViT-b-32_train.pkl'):
        # ``device`` is currently unused; kept for interface compatibility.
        # ``data_path`` generalizes the previously hard-coded pickle path.
        # NOTE(review): the default path says 'ViT-b-32' (lowercase b) while
        # get_image_clip_embed writes 'ViT-B-32' — confirm the actual file name.
        with open(data_path, 'rb') as f:
            all_data = pickle.load(f)
        all_embeds = all_data['clip_embedding']
        all_captions = all_data['captions']
        self.prefix, self.tokens = [], []
        self.tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_type)
        for one_caption in all_captions:
            # ``embed_id`` indexes the caption's row in the embedding tensor.
            self.prefix.append(all_embeds[one_caption['embed_id']])
            token = torch.tensor(self.tokenizer.encode(one_caption['caption']),
                                 dtype=torch.int64)
            self.tokens.append(token)
        # Cap the padded length at mean + 10*std so a handful of extremely
        # long outlier captions cannot dominate the padding size.
        all_tokenlen = torch.tensor([len(token) for token in self.tokens]).float()
        self.max_tokenlen = min(int(all_tokenlen.mean() + all_tokenlen.std() * 10),
                                int(all_tokenlen.max()))

    def __len__(self):
        return len(self.tokens)

    def __getitem__(self, index):
        token, mask = self.pad_seq(index)
        prefix = self.prefix[index]
        return token, mask, prefix

    def pad_seq(self, index):
        """Pad or truncate sequence ``index`` to exactly ``self.max_tokenlen``.

        Returns:
            (tokens, mask): int64 tokens with padding positions set to 0, and
            a float mask (1.0 for real tokens, 0.0 for padding), mirroring
            ``ClipCocoDataset.pad_tokens``.
        """
        token = self.tokens[index]
        padding = self.max_tokenlen - len(token)
        if padding > 0:
            # BUG FIX: pad in int64 — the old float ``torch.zeros`` silently
            # promoted the whole token tensor to float32.
            token = torch.cat([token, torch.zeros(padding, dtype=torch.int64) - 1])
        elif padding < 0:
            # BUG FIX: truncate — the old code raised on sequences longer than
            # max_tokenlen (negative size passed to torch.zeros).
            token = token[:self.max_tokenlen]
        mask = token.ge(0)
        # BUG FIX: replace the -1 padding sentinel with 0 so every id is a
        # valid embedding index (the old code fed -1 ids to the model).
        token = token.masked_fill(~mask, 0)
        return token, mask.float()


class ClipCocoDataset(Dataset):
    """COCO caption dataset for ClipCap training.

    Loads the pickle produced by ``get_image_clip_embed`` and serves
    ``(tokens, mask, prefix)`` triples: fixed-length int64 GPT-2 token ids,
    a float mask covering ``prefix_length`` prefix positions plus the real
    tokens, and the caption's CLIP embedding.
    """

    def __len__(self) -> int:
        return len(self.captions_tokens)

    def pad_tokens(self, item: int):
        """Pad/truncate caption ``item`` to ``self.max_seq_len``.

        The padded tensor (with -1 sentinels) is cached back into
        ``self.captions_tokens`` so later epochs skip the concatenation.

        Returns:
            (tokens, mask): int64 tokens with padding zeroed, and a float
            mask of length ``prefix_length + max_seq_len`` (prefix positions
            are always 1).
        """
        tokens = self.captions_tokens[item]
        padding = self.max_seq_len - tokens.shape[0]
        if padding > 0:
            tokens = torch.cat((tokens, torch.zeros(padding, dtype=torch.int64) - 1))
            self.captions_tokens[item] = tokens
        elif padding < 0:
            tokens = tokens[:self.max_seq_len]
            self.captions_tokens[item] = tokens
        mask = tokens.ge(0)  # mask is zero where we are out of sequence
        # BUG FIX: zero the padding OUT-OF-PLACE. The old in-place
        # ``tokens[~mask] = 0`` mutated the cached tensor, destroying the -1
        # sentinel, so on the second access to the same item ``ge(0)`` was
        # True everywhere and the mask wrongly covered the padding.
        tokens = tokens.masked_fill(~mask, 0)
        mask = mask.float()
        mask = torch.cat((torch.ones(self.prefix_length), mask), dim=0)  # adding prefix mask
        return tokens, mask

    def __getitem__(self, item: int) -> Tuple[torch.Tensor, ...]:
        tokens, mask = self.pad_tokens(item)
        prefix = self.prefixes[self.caption2embedding[item]]
        if self.normalize_prefix:
            prefix = prefix.float()
            prefix = prefix / prefix.norm(2, -1)
        return tokens, mask, prefix

    def __init__(self, data_path: str, prefix_length: int, gpt2_type: str = "gpt2",
                 normalize_prefix=False):
        """Load embeddings/captions from ``data_path`` (a ``.pkl`` file).

        Tokenized captions are cached next to the data file as
        ``<data_path minus .pkl>_tokens.pkl`` and reused on later runs.
        """
        self.tokenizer = GPT2Tokenizer.from_pretrained(gpt2_type)
        self.prefix_length = prefix_length
        self.normalize_prefix = normalize_prefix
        with open(data_path, 'rb') as f:
            all_data = pickle.load(f)
        print("Data size is %0d" % len(all_data["clip_embedding"]))
        sys.stdout.flush()
        self.prefixes = all_data["clip_embedding"]
        captions_raw = all_data["captions"]
        self.image_ids = [caption["image_id"] for caption in captions_raw]
        self.captions = [caption['caption'] for caption in captions_raw]
        if os.path.isfile(f"{data_path[:-4]}_tokens.pkl"):
            with open(f"{data_path[:-4]}_tokens.pkl", 'rb') as f:
                self.captions_tokens, self.caption2embedding, self.max_seq_len = pickle.load(f)
        else:
            self.captions_tokens = []
            self.caption2embedding = []
            max_seq_len = 0
            for caption in captions_raw:
                self.captions_tokens.append(torch.tensor(self.tokenizer.encode(caption['caption']), dtype=torch.int64))
                self.caption2embedding.append(caption["embed_id"])
                max_seq_len = max(max_seq_len, self.captions_tokens[-1].shape[0])
            with open(f"{data_path[:-4]}_tokens.pkl", 'wb') as f:
                pickle.dump([self.captions_tokens, self.caption2embedding, max_seq_len], f)
        # Guard against extreme outliers: a few very long token sequences
        # should not dictate the padded length, so cap it at mean + 10*std
        # (this also overrides any max_seq_len loaded from the cache above).
        all_len = torch.tensor([len(self.captions_tokens[i]) for i in range(len(self))]).float()
        self.max_seq_len = min(int(all_len.mean() + all_len.std() * 10), int(all_len.max()))



if __name__ == '__main__':
    # CLI entry point: parse options and run the embedding-extraction pass.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--clip_model_type', type=str, default='ViT-B/32',
                            choices=('RN50', 'RN101', 'RN50x4', 'ViT-B/32'))
    arg_parser.add_argument('--device', default='cpu',
                            choices=('cpu', 'cuda', 'mps'))
    get_image_clip_embed(arg_parser.parse_args())


    
    # dataset1 = CocoDs()
    # for i, (tokens, mask, prefix) in enumerate(dataset1):
    #     print(prefix.shape)
    #     print(mask.shape)
    #     print(tokens.shape)
    #     break
    # dataset = ClipCocoDataset('./data/coco/oscar_split_ViT-B-32_train.pkl', prefix_length=10)
    # for i, (tokens, mask, prefix) in enumerate(dataset):
    #     print(prefix.shape)
    #     print(mask.shape)
    #     print(tokens.shape)
    #     break



