from torch.utils.data import Dataset
import torch
import pickle
from typing import Tuple
import os
import sys
from transformers import GPT2Tokenizer



# class ClipCocoDataset(Dataset):

#     def __len__(self) -> int:
#         return len(self.captions_tokens)

#     def pad_tokens(self, item: int):
#         tokens = self.captions_tokens[item]
#         padding = self.max_seq_len - tokens.shape[0]
#         if padding > 0:
#             tokens = torch.cat((tokens, torch.zeros(padding, dtype=torch.int64) - 1))
#             self.captions_tokens[item] = tokens
#         elif padding < 0:
#             tokens = tokens[:self.max_seq_len]
#             self.captions_tokens[item] = tokens
#         mask = tokens.ge(0)  # mask is zero where we out of sequence
#         tokens[~mask] = 0
#         mask = mask.float()
#         mask = torch.cat((torch.ones(self.prefix_length), mask), dim=0)  # adding prefix mask
#         return tokens, mask

#     def __getitem__(self, item: int) -> Tuple[torch.Tensor, ...]:
#         tokens, mask = self.pad_tokens(item)
#         prefix = self.prefixes[self.caption2embedding[item]]
#         if self.normalize_prefix:
#             prefix = prefix.float()
#             prefix = prefix / prefix.norm(2, -1)
#         return tokens, mask, prefix

#     def __init__(self, data_path: str,  prefix_length: int, gpt2_type: str = "gpt2",
#                  normalize_prefix=False):
#         self.tokenizer = GPT2Tokenizer.from_pretrained(gpt2_type)
#         self.prefix_length = prefix_length
#         self.normalize_prefix = normalize_prefix
#         with open(data_path, 'rb') as f:
#             all_data = pickle.load(f)
#         print("Data size is %0d" % len(all_data["clip_embedding"]))
#         sys.stdout.flush()
#         self.prefixes = all_data["clip_embedding"]
#         captions_raw = all_data["captions"]
#         self.image_ids = [caption["image_id"] for caption in captions_raw]
#         self.captions = [caption['caption'] for caption in captions_raw]
#         if os.path.isfile(f"{data_path[:-4]}_tokens.pkl"):
#             with open(f"{data_path[:-4]}_tokens.pkl", 'rb') as f:
#                 self.captions_tokens, self.caption2embedding, self.max_seq_len = pickle.load(f)
#         else:
#             self.captions_tokens = []
#             self.caption2embedding = []
#             max_seq_len = 0
#             for caption in captions_raw:
#                 self.captions_tokens.append(torch.tensor(self.tokenizer.encode(caption['caption']), dtype=torch.int64))
#                 self.caption2embedding.append(caption["clip_embedding"])
#                 max_seq_len = max(max_seq_len, self.captions_tokens[-1].shape[0])
#             # self.max_seq_len = max_seq_len
#             with open(f"{data_path[:-4]}_tokens.pkl", 'wb') as f:
#                 pickle.dump([self.captions_tokens, self.caption2embedding, max_seq_len], f)
#         all_len = torch.tensor([len(self.captions_tokens[i]) for i in range(len(self))]).float()
#         self.max_seq_len = min(int(all_len.mean() + all_len.std() * 10), int(all_len.max()))



class CocoDs(Dataset):
    """COCO caption dataset pairing CLIP prefix embeddings with GPT-2 token ids.

    Each item is ``(token_ids, attention_mask, clip_prefix)``. The mask has
    ``prefix_length`` leading ones so the prefix positions (prepended by the
    model) are always attended to; padded token positions get mask 0.
    """

    def __init__(self, data_path, prefix_length, tokenizer_type='gpt2',
                 normalize_prefix=False, device='mps'):
        """Load pickled CLIP embeddings + captions and tokenize every caption.

        Args:
            data_path: path to a pickle with keys 'clip_embedding' and 'captions';
                each caption dict has 'embed_id' and 'caption'.
            prefix_length: number of prefix positions to mark attended in the mask.
            tokenizer_type: HuggingFace model name for GPT2Tokenizer.
            normalize_prefix: if True, L2-normalize the prefix in __getitem__.
            device: accepted for interface compatibility but unused here.
        """
        with open(data_path, 'rb') as f:
            # NOTE(review): pickle.load executes arbitrary code from the file;
            # only load data files produced by a trusted pipeline.
            all_data = pickle.load(f)
        self.normalize_prefix = normalize_prefix
        self.prefix_length = prefix_length
        all_embeds = all_data['clip_embedding']
        all_captions = all_data['captions']
        self.prefix, self.tokens = [], []
        self.tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_type)
        for one_caption in all_captions:
            self.prefix.append(all_embeds[one_caption['embed_id']])
            token = torch.tensor(self.tokenizer.encode(one_caption['caption']),
                                 dtype=torch.int64)
            self.tokens.append(token)
        # Cap the padded length at mean + 10*std (robust to outlier captions),
        # but never beyond the longest caption actually present.
        all_tokenlen = torch.tensor([len(t) for t in self.tokens]).float()
        self.max_tokenlen = min(int(all_tokenlen.mean() + all_tokenlen.std() * 10),
                                int(all_tokenlen.max()))

    def __len__(self):
        """Number of captions in the dataset."""
        return len(self.tokens)

    def __getitem__(self, index):
        """Return (padded token ids, attention mask, CLIP prefix embedding)."""
        token, mask = self.pad_seq(index)
        prefix = self.prefix[index]
        if self.normalize_prefix:
            prefix = prefix.float()
            prefix = prefix / prefix.norm(2, -1)
        return token, mask, prefix

    def pad_seq(self, index):
        """Pad or truncate sequence `index` to `max_tokenlen`; return (tokens, mask)."""
        token = self.tokens[index]
        padding_size = self.max_tokenlen - len(token)
        if padding_size > 0:
            # Fix: pad with int64 -1 sentinels; a plain torch.zeros(...) would be
            # float32 and silently change the token tensor's dtype after cat.
            pad = torch.full((padding_size,), -1, dtype=torch.int64)
            token = torch.cat([token, pad])
            self.tokens[index] = token  # cache the padded version
        elif padding_size < 0:
            token = token[:self.max_tokenlen]
            # Fix: was `self.tokens = token`, which replaced the whole token
            # list with a single tensor and corrupted the dataset.
            self.tokens[index] = token
        mask = token.ge(0)   # False at the -1 padding positions
        token[~mask] = 0     # replace the sentinel with a valid token id (0)
        mask = mask.float()
        # Prepend ones so the prefix positions are always attended to.
        mask = torch.cat([torch.ones(self.prefix_length), mask], dim=0)
        return token, mask