import os
import pickle
import random

import nltk
import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.utils import data
from torchvision import transforms
from transformers import BertTokenizer, BertModel

from mymodel import bert_model_path
# from mymodel import bert_model_path, model_config
from arguments import getparser
from vocab import deserialize_vocab

tokenizer = BertTokenizer.from_pretrained(bert_model_path)  # load the tokenizer from the pretrained BERT vocabulary


# class ImgDataUnique(data.Dataset):
#     def __init__(self, opts, split, vocab=None, transform=None):
#         super().__init__()
#         self.dataset_name = opts.data_name
#         self.path = opts.data_path
#         self.split = split
#         # self.vocab = vocab  # 语料库
#         # self.origin_txt = []  # 唯一的文本
#         # self.tokenized_txt = []  # 唯一的token化的文本
#         self.l_map = None  # 标签,pandas
#
#     def __getitem__(self, index):
#         pass
#
#     def __len__(self):
#         pass


class MyDataset(data.Dataset):
    """Image-text retrieval dataset over precomputed region features.

    Expects the following files under ``opts.data_path``:
      - ``{split}_caps.txt``: one caption per line
      - ``{split}_map.csv``: integer id-mapping table; from the indexing
        below the columns appear to be
        [table_img_id, img_id, txt_id, table_txt_id] -- TODO confirm
        against the data-generation code
      - ``{split}_ims.npy``: precomputed Faster R-CNN region features,
        indexed by table_img_id
    """

    def __init__(self, opts, split, vocab=None, transform=None, train=True, ):
        self.dataset_name = opts.data_name
        self.path = opts.data_path
        self.bs = opts.batch_size
        self.train = train
        self.split = split
        self.vocab = vocab  # vocabulary: callable mapping word -> id

        self.origin_txt = []  # unique raw captions (lowercased)
        self.tokenized_txt = []  # unique tokenized captions (currently unused)
        self.l_map = None  # id-mapping table (numpy int32 array)

        cap_txt = os.path.join(self.path, f'{self.split}_caps.txt')
        label_csv = os.path.join(self.path, f'{self.split}_map.csv')

        with open(cap_txt, 'r', encoding='utf8') as f:
            for line in f:
                self.origin_txt.append(line.strip().lower())
        self.l_map = pd.read_csv(label_csv, dtype=np.int32).values

        # Image-id statistics (column 0 = table image id).
        self.img_size = np.unique(self.l_map[:, 0]).shape[0]
        self.min_img_id = np.min(self.l_map[:, 0])
        self.max_img_id = np.max(self.l_map[:, 0])

        # Text-id statistics (column 3 = table text id).
        self.txt_size = np.unique(self.l_map[:, 3]).shape[0]
        self.min_txt_id = np.min(self.l_map[:, 3])
        self.max_txt_id = np.max(self.l_map[:, 3])

        # Precomputed Faster R-CNN region features.
        self.precomp_imgpath = os.path.join(self.path, f'{self.split}_ims.npy')
        self.precomp_img = np.load(self.precomp_imgpath)

        if transform is None:
            # Default raw-image pipeline. NOTE: __getitem__ reads precomputed
            # features and never applies this; kept for compatibility.
            self.transforms = transforms.Compose([
                transforms.Resize([224, 224]),  # resize inputs to a uniform size
                transforms.ToTensor(),
                transforms.Normalize(  # normalize with ImageNet statistics so models converge more easily
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225])  # mean/std estimated by random sampling from the dataset
            ])
        else:
            # BUG FIX: previously a caller-supplied transform was silently
            # dropped and self.transforms was left unset.
            self.transforms = transform

    def __getitem__(self, ind):
        """Return (region_features, caption_ids, ind, table_img_id, table_txt_id)."""
        img_id = self.l_map[ind][1]
        txt_id = self.l_map[ind][2]
        tab_img_id = self.l_map[ind][0]
        tab_txt_id = self.l_map[ind][3]
        origin_t = self.origin_txt[tab_txt_id]
        # Tokenize; word-drop augmentation is enabled while training.
        target = process_caption(self.vocab, origin_t, self.train)

        img = torch.from_numpy(self.precomp_img[tab_img_id])
        if self.train:  # size augmentation: randomly keep ~80% of the region features
            num_features = img.shape[0]
            rand_list = np.random.rand(num_features)
            img = img[np.where(rand_list > 0.20)]
        return img, target, ind, int(tab_img_id), int(tab_txt_id)

    def __len__(self):
        return len(self.l_map)


def process_caption(vocab, caption, drop=False):
    """Convert a caption string into a float tensor of word ids.

    With ``drop=False``: lowercase, nltk word-tokenize, and wrap the ids in
    <start>/<end>. With ``drop=True``: BERT-style word augmentation -- every
    token (including <start>/<end>) independently has a 20% chance of being
    altered: 50% of those become <mask>, 10% a random vocabulary id, and 40%
    are removed from the sequence.
    """
    if not drop:
        words = nltk.tokenize.word_tokenize(caption.lower())
        ids = [vocab('<start>')]
        ids.extend(vocab(w) for w in words)
        ids.append(vocab('<end>'))
        return torch.Tensor(ids)

    tokens = ['<start>']
    tokens.extend(nltk.tokenize.word_tokenize(caption.lower()))
    tokens.append('<end>')

    kept = []
    for tok in tokens:
        prob = random.random()
        if prob >= 0.20:
            kept.append(vocab(tok))
            continue
        prob /= 0.20
        if prob < 0.5:
            # 50% of altered tokens -> mask token
            kept.append(vocab.word2idx['<mask>'])
        elif prob < 0.6:
            # 10% -> a random vocabulary id
            kept.append(random.randrange(len(vocab)))
        else:
            # 40% -> token removed; the original still performed the vocab
            # lookup before discarding, so keep the call for identical traces
            vocab(tok)
    return torch.Tensor(kept)


class Collate:
    """Batch collate: zero-pad variable-length region features and caption ids."""

    def __init__(self, ):
        pass

    def __call__(self, d):
        images, captions, index, tab_img_id, tab_txt_id = zip(*d)

        # Pad image region features up to the longest sample in the batch.
        region_counts = [im.shape[0] for im in images]
        feat_dim = images[0].size(-1)
        all_images = torch.zeros(len(images), max(region_counts), feat_dim)
        for row, im in enumerate(images):
            all_images[row, :region_counts[row]] = im
        img_len = torch.Tensor(region_counts)

        # Pad caption id sequences up to the longest in the batch
        # (ids are cast to long on assignment into the zero tensor).
        txt_len = [cap.shape[0] for cap in captions]
        input_ids = torch.zeros(len(captions), max(txt_len)).long()
        for row, cap in enumerate(captions):
            input_ids[row, :txt_len[row]] = cap

        return (all_images, img_len,
                input_ids, txt_len,
                index, tab_img_id, tab_txt_id)


# def collate_fn(d):
#     img, txt, index, tab_img_id, tab_txt_id = zip(*d)
#     # img = torch.stack(img, 0)
#     # bert
#     # to = tokenizer.batch_encode_plus(batch_text_or_text_pairs=txt, padding=True, max_length=100, truncation=True,
#     #                                  return_tensors="pt", return_length=True)
#     # input_ids = to['input_ids']
#     # lengths = to['length']
#     # RNN
#     # 整合图像区域
#     img_len = [len(image) for image in img]
#     all_images = torch.zeros(len(img), max(img_len), img[0].size(-1))
#     for i, image in enumerate(img):
#         end = img_len[i]
#         all_images[i, :end] = image[:end]
#     img_len = torch.Tensor(img_len)
#     # 整合文本区域
#     txt_len = [len(cap) for cap in txt]
#     error_cap = []
#     for i, cap in enumerate(txt):
#         pos_tag = nltk.pos_tag(cap)
#         NN = [s1 for (s1, s2) in pos_tag if s2 in ['NN', 'NNP', 'NNPS', 'NNS']]
#
#         cap_index = cap.index(random.choice(NN))
#         cap[cap_index] = random.choice(nn_list)
#         error_cap.append(cap)
#
#     input_ids = torch.zeros(len(txt), max(txt_len)).long()
#     for i, cap in enumerate(txt):
#         end = txt_len[i]
#         cap = ca
#         cap_tensor = torch.tensor(cap)
#         input_ids[i, :end] = cap_tensor[:end]
#     # tab_img_id = torch.stack(tab_img_id, 0)
#     # tab_txt_id = torch.stack(tab_txt_id, 0)
#     return all_images, img_len, input_ids, txt_len, index, tab_img_id, tab_txt_id


def get_tokenized_alltxt(labeltext, vocab):
    """Tokenize every caption in *labeltext* into a list of word ids.

    Each caption string is lowercased, word-tokenized with nltk, and
    wrapped in <start>/<end> ids.

    Returns a list of id-lists, one per caption, in input order.
    """
    captions_token = []
    # Iterate captions directly instead of indexing via range(len(...)),
    # and avoid reusing the name `caption` for two different things.
    for caption in labeltext:
        tokens = nltk.word_tokenize(caption.lower())
        ids = [vocab("<start>")]
        ids.extend(vocab(token) for token in tokens)
        ids.append(vocab("<end>"))
        captions_token.append(ids)

    return captions_token


def get_tokenized_txt(one_line, vocab):
    """Map an already-tokenized caption (iterable of tokens) to word ids,
    wrapped in <start>/<end>.

    NOTE(review): *one_line* is iterated as-is -- passing a raw string
    yields per-character ids; callers should pre-tokenize.
    """
    start = [vocab("<start>")]
    body = [vocab(token) for token in one_line]
    end = [vocab("<end>")]
    return start + body + end


def get_dataloader():
    """Placeholder for a DataLoader factory -- not implemented yet (TODO)."""
    pass


# class MyTrain(nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.fc = nn.Linear(768, 512)
#         self.bert = BertModel.from_pretrained(bert_model_path, config=model_config)
#
#     def forward(self, x, attention_mask, lengths):
#         with torch.no_grad():
#             out = self.bert(input_ids=x, attention_mask=attention_mask)
#         return out.last_hidden_state


if __name__ == '__main__':
    # Smoke test: parse CLI options, load the serialized vocabulary, and
    # tokenize a sample sentence (note: a raw string is passed, so
    # get_tokenized_txt maps it per-character -- presumably intentional
    # here only as a quick check).
    opts = getparser()
    # d = MyDataset(opts, 'val', deserialize_vocab(os.path.join(opts.vocab_path, f'{opts.data_name}.json')))
    # print(len(d))
    # i = random.randint(0, len(d))
    # img, txt, index, tab_img_id, tab_txt_id = d[i]
    # # img.show()

    # model = MyTrain()
    # origin_txt = []
    # with open(r'D:\AAExp\Example\used_for_test\val_caps.txt', 'r', encoding='utf8') as f:
    #     for line in f:
    #         origin_txt.append(line.strip().lower())
    # print(origin_txt)
    # l = tokenizer.batch_encode_plus(batch_text_or_text_pairs=origin_txt, padding=True, truncation=True,
    #                                 max_length=100,
    #                                 return_tensors='pt', return_length=True)
    #
    # ids_ = l['input_ids']
    # lengths_ = l['length']
    # attention_mask = l['attention_mask']
    # model1 = model(ids_, attention_mask, lengths_)
    # print(l)

    # token = get_tokenizer()
    # print(txt)
    # # print(t)
    vocab = deserialize_vocab(os.path.join(opts.vocab_path, f'{opts.data_name}.json'))
    txt = get_tokenized_txt('i love all the people.', vocab)
    print(txt)
