import torch
from transformers import BertTokenizer
from transformers import GPT2Tokenizer
import random

#
# https://huggingface.co/gpt2
"""
from transformers import GPT2Tokenizer, GPT2Model
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
"""
# Load locally cached tokenizers (paths are relative to the project root;
# these directories must contain the saved tokenizer files).
bert_tokenizer = BertTokenizer.from_pretrained("code/models/bert")
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("code/models/gpt2")
# token-string -> token-id mappings taken straight from each vocabulary
word2id_bert = bert_tokenizer.get_vocab()
word2id_gpt2 = gpt2_tokenizer.get_vocab()

# inverted token-id -> token-string mappings, used to render readable token lists
id2word_bert = {word2id_bert[key]: key for key in word2id_bert}
id2word_gpt2 = {word2id_gpt2[key]: key for key in word2id_gpt2}


def make_error_data(encoded_input):
    """Build a negative ("shuffled") sample from a BERT-encoded input.

    Tokens with ids below 999 (the bracketed special entries in the BERT
    vocab: [CLS], [SEP], [PAD], [unused##], ...) stay in place; all other
    token positions are randomly permuted among themselves.

    Args:
        encoded_input: dict-like with "input_ids" (1 x seq_len long tensor),
            "token_type_ids" and "attention_mask".

    Returns:
        A dict mirroring ``encoded_input`` plus "word_list" (token strings
        for the shuffled sequence) and "label" = 0 (incorrect order).
    """
    ids = encoded_input["input_ids"][0].tolist()

    # Positions that are allowed to move (non-special tokens).
    movable = [i for i, tok in enumerate(ids) if tok >= 999]
    shuffled = movable[:]
    random.shuffle(shuffled)
    # Map each movable position to the position it draws its token from.
    # Unlike the previous negative-index trick, this is robust to special
    # tokens occurring anywhere in the sequence (and to position 0).
    source = dict(zip(movable, shuffled))

    new_res = [ids[source.get(i, i)] for i in range(len(ids))]
    _word_list = [id2word_bert[tok_id] for tok_id in new_res]

    return {
        # Keep integer dtype: input_ids must be long for embedding lookup
        # (torch.Tensor(...) would silently produce float32).
        "input_ids": torch.tensor(new_res, dtype=torch.long).unsqueeze(0),
        "token_type_ids": encoded_input["token_type_ids"],
        "attention_mask": encoded_input["attention_mask"],
        "word_list": _word_list,
        "label": 0,  # 0 marks a corrupted (shuffled) sample
    }


# print(id2word_bert)
# token ids below 999 are all bracketed special tokens ([CLS], [SEP], [PAD], [unused##], ...)

def encode_by_gpt2(tokenizer=gpt2_tokenizer, word2id=word2id_gpt2, id2word=id2word_gpt2,
                   text="Replace me by any text you'd like."):
    """Tokenize *text* with a GPT-2 tokenizer.

    Args:
        tokenizer: the tokenizer to run (defaults to the module-level GPT-2 one).
        word2id: kept for signature compatibility; not used here.
        id2word: id -> token-string mapping used to render the token list.
        text: the text to encode.

    Returns:
        (encoded_input, vocab_list): the tensor encoding and the token strings.
    """
    # Bug fix: honor the `tokenizer` argument instead of always using the
    # module-level `gpt2_tokenizer`.
    encoded_input = tokenizer(text, return_tensors='pt')
    vocab_list = [id2word[tok_id] for tok_id in encoded_input["input_ids"].tolist()[0]]
    return encoded_input, vocab_list


def encode_by_bert(tokenizer=bert_tokenizer, word2id=word2id_bert, id2word=id2word_bert,
                   text="Replace me by any text you'd like."):
    """Tokenize *text* with BERT and attach the token strings plus a positive label.

    Returns the tokenizer's encoding, extended with "word_list" (readable
    tokens) and "label" = 1 (correctly ordered sample; the shuffled negative
    variants are produced separately for testing).
    """
    # Run the tokenizer over the input text.
    encoded_input = tokenizer(text, return_tensors='pt')
    # Render each token id back to its string form.
    tokens = []
    for tok_id in encoded_input["input_ids"].tolist()[0]:
        tokens.append(id2word[tok_id])
    encoded_input.update({
        "word_list": tokens,
        "label": 1
    })
    return encoded_input


def make_single_dataset(text, n_errors=9):
    """Build a tiny dataset from one sentence: 1 correct + *n_errors* shuffled.

    Args:
        text: the sentence to encode.
        n_errors: number of shuffled (negative) variants to generate
            (default 9, preserving the original behavior).

    Returns:
        A list of encoding dicts; the first carries label 1 (correct order),
        the remaining ``n_errors`` carry label 0 (shuffled).
    """
    right_data = encode_by_bert(bert_tokenizer, word2id_bert, id2word_bert, text)
    datas = [right_data]
    for _ in range(n_errors):
        datas.append(make_error_data(right_data))
    return datas



#
# lst = [101, 5672, 2033, 2011, 2151, 3793, 2017, 1005, 1040, 2066, 1012, 102]
