import torch

from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm

from .model.test_n_1 import Seq2SeqModel
from .model.roberta_model_0 import BertConfig
from .tokenizer import load_chinese_base_vocab, Tokenizer
import io
from lattice.utils_ import Trie, get_skip_path
from Data.lattice.TestLattice import load_cival_rules_rich_pretrain_word_list

w_trie = Trie()


def get_w_tire():
    """Populate the module-level ``w_trie`` with the rich pretrain word list.

    Reads the cached lexicon word list and inserts every word into the
    trie used later for lattice matching (构建词典树).
    """
    word_list = load_cival_rules_rich_pretrain_word_list("Data/lattice/toumu.txt",
                                                         _refresh=False,
                                                         _cache_fp='cache/{}'.format("rules_lattice")
                                                         )
    for word in word_list:
        w_trie.insert(word)

def create_dataset():
    """Load (source, target) sentence pairs and attach lattice lexicon features.

    Returns:
        input_result: list where each entry is
            ``[sentence, match..., [matched_word...]]`` — the raw source
            sentence first, then the surviving ``get_skip_path`` matches,
            then a trailing list of just the matched words (index 2 of
            each match).
        output_result: list of target sentences aligned with input_result.
    """
    get_w_tire()
    input_result = []
    output_result = []
    # Use context managers so the file handles are closed deterministically
    # (the original leaked both handles).
    with io.open("./Data/6000_data_result/question_test", encoding='UTF-8') as target, \
            io.open("./Data/6000_data_result/source_test", encoding='UTF-8') as source:
        for scr, tar in zip(source, target):
            scr = scr.replace("\n", "")
            tar = tar.replace("\n", "")
            # Drop pairs that exceed the model's input length budget.
            if len(scr) + len(tar) > 100:
                continue
            lexicons = get_skip_path(scr, w_trie)
            # Deduplicate matches that share the same word (index 2),
            # keeping the earliest occurrence; iterate backwards so
            # deletion is safe.
            for i in range(len(lexicons) - 1, 0, -1):
                for j in range(i - 1, -1, -1):
                    if lexicons[i][2] == lexicons[j][2]:
                        del lexicons[i]
                        break
            # Append the bare word list, then prepend the raw sentence.
            lexicons.append([x[2] for x in lexicons])
            lexicons.insert(0, scr)
            input_result.append(lexicons)
            output_result.append(tar)
    print(len(input_result))
    return input_result, output_result



class PoemDataset(Dataset):
    """Dataset yielding tokenized source sentences plus per-lexicon
    start/end position features for the lattice model.
    """

    def __init__(self):
        ## Load all data up front, as usual for a map-style Dataset.
        super(PoemDataset, self).__init__()
        # Raw (source-with-lexicons, target) pairs from the test files.
        self.sents_src, self.sents_tgt = create_dataset()
        self.word2idx = load_chinese_base_vocab()
        self.idx2word = {k: v for v, k in self.word2idx.items()}
        self.tokenizer = Tokenizer(self.word2idx)
        # NOTE(review): "pidding" looks like a typo for "padding"; the
        # name is kept because it is instance state other code may read.
        self.pidding_idx = 0
        # print(self.sents_src[:3])

    def deal(self, i):
        """Build the flattened input string and lexicon position features
        for sample *i*.

        Returns:
            src: sentence text with the surviving lexicon words appended.
            relation_position_s / relation_position_e: per-output-token
                start/end positions (1-based into the tokenized sentence,
                ``pidding_idx`` elsewhere).
            l: number of lexicon words attached to the sample.
        """
        src = []
        # Last element of the sample is the list of matched words.
        l = len(self.sents_src[i][-1])
        len_position = self.tokenizer._tokenize(self.sents_src[i][0])

        # Leading pidding_idx accounts for the [CLS]-style prefix token;
        # sentence characters get 1-based positions. TODO confirm.
        relation_position_s = [self.pidding_idx]+list(range(1, len(len_position)+1))
        relation_position_e = [self.pidding_idx]+list(range(1, len(len_position)+1))
        pre_start = 0
        start = 0
        # Skip the trailing word list (handled via `l` above).
        for index in range(len(self.sents_src[i]) - 1):
            temp = self.sents_src[i][index]
            if index == 0:
                # First element is the raw sentence itself.
                src.append(temp)
                continue
            isAppend = 0
            for k in temp[2]:
                flat = len(temp[2])
                if k == temp[2][0]:
                    # First char of the word: locate its start position.
                    try:
                        if pre_start == 0 or pre_start < 0:
                            start = len_position.index(k)+1
                        else:
                            start = len_position.index(k, pre_start)+1
                    except ValueError:
                        # Char not found in the tokenized sentence.
                        break
                    relation_position_s.append(start)
                    relation_position_e.append(self.pidding_idx)
                if k == temp[2][-1]:
                    # Last char of the word: locate its end position.
                    try:
                        if pre_start == 0 or pre_start < 0:
                            end = len_position.index(k)+1
                        else:
                            end = len_position.index(k, pre_start)+1
                            # Reject non-contiguous matches: roll back the
                            # positions appended for this word.
                            if end - start != flat - 1:
                                isAppend = 1
                                for m in range(len(temp[2]) - 1):
                                    relation_position_s.pop(-1)
                                    relation_position_e.pop(-1)
                                break
                        pre_start = end - 2
                    except ValueError:
                        # End char not found: roll back this word's entries.
                        for m in range(len(temp[2])-1):
                            relation_position_s.pop(-1)
                            relation_position_e.pop(-1)
                        break
                    relation_position_e.append(end)
                    relation_position_s.append(self.pidding_idx)
                elif k != temp[2][0] and k != temp[2][-1]:
                    # Interior char: padded positions on both sides.
                    relation_position_s.append(self.pidding_idx)
                    relation_position_e.append(self.pidding_idx)
            if isAppend == 0:
                src.append(temp[2])
        src = ''.join(src)
        print(len(src))

        return src, relation_position_s, relation_position_e, l

    def __getitem__(self, i):
        ## Fetch a single sample.
        src, relation_position_s, relation_position_e, lex_num = self.deal(i)
        token_ids, token_type_ids = self.tokenizer.encode(src)

        output = {
            "token_ids": token_ids,
            "token_type_ids": token_type_ids,
            "relation_position_s": relation_position_s,
            "relation_position_e": relation_position_e,
            "lex_num": lex_num,
            }
        return output

    def __len__(self):

        return len(self.sents_src)

def collate_fn(batch):
    """Dynamic-padding collate: pad every field of *batch* to the longest
    token sequence and build the shifted decoder targets.
    """

    def _pad(rows, width, fill=0):
        # Right-pad each row to `width`. token_type_ids are padded with 1,
        # not 0, because 1 marks positions belonging to sentence B.
        return torch.tensor([row + [fill] * max(0, width - len(row)) for row in rows])

    token_ids = [sample["token_ids"] for sample in batch]
    token_type_ids = [sample["token_type_ids"] for sample in batch]
    relation_position_s = [sample["relation_position_s"] for sample in batch]
    relation_position_e = [sample["relation_position_e"] for sample in batch]
    lex_num = [sample["lex_num"] for sample in batch]
    width = max(len(row) for row in token_ids)

    token_ids_padded = _pad(token_ids, width)
    token_type_ids_padded = _pad(token_type_ids, width, fill=1)
    relation_position_s_padded = _pad(relation_position_s, width)
    relation_position_e_padded = _pad(relation_position_e, width)
    # Targets are the token ids shifted left by one position.
    target_ids_padded = token_ids_padded[:, 1:].contiguous()

    return token_ids_padded, token_type_ids_padded, target_ids_padded, relation_position_s_padded, relation_position_e_padded, lex_num

# ---- Inference: load the trained checkpoint and generate candidates ----
device = torch.device("cuda")  # hoisted: it was re-created on every batch
word2idx = load_chinese_base_vocab()
config_A = BertConfig(len(word2idx))
bert_seq2seq = Seq2SeqModel(config_A)
## Load the checkpoint weights.
checkpoint = torch.load("train_model_1/bert_poem.model.epoch-0.4", map_location=device)
## strict=False tolerates keys that do not match the current model.
bert_seq2seq.load_state_dict(checkpoint, strict=False)
bert_seq2seq.to(device)
# NOTE(review): train() keeps dropout active during generation — the 30
# samples per input presumably rely on that stochasticity; confirm this
# is intentional rather than a missing eval() call.
bert_seq2seq.train()
dataset = PoemDataset()
dataloader = DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)


candidates = []
for token_ids, token_type_ids, _, relation_position_s, relation_position_e, lex_num in tqdm(dataloader, position=0, leave=True):
    token_ids = token_ids.to(device).view(1, -1)

    token_type_ids = token_type_ids.to(device).view(1, -1)

    relation_position_s = relation_position_s.to(device)
    relation_position_e = relation_position_e.to(device)
    # Generate 30 candidates for each input sentence.
    temp = []
    for _ in range(30):
        result = bert_seq2seq.generate(token_ids, token_type_ids, relation_position_s, relation_position_e, lex_num, beam_size=1, device=device)
        temp.append(result)
        print(result)
    candidates.append(temp)

# One blank line separates the candidate groups of consecutive inputs.
with open("rresult_padding_99_7.12_继承分类参数测试_调整学习率", "a", encoding='UTF-8') as f:
    for group in candidates:
        for candidate in group:
            f.write(candidate + "\n")
        f.write("\n")
