"""
使用训练好的模型进行预测。
全部预测完成后，找到一个相关的，即为所得。
如果有多个相关的，那么哪个得的分高就是谁。
如果都不想管，那么在相关的中找谁得的分高。
"""
import os

import paddle
from paddle.io import Dataset, DataLoader
from paddlenlp.transformers import BertForMaskedLM, BertTokenizer, BertConfig
from tqdm import tqdm

tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")

data_dir = os.path.join(os.getcwd(), 'paddle_data')
with open(os.path.join(data_dir, 'class_ch.txt'), encoding='utf-8') as f:
    class_list = f.read().splitlines()

with open(os.path.join(data_dir, 'fou_shi.txt'), encoding='utf-8') as f:
    whether_list = f.read().splitlines()
    whether_list = [tokenizer.convert_tokens_to_ids(item) for item in whether_list]


class TestDataSet(Dataset):
    """Cartesian product of every test sentence with every candidate class.

    Each item is a prompt string '这句话与{class_name}[MASK]关：' followed by
    the sentence, so the model can fill the [MASK] with a yes/no token for
    each (sentence, class) pair. Items for one sentence are contiguous, in
    ``class_list`` order — the aggregation step in ``__main__`` relies on
    this layout.
    """

    def __init__(self):
        super().__init__()
        sentences = get_test_content()
        # One prompt per (sentence, class) pair, classes varying fastest.
        self.result = [
            f'这句话与{name}[MASK]关：' + sentence
            for sentence in sentences
            for name in class_list
        ]

    def __getitem__(self, item):
        return self.result[item]

    def __len__(self):
        return len(self.result)


def collate_fn(batch):
    """Tokenize a batch of prompt strings into padded paddle tensors.

    The raw strings are carried through under 'original_text' so the
    prediction loop can recover each prompt. Note the attention mask is
    exposed under the key 'attention' (the consumer reads that exact key).
    """
    encoded = tokenizer(text=batch, padding=True, return_attention_mask=True, return_tensors="pd")
    return {
        "original_text": batch,
        "token_type_ids": encoded['token_type_ids'],
        "input_ids": encoded['input_ids'],
        "attention": encoded['attention_mask'],
    }


def get_test_content():
    """Return the prediction inputs from paddle_data/test.txt, one sentence per line."""
    path = os.path.join(os.getcwd(), 'paddle_data', 'test.txt')
    with open(path, encoding='utf-8') as handle:
        return handle.read().splitlines()


if __name__ == '__main__':
    load_dir = os.path.join(os.getcwd(), 'onlyMask_best_save_model_86')

    # Rebuild the model skeleton from the local config, then restore the
    # fine-tuned weights saved by the training script.
    json_file = os.path.join(os.getcwd(), 'model_hub', 'bert-base-chinese', 'config.json')
    bert_config = BertConfig.from_json_file(json_file)
    model = BertForMaskedLM(bert_config)
    layer_state_dict = paddle.load(os.path.join(load_dir, "model.pdparams"))
    model.set_state_dict(layer_state_dict)
    # Model loaded; ready for inference.

    data_loader = DataLoader(TestDataSet(), shuffle=False, batch_size=512, drop_last=False, collate_fn=collate_fn)
    result = []
    model.eval()

    with paddle.no_grad():
        for batch in tqdm(data_loader):
            original_text = batch['original_text']
            input_ids = batch['input_ids']
            attention_mask = batch['attention']
            token_type_ids = batch['token_type_ids']
            logits = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
            # Logits at the [MASK] position: [batch, vocab_size].
            # NOTE(review): the hard-coded position 7 is only correct if every
            # class name in the prompt '这句话与{class_name}[MASK]关：' is exactly
            # two characters long — confirm against class_ch.txt.
            mask_probability = logits[:, 7]
            # Keep only the scores of the two answer tokens: [batch, 2].
            last_probability = paddle.index_select(mask_probability, index=paddle.to_tensor(whether_list),
                                                   axis=1)
            for index in range(last_probability.shape[0]):
                result.append((original_text[index], last_probability[index]))

    # Inference done — aggregate the per-class scores back to one prediction
    # per input sentence. The dataset emits all classes for one sentence
    # contiguously, so `result` is consumed in chunks of class_length.
    class_length = len(class_list)
    assert len(result) % class_length == 0
    final_result = []
    for begin in range(0, len(result), class_length):
        line = result[begin:begin + class_length]
        scores = paddle.concat([item[1] for item in line]).reshape([class_length, 2])
        # predict[i] == 1 means class i was judged "relevant" for this sentence.
        predict = paddle.argmax(scores, axis=1)
        relevant_count = paddle.count_nonzero(predict).item()
        if relevant_count == 1:
            # Exactly one relevant class: take it directly.
            chosen = paddle.where(predict == 1)[0].item()
        else:
            # Zero or several relevant classes: fall back to the class with
            # the highest "relevant" score.
            chosen = paddle.argmax(scores, axis=0)[1].item()
        final_result.append(class_list[chosen])

    with open("result.txt", mode='w', encoding='utf-8') as f:
        f.write('\n'.join(final_result))
