import paddle
from paddle.io import Dataset, DataLoader
import os

from paddlenlp.transformers import BertTokenizer
import numpy as np

# Directory containing the label vocabulary and the train/dev/test splits.
data_dir = os.path.join(os.getcwd(), 'paddle_data')

tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
# Flatten every class name into per-character token ids
# (Chinese class names are tokenized one character at a time).
with open(os.path.join(data_dir, 'class_ch.txt'), encoding='utf-8') as f:
    class_list = [
        tokenizer.convert_tokens_to_ids(ch)
        for name in f.read().splitlines()
        for ch in name
    ]


class MyDataSet(Dataset):
    """Binary prompt-classification dataset supporting two on-disk layouts.

    ``version='old'`` reads ``paddle_data/<mode>.txt`` and expands each
    sample into one (prompt + text, 0/1) pair per known class name;
    any other value reads ``new_paddle_data/<mode>.txt`` where samples
    are already stored as ``text<TAB>label`` with an integer label.
    """

    def __init__(self, mode: str, version: str = 'old'):
        super(MyDataSet, self).__init__()
        self.result = []  # list of (text, int_label) pairs
        if version == 'old':
            self.old_version(mode)
        else:
            self.new_version(mode)

    def new_version(self, mode: str):
        """Load pre-expanded ``text<TAB>label`` lines from new_paddle_data."""
        with open(os.path.join(os.getcwd(), 'new_paddle_data', f'{mode}.txt'), encoding='utf-8') as f:
            line_list = f.read().splitlines()
        for line in line_list:
            # maxsplit=1 so tabs inside the text itself cannot break the
            # unpack (matches old_version and PromptDataSet, which already
            # use rsplit(sep, 1)).
            txt, label = line.rsplit('\t', 1)
            self.result.append((txt, int(label)))

    def old_version(self, mode: str):
        """Expand each raw sample into one binary example per class name."""
        with open(os.path.join(data_dir, f'{mode}.txt'), encoding='utf-8') as f:
            line_list = f.read().splitlines()

        with open(os.path.join(data_dir, 'class_ch.txt'), encoding='utf-8') as f:
            original_class_list = f.read().splitlines()
        for item in line_list:
            text, label = item.rsplit('\t', 1)
            for other in original_class_list:
                # Prompt asks whether the text is related to class `other`;
                # label is 1 only for the true class.
                prompt = "这句话与" + other + "[MASK]关："
                if other == label:
                    self.result.append((prompt + text, 1))
                else:
                    self.result.append((prompt + text, 0))

    def __getitem__(self, item):
        text = self.result[item][0]
        label = self.result[item][1]
        return text, label

    def __len__(self):
        return len(self.result)


class PromptDataSet(Dataset):
    """Dataset that prefixes every sample with a two-[MASK] class prompt.

    Reads ``paddle_data/<mode>.txt`` where each line is ``text<TAB>label``;
    the label is kept as its raw string form.
    """

    def __init__(self, mode: str):
        super(PromptDataSet, self).__init__()
        prompt = '这句话与[MASK][MASK]有关：'
        with open(os.path.join(data_dir, f'{mode}.txt'), encoding='utf-8') as f:
            data_list = f.read().splitlines()
        # Split off the label at the last tab, then prepend the prompt.
        pairs = (line.rsplit('\t', 1) for line in data_list)
        self.result = [(prompt + txt, label) for txt, label in pairs]

    def __getitem__(self, item):
        # Stored entries are already (text, label) tuples.
        return self.result[item]

    def __len__(self):
        return len(self.result)


def collate_fn(batch):
    """Collate a batch of (text, label) pairs into model-ready tensors.

    Expects labels to be two-character class-name strings (as produced by
    PromptDataSet); each character is mapped to its BERT token id.

    Returns a dict with the raw texts, tokenized inputs, a ``label_model``
    tensor that is -100 (ignore index) everywhere except the two [MASK]
    positions, and the per-sample [batch, 2] label token ids.
    """
    # Plain unpacking instead of np.asarray(batch): asarray on mixed tuples
    # silently coerces every element to str, which would corrupt non-string
    # labels before tokenization.
    text_list = [sample[0] for sample in batch]
    label_list = [sample[1] for sample in batch]
    # One token id per character of each two-char label -> shape [batch, 2].
    label_list_ids = paddle.to_tensor(
        [tokenizer.convert_tokens_to_ids(word) for item in label_list for word in item]).reshape([-1, 2])

    output = tokenizer(text=text_list, padding=True, return_attention_mask=True, return_tensors="pd")
    # Positions 5 and 6 are the two [MASK] tokens of the fixed prompt
    # ([CLS] 这 句 话 与 [MASK] [MASK] ...). Start the whole row at -100 and
    # add (ids + 100) at those columns so they end up holding the true ids
    # while every other position stays at the -100 ignore index.
    label_model = paddle.full_like(output['input_ids'], -100)
    label_model = paddle.index_add(label_model, index=paddle.to_tensor([5, 6]), axis=1,
                                   value=label_list_ids + 100)

    return_data = {
        "original_text": text_list,
        "input_ids": output['input_ids'],
        "attention": output['attention_mask'],
        "token_type_ids": output['token_type_ids'],
        "label_model": label_model,
        "label": label_list_ids
    }
    return return_data
