# -*- coding:utf8 -*-
# @Time : 2023/1/31 15:42
# @Author : WanJie Wu

import os

import torch
from transformers import BertTokenizer
from torch.utils.data import Dataset, DataLoader


def load_label_system(readme_path):
    """Read the label inventory from a readme file and build id mappings.

    Each non-comment, non-blank line's first whitespace-separated token is
    taken as a label name; ids are assigned in file order.

    Args:
        readme_path: path to the label-system file (e.g. ``readme.txt``).

    Returns:
        dict with two keys:
            ``"label2id"``: {label name -> integer id}
            ``"id2label"``: {integer id -> label name}
    """
    labels = []
    with open(readme_path, "r", encoding="utf8") as f:
        for line in f:
            stripped = line.strip()
            # Skip blank lines and comment lines. The original tested
            # `"#" in stripped`, which wrongly dropped any label line that
            # merely CONTAINED a '#'; only lines *starting* with '#' are
            # comments.
            if not stripped or stripped.startswith("#"):
                continue
            labels.append(stripped.split()[0])

    return {
        "label2id": {key: i for i, key in enumerate(labels)},
        "id2label": {i: key for i, key in enumerate(labels)},
    }


class ClfDataset(Dataset):
    """Map-style dataset of (label id, text) records for classification.

    Expects ``readme.txt`` (label system) and the data file to live in
    ``data_dir``. Each data line is ``<label> <text>`` separated by
    whitespace.
    """

    def __init__(self, data_dir, file_name):
        # data_dir: directory holding readme.txt plus the split files.
        # file_name: which split to load, e.g. "train.txt".
        self.data_dir = data_dir
        self.file_name = file_name
        self.label2id = load_label_system(os.path.join(data_dir, "readme.txt"))["label2id"]
        self.clf_data = self.read_dataset()

    def __getitem__(self, idx):
        """Return the idx-th record: ``{"id": int, "text": str}``."""
        return self.clf_data[idx]

    def __len__(self):
        return len(self.clf_data)

    def read_dataset(self):
        """Read the split file into a list of ``{"id", "text"}`` dicts.

        Raises:
            KeyError: if a line's label is not in the label system.
        """
        clf_data = []
        with open(os.path.join(self.data_dir, self.file_name), "r", encoding="utf8") as f:
            for line in f:
                stripped = line.strip()
                # Skip blank lines — the original indexed split()[0] and
                # crashed with IndexError on them.
                if not stripped:
                    continue
                # maxsplit=1 keeps the full text: the original took only
                # split()[1], silently truncating any text containing
                # whitespace to its first token.
                label, text = stripped.split(maxsplit=1)
                clf_data.append({
                    "id": self.label2id[label],
                    "text": text,
                })
        return clf_data


class ClfCollate:
    """DataLoader collate function: tokenize texts, attach label tensor."""

    def __init__(self, tokenizer, max_seq_length):
        # tokenizer: a HuggingFace-style tokenizer callable.
        # max_seq_length: hard cap for truncation during tokenization.
        self.max_seq_length = max_seq_length
        self.tokenizer = tokenizer

    def __call__(self, batch_data):
        """Collate a list of ``{"id", "text"}`` dicts into a model batch.

        Returns the tokenizer's encoding (padded/truncated tensors) with an
        extra ``"labels"`` tensor of class indices.
        """
        id_lst = [item["id"] for item in batch_data]
        text_lst = [item["text"] for item in batch_data]
        tokenizer_examples = self.tokenizer(
            text=text_lst,
            padding=True,
            truncation=True,
            max_length=self.max_seq_length,
            return_tensors="pt",
        )
        # The ids are class indices from label2id, so they must be integer
        # (torch.long): CrossEntropyLoss and transformers' multi-class
        # sequence-classification heads reject float targets. The original
        # float32 dtype would mis-route or fail at loss computation.
        tokenizer_examples["labels"] = torch.tensor(id_lst, dtype=torch.long)
        return tokenizer_examples


def gen_dataloader(dataset_dir, model_name_or_path, batch_size=8, max_seq_length=512):
    """Build the train/dev/test DataLoaders for the classification task.

    Args:
        dataset_dir: directory containing readme.txt, train.txt, dev.txt, test.txt.
        model_name_or_path: tokenizer name or local path for BertTokenizer.
        batch_size: batch size shared by all three loaders.
        max_seq_length: truncation length passed to the tokenizer.

    Returns:
        (train_loader, dev_loader, test_loader) tuple.
    """
    tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path=model_name_or_path)
    # One collator is enough — it is stateless apart from the shared tokenizer.
    collate_fn = ClfCollate(tokenizer=tokenizer, max_seq_length=max_seq_length)

    def _make_loader(file_name, shuffle):
        # One loader per split; all splits share batch size and collator.
        return DataLoader(
            dataset=ClfDataset(dataset_dir, file_name),
            batch_size=batch_size,
            shuffle=shuffle,
            collate_fn=collate_fn,
        )

    train_loader = _make_loader("train.txt", shuffle=True)
    # Evaluation splits are not shuffled: shuffling them adds nondeterminism
    # without any benefit (the original shuffled all three).
    dev_loader = _make_loader("dev.txt", shuffle=False)
    test_loader = _make_loader("test.txt", shuffle=False)
    return train_loader, dev_loader, test_loader


if __name__ == "__main__":
    # Manual smoke test with hard-coded local paths; not meant for production.
    # (Removed: an unused `sample_path` variable and a leftover
    # `pdb.set_trace()` debugging breakpoint.)
    loaders = gen_dataloader(
        dataset_dir="/home/wuwanjie/projects/nlp_algo/app/data/clf",
        model_name_or_path="/data/sdv1/wuwanjie/transformers/bert-base-chinese",
    )
    for _loader in loaders:
        print(_loader)
        print("==")