import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer

import FeatExtractor
import configs

# Module-level shared tokenizer, loaded once at import time from the
# pretrained BERT checkpoint path configured in configs.
tokenizer = BertTokenizer.from_pretrained(configs.PRETRAINED_BERT_PATH)


class AnsDataset(Dataset):
    """Dataset of human-written vs. ChatGPT-written answers.

    Each item is a 4-tuple ``(input_ids, attention_mask, stat_features, label)``
    where the label is one-hot: ``[1, 0]`` for human text, ``[0, 1]`` for AI text.
    """

    input_id: torch.Tensor   # (N, seq_len) BERT token ids
    attention: torch.Tensor  # (N, seq_len) attention masks
    stat: torch.Tensor       # (N, n_feats) hand-crafted statistical features
    labels: torch.Tensor     # (N, 2) one-hot labels; human=[1,0], ai=[0,1]

    def __init__(self, ds):
        # ds is expected to expose "human_answers"/"chatgpt_answers" columns whose
        # entries are sequences; only the first answer of each entry is used.
        # NOTE(review): an entry that is an *empty* sequence would raise
        # IndexError here — assumed not to occur in this dataset; verify.
        h = [str(i[0]) for i in ds["human_answers"] if i[0] is not None]
        a = [str(i[0]) for i in ds["chatgpt_answers"] if i[0] is not None]

        # BUG FIX: the original code tokenized the two groups in two separate
        # calls, so padding=True padded each group to its own longest sequence.
        # torch.cat along dim 0 then fails whenever those padded lengths differ
        # (it only worked by accident when both groups were truncated to 256).
        # Tokenizing everything in one call guarantees a single common length.
        encoded = tokenizer(h + a, return_tensors='pt', padding=True,
                            truncation=True, max_length=256)

        print("[data.py:AnsDataset::__init__] tokenized")

        # Label encoding: human=0 -> [1, 0]; ai=1 -> [0, 1].
        # Built as one list so an empty group contributes zero rows instead of
        # a shape-(0,) tensor that torch.cat could not concatenate with (n, 2).
        self.input_id = encoded["input_ids"]
        self.attention = encoded["attention_mask"]
        self.labels = torch.tensor([[1.0, 0.0]] * len(h) + [[0.0, 1.0]] * len(a),
                                   dtype=torch.float32)
        print("[data.py:AnsDataset::__init__] inID,attention,label prepared")

        # Per-text statistical features, humans first to match the label order.
        hu_stat = [FeatExtractor.extract_statistical_features(x) for x in h]
        ai_stat = [FeatExtractor.extract_statistical_features(x) for x in a]
        self.stat = torch.tensor(hu_stat + ai_stat, dtype=torch.float32)
        print("[data.py:AnsDataset::__init__] stat prepared")

    def __len__(self):
        """Return the total number of samples (human + AI)."""
        return len(self.input_id)

    def __getitem__(self, idx):
        """Return (input_ids, attention_mask, stat_features, label) at *idx*."""
        return self.input_id[idx], self.attention[idx], self.stat[idx], self.labels[idx]


def load() -> AnsDataset:
    """Load the training split of the configured dataset as an AnsDataset."""
    print("[data.py:load] loading dataset")
    # Imported lazily so merely importing this module does not pull in modelscope.
    from modelscope.msdatasets import MsDataset

    full = MsDataset.load(configs.DATASET_NAME, subset_name=configs.DATASET_SUBSET, split='train')
    # Same test_size/seed as loadTestDs(), so the two splits are complementary.
    split = full.train_test_split(test_size=configs.test_size, seed=configs.random_seed)
    train_part = split["train"]
    print("rate", configs.test_size, "\nds", len(full), "\ntrain", len(train_part))
    return AnsDataset(train_part)


def loadTestDs() -> AnsDataset:
    """Load the held-out test split as an AnsDataset.

    Mirrors load(): identical dataset, subset, test_size and seed, so the
    "test" split returned here is exactly the complement of load()'s "train".
    """
    # Consistency fix: load() logs its progress but this function was silent;
    # log in the same style so both loaders are traceable.
    print("[data.py:loadTestDs] loading dataset")
    from modelscope.msdatasets import MsDataset
    ds = MsDataset.load(configs.DATASET_NAME, subset_name=configs.DATASET_SUBSET, split='train')
    ds_test = ds.train_test_split(test_size=configs.test_size, seed=configs.random_seed)["test"]
    return AnsDataset(ds_test)


def getLoader(dataset, batch_size: int) -> DataLoader:
    """Wrap *dataset* in a shuffling DataLoader with the given batch size."""
    print("[data.py:getLoader] creating loader,batch_size:", batch_size)
    loader = DataLoader(dataset, shuffle=True, batch_size=batch_size)
    return loader
