import pandas as pd
import torch

from src.bert.my_dataset import load_raw_data, TextDataset
from utils.config import Config

# Project-wide configuration (tokenizer, batch size, data paths).
# NOTE(review): the project root is a hardcoded absolute Windows path —
# consider reading it from an env var or CLI argument for portability.
config = Config('E:/Python+AI/group4_nlp_project')

def collate_fn(batch):
    """Collate a batch of (text, label, cat_label) tuples into model inputs.

    Args:
        batch: iterable of 3-tuples ``(text, label, cat_label)`` as yielded
            by ``TextDataset``.

    Returns:
        Tuple ``(input_ids, attention_mask, labels, cat_labels)``: the first
        two are padded token tensors from ``config.tokenizer``; the last two
        are 1-D ``torch.Tensor`` of the integer labels.
    """
    def _clean(text):
        # Cap raw text at 150 characters; replace any non-string value
        # (e.g. float NaN from pandas) with "". Checking isinstance alone
        # also covers the pd.isna case (NaN is not a str) and avoids
        # calling pd.isna() on array-likes, whose truthiness would raise.
        return text[:150] if isinstance(text, str) else ""

    texts = [_clean(item[0]) for item in batch]        # text is the first tuple element
    labels = [item[1] for item in batch]               # label is the second element
    cat_labels = [item[2] for item in batch]           # category is the third element

    # Tokenize the whole batch at once: pad to the longest sequence,
    # truncate at 300 tokens (the 150-char cap above is a coarse pre-filter).
    text_tokens = config.tokenizer(texts, padding=True, truncation=True,
                                   return_tensors='pt', max_length=300)
    input_ids = text_tokens['input_ids']
    attention_mask = text_tokens['attention_mask']
    # Convert the label lists to tensors.
    labels = torch.tensor(labels)
    cat_labels = torch.tensor(cat_labels)

    return input_ids, attention_mask, labels, cat_labels


def my_dataLoader(path):
    """Build a shuffled DataLoader over the text data found at *path*.

    Args:
        path: location of the raw data file understood by ``load_raw_data``.

    Returns:
        A ``torch.utils.data.DataLoader`` that yields batches produced by
        ``collate_fn``, using ``config.batch_size`` and shuffling enabled.
    """
    raw_data = load_raw_data(path)
    return torch.utils.data.DataLoader(
        TextDataset(raw_data),
        batch_size=config.batch_size,
        shuffle=True,
        collate_fn=collate_fn,
    )


if __name__ == '__main__':
    # Smoke test: build the training loader and print the first batch's
    # tensor shapes and labels, then stop.
    for batch in my_dataLoader(config.train_path):
        ids, mask, lbls, cats = batch
        print("Input IDs shape:", ids.shape)
        print("Attention mask shape:", mask.shape)
        print("Labels:", lbls)
        print("Category labels:", cats)
        break
