
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

from transformers import BertTokenizer
from transformers.utils import PaddingStrategy

from src.Commons.base_config_loader import BaseConfigLoader

# Shared project configuration; resolves the on-disk path of the pretrained
# BERT model that collate_batch uses for tokenization.
base_config = BaseConfigLoader()
model_bert_src_path = base_config.get("system.resources.bert.models.model_bert_src_path")

def load_row_data(file_path):
    """Read a tab-separated file of ``<text>\\t<label>`` lines.

    Blank lines are skipped; the label field is converted to ``int``.

    :param file_path: path to the UTF-8 encoded raw data file
    :return: list of ``(text, label)`` tuples
    """
    samples = []
    with open(file_path, "r", encoding="utf-8") as fh:
        for raw_line in fh:
            stripped = raw_line.strip()
            if not stripped:
                continue
            fields = stripped.split("\t")
            samples.append((fields[0], int(fields[1])))
    return samples

class RowDataSet(Dataset):
    """Thin ``Dataset`` wrapper around a list of ``(text, label)`` pairs."""

    def __init__(self, data):
        # data: sequence of (text, label) tuples, e.g. from load_row_data.
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index: int):
        text, label = self.data[index]
        return text, label


def collate_batch(batch):
    """Tokenize, pad and tensorize one batch of ``(text, label)`` pairs.

    :param batch: list of ``(text, label)`` tuples as yielded by RowDataSet
    :return: tuple ``(input_ids, attention_mask, labels)`` where
        input_ids: token-id tensor of shape (batch_size, 32),
        attention_mask: tensor where 1 marks real tokens and 0 marks padding,
        labels: label tensor of shape (batch_size,)
    """
    words, labels = zip(*batch)
    # Bug fix: the tokenizer used to be loaded from disk on EVERY batch, which
    # dominated data-loading time. Load it lazily once and cache it on the
    # function object (a per-process cache, so it also works with DataLoader
    # worker processes).
    tokenizer = getattr(collate_batch, "_tokenizer", None)
    if tokenizer is None:
        tokenizer = BertTokenizer.from_pretrained(model_bert_src_path)
        collate_batch._tokenizer = tokenizer
    tokenizer_output = tokenizer.batch_encode_plus(
        list(words),
        add_special_tokens=True,  # automatically add the [CLS] and [SEP] markers
        padding="max_length",
        truncation=True,
        max_length=32,
        return_tensors="pt",
        return_special_tokens_mask=True,
    )
    input_ids = tokenizer_output["input_ids"]
    attention_mask = tokenizer_output["attention_mask"]
    labels = torch.tensor(labels)

    return input_ids, attention_mask, labels

def get_dataloader(train_dataset, test_dataset, dev_dataset, batch_size=64):
    """Wrap the three dataset splits in DataLoaders sharing ``collate_batch``.

    Only the training loader shuffles; evaluation splits keep file order.

    :param train_dataset: Dataset for the training split
    :param test_dataset: Dataset for the test split
    :param dev_dataset: Dataset for the dev/validation split
    :param batch_size: samples per batch (default 64, the previous hard-coded value)
    :return: tuple ``(train_dataloader, test_dataloader, dev_dataloader)``
    """
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_batch)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_batch)
    dev_dataloader = DataLoader(dev_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_batch)

    return train_dataloader, test_dataloader, dev_dataloader

def use_get_dataloader():
    """Build the train/test/dev DataLoaders from the configured raw-data files.

    :return: tuple ``(train_dataloader, test_dataloader, dev_dataloader)``
    """
    config_loader = BaseConfigLoader()

    # Resolve the raw file path of each split from the project configuration.
    split_paths = (
        config_loader.get("system.resources.src_data.train_path"),
        config_loader.get("system.resources.src_data.test_path"),
        config_loader.get("system.resources.src_data.dev_path"),
    )

    # Load the raw samples and wrap each split in a Dataset.
    datasets = [RowDataSet(load_row_data(path)) for path in split_paths]

    return get_dataloader(*datasets)


if __name__ == '__main__':
    # Smoke-test entry point: builds all three dataloaders from configured paths.
    use_get_dataloader()


