# -*- coding: UTF-8 -*-
'''
@File: my_datasets.py
@IDE: PyCharm
@Author: chaojie
@Date: 2025/7/4
@Introduce: Dataset and DataLoader helpers for a jieba-tokenized
            text-classification task.
'''


import numpy as np
import pandas as pd
import json
import jieba
import torch

from torch.utils.data import Dataset, DataLoader


class cls_dataset(Dataset):
    """Text-classification dataset.

    Segments raw sentences with jieba, maps each token to an id via a JSON
    vocabulary, and maps each class name to a label id via a CSV file.
    Samples are fully materialized in memory at construction time.
    """

    def __init__(self, vocab_file, label2id_file, data_path):
        """
        :param vocab_file: path to a JSON file mapping token -> id;
                           loading it also sets ``self.vocab_size``.
        :param label2id_file: path to a CSV with ``cls_name`` and ``id`` columns.
        :param data_path: path to a tab-separated file: ``<sentence>\\t<class name>``.
        """
        super().__init__()
        self.tokenizer = self.get_tokenizer(vocab_file)      # token -> id mapping
        self.label2id = self.get_label2id(label2id_file)     # class name -> label id
        # Backward-compat alias for the old misspelled attribute name.
        self.labe2id = self.label2id
        self.datas = self.load_data(data_path)

    def load_data(self, data_file):
        """Read the TSV data file and convert every row into a sample dict.

        :param data_file: tab-separated file with sentence / class-name columns.
        :return: list of ``{'word': [token ids], 'label': label id}`` dicts.
        """
        samples = []
        frame = pd.read_csv(data_file, header=None, sep='\t')

        for sentence, label_name in zip(frame[0], frame[1]):
            tokens = list(jieba.cut_for_search(sentence))   # word segmentation
            # Unknown tokens fall back to id 1 (UNK); 0 is reserved for padding
            # (see collate_fn) — presumably the vocab follows this convention.
            ids = [self.tokenizer.get(token, 1) for token in tokens]
            samples.append({"word": ids, 'label': self.label2id[label_name]})
        return samples

    def get_label2id(self, label2id_file):
        """Load the class-name -> label-id mapping from a CSV file."""
        df = pd.read_csv(label2id_file, sep=',')
        return dict(zip(df['cls_name'], df['id']))

    def get_tokenizer(self, vocab_file):
        """Load the token -> id vocabulary from a JSON file.

        Side effect: records the vocabulary size in ``self.vocab_size``.
        """
        with open(vocab_file, 'r', encoding='utf-8') as f:
            vocab = json.load(f)
        self.vocab_size = len(vocab)
        return vocab

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.datas)

    def __getitem__(self, index):
        """Return the raw sample dict at *index* (padding happens in collate_fn)."""
        return self.datas[index]

def collate_fn(batch):
    """Collate a list of ``{'word': [ids], 'label': id}`` samples into tensors.

    Right-pads every token-id sequence with 0 (the padding id) up to the
    longest sequence in the batch, then stacks words and labels.

    Fix: the previous version padded via ``x.extend(...)`` inside a throwaway
    list comprehension, mutating the lists cached inside the dataset — padding
    zeros accumulated across epochs whenever a sample reappeared in a batch
    with a different max length. New padded lists are built instead.

    :param batch: list of sample dicts from ``cls_dataset.__getitem__``.
    :return: tuple ``(LongTensor [B, max_len], LongTensor [B])``.
    """
    word_seqs = [sample['word'] for sample in batch]
    labels = [sample['label'] for sample in batch]

    max_len = max(len(seq) for seq in word_seqs)
    padded = [seq + [0] * (max_len - len(seq)) for seq in word_seqs]

    return torch.tensor(padded), torch.tensor(labels)

def get_dataloader(vocab_file, label2if_file, data_path, batch, shuffle=True, num_workers=0, **kwargs):
    """Build a :class:`DataLoader` over a :class:`cls_dataset`.

    :param vocab_file: token -> id JSON vocabulary path.
    :param label2if_file: class-name -> id CSV path (parameter name kept for
                          backward compatibility despite the 'if' typo).
    :param data_path: TSV data file path.
    :param batch: batch size.
    :param shuffle: whether to shuffle each epoch.
    :param num_workers: DataLoader worker-process count.
    :param kwargs: extra keyword arguments forwarded to ``DataLoader``
                   (previously accepted but silently ignored).
    :return: a ``DataLoader`` yielding padded (words, labels) tensor pairs.
    """
    dataset = cls_dataset(vocab_file, label2if_file, data_path)
    return DataLoader(dataset, batch_size=batch, shuffle=shuffle,
                      num_workers=num_workers, collate_fn=collate_fn, **kwargs)




if __name__ == '__main__':
    # Smoke test: build a loader over the validation split and dump batches.
    vocab_file = r'D:\linuxFiles\nlp_demo\datas/cls/vocab.json'
    label2if_file = r'D:\linuxFiles\nlp_demo\datas/cls/label_2_id.csv'
    data_path = r'D:\linuxFiles\nlp_demo\datas/cls/val.csv'

    loader = get_dataloader(vocab_file, label2if_file, data_path, batch=4)
    for sample_batch in loader:
        print(sample_batch)
