# -*- coding: utf-8 -*-
# Program function：
# 构造dataset
import torch
from torch.utils.data import Dataset,DataLoader
import pandas as pd
from config import *
from collections import defaultdict
def get_data(csv_file):
    """Load a headerless tab-separated file of (text, label) rows.

    Args:
        csv_file: path to a TSV file with two columns: text and label.

    Returns:
        list of (text, label) tuples, one per row, in file order.
    """
    data = pd.read_csv(csv_file, sep='\t', header=None, names=['text', 'label'])
    # Vectorized column access instead of one .iloc call per row —
    # per-row .iloc in a Python loop is very slow on large files.
    return list(zip(data['text'], data['label']))

# Keep only the first `num` rows of each label class (e.g. 10000 per class).
def get_data_num(csv_file, num):
    """Load at most `num` rows per label from a headerless TSV.

    Args:
        csv_file: path to a tab-separated file with columns text, label.
        num: maximum number of rows to keep for each distinct label.

    Returns:
        list of (text, label) tuples, keeping only the first `num`
        occurrences of each label; file order is preserved.
    """
    data = pd.read_csv(csv_file, sep='\t', header=None, names=['text', 'label'])
    counts = defaultdict(int)  # running count per label; missing keys start at 0
    result = []
    # Iterate the columns directly instead of per-row .iloc (much faster),
    # and rely on defaultdict — the original's .get(label, 0) was redundant.
    for text, label in zip(data['text'], data['label']):
        counts[label] += 1
        if counts[label] <= num:
            result.append((text, label))
    return result

class BertDataset(Dataset):
    """Dataset of (text, label) pairs read from a tab-separated file.

    Args:
        csv_file: path to the TSV data file (text, label columns).
        num: if truthy, keep at most `num` samples per label;
             if False (default), load every row.
    """

    def __init__(self, csv_file, num=False):
        # `num` doubles as an enable flag and the per-label cap.
        self.data_list = get_data_num(csv_file, num) if num else get_data(csv_file)

    def __getitem__(self, index):
        # Each stored entry is already a (text, label) tuple.
        text, label = self.data_list[index]
        return text, label

    def __len__(self):
        return len(self.data_list)

def collate_fn(batch):
    """Collate a list of (text, label) pairs into model-ready tensors.

    Args:
        batch: list of (text, label) tuples produced by BertDataset.

    Returns:
        (input_ids, attention_mask, labels) — input tensors are padded /
        truncated to `max_len` (from config); labels is a 1-D LongTensor.
    """
    texts, labels = zip(*batch)
    encoded = bert_tokenizer.batch_encode_plus(
        texts,
        padding='max_length',
        truncation=True,
        max_length=max_len,
        return_tensors="pt",
    )
    # return_tensors="pt" already yields fresh tensors with no autograd
    # history, so the original .clone().detach() copies were redundant.
    labels = torch.tensor(labels)
    return encoded['input_ids'], encoded['attention_mask'], labels


def get_loader(num = False):
    """Build the train/test/dev DataLoaders.

    Args:
        num: if truthy, cap the samples kept per label (forwarded to
             BertDataset); if False, use the full datasets.

    Returns:
        (train_loader, test_loader, dev_loader) tuple; all shuffle and
        use `collate_fn` with `batch_size` from config.
    """
    # Build each dataset exactly once. The original unconditionally built
    # all three full datasets/loaders and then rebuilt them when `num` was
    # set, reading every CSV twice for nothing.
    def _make(path):
        dataset = BertDataset(path, num)  # num=False falls back to full load
        return DataLoader(dataset, batch_size=batch_size, shuffle=True,
                          collate_fn=collate_fn)

    return _make(train_path), _make(test_path), _make(dev_path)