from utils import *
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer

# load dataset
# Paths to the raw dataset splits on disk.
train_path = '../Input/raw/train.json'
valid_path = '../Input/raw/dev.json'
test_path = '../Input/raw/test.json'

# User-profile fields that get concatenated into the model's input text
# (see processData). NOTE: the name keeps the original 'used_colum'
# spelling because other code in this file references it.
used_colum = ['statuses_count', 'favourites_count', 'profile_image_url_https',
              'screen_name', 'name', 'listed_count', 'followers_count',
              'friends_count','description', 'location', 'lang', 'created_at',
              'geo_enabled','verified', 'protected']

# Which raw file backs each mode. 'train' and 'valid' both read train.json
# (the valid split is carved out of it in processData); 'test' reads dev.json.
# NOTE(review): test_path is never used — presumably test.json is unlabeled;
# confirm with the dataset owner.
mode_path_mode = {
    'train' : train_path,
    'valid' : train_path,
    'test'  : valid_path
}

def processData(mode='test'):
    """Load, flatten, and label one split of the bot-detection dataset.

    :param mode: 'train', 'valid' or 'test' — selects the source file via
                 ``mode_path_mode``. 'train'/'valid' both come from train.json
                 and are separated by a deterministic 80/20 split; 'test'
                 reads dev.json.
    :return: a pandas DataFrame with a concatenated ``text`` column and a
             binary ``label`` column (bot=1, human=0).
    :raises KeyError: if *mode* is not a key of ``mode_path_mode``.
    """
    file_path = mode_path_mode[mode]
    # Single lookup reused in the message (was a duplicated dict access).
    print(f'mode is {mode}, file_path is {file_path}')
    # Read the raw JSON and flatten the nested 'user' record into columns.
    pd_data = pd.read_json(file_path)
    # Drop the tweet-level 'created_at'; the user-level one (added by the
    # flatten below) is the field listed in used_colum.
    pd_data.drop(['created_at'], axis=1, inplace=True)
    pd_data = pd_data.fillna('')
    user_df = pd_data['user'].apply(pd.Series)
    pd_data = pd.concat([pd_data, user_df], axis=1)
    # Map string labels to binary targets.
    pd_data['label'] = pd_data['label'].map({'bot': 1, 'human': 0})
    # Drop redundant fields:
    #   profile_image_url duplicates profile_image_url_https;
    #   profile_background_image_url is unused.
    pd_data.drop(['user', 'id', 'id_str', 'profile_image_url',
                  'profile_background_image_url'], axis=1, inplace=True)

    # Build a single "key:value key:value ..." text field from the kept columns.
    pd_data['text'] = ''
    for column in pd_data.columns:
        if column in used_colum:
            pd_data['text'] += str(column) + ':' + pd_data[column].astype(str) + ' '

    # Debug stats (train min_len:334 max_length:579):
    # print(f"{mode} min_len:{pd_data['text'].str.len().min()} "
    #       f"max_length:{pd_data['text'].str.len().max()}")

    if mode == 'test':
        return pd_data
    # 'train' and 'valid' share one file; split it deterministically (80/20).
    data_train, data_valid = train_test_split(pd_data, test_size=0.2, random_state=42)
    return data_train if mode == 'train' else data_valid

# Dataset wrapper around the preprocessed bot/human dataframe.
class MyDataset(Dataset):
    """Serve (text, label) pairs for one split ('train', 'valid' or 'test')."""

    def __init__(self, mode='train'):
        super(MyDataset, self).__init__()
        self.mode = mode
        # Load the split's preprocessed dataframe.
        self.dataset = processData(mode)

    def __getitem__(self, index):
        """Return the (text, label) pair stored at row *index*."""
        row = self.dataset.iloc[index]
        return row['text'], row['label']

    def __len__(self):
        """Number of rows in this split."""
        return len(self.dataset)


# Load the pretrained BERT tokenizer (fetched from the HuggingFace hub
# on first use, then served from the local cache).
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Encode, pad and batch the sentences for the model.
def collate_fn(batch):
    """Tokenize a batch of (text, label) pairs into model-ready tensors.

    :param batch: a batch of samples, e.g. [('tweet', target), ('tweet', target), ...]
    :return: (src, target) where
             src: {'input_ids': tensor([[ 101, ..., 102, 0, 0, ...], ...]), 'attention_mask': tensor([[1, ..., 1, 0, ...], ...])}
             target: LongTensor of labels, e.g. [1, 1, 0, ...]
    """
    # Unzip the batch into parallel text/label lists.
    text, label = map(list, zip(*batch))

    # The tokenizer output feeds BERT directly, so no extra processing is
    # needed: pad every sequence to text_max_length (= 128), truncate longer
    # ones, and return PyTorch tensors.
    src = tokenizer(text,
                    padding='max_length',
                    max_length=text_max_length,
                    truncation=True,
                    return_tensors='pt')
    return src, torch.LongTensor(label)
