import json
from tqdm import tqdm
import pickle
import os
from loader.vocab import Vocab
from collections import defaultdict
import pandas as pd
from fish_tool import logs


def save_pickle(dic, save_path):
    """Serialize *dic* to *save_path* with pickle (binary mode)."""
    with open(save_path, mode='wb') as fh:
        pickle.dump(dic, fh)


def read_jsonlist(path):
    """Read a JSON-lines file and return a list of parsed objects.

    Blank lines (after stripping newlines and spaces) are skipped.

    Args:
        path: path to a UTF-8 encoded file with one JSON object per line.

    Returns:
        list of decoded JSON values, in file order.
    """
    data = []
    # Context manager guarantees the file handle is closed (original leaked it);
    # iterating the handle directly avoids materializing the whole file.
    with open(path, encoding='utf8') as f:
        for line in f:
            line = line.strip('\n').strip(' ')
            if line:
                data.append(json.loads(line))
    return data


def save_pretrain_data():
    """Encode the Baidu sentiment corpora with the NEZHA vocab and pickle them.

    Reads the raw corpora from hard-coded local paths, tokenizes every
    document via `Vocab.encode_plus`, collects the encoder outputs into
    parallel lists, and writes the result to a single pickle file.
    """
    # Store Baidu sentiment-analysis data.
    pre_train_data_path = f'E:/code/data/LIC2022-百度比赛/百度-2022语言与智能技术竞赛：情感可解释评测/nezha_pretrain_data_20220508.pkl'
    pre_model_dir = 'E:/code/data/pretrain_model_file/nezha-cn-base'
    vocab = Vocab(pre_model_dir)

    # One free-form corpus file plus the three ChnSentiCorp splits.
    raw_data_paths = ['E:/code/data/LIC2022-百度比赛/百度-2022语言与智能技术竞赛：情感可解释评测/senti_ch_part1.txt']
    raw_data_paths += [
        f'E:/code/data/LIC2022-百度比赛/百度-2022语言与智能技术竞赛：情感可解释评测/ChnSentiCorp/{dtype}.jsonlist'
        for dtype in ['train', 'valid', 'test']
    ]

    all_data = defaultdict(list)
    for raw_path in raw_data_paths:
        for record in read_jsonlist(raw_path):
            # Fall back to character-level tokens when no tokenization is provided.
            if 'sent_token' not in record:
                record['sent_token'] = list(record['context'])
            encoded = vocab.encode_plus(record['sent_token'], add_special_tokens=True, return_token_type_ids=True, return_attention_mask=True)
            for key in ('input_ids', 'token_type_ids', 'attention_mask'):
                all_data[key].append(encoded[key])

    logs.print(f'数据~{len(all_data["input_ids"])}')
    os.makedirs(os.path.dirname(pre_train_data_path), exist_ok=True)
    save_pickle(all_data, pre_train_data_path)


if __name__ == '__main__':
    # Script entry point: build and pickle the NEZHA pretraining dataset.
    save_pretrain_data()
