

import os
import logging
import subprocess
import bz2
import os
import json

# Resolve the workspace root relative to this file; root_path = /mnt/workspace
root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
datasetpath = os.path.join(root_path, "datasets/wiki/")
output_dir = os.path.join(datasetpath, "text")
# BUG FIX: the original line ended with a stray comma, which made
# input_file a 1-tuple ('...',) instead of the path string itself.
input_file = '/mnt/workspace/datasets/wiki/enwiki-latest-pages-articles1.xml-p1p41242.bz2'

def wiki_dataset(input_file, output_dir):
    """Print usage notes for extracting text from a Wikipedia dump.

    This is a documentation stub: the real extraction is run by hand with
    wikiextractor (see the notes below). The function only prints a hint
    and performs no processing; *input_file* and *output_dir* are unused.
    """
    print(f'wikidataset 可以用来做wiki的数据提取  ')
    # Usage notes:
    # Download dumps from: https://dumps.wikimedia.org/enwiki/
    # A copy command that may be useful:
    #   scp -p 1024 enwiki-latest-pages-articles1.xml-p1p41242.bz2  root@8.130.100.85:/mnt/workspace/datasets/pretrain
    # Dependencies:
    #   pip install torch numpy wikiextractor pybind torchvision nltk pyramid regex six
    # The following command performs the format conversion directly:
    #   python3 -m wikiextractor.WikiExtractor --json -o output enwiki-latest-pages-articles1.xml-p1p41242.bz2

def wudao_dataset():
    """Clean the raw WuDao corpus and merge it into a single JSON file.

    Follows the Pai-Megatron-Patch preprocessing recipe: download
    preprocess_wudao2.py, run it over every file under ``input_dir`` with
    the key set to "text", concatenate the cleaned ``*.json`` parts into
    ``merged_wudao_cleaned.json``, then remove the intermediate directory.
    Shell side effects only; returns None.
    """
    # preprocess_wudao2.py is downloaded separately; it aggregates the
    # files under input_dir into the output directory.
    input_dir = '/mnt/workspace/datasets/wudao/WuDaoCorpus2.0_base_sample'
    output_dir = '/mnt/workspace/datasets/wudao'

    split_data_cmd = f"""    
    #! /bin/bash
    set -ex
    # 请在此处设置原始数据所在路径
    data_dir={input_dir}
    #开始数据清洗流程
    dataset_dir={output_dir}
    mkdir -p {output_dir}/cleaned_wudao_dataset
    cd {output_dir}/cleaned_wudao_dataset
    wget https://atp-modelzoo-wlcb-pai.oss-cn-wulanchabu.aliyuncs.com/release/models/pai-megatron-patch/llama2-codes/preprocess_wudao2.py
    # 此处与上一节不同，增加了key参数设为text
    python preprocess_wudao2.py -i {input_dir} -o {output_dir}/cleaned_wudao_dataset -k text -p 32
    # 合并清洗后的数据
    mkdir {output_dir}/wudao
    cd {output_dir}/wudao
    find {output_dir}/cleaned_wudao_dataset -name "*.json" -exec cat {{}} + > {output_dir}/wudao/merged_wudao_cleaned.json
    rm -rf {output_dir}/cleaned_wudao_dataset
    """
    # BUG FIX: os.popen discarded the exit status, so failures under
    # `set -ex` went unnoticed. subprocess.run exposes returncode/stderr.
    result = subprocess.run(split_data_cmd, shell=True, executable="/bin/bash",
                            capture_output=True, text=True)
    print(result.stdout)
    if result.returncode != 0:
        # Surface the failure instead of silently printing partial output.
        print(result.stderr)

def wudao_processing():
    """Split the merged WuDao JSON into pieces and zstd-compress each one.

    Uses ``merged_wudao_cleaned.json`` produced by :func:`wudao_dataset`,
    splits it into NUM_PIECE groups so later stages can process them in
    parallel, then compresses every piece into ``cleaned_zst``.
    Shell side effects only; returns None.
    """
    dataset_dir = '/mnt/workspace/datasets/wudao'
    zst_data_cmd = f"""  
    #! /bin/bash
    NUM_PIECE=10
    mkdir -p {dataset_dir}/cleaned_zst
    mkdir {dataset_dir}/split
    
    NUM=$(sed -n '$=' {dataset_dir}/wudao/merged_wudao_cleaned.json)
    echo "total line of dataset is $NUM, data will be split into $NUM_PIECE pieces for processing"
    NUM=`expr $NUM / $NUM_PIECE`
    echo "each group is processing $NUM sample"
    split -l $NUM --numeric-suffixes --additional-suffix=.jsonl {dataset_dir}/wudao/merged_wudao_cleaned.json {dataset_dir}/split/
    # 数据压缩
    files=$(ls {dataset_dir}/split/*.jsonl)
    for filename in $files
    do
       f=$(basename $filename)
       zstd -z $filename -o {dataset_dir}/cleaned_zst/$f.zst &
    done
    """
    # BUG FIX: os.popen discarded the exit status; a missing input file or
    # a failed split would go unnoticed. subprocess.run reports it.
    result = subprocess.run(zst_data_cmd, shell=True, executable="/bin/bash",
                            capture_output=True, text=True)
    print(result.stdout)
    if result.returncode != 0:
        # Surface the failure instead of silently printing partial output.
        print(result.stderr)
def wudao_trans():
    """Convert the compressed WuDao pieces into mmap-format pretraining data.

    Invokes Pai-Megatron-Patch's run_make_pretraining_dataset.sh over the
    ``cleaned_zst`` directory with the llamabpe tokenizer and the
    Meta-Llama-3-8B checkpoint, producing train/valid mmap datasets.
    Shell side effects only; returns None.
    """
    # Dataset path and work path for Pai-Megatron-Patch.
    dataset_dir = '/mnt/workspace/datasets/wudao'
    WORK_DIR = '/mnt/workspace/megetron_learning'
    # Renamed from zst_data_cmd (copy-paste from wudao_processing): this
    # command builds the mmap dataset, it does not compress anything.
    make_dataset_cmd = f"""  
    cd {WORK_DIR}/Pai-Megatron-Patch/toolkits/pretrain_data_preprocessing
    bash run_make_pretraining_dataset.sh \
    ../.. \
    {dataset_dir}/cleaned_zst/ \
    llamabpe \
    {dataset_dir}/ \
    {WORK_DIR}/llama3-ckpts/Meta-Llama-3-8B
    """
    # BUG FIX: os.popen discarded the exit status; subprocess.run lets us
    # detect and report a failed conversion.
    result = subprocess.run(make_dataset_cmd, shell=True, executable="/bin/bash",
                            capture_output=True, text=True)
    print(result.stdout)
    if result.returncode != 0:
        # Surface the failure instead of silently printing partial output.
        print(result.stderr)
if __name__ == "__main__":
    # Pipeline overview. Earlier steps are commented out because each one
    # only needs to run once per dataset; uncomment to re-run a stage.
    print(' step1 ： wiki的数据提取及转换 后续无处理 ')
    # wiki_dataset(input_file, output_dir)

    print(' step2 ：wudao数据集，数据清洗  ')
    # wudao_dataset()
    print(' step3 ：wudao数据集，数据分解，压缩   ')
    # wudao_processing()
    # BUG FIX: the step4 banner was a copy-paste of step3's text; this
    # step actually generates the mmap-format pretraining dataset.
    print(' step4 ：wudao数据集，生成mmap格式预训练数据  ')
    wudao_trans()

