import sys
from typing import Union
from pathlib import Path
import pandas as pd
from tqdm import tqdm


def data_to_parquet(data_dir: Union[Path, str],
                    save_dir: Union[Path, str],
                    batch_size: int = 1000,
                    chunk_size: int = 5000000,
                    max_idx: int = 100000) -> None:
    """Pack paired source/translation audio+text files into parquet shards.

    Scans ``data_dir`` for complete quadruples ``{i}.mp3`` / ``{i}.txt`` /
    ``{i}-zh.mp3`` / ``{i}-zh.txt`` for ``i`` in ``[0, max_idx)``.  Each
    complete quadruple is inlined (audio as raw bytes, text as str) into an
    accumulating DataFrame, which is flushed to sequentially numbered
    ``00000.parquet``, ``00001.parquet``, ... files in ``save_dir`` whenever
    its in-memory footprint reaches ``chunk_size`` bytes.

    Args:
        data_dir: directory holding the numbered audio/text files.
        save_dir: output directory for parquet shards (created if missing).
        batch_size: number of records materialized into a DataFrame at a time.
        chunk_size: approximate per-shard memory budget, in bytes.
        max_idx: exclusive upper bound of the file index scan
            (defaults to the previously hard-coded 100000).
    """

    def _materialize_batch(records):
        # Convert a list of path records into a DataFrame, inlining file
        # contents: audio columns become {'bytes': ..., 'path': ...} dicts,
        # text columns become their decoded contents.  read_bytes/read_text
        # close the file handles, unlike a bare open(...).read().
        batch = pd.DataFrame(records)
        for audio_col in ('src_audio', 'trans_audio'):
            batch[audio_col] = batch[audio_col].apply(
                lambda p: {'bytes': p.read_bytes(), 'path': p.name}
            )
        for text_col in ('src_text', 'trans_text'):
            batch[text_col] = batch[text_col].apply(
                lambda p: p.read_text(encoding='utf-8')
            )
        return batch

    data_dir = Path(data_dir)
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)

    df = pd.DataFrame()
    parquet_idx = 0
    pending = []

    for idx in tqdm(range(max_idx)):
        paths = {
            'src_audio': data_dir / f'{idx}.mp3',
            'src_text': data_dir / f'{idx}.txt',
            'trans_audio': data_dir / f'{idx}-zh.mp3',
            'trans_text': data_dir / f'{idx}-zh.txt',
        }
        # Skip indices where any of the four companion files is missing.
        if not all(p.exists() for p in paths.values()):
            continue
        pending.append(paths)

        if len(pending) >= batch_size:
            df = pd.concat([df, _materialize_batch(pending)], ignore_index=True)
            pending = []
            # deep=True counts the str/bytes objects held in the object
            # columns; the shallow sum only counts 8-byte pointers, so the
            # chunk_size budget was previously never reached in practice.
            # (Note: getsizeof does not recurse into the audio dicts, so
            # this is still an underestimate of the true footprint.)
            if df.memory_usage(deep=True).sum() >= chunk_size:
                # ':05d' (no space) — the original ' 05d' spec put a literal
                # leading space in every shard filename.
                df.to_parquet(save_dir / f'{parquet_idx:05d}.parquet')
                df = pd.DataFrame()
                parquet_idx += 1

    # Flush the leftovers: a partial batch and/or an under-budget frame.
    # (The original guarded on the cumulative count, which crashed with a
    # KeyError when the total was an exact multiple of batch_size.)
    if pending:
        df = pd.concat([df, _materialize_batch(pending)], ignore_index=True)
    if not df.empty:
        df.to_parquet(save_dir / f'{parquet_idx:05d}.parquet')


if __name__ == '__main__':
    # One-off conversion run with hard-coded Windows source/output paths.
    source_dir = r'X:\工作文件\迈越\first_no_spilt'
    output_dir = r'X:\工作文件\迈越\MalayData'
    data_to_parquet(source_dir, output_dir, chunk_size=250000)