# test parquet
import io

import torchaudio
from gxl_ai_utils.utils import utils_file
import pyarrow.parquet as pq
parquet_list_path = "l/parquet.list"
parquet_list = utils_file.load_list_file_clean(parquet_list_path)
# Sanity-check every parquet shard: each row must carry raw audio bytes
# (under 'audio_data' or 'user_audio_data') that torchaudio can decode.
for parquet_path in parquet_list:
    # Stream in small record batches so a large shard never sits fully in memory.
    for batch in pq.ParquetFile(parquet_path).iter_batches(batch_size=32):
        df = batch.to_pandas()
        for _, row in df.iterrows():
            # NOTE do not return sample directly, must initialize a new dict
            res_dict = dict(row)
            assert "audio_data" in res_dict or "user_audio_data" in res_dict, \
                'audio_data not in res_dict'
            # Transcript key varies by schema: prefer 'user_text', fall back to
            # 'text' (see the commented-out assert that used 'text') instead of
            # raising KeyError; txt is None if neither is present.
            txt = res_dict.get("user_text", res_dict.get("text"))
            utils_file.logging_info(f'txt: {txt}')
            # Pop whichever audio column this schema uses.
            audio_key = 'audio_data' if 'audio_data' in res_dict else 'user_audio_data'
            wav_bytes = res_dict.pop(audio_key)
            # Decode the in-memory WAV bytes without touching disk.
            waveform, sample_rate = torchaudio.load(io.BytesIO(wav_bytes))
            utils_file.logging_info(f'waveform.shape: {waveform.shape}, sample_rate: {sample_rate}')
