import argparse

import pandas as pd
import os
from typing import Callable

def rewrite_user_text_simple(parquet_path: str, new_text_dict: dict) -> None:
    """Rewrite the 'user_text' column of a parquet file in place.

    Loads the whole file into memory (all columns, including
    'user_audio_data'), regenerates 'user_text' by looking up each row's
    'utt' in *new_text_dict* (missing utts become ""), then writes a temp
    file and atomically replaces the original so readers never see a
    half-written file.

    Args:
        parquet_path: Path to the parquet file to rewrite.
        new_text_dict: Mapping from utt id to its replacement text.
    """
    # Full read; use the streaming variant if memory is a concern.
    df = pd.read_parquet(parquet_path, engine="pyarrow")
    # Regenerate user_text keyed on the utt column.
    df["user_text"] = [new_text_dict.get(u, "") for u in df["utt"]]
    tmp = parquet_path + ".tmp"
    try:
        df.to_parquet(tmp, engine="pyarrow", compression="zstd", index=False)
        os.replace(tmp, parquet_path)  # atomic replacement
    except Exception:
        # Don't leave a stale .tmp behind if writing or replacing fails.
        if os.path.exists(tmp):
            os.remove(tmp)
        raise

import os
from typing import Callable, Optional
import pyarrow as pa
import pyarrow.parquet as pq

def rewrite_user_text_streaming(
    parquet_path: str,
    new_text_dict: dict,
    batch_size: int = 4096,
    compression: str = "zstd",
):
    """Stream-rewrite the 'user_text' column of a parquet file.

    Reads only the 'utt' and 'user_audio_data' columns batch by batch,
    generates a fresh 'user_text' for each row by looking up its utt in
    *new_text_dict* (missing utts map to ""), and writes the batches in
    column order ['utt', 'user_text', 'user_audio_data'] to a temporary
    file that atomically replaces the original on success — so any
    external utt->parquet-file mapping stays valid.

    Args:
        parquet_path: Parquet file to rewrite in place.
        new_text_dict: Mapping from utt id to the replacement text.
        batch_size: Rows per streamed batch.
        compression: Parquet compression codec for the output.
    """
    pf = pq.ParquetFile(parquet_path)
    tmp_path = parquet_path + ".tmp"

    try:
        # Reuse the original file's schema so column types/encodings stay
        # identical (especially for the binary audio column). Assumes the
        # source already has the three columns with 'user_text' as string —
        # TODO confirm; a differing schema order would fail at write time.
        with pq.ParquetWriter(tmp_path, schema=pf.schema_arrow, compression=compression) as writer:
            # Read only the needed columns; the old user_text is skipped.
            for batch in pf.iter_batches(columns=["utt", "user_audio_data"], batch_size=batch_size):
                # Look up the new text for every utt in this batch.
                utt_py = batch.column("utt").to_pylist()
                new_text_py = [new_text_dict.get(u, "") for u in utt_py]

                # Assemble the output batch, preserving column order.
                new_table = pa.Table.from_arrays(
                    [
                        batch.column("utt"),
                        pa.array(new_text_py, type=pa.string()),
                        batch.column("user_audio_data"),
                    ],
                    names=["utt", "user_text", "user_audio_data"],
                )
                writer.write_table(new_table)
    except Exception:
        # Clean up the partial temp file so a failed run leaves no litter.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
        raise

    os.replace(tmp_path, parquet_path)  # atomic; utt2parquet_file mapping unaffected


# Command-line options: how many nodes share the work, and which one this is.
parser = argparse.ArgumentParser()
for _flag in ('--num_nodes', '--node_id'):
    parser.add_argument(_flag, type=int, help='')
args = parser.parse_args()
num_nodes, node_id = args.num_nodes, args.node_id

from gxl_ai_utils.utils import utils_file
# Input locations: the list of parquet shards to rewrite, and the cleaned
# scp file supplying the replacement transcripts.
parquet_list_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq/parquet.list"
text_scp_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq/text_clean.scp"
# presumably maps utt id -> cleaned text (scp format: "utt text" per line);
# verify against load_dict_from_scp. silence=True likely suppresses logging.
text_dict = utils_file.load_dict_from_scp(text_scp_path, silence=True)

def little_func(little_path_list):
    """Rewrite 'user_text' for every parquet file in *little_path_list*.

    Best-effort worker: a failure on one file is reported and processing
    continues with the remaining files.
    """
    for parquet_path in utils_file.tqdm(little_path_list):
        try:
            rewrite_user_text_simple(parquet_path, text_dict)
        except Exception as e:
            # Deliberate best-effort: report and move on to the next file.
            print(f"Error in {parquet_path}: {e}")


# Split the full shard list evenly across nodes; this process handles only
# the slice belonging to its node_id.
parquet_list_all = utils_file.load_list_file_clean(parquet_list_path)
parquet_list_all_list = utils_file.do_split_list(parquet_list_all, num_nodes)

parquet_list = parquet_list_all_list[node_id]

# NOTE(review): GXLMultiprocessingWithReturn semantics are opaque here —
# presumably it fans little_func out over 10 worker processes, each
# receiving a sub-list of parquet_list; confirm against gxl_ai_utils.
runner = utils_file.GXLMultiprocessingWithReturn(num_processes=10)
runner.run(little_func, parquet_list)





