#!/usr/bin/env python3
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import json
from typing import List, Dict

import numpy as np
import torch
import torchaudio
from tqdm import tqdm
import pandas as pd
import multiprocessing
import time
from sys import argv
import soundfile as sf
import io




def convert_to_bytes(x):
    """Encode a float as its UTF-8 string representation; pass any other value through unchanged."""
    if not isinstance(x, float):
        return x
    # Render the float as text first, then encode the text to bytes.
    return str(x).encode()


# =============================================================================================


def load_and_resample(input_path: str, target_sr: int = 16000):
    """
    Decode any audio file torchaudio supports (including opus) and resample
    it to ``target_sr``.

    Returns:
        tuple: (mono waveform as np.ndarray of int16 PCM samples, target_sr)
    """
    # audio: [channels, samples], float32 in [-1, 1]
    audio, src_sr = torchaudio.load(input_path)
    if src_sr != target_sr:
        audio = torchaudio.functional.resample(audio, src_sr, target_sr)
    # Downmix multi-channel audio to mono by averaging the channels.
    if audio.shape[0] > 1:
        audio = audio.mean(dim=0, keepdim=True)
    # Drop the channel dimension and scale into the int16 PCM range.
    pcm = (audio.squeeze(0).clamp(-1, 1) * 32767.0).to(torch.int16).cpu().numpy()
    return pcm, target_sr

def load_and_resample_from_ark(ark_str: str, target_sr: int = 16000):
    """
    Load an ark-addressed audio segment and resample it to ``target_sr``.

    The standard ark form is "path:offset,length", e.g.
    /jizhicfs/adrenzhou/rawdata/audio_archives/cs3/cs3.0.ark:1340076,125036

    Returns:
        tuple: (mono waveform as np.ndarray of int16 PCM samples, target_sr)
    """
    fields = ark_str.split(':')
    offset_str, length_str = fields[1].split(',')
    # Read exactly the addressed byte range, then decode it from memory.
    with open(fields[0], 'rb') as handle:
        handle.seek(int(offset_str))
        payload = handle.read(int(length_str))
    audio, src_sr = torchaudio.load(io.BytesIO(payload))
    if src_sr != target_sr:
        audio = torchaudio.functional.resample(audio, src_sr, target_sr)
    # Downmix to mono when the segment has multiple channels.
    if audio.shape[0] > 1:
        audio = audio.mean(dim=0, keepdim=True)
    # Convert to int16 PCM samples.
    pcm = (audio.squeeze(0).clamp(-1, 1) * 32767.0).to(torch.int16).cpu().numpy()
    return pcm, target_sr



def job_xlgeng(dict_list: List[Dict], parquet_file, utt2parquet_file):
    """
    Convert one batch of utterance dicts into a parquet file.

    Each dict in ``dict_list`` must carry:
        'wav_ark': ark-style locator "path:offset,length" for the raw audio,
        'text':    transcript string,
        'uid':     unique utterance id.

    Side effects:
        parquet_file: written with columns utt / user_text / user_audio_data
            (audio stored as 16 kHz mono int16 WAV bytes).
        utt2parquet_file: JSON mapping every utt id to parquet_file.
        parquet_file + '.finished': empty marker touched on success so that a
            rerun can skip this batch.

    Returns:
        0 when the '.finished' marker already exists (batch skipped),
        otherwise None.
    """
    try:
        start_time = time.time()
        utt_list = []
        user_data_list = []
        user_text_list = []
        finish_file = parquet_file + '.finished'
        if os.path.exists(finish_file):
            utils_file.logging_info(f"finished {finish_file}, skip")
            return 0
        for dict_i in tqdm(dict_list):
            # BUG FIX: the original condition tested 'text' twice and never
            # tested 'uid', so a dict without a 'uid' slipped past the guard
            # and raised KeyError when building utt_list below.
            if 'wav_ark' not in dict_i or 'text' not in dict_i or 'uid' not in dict_i:
                utils_file.logging_warning(f"wav_ark or text or uid not found in {dict_i}")
                continue
            wav_np, sr = load_and_resample_from_ark(dict_i['wav_ark'], target_sr=16000)
            # Re-encode the int16 samples as an in-memory WAV container.
            buffer = io.BytesIO()
            sf.write(buffer, wav_np, 16000, 'PCM_16', format='WAV')
            wav_data = buffer.getvalue()
            buffer.close()
            user_data_list.append(wav_data)
            user_text_list.append(dict_i['text'])
            utt_list.append(dict_i['uid'])

        # Persist the batch to parquet plus the utt -> parquet mapping.
        df = pd.DataFrame()
        df['utt'] = utt_list
        df['user_text'] = user_text_list
        df['user_audio_data'] = user_data_list
        df.to_parquet(parquet_file)
        with open(utt2parquet_file, 'w') as f:
            json.dump({k: parquet_file for k in utt_list}, f, ensure_ascii=False, indent=2)
        logging.info(f'spend time {time.time() - start_time}s, parquet file {parquet_file}')
        # Touch the marker last so it only exists for fully written batches.
        with open(finish_file, 'w') as f:
            pass
    except Exception as e:
        # This runs inside a multiprocessing pool worker: swallow the error so
        # one bad batch does not kill the pool, but report which batch failed.
        print(f'error while building {parquet_file}')
        print(e)

from gxl_ai_utils.utils import utils_file
# Using process pool to speedup
# num_processes = 250
# num_utts_per_parquet = 3000
# des_dir = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq/parquet2"
# parser = argparse.ArgumentParser()
# parser.add_argument('--num_nodes', type=int, help='')
# parser.add_argument('--node_id', type=int, help='')
# args = parser.parse_args()
# num_nodes = args.num_nodes
# node_id = args.node_id
# utils_file.logging_info(f"num_nodes: {num_nodes}, node_id: {node_id}")
#
#
#
# pool = multiprocessing.Pool(processes=num_processes)
# parquet_list, utt2parquet_list = [], []
# data_list_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq/data.jsonl"
# data_list_all = utils_file.load_dict_list_from_jsonl(data_list_path)
#
# data_list_split = utils_file.do_split_list(data_list_all, num_nodes)
# data_list = data_list_split[node_id]
#
# print("uttslen:", len(data_list))
# for i, j in enumerate(range(0, len(data_list), num_utts_per_parquet)):
#     parquet_file = os.path.join(des_dir, 'parquet_node{:02d}_{:09d}.tar'.format(node_id,i))
#     utt2parquet_file = os.path.join(des_dir, 'utt2parquet__node{:02d}_{:09d}.json'.format(node_id,i))
#     parquet_list.append(parquet_file)
#     utt2parquet_list.append(utt2parquet_file)
#     pool.apply_async(job_xlgeng, (data_list[j: j + num_utts_per_parquet], parquet_file, utt2parquet_file))
#     # job_xlgeng(data_list[j: j + num_utts_per_parquet], parquet_file, utt2parquet_file)
# pool.close()
# pool.join()
#
# with open('{}/data.list'.format(des_dir), 'w', encoding='utf8') as f1, \
#         open('{}/utt2data.list'.format(des_dir), 'w', encoding='utf8') as f2:
#     for name in parquet_list:
#         f1.write(name + '\n')
#     for name in utt2parquet_list:
#         f2.write(name + '\n')
def load_ark(ark: str, **kwargs):
    """
    Decode an audio segment addressed as "path:offset,length".

    The byte range [offset, offset + length) of the file at ``path`` is read
    into memory and passed to torchaudio.load; any extra keyword arguments
    are forwarded to it. Returns torchaudio.load's (waveform, sample_rate).
    """
    fields = ark.split(':')
    start, size = fields[1].split(',')
    with open(fields[0], 'rb') as handle:
        handle.seek(int(start))
        payload = handle.read(int(size))
    return torchaudio.load(io.BytesIO(payload), **kwargs)
def do_merge_list():
    """Concatenate several per-dataset jsonl shards into one big data.jsonl."""
    jsonl_paths = [
        "/jizhicfs/anhaoxing/share/data/cs2.jsonl",
        "/jizhicfs/anhaoxing/share/data/cs3.jsonl",
        "/jizhicfs/anhaoxing/share/data/cs.jsonl",
        "/jizhicfs/anhaoxing/share/data/en-mls4m.jsonl",
        "/jizhicfs/anhaoxing/share/data/en-subset.jsonl"
    ]
    big_list_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/en_codeswitch_hq/data.jsonl"
    # Collect every (cleaned) line of every shard, in order, then dump them.
    merged = []
    for shard_path in jsonl_paths:
        merged.extend(utils_file.load_list_file_clean(shard_path))
    utils_file.write_list_to_file(merged, big_list_path)


def do_make_parquet():
    """
    Shard this node's slice of the jsonl data list into parquet files.

    CLI args:
        --num_nodes: total number of nodes splitting the work.
        --node_id:   this node's 0-based index into the split.

    Side effects (all under hard-coded cluster paths):
        * writes parquet_node<id>.list with every parquet path this node will
          produce, BEFORE dispatching any work, so the full expected set is
          known up front;
        * runs job_xlgeng over fixed-size chunks in a multiprocessing pool;
        * writes des_dir/data.list and des_dir/utt2data.list manifests.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_nodes', type=int, help='')
    parser.add_argument('--node_id', type=int, help='')
    args = parser.parse_args()
    num_nodes = args.num_nodes
    node_id = args.node_id
    utils_file.logging_info(f"num_nodes: {num_nodes}, node_id: {node_id}")

    num_processes = 32
    num_utts_per_parquet = 1000
    des_dir = "/mnt/apdcephfs_sgfd/share_304127040/Tealab/user/xuelonggeng/data/en_codeswitch_hq/parquet"
    utils_file.makedir_sil(des_dir)
    data_list_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/en_codeswitch_hq/data.jsonl"
    data_list_all = utils_file.load_dict_list_from_jsonl(data_list_path)

    data_list_split = utils_file.do_split_list(data_list_all, num_nodes)
    data_list = data_list_split[node_id]
    print("uttslen:", len(data_list))

    # Compute every chunk's output paths exactly once (the original version
    # rebuilt the same parquet names in two identical loops).
    chunk_starts = range(0, len(data_list), num_utts_per_parquet)
    parquet_list = [
        os.path.join(des_dir, 'parquet_node{:02d}_{:09d}.tar'.format(node_id, i))
        for i in range(len(chunk_starts))
    ]
    utt2parquet_list = [
        os.path.join(des_dir, 'utt2parquet__node{:02d}_{:09d}.json'.format(node_id, i))
        for i in range(len(chunk_starts))
    ]

    # Publish the full set of future parquet paths before any job runs.
    parquet_file_list_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/en_codeswitch_hq/parquet_node{:02d}.list".format(node_id)
    utils_file.write_list_to_file(parquet_list, parquet_file_list_path)

    # Fan the chunks out to a process pool; each task writes one parquet file.
    pool = multiprocessing.Pool(processes=num_processes)
    for i, start in enumerate(chunk_starts):
        pool.apply_async(job_xlgeng, (data_list[start: start + num_utts_per_parquet],
                                      parquet_list[i], utt2parquet_list[i]))
    pool.close()
    pool.join()

    # Write the final manifests listing every produced file.
    with open('{}/data.list'.format(des_dir), 'w', encoding='utf8') as f1, \
            open('{}/utt2data.list'.format(des_dir), 'w', encoding='utf8') as f2:
        for name in parquet_list:
            f1.write(name + '\n')
        for name in utt2parquet_list:
            f2.write(name + '\n')


if __name__ == '__main__':
    # Script entry point: build this node's parquet shards from the jsonl list.
    do_make_parquet()
