from __future__ import print_function
import copy
import os
import time

import torch
import yaml
from gxl_ai_utils.utils import utils_file
from torch.utils.data import DataLoader
from cn2an import an2cn
from wenet.dataset.dataset import Dataset
from wenet.utils.config import override_config
from wenet.utils.init_tokenizer import init_tokenizer
from wenet.utils.common import TORCH_NPU_AVAILABLE  # noqa just ensure to check torch-npu
import logging
import  sys

import torch

from common_utils.utils4infer import get_feat_from_wav_path, load_model_and_tokenizer, token_list2wav, do_format_shard_manifest4one

from patches import modelling_qwen2_infer_gpu  # apply inference patches
from tts.cosyvoice.utils.file_utils import load_wav

from cn2an import an2cn
import re

import argparse

def convert_numbers_in_string(s):
    """Replace every Arabic-numeral substring in *s* with its Chinese reading.

    Matches integers, decimals and negative numbers (e.g. "12", "-3.5").
    Any matched substring that cn2an.an2cn cannot convert is left
    unchanged; text without digits passes through untouched.
    """
    # Integers, optional decimal part, optional leading minus sign.
    number_pattern = re.compile(r'-?\d+\.?\d*')

    def _to_chinese(match):
        token = match.group()
        try:
            return an2cn(token)
        except ValueError:
            # Not convertible (e.g. malformed number) — keep the original text.
            return token

    return number_pattern.sub(_to_chinese, s)

def get_test_conf(config_path):
    """Load a wenet YAML config and derive the dataset config for inference.

    Reads the YAML file at *config_path*, disables every training-time
    augmentation (speed perturb, spec aug/sub/trim, shuffling, sorting),
    relaxes the length/ratio filters so no test utterance is dropped, and
    forces static batching with batch size 1.

    Args:
        config_path: path to the wenet YAML configuration file.

    Returns:
        (configs, test_conf): the full parsed config dict, and the derived
        per-dataset config (a deep copy of ``configs['dataset_conf']``).
    """
    # safe_load is the recommended loader for plain-data config files; it
    # avoids FullLoader's larger object-construction surface.
    with open(config_path, 'r', encoding='utf-8') as fin:
        configs = yaml.safe_load(fin)
    configs['dataset_conf']['filter_conf']['filter_no_extra_info'] = False
    test_conf = copy.deepcopy(configs['dataset_conf'])

    # Relax all filtering so the whole test set passes through.
    # 102400 acts as "effectively unlimited" for lengths/ratios.
    test_conf['filter_conf']['min_length'] = 10
    test_conf['filter_conf']['token_max_length'] = 102400
    test_conf['filter_conf']['token_min_length'] = 1
    test_conf['filter_conf']['max_output_input_ratio'] = 102400
    test_conf['filter_conf']['min_output_input_ratio'] = 0
    test_conf['filter_conf']['filter_no_extra_info'] = False
    test_conf['filter_conf']['max_seq_len'] = 102400
    # Turn off every train-time augmentation for deterministic inference.
    test_conf['speed_perturb'] = False
    test_conf['spec_aug'] = False
    test_conf['spec_sub'] = False
    test_conf['spec_trim'] = False
    test_conf['shuffle'] = False
    test_conf['sort'] = False
    test_conf['cycle'] = 1
    # NOTE(review): list_shuffle=True randomizes manifest order even at
    # test time — confirm this is intentional.
    test_conf['list_shuffle'] = True
    if 'fbank_conf' in test_conf:
        test_conf['fbank_conf']['dither'] = 0.0  # no dithering at eval
    elif 'mfcc_conf' in test_conf:
        test_conf['mfcc_conf']['dither'] = 0.0
    # One utterance per batch, fixed-size batching.
    test_conf['batch_conf']['batch_type'] = "static"
    test_conf['batch_conf']['batch_size'] = 1
    test_conf['split_num'] = 1
    test_conf['multi_num'] = 1
    test_conf['other_tokenze_conf'] = {"is_print": False}
    test_conf['other_filter_conf'] = {}
    test_conf['data_recover'] = False
    return configs, test_conf


parser = argparse.ArgumentParser(
    description='Run ASR inference over a test manifest.')
# Defaults reproduce the previously hard-coded debug configuration, so
# running with no CLI arguments behaves exactly as before.
parser.add_argument('--test_data_path', type=str,
                    default="/home/A02_tmpdata2/ckpt/osum_chat_new_start_0810/context_asr/epoch0/step_9999_infer_think/split_data/tmpdata_0.list",
                    help='path to the test data manifest (list file)')
parser.add_argument('--infer_res_path', type=str,
                    default="/home/A02_tmpdata2/ckpt/osum_chat_new_start_0810/context_asr/epoch0/step_9999_infer_think/infer_res.scp",
                    help='path where inference results (scp) are written')
parser.add_argument('--gpu_id', type=int,
                    help='GPU index (currently unused in this script)')
parser.add_argument('--task', choices=['asr', 'asr_think'],
                    help='task type')
parser.add_argument('--data_type', type=str, default='raw',
                    help="dataset format: 'raw' or 'shards_full_data'")

args = parser.parse_args()

config_path = './conf/ct_config.yaml'

# BUG FIX: the CLI arguments were parsed but then ignored in favor of
# hard-coded paths; honor them now (defaults keep the old behavior).
test_data_path = args.test_data_path
infer_res_path = args.infer_res_path
data_type = args.data_type  # 'raw' or 'shards_full_data'

if data_type == "shards_full_data":
    # Shard manifests must be reformatted before Dataset can consume them.
    test_data_path = do_format_shard_manifest4one(test_data_path)

dtype = torch.float32
configs, test_conf = get_test_conf(config_path)

tokenizer = init_tokenizer(configs)

test_dataset = Dataset(data_type,
                       test_data_path,
                       tokenizer,
                       test_conf,
                       partition=False)

# batch_size=None: the Dataset already yields fully formed batches.
test_data_loader = DataLoader(test_dataset,
                              batch_size=None,
                              num_workers=5)

infer_dict = {}
with torch.no_grad():
    for batch_idx, batch in enumerate(test_data_loader):
        keys = batch["keys"]
        # NOTE(review): feats are cast to bfloat16, presumably to match the
        # model's input dtype — confirm against the model config.
        feats = batch["feats"].to(torch.bfloat16)
        feats_lengths = batch["feats_lengths"]
        txts = batch["txts"]
        batch_size = feats.size(0)
        print(f"batch_idx: {batch_idx}, batch_size: {batch_size} feats_size: {feats.size()} feats_lengths_size: {feats_lengths}")



