from __future__ import print_function
import numpy as np
import argparse
import copy
import logging
import os
import io
import json

import torch
import yaml
import torchaudio
import torchaudio.compliance.kaldi as kaldi
from torch.nn.utils.rnn import pad_sequence

from wenet.utils.config import override_config
from wenet.utils.init_model import init_model
from wenet.utils.init_tokenizer import init_tokenizer
from wenet.utils.context_graph import ContextGraph
from wenet.utils.ctc_utils import get_blank_id

def get_args(argv=None):
    """Build and parse command-line arguments for inference.

    Args:
        argv: optional list of argument strings. ``None`` (the default)
            parses ``sys.argv`` as before, so existing callers are
            unaffected; passing a list makes the parser testable.

    Returns:
        argparse.Namespace with all decoding options.
    """
    parser = argparse.ArgumentParser(description='recognize with your model')
    parser.add_argument('--config', help='config file', default="inference/train.yaml")
    parser.add_argument('--test_data', help='test data file', default="inference/data.list")
    parser.add_argument('--data_type', default='raw', choices=['raw', 'shard'], help='data type')
    parser.add_argument('--gpu', type=int, default=-1, help='gpu id, -1 for cpu')
    parser.add_argument('--checkpoint', help='checkpoint model', default="inference/final.pt")
    parser.add_argument('--beam_size', type=int, default=10, help='beam size for search')
    parser.add_argument('--length_penalty', type=float, default=0.0, help='length penalty')
    parser.add_argument('--blank_penalty', type=float, default=0.0, help='blank penalty')
    parser.add_argument('--result_dir', help='asr result file', default="result_inference")
    parser.add_argument('--batch_size', type=int, default=16, help='batch size')
    parser.add_argument('--modes', nargs='+',
                        help="decoding modes",
                        default=["attention_rescoring"])
    parser.add_argument('--search_ctc_weight', type=float, default=1.0, help='ctc weight for nbest generation')
    parser.add_argument('--search_transducer_weight', type=float, default=0.0, help='transducer weight for nbest generation')
    parser.add_argument('--ctc_weight', type=float, default=0.0, help='ctc weight for rescoring')
    parser.add_argument('--transducer_weight', type=float, default=0.0, help='transducer weight for rescoring')
    parser.add_argument('--attn_weight', type=float, default=0.0, help='attention weight for rescoring')
    parser.add_argument('--decoding_chunk_size', type=int, default=-1, help='decoding chunk size')
    parser.add_argument('--num_decoding_left_chunks', type=int, default=-1, help='number of left chunks for decoding')
    parser.add_argument('--simulate_streaming', action='store_true', help='simulate streaming inference')
    parser.add_argument('--reverse_weight', type=float, default=0.0, help='right to left weight for attention rescoring')
    parser.add_argument('--override_config', action='append', default=[], help='override yaml config')
    parser.add_argument('--word', default='', type=str, help='word file for hlg decode')
    parser.add_argument('--hlg', default='', type=str, help='hlg file for hlg decode')
    parser.add_argument('--lm_scale', type=float, default=0.0, help='lm scale for hlg attention rescore decode')
    parser.add_argument('--decoder_scale', type=float, default=0.0, help='decoder scale for hlg attention rescore decode')
    parser.add_argument('--r_decoder_scale', type=float, default=0.0, help='r_decoder scale for hlg attention rescore decode')
    parser.add_argument('--context_bias_mode', type=str, default='', help='context bias mode')
    parser.add_argument('--context_list_path', type=str, default='', help='Context list path')
    parser.add_argument('--context_graph_score', type=float, default=0.0, help='context graph score')
    return parser.parse_args(argv)

def load_data_from_list(data_list_file):
    """Load audio samples from a WeNet-style data.list file.

    Each non-empty line must be a JSON object with at least 'key' and
    'wav' fields; 'txt' is optional and defaults to ''.

    Args:
        data_list_file: path to the data.list file (one JSON object per line).

    Returns:
        list of dicts, each with keys 'key', 'wav' and 'txt'.
        Malformed lines (bad JSON or missing required fields) are
        logged and skipped instead of aborting the whole run.
    """
    samples = []
    with open(data_list_file, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines quietly; they are not malformed data.
                continue
            try:
                data = json.loads(line)
                sample = {
                    'key': data['key'],
                    'wav': data['wav'],
                    'txt': data.get('txt', ''),
                }
            except json.JSONDecodeError:
                logging.warning(f"跳过格式错误的行: {line}")
            except KeyError as e:
                # Valid JSON but a required field ('key' or 'wav') is
                # absent; previously this crashed with an uncaught KeyError.
                logging.warning(f"跳过格式错误的行: {line} (missing {e})")
            else:
                samples.append(sample)
    return samples

def load_audio(sample):
    """Read the waveform for one sample from disk.

    On success the sample dict gains 'wav_data' (waveform tensor) and
    'sample_rate' entries and is returned; on any failure the error is
    logged and None is returned so the caller can skip the sample.
    """
    try:
        audio, rate = torchaudio.load(sample['wav'])
    except Exception as e:
        # Best-effort: a bad file should not abort the whole run.
        logging.error(f"加载音频 {sample['key']} 失败: {e}")
        return None
    sample['wav_data'] = audio
    sample['sample_rate'] = rate
    return sample

def tokenize(sample, tokenizer):
    """Attach token and label sequences for the sample's transcript.

    No-op when the sample has no (non-empty) 'txt' field.
    """
    text = sample.get('txt')
    if text:
        tokens, label = tokenizer.tokenize(text)
        sample['tokens'] = tokens
        sample['label'] = label
    return sample

def compute_fbank(sample, num_mel_bins=80, frame_length=25, frame_shift=10, dither=0.0):
    """Compute Kaldi-compatible fbank features and store them in sample['feat'].

    No-op when the sample lacks 'wav_data' or 'sample_rate' (e.g. audio
    loading failed upstream).
    """
    if 'sample_rate' not in sample or 'wav_data' not in sample:
        return sample
    # Kaldi expects 16-bit integer-scaled samples rather than [-1, 1] floats.
    scaled = sample['wav_data'] * (1 << 15)
    sample['feat'] = kaldi.fbank(scaled,
                                 num_mel_bins=num_mel_bins,
                                 frame_length=frame_length,
                                 frame_shift=frame_shift,
                                 dither=dither,
                                 energy_floor=0.0,
                                 sample_frequency=sample['sample_rate'])
    return sample

def resample(sample, resample_rate=16000):
    """Resample the sample's waveform to resample_rate Hz.

    No-op when the rates already match or when the sample lacks audio
    data (e.g. loading failed upstream).
    """
    if 'sample_rate' not in sample or 'wav_data' not in sample:
        return sample
    orig_rate = sample['sample_rate']
    if orig_rate != resample_rate:
        transform = torchaudio.transforms.Resample(orig_freq=orig_rate,
                                                   new_freq=resample_rate)
        sample['wav_data'] = transform(sample['wav_data'])
        sample['sample_rate'] = resample_rate
    return sample

def padding(batch_samples):
    """Collate processed samples into one padded batch dict.

    Samples are ordered by feature length, longest first. Features are
    zero-padded along the time axis; labels are padded with -1.

    Returns a dict with 'keys', 'feats', 'target', 'feats_lengths'
    and 'target_lengths'.
    """
    assert isinstance(batch_samples, list) and len(batch_samples) > 0
    raw_lengths = torch.tensor([s['feat'].size(0) for s in batch_samples],
                               dtype=torch.int32)
    order = torch.argsort(raw_lengths, descending=True)
    ordered = [batch_samples[i] for i in order]

    feats = [s['feat'] for s in ordered]
    keys = [s['key'] for s in ordered]
    labels = [torch.tensor(s['label'], dtype=torch.int64) for s in ordered]

    return {
        "keys": keys,
        "feats": pad_sequence(feats, batch_first=True, padding_value=0),
        "target": pad_sequence(labels, batch_first=True, padding_value=-1),
        "feats_lengths": torch.tensor([f.size(0) for f in feats],
                                      dtype=torch.int32),
        "target_lengths": torch.tensor([lab.size(0) for lab in labels],
                                       dtype=torch.int32),
    }

def process_batch(model, batch, args, tokenizer, device, blank_id, modes, files, max_format_len):
    """Decode one collated batch and append hypotheses to the result files.

    For every utterance and every decoding mode, one line of the form
    "<key> <text>" is logged and written to the mode's open file in
    `files`.
    """
    keys = batch["keys"]
    results = model.decode(
        modes,
        batch["feats"].to(device),
        batch["feats_lengths"].to(device),
        args.beam_size,
        decoding_chunk_size=args.decoding_chunk_size,
        num_decoding_left_chunks=args.num_decoding_left_chunks,
        ctc_weight=args.ctc_weight,
        simulate_streaming=args.simulate_streaming,
        reverse_weight=args.reverse_weight,
        context_graph=None,  # ContextGraph biasing is disabled for now
        blank_id=blank_id,
        blank_penalty=args.blank_penalty,
        length_penalty=args.length_penalty
    )

    for idx, key in enumerate(keys):
        for mode, hyps in results.items():
            text = tokenizer.detokenize(hyps[idx].tokens)[0]
            line = f"{key} {text}"
            logging.info(f"{mode.ljust(max_format_len)} {line}")
            files[mode].write(line + '\n')

def load_data_test():
    """Entry point: load config, model and tokenizer, decode every sample
    in the data list, and write one result file per decoding mode.

    Samples whose audio cannot be loaded are skipped. Decoding runs in
    batches of args.batch_size; the final partial batch is flushed after
    the loop (the previous check `sample == samples[-1]` silently dropped
    it whenever the last sample failed to load, and could also fire early
    on duplicate sample dicts).
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    args = get_args()

    # Load the YAML config and apply any command-line overrides.
    with open(args.config, 'r') as fin:
        configs = yaml.load(fin, Loader=yaml.FullLoader)
    configs = override_config(configs, args.override_config)

    # Initialize the tokenizer.
    # NOTE(review): symbol table path is hard-coded; adjust for deployment.
    configs['tokenizer_conf']['symbol_table_path'] = 'inference/units.txt'
    tokenizer = init_tokenizer(configs)

    # Build the model and move it to the selected device.
    configs['cmvn_conf']['cmvn_file'] = 'inference/global_cmvn'  # hard-coded path; replace as needed
    model, configs = init_model(args, configs)
    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    model = model.to(device)
    model.eval()

    # Resolve the CTC blank id from the symbol table.
    _, blank_id = get_blank_id(configs, tokenizer.symbol_table)
    logging.info(f"blank_id is {blank_id}")

    # Open one result file per decoding mode.
    files = {}
    for mode in args.modes:
        dir_name = os.path.join(args.result_dir, mode)
        os.makedirs(dir_name, exist_ok=True)
        files[mode] = open(os.path.join(dir_name, 'text'), 'w')
    max_format_len = max(len(mode) for mode in args.modes)

    # Load every sample listed in data.list.
    samples = load_data_from_list(args.test_data)
    logging.info(f"从{args.test_data}加载了{len(samples)}个样本")

    try:
        batch_samples = []
        for sample in samples:
            sample = load_audio(sample)
            if sample is None:
                # Unreadable audio: skip this sample entirely.
                continue
            sample = resample(sample)
            sample = compute_fbank(sample)
            sample = tokenize(sample, tokenizer)
            batch_samples.append(sample)

            if len(batch_samples) >= args.batch_size:
                batch = padding(batch_samples)
                process_batch(model, batch, args, tokenizer, device,
                              blank_id, args.modes, files, max_format_len)
                batch_samples = []

        # Flush the final partial batch (bug fix: previously skipped when
        # the last sample failed to load).
        if batch_samples:
            batch = padding(batch_samples)
            process_batch(model, batch, args, tokenizer, device,
                          blank_id, args.modes, files, max_format_len)
    finally:
        # Always close the result files, even if decoding raised.
        for f in files.values():
            f.close()
    logging.info("推理完成，结果已保存")

if __name__ == '__main__':
    load_data_test()