import gc
import gzip
import io
import json
import math
import os
import pdb
import random
import re
import struct
import subprocess
import sys
from copy import deepcopy
from multiprocessing import Process, Queue

import numpy as np
import torch
import torchaudio
from torch.utils.data import Dataset, default_collate
from tqdm import tqdm
from transformers import AutoProcessor

from megatron.legacy.data.data_samplers import MegatronPretrainingRandomSampler
from megatron.training import get_args


def open_or_fd(file, mode='rb'):
    """ fd = open_or_fd(file)
     adapted from kaldi_io
     Open a plain file, gzipped file, input/output pipe, or forward an
     already-open file object unchanged.
     If 'file' is a string ending in ':offset', the returned descriptor is
     seeked to that byte offset (kaldi rxfilename style).

     Args:
         file: rxfilename/wxfilename string, or an open file-like object.
         mode: mode used for regular and gzipped files (default 'rb').

     Returns:
         A file-like object, positioned at the requested offset if given.
    """
    offset = None
    try:
        # strip 'ark:' / 'scp:' prefix with optional read/write specifiers,
        if re.search(r'^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
            (prefix, file) = file.split(':', 1)
        # separate ':offset' suffix from the filename (optional),
        if re.search(r':[0-9]+$', file):
            (file, offset) = file.rsplit(':', 1)
        # input pipe? "command |" -> read the command's stdout
        if file[-1] == '|':
            # Fix: the original called an undefined `popen`; use subprocess.
            fd = subprocess.Popen(file[:-1], shell=True, stdout=subprocess.PIPE).stdout
        # output pipe? "| command" -> write into the command's stdin
        elif file[0] == '|':
            fd = subprocess.Popen(file[1:], shell=True, stdin=subprocess.PIPE).stdin
        # gzipped file?
        elif file.split('.')[-1] == 'gz':
            fd = gzip.open(file, mode)
        # a normal file...
        else:
            fd = open(file, mode)
    except TypeError:
        # 'file' is already an opened file object; forward it as-is,
        fd = file
    # Eventually seek to the requested offset,
    if offset is not None:
        fd.seek(int(offset))
    return fd
def load_ark(ark: str, **kwargs):
    """Load an audio chunk addressed kaldi-style as "<path>:<offset>,<length>".

    The <length> bytes starting at byte <offset> of <path> must form a
    complete audio file decodable by torchaudio; extra keyword arguments
    are forwarded to ``torchaudio.load``.
    """
    pieces = ark.split(':')
    start, size = pieces[1].split(',')
    with open(pieces[0], 'rb') as handle:
        handle.seek(int(start))
        payload = handle.read(int(size))
    return torchaudio.load(io.BytesIO(payload), **kwargs)

def read_wav_from_fd(ark_with_idx):
    """
    Read one complete WAV file starting at the current seek position of the
    descriptor resolved from ``ark_with_idx`` (a path, pipe spec, or
    "path:offset" string understood by ``open_or_fd``).

    Returns:
        tuple: (waveform, sample_rate) as produced by ``torchaudio.load``.

    Raises:
        ValueError: if the header is truncated, not RIFF/WAVE, not PCM, or
            the 'data' chunk is not found within the first 44 bytes.

    NOTE(review): the 'data'-chunk scan below only looks inside the first
    44 bytes, so WAVs with extra chunks (e.g. LIST/INFO) before 'data' are
    rejected; also the backward seek requires a seekable fd — this will
    fail on pipe inputs. Confirm callers only pass seekable sources.
    """
    fd = open_or_fd(ark_with_idx)
    # 1. Read the first 44 bytes (the canonical minimal WAV header size).
    header = fd.read(44)
    if len(header) < 44:
        raise ValueError("Invalid WAV header (too short)")

    # 2. Parse the RIFF container fields and verify the magic numbers.
    riff, file_size, wave = struct.unpack('<4sI4s', header[:12])
    if riff != b'RIFF' or wave != b'WAVE':
        raise ValueError("Not a valid WAV file")

    # 3. Parse the fmt sub-chunk (only uncompressed PCM is accepted).
    fmt_chunk = header[12:36]
    audio_format, num_channels, sample_rate, byte_rate, block_align, bits_per_sample = \
        struct.unpack('<HHI I HH', fmt_chunk[8:24])
    if audio_format != 1:
        raise ValueError("Only PCM WAV format is supported")

    # 4. Locate the 'data' chunk and its size (within the 44-byte header).
    pos = 36  # skip RIFF header + fmt sub-chunk
    while pos < len(header):
        subchunk_id, subchunk_size = struct.unpack('<4sI', header[pos:pos+8])
        if subchunk_id == b'data':
            data_size = subchunk_size
            break
        pos += 8 + subchunk_size
    else:
        raise ValueError("Could not find 'data' chunk in WAV header")

    # 5. Total WAV size = everything up to 'data' + 8-byte chunk header + payload.
    wav_size = pos + 8 + data_size  # total size including all sub-chunks

    # 6. Re-read the complete WAV bytes from the start of the header.
    fd.seek(-44, io.SEEK_CUR)  # rewind to the beginning of the WAV
    wav_bytes = fd.read(wav_size)

    # 7. Decode the in-memory bytes with torchaudio.
    waveform, sr = torchaudio.load(io.BytesIO(wav_bytes))
    return waveform, sr



def spec_aug(sample, num_t_mask=2, num_f_mask=2, max_t=50, max_f=15, max_w=80, seed=42):
    """ Do spec augmentation
        Inplace operation

        Args:
            sample: {key, feat, ...}
            num_t_mask: number of time mask to apply
            num_f_mask: number of freq mask to apply
            max_t: max width of time mask
            max_f: max width of freq mask
            max_w: max width of time warp (currently unused)
            seed: seeds the global ``random`` module for reproducibility

        Returns
            {key, feat, ....}

        NOTE(review): time masks are applied on dim 0 of 'feat' and
        frequency masks on dim 1; if 'feat' carries a leading batch
        dimension this masks the wrong axes — confirm against the
        processor's output layout.
    """
    random.seed(seed)
    assert 'feat' in sample
    feats = sample['feat']
    frame_mask = sample['feature_attention_mask']
    # number of valid frames according to the attention mask (first row)
    num_frames = frame_mask.sum(-1)[0].item()
    assert isinstance(feats, torch.Tensor)
    masked = feats.clone().detach()
    # max_frames = masked.size(0)
    num_bins = masked.size(1)
    # time masking: zero out num_t_mask random spans along dim 0
    for _ in range(num_t_mask):
        t0 = random.randint(0, num_frames - 1)
        span = random.randint(1, max_t)
        masked[t0:min(num_frames, t0 + span), :] = 0
    # frequency masking: zero out num_f_mask random spans along dim 1
    for _ in range(num_f_mask):
        f0 = random.randint(0, num_bins - 1)
        span = random.randint(1, max_f)
        masked[:, f0:min(num_bins, f0 + span)] = 0
    sample['feat'] = masked
    del feats
    return sample

def get_audio_length_via_header(file_path):
    """Return the audio duration in seconds, read from the file header only
    (no full decode)."""
    meta = torchaudio.info(file_path)
    return meta.num_frames / meta.sample_rate

class Qwen2AudioDatasetV2(Dataset):
    """Map-style dataset producing Qwen2-Audio training samples.

    Annotations are jsonl lines with at least a ``wav`` source (file path,
    kaldi ark spec, or shell command) and a ``text`` or ``segments``
    transcript; optional keys used here: ``utt2dur``, ``context_text``,
    ``ctc_hyp``, ``task``, ``text_input_no_token``. ``__getitem__`` returns
    a dict of tensors padded to ``seq_len`` (batched by ``collate_fn``).
    """
    def __init__(self, ann_paths, model_path, seq_len=1024, ret_id=False, data_aug=False, sample_merge=False):
        """Load annotations, filter them by length limits, and optionally
        merge short samples so several fit one ``seq_len`` window.

        Args:
            ann_paths: a single jsonl path or a list of them.
            model_path: HuggingFace model dir for ``AutoProcessor``.
            seq_len: per-sample token budget (filtered with a 100-token margin).
            ret_id: if True, also return a human-readable ``uttid`` per item.
            data_aug: if True, apply SpecAugment to the input features.
            sample_merge: if True, pack short samples together up to ``seq_len``.
        """
        super().__init__()
        self.annotation = []

        if isinstance(ann_paths, list):  # multiple json files
            for f in ann_paths:
                    # self.annotation += json.load(open(f, "r", encoding='utf-8'))['annotation']
                self.annotation += [json.loads(line) for line in open(f, 'r', encoding='utf-8')]
        else:
            self.annotation = [json.loads(line) for line in open(ann_paths, 'r', encoding='utf-8')]
            # self.annotation = json.load(open(ann_paths, "r",  encoding='utf-8'))['annotation']


        def check_audio_text_length(ann, 
                                max_duration=30, 
                                max_token_rate=15, 
                                min_duration=0.4, 
                                min_token_num=2, 
                                min_token_rate=0.5):
            """
            Check whether the audio duration and text length satisfy the
            configured limits.

            Args:
                ann: dict with audio/text info; expects wav, text, utt2dur keys
                max_duration: maximum audio duration (seconds)
                max_token_rate: maximum tokens per second
                min_duration: minimum audio duration (seconds)
                min_token_num: minimum token count
                min_token_rate: minimum tokens per second

            Returns:
                bool: True if every check passes
            """
            # 0. Optional existence check for local wav paths (disabled).
            wav_path = ann.get('wav', '')
#            if wav_path and os.path.isabs(wav_path):  # is it a local absolute path?
#                if not os.path.isfile(wav_path):  # does the file exist?
#                    return False

            # 1. Audio duration (defaults to 10 s when utt2dur is missing).
            utt2dur = ann.get('utt2dur', 10)

            # 2. Duration must fall inside the allowed range.
            if not (min_duration <= utt2dur <= max_duration):
                return False

            # 3. Text length; fall back to segment texts when 'text' is empty.
            text = ann.get('text', '')
            if not text:
                # no top-level text field: sum the per-segment texts
                segments = ann.get('segments', [])
                text_length = sum(len(seg.get('text', '')) for seg in segments)
            else:
                text_length = len(text)

            # 4. Approximate token count (characters / 1.5).
            token_num = text_length / 1.5

            # 5. Minimum token count.
            if token_num < min_token_num:
                return False

            # 6. Token-rate limits (tokens per second of audio).
            if utt2dur > 0:
                token_rate = token_num / utt2dur

                # maximum token rate
                if token_rate > max_token_rate:
                    return False

                # minimum token rate
                if token_rate < min_token_rate:
                    return False

            # All checks passed.
            return True

        orig_len = len(self.annotation)
        # check_length may truncate ann['context_text'] in place to fit the budget.
        self.annotation = [ann for ann in self.annotation if self.check_length(ann, seq_len - 100)]
        self.annotation = [ann for ann in self.annotation if check_audio_text_length(ann)]
        #random.seed(42)
        #random.shuffle(self.annotation)
        print(f"Loaded {len(self.annotation)}/{orig_len} annotation samples from {ann_paths}")
        self.ret_id = ret_id
        self.data_aug = data_aug
        self.processor = AutoProcessor.from_pretrained(model_path)
        self.seq_len = seq_len
        self.sample_merge = sample_merge

        # Qwen2-Audio special token ids, hard-coded to match the tokenizer.
        self.pad_id = 151643 # or zero? <|endoftext|>, zero: !
        self.sep_id = [151644, 77091, 198] # <im_start>assistant\n
        self.audio_id = 151644
        self.end_of_turn = 151645 # eos
        self.audio_token_index = 151646

        if len(self.annotation) > 0 and sample_merge:
            # merge short to SEQ LEN: pack consecutive samples while the
            # combined token estimate stays below seq_len, with at most
            # sample_merge_max_batchsize() samples per pack.
            self.new_annotation = []
            tmp_ann = []
            cur_len = 0
            original_sample_num = len(self.annotation)
            for ann in tqdm(self.annotation, desc="Merging Data Samples"):
                all_token_len = self.count_sample_token_len(ann)
                if all_token_len + cur_len < seq_len and len(tmp_ann) < self.sample_merge_max_batchsize():
                    tmp_ann.append(ann)
                    cur_len += all_token_len
                elif len(tmp_ann) == 0:
                    # Sample too long to fit even an empty pack: silently dropped.
                    pass
                else:
                    self.new_annotation.append(tmp_ann)
                    cur_len = all_token_len
                    # NOTE(review): the sample that closed this pack is
                    # discarded here even though cur_len counts it; this
                    # likely should be `tmp_ann = [ann]`.
                    tmp_ann = [] # --> tmp_ann = [ann]
            if tmp_ann:
                self.new_annotation.append(tmp_ann)
            print(f"Merging Data Completed! Orig Len: {original_sample_num}; Curr Len: {len(self.new_annotation)}")
            self.annotation = self.new_annotation



        print(f"Dataset Processing Done, dataset has {len(self.annotation)} samples")
    def sample_merge_max_batchsize(self):
        # Maximum number of short samples packed into one merged item.
        return 4
    def check_length(self, ann, seq_len):
        """Return True if the sample fits the token budget ``seq_len``.

        When it does not fit, try to make it fit by truncating
        ``ann['context_text']`` in place; return False when even that is
        not enough. NOTE(review): mutates ``ann`` as a side effect.
        """
        all_token_len = self.count_sample_token_len(ann)

        # utt2dur = ann.get('utt2dur', 30)
        # # audio_length = math.ceil(utt2dur / 30) * 750
        # audio_length = math.ceil(utt2dur / 30 * 750)
        # if 'text' not in ann or not ann['text']:
        #     lis = ann.get('segments', [])
        #     response_length = 0
        #     for seg in lis:
        #         response_length += len(seg['text'])
        #     if not response_length:
        #         print(ann.get('id', ''))
        #         return False # skip


        # else:
        #     response_length = len(ann.get("text", ""))
        # context_length = len(ann.get("context_text", "")) 
        # ctc_hyp_length = len(ann.get("ctc_hyp", ""))
        # instruct_length = 20
        # all_token_len = audio_length + (response_length + context_length + instruct_length) / 1.5
        # print(all_token_len)
        # try truncate context
        if all_token_len > seq_len:
            # rest_context_len = int((seq_len - audio_length - (response_length + instruct_length)/1.5))
            cut_off_len = all_token_len - seq_len
            context_length = int(len(ann.get("context_text", "")))
            rest_context_len = int(context_length - cut_off_len)
            if context_length > 0 and rest_context_len > 0:
                # pdb.set_trace()
                new_context = ann['context_text'][:rest_context_len]
                ann['context_text'] = new_context
                return True
            else:
                return False
        else:
            return True    
    def count_sample_token_len(self, ann):
        """Rough upper-bound token count for one annotation.

        Audio contributes ceil(dur / 30 * 750) embedding slots — scaled by
        4 below, presumably as a safety margin (TODO confirm) — plus the
        character counts of text/context and a fixed 20-char instruction
        allowance.
        """
        audio_dur = ann.get("utt2dur", 30)
        text = ann.get("text", "")
        context_text = ann.get("context_text", "") + ann.get("ctc_hyp", "")
        # context_length = len(ann.get("context_text", "")) + len(ann.get("ctc_hyp", ""))
        instruct_length = 20 
        # sample_len = audio_dur * 100 / 6 + 
        # audio_length = math.ceil(audio_dur / 30) * 750
        audio_length = math.ceil(audio_dur / 30 * 750)
        all_token_len = audio_length * 4  + (len(text) + len(context_text) + instruct_length) 
        return all_token_len   

    def __len__(self):
        # One item per (possibly merged) annotation entry.
        return len(self.annotation)

    def get_suffix_prompt(self, task, index):
        """Pick the instruction prompt for ``task``, cycling deterministically
        by sample ``index``. Raises KeyError for unknown tasks."""
        mapper = {
            "asr": ["转录上述语音内容。"],
            "asr_zh": ["转录上述语音内容。"],
            "asr_en": ["转录上述语音内容。"],
            "asr_zhen": ["转录上述语音内容。"],
            "asr_draft": ["基于给定辅助信息，转录上述语音内容。"],
            "asr_multi": ['按不同发言人，转录上述语音内容。']
        }

        prompt_list = mapper[task]
        return prompt_list[index % len(prompt_list)]
        # return random.choice(mapper[task])
    



    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers and the output length of the audio encoder
        (two stride-2 reductions, mirroring the Qwen2-Audio feature extractor).
        """
        input_lengths = (input_lengths - 1) // 2 + 1
        output_lengths = (input_lengths - 2) // 2 + 1
        return input_lengths, output_lengths
    
    def prepare_loss_mask_and_labels(self, raw_ids, input_ids, num_audio_tokens):
        """
        Create a loss mask that marks positions between sep_id and end_of_turn
        with 1s, then re-index mask and labels so that each audio placeholder
        token expands into ``num_audio_tokens`` positions (matching how the
        model splices audio embeddings into the text sequence).

        Args:
            raw_ids (torch.Tensor | list): un-padded token IDs
            input_ids (torch.Tensor): (1, seq_len + 1) padded token IDs
            num_audio_tokens (torch.Tensor): per-audio embedding lengths from
                ``_get_feat_extract_output_lengths``

        Returns:
            tuple: (loss_mask, labels), each of width ``self.seq_len`` after
            dropping the first position (teacher-forcing shift)
        """
        # Convert raw_ids to a list if it's a tensor
        if isinstance(raw_ids, torch.Tensor):
            raw_ids = raw_ids.tolist()
        
        # seq_len = len(raw_ids)
        seq_len = self.seq_len + 1
        loss_mask = torch.zeros(1, seq_len, dtype=torch.float32)
        
        # Length of separator pattern
        sep_len = len(self.sep_id)
        
        # NOTE(review): raw_ids may be shorter than seq_len; out-of-range
        # slices below just return short lists that never match — safe.
        i = 0
        while i < seq_len:
            # Check if we find the separator pattern
            if i + sep_len <= seq_len and raw_ids[i:i+sep_len] == self.sep_id:
                # Skip the separator itself
                i += sep_len
                # Mark everything as 1 until we find end_of_turn
                while i < seq_len and raw_ids[i] != self.end_of_turn:
                    loss_mask[0, i] = 1
                    i += 1
                # Mark the end_of_turn position as 1
                if i < seq_len and raw_ids[i] == self.end_of_turn:
                    loss_mask[0, i] = 1
                    i += 1
            else:
                i += 1
                
        
        # Positions that are ordinary text (not audio placeholders).
        batch_indices, non_audio_indices = torch.where(
            (input_ids != self.audio_token_index)
        )        
        token_placeholder_num = torch.zeros_like(input_ids)
        special_audio_token_mask = input_ids == self.audio_token_index
        # Each audio placeholder expands to num_audio_tokens slots; text is 1.
        token_placeholder_num[special_audio_token_mask] = num_audio_tokens.long() - 1
        token_placeholder_num = token_placeholder_num + 1
        new_token_positions = torch.cumsum(token_placeholder_num, -1) - 1
        max_token_num = token_placeholder_num.sum(-1).max()
        text_to_overwrite = new_token_positions[batch_indices, non_audio_indices]
        new_loss_mask = torch.zeros(input_ids.shape[0], max(max_token_num, seq_len), dtype=torch.float)
        new_loss_mask[batch_indices, text_to_overwrite] = loss_mask[batch_indices, non_audio_indices]

        # -100 marks positions ignored by the loss (audio slots / padding).
        labels = torch.full((input_ids.shape[0], max(max_token_num, seq_len)), -100).to(torch.long)
        labels[batch_indices, text_to_overwrite] = input_ids[batch_indices, non_audio_indices]
        labels = labels[:, 1:seq_len]
        loss_mask = new_loss_mask[:, 1:seq_len]
        return loss_mask, labels
        
    def load_audio_from_stream(self, command: str, output_sample_rate: int = 16000):
        """Load audio from a command stream while preventing memory leaks.
        
        Args:
            command: The command to execute that produces audio output
            output_sample_rate: Desired sample rate for the output audio
            
        Returns:
            tuple: (waveform, sample_rate)
            
        Raises:
            RuntimeError: If the command fails or audio cannot be loaded

        NOTE(review): shell=True executes ``command`` (taken from the
        annotation's 'wav' field) through the shell — annotation sources
        must be trusted.
        """
        # Initialize variables to ensure cleanup in finally block
        pipe = None
        audio_data = None
        
        try:
            # Use shell to execute the command
            pipe = subprocess.Popen(
                command,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                # Prevent subprocess from inheriting file descriptorsf
                close_fds=True
            )
            
            # Read the output with a timeout to prevent hanging
            stdout, stderr = pipe.communicate(timeout=30)
            
            if pipe.returncode != 0:
                error_msg = stderr.decode('utf-8', errors='replace')
                raise RuntimeError(f"Command failed with return code {pipe.returncode}: {error_msg}")
            
            # Create a memory-efficient BytesIO object
            audio_data = io.BytesIO(stdout)
            
            # Load audio with torchaudio
            waveform, sample_rate = torchaudio.load(
                audio_data,
                format='wav',  # Explicit format helps with stream processing
                normalize=True
            )
            
            # Resample if needed
            if sample_rate != output_sample_rate:
                transform = torchaudio.transforms.Resample(
                    orig_freq=sample_rate,
                    new_freq=output_sample_rate
                )
                waveform = transform(waveform)
                sample_rate = output_sample_rate
                
            return waveform, sample_rate
            
        except subprocess.TimeoutExpired:
            if pipe:
                pipe.kill()
                stdout, stderr = pipe.communicate()
            raise RuntimeError("Command timed out while loading audio")
            
        except Exception as e:
            raise RuntimeError(f"Error loading audio from stream: {str(e)}")
            
        finally:
            # Ensure resources are cleaned up
            if audio_data is not None:
                audio_data.close()
            if pipe is not None and pipe.poll() is None:
                pipe.terminate()
                try:
                    pipe.wait(timeout=1)
                except subprocess.TimeoutExpired:
                    pipe.kill()
    # def load_audio_from_stream(self, command: str, output_sample_rate: int = 16000):
    #     # Adjust the command line so wget and ffmpeg pipe correctly
    #     #command = f"{command} | ffmpeg -i pipe:0 -ar {output_sample_rate} -f wav pipe:1"
    #     command = f"{command} "
        
    #     # Run the command with subprocess and capture the audio stream
    #     pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    #     stdout, stderr = pipe.communicate()
        
    #     if pipe.returncode != 0:
    #         raise RuntimeError(f"Error: {stderr.decode('utf-8')}")
        
    #     # Wrap the stdout bytes as a binary file object
    #     audio_data = io.BytesIO(stdout)
        
    #     # torchaudio's ffmpeg backend should decode the stream automatically
    #     waveform, sample_rate = torchaudio.load(audio_data, normalize=True)
        
    #     return waveform, sample_rate        
    

    def transform_segments(self, segments):
        """Render multi-speaker segments as newline-joined "SPKxx: text" lines.

        Speakers are numbered SPK01.. in order of first appearance; any
        speaker past the 99th distinct one falls back to the generic "SPK".
        """
        speakers = []
        for seg in segments:
            speaker = seg['speaker']
            if speaker not in speakers:
                speakers.append(speaker)
        speakers = speakers[:99] # NOTE(review): keeps 99 speakers, not 100
        speakers = {speaker:f"SPK" + "%02d" % (i+1) for i,speaker in enumerate(speakers)}
        # NOTE(review): unused — timestamps are currently not emitted.
        orig_start_time = segments[0]['start_time']
        to_ret = ""
        for seg in segments:
            speaker = speakers.get(seg['speaker'], "SPK")
            # do not use time first
            text = seg['text']
            to_ret += f"{speaker}: {text}\n"
        # speakers = set(speakers)
        to_ret = to_ret.rstrip("\n")
        return to_ret
    def conversation_processor(self, ann, index, audio_part_lens):
        """Build the chat-template conversation for one annotation.

        User content = optional context block + one audio entry per 30 s
        audio part + a task-specific instruction; assistant content = the
        transcript (or rendered segments).
        """
        conversation = []

        if 'context_text' in ann: # and ann['task'] in ["asr_draft", "asr_hotword"]:
            # NOTE(review): `and False` makes this branch dead — context is
            # never dropped; remove or re-enable deliberately.
            if index % 10 == 0 and False:
                # 0.1 prob to drop
                conversation.append({"type": "text", "text": "###Context\n" + "\n###Instruct\n"})
            else:
                conversation.append({"type": "text", "text": "###Context\n" +ann['context_text'].replace("\\n", "\n") + "\n###Instruct\n"})
        for _ in range(audio_part_lens):
            conversation.append({"type": "audio", "audio_url": ann["wav"]})

        conversation.append({"type": "text", "text": self.get_suffix_prompt(ann["task"], index)})
        if 'segments' in ann:
            response = self.transform_segments(ann['segments'])
        else:
            response = ann['text']
        
        conversation = [{"role": "user", "content": conversation}, {"role": "assistant", "content": response}]         
        return conversation

            
    def __getitem__(self, index):
        """Build one training item (or a merged pack of annotations).

        Loads audio (file / kaldi ark / piped command, falling back to one
        second of silence on failure), splits it into <=30 s parts, renders
        the chat-template text, runs the processor, and assembles padded
        tensors plus the loss mask and labels.
        """
        with torch.no_grad():
            anns = self.annotation[index]

            # A merged item is a list of annotations; normalize to a list.
            if type(anns) == dict:
                anns = [anns]

            all_inputs = []
            max_audio_len_in_s = 30
            for ann in anns:
                # Load and process audio
                audio = None
                dummy_flag = False
                try:
                    if "wav-copy" in ann['wav']:
                        audio, sr = read_wav_from_fd(ann['wav'].split(" ")[1])
                    elif ".ark" in ann['wav']:
                        # audio, sr = read_wav_from_fd(ann['wav'].split(",")[0])
                        audio, sr = load_ark(ann['wav'])
                    else:
                        audio, sr = torchaudio.load(ann["wav"])
                # NOTE(review): bare except — falls through to treating
                # ann['wav'] as a shell command, then to dummy audio.
                except:
                    # command
                    try:
                        audio, sr = self.load_audio_from_stream(ann['wav'])
                    except:
                        if audio is None:
                            # generate dummy data
                            dummy_flag=True
                            #print(f"Warning: {ann['wav']} Load Failed")
                            audio = torch.zeros((1,16000), dtype=torch.float)
                            # random noise
                            #audio = torch.rand((1,16000), dtype=torch.float)*2 - 1
                            sr = 16000

                if sr != 16000:
                    resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)
                    audio = resampler(audio)
                    sr = 16000

                if audio.shape[0] >= 2:  # stereo, multichannel to mono
                    # audio = torch.mean(audio, dim=0)
                    # use first channel
                    audio = audio[:1]
                audio = audio.squeeze(0)
                if audio.size(0) < sr:  # pad audio to at least 1s
                    sil = torch.zeros(sr - audio.size(0))
                    audio = torch.cat((audio, sil), dim=0)

                # Split audio into parts of 30s or less
                
                audio_parts = [audio[i:i + sr * max_audio_len_in_s] for i in range(0, len(audio), sr * max_audio_len_in_s)]
                audios = [part.numpy() for part in audio_parts]
                if dummy_flag:
                    # silence -> None content
                    ann['text'] = ""
                if 'text_input_no_token' in ann:
                    text_input = ann['text_input_no_token']
                else:
                    conversation = self.conversation_processor(ann, index, len(audio_parts))           
                    text_input = self.processor.apply_chat_template(conversation, add_generation_prompt=False, tokenize=False)
                    # print(text_input)
                # print(f"Rank: {torch.distributed.get_rank()} path: {ann['wav']} text: {ann['text']}")
                # Generate audio features
                inputs = self.processor(audios=audios,text=text_input, return_tensors="pt", padding=True, sampling_rate=sr)
                all_inputs.append(inputs)
                del audios
                del audio
                del audio_parts

            # input_ids, attention_mask, input_features, feature_attention_mask
            input_features = torch.cat([x['input_features'] for x in all_inputs], dim=0)
            feature_attention_mask = torch.cat([x['feature_attention_mask'] for x in all_inputs], dim=0)
            attention_mask = torch.ones(1, self.seq_len + 1, dtype=torch.int64)
            # attention_mask = torch.tril(torch.ones(1, 1, self.seq_len + 1, self.seq_len+1, dtype=torch.int64)) # all ones
                
            # Concatenate token ids of all merged samples, separated by eos.
            raw_ids = []
            for inputs in all_inputs:
                raw_ids += inputs['input_ids'].squeeze().tolist()
                raw_ids += [self.end_of_turn]

            raw_ids = torch.tensor(raw_ids, dtype=torch.int64)
            input_ids = torch.full((1, self.seq_len + 1), self.pad_id, dtype=torch.int64)
            input_ids[0,:len(raw_ids)] = raw_ids
            # construct loss_mask
            
            # only answers are ones, follow labels

            _, audio_emb_lengths = self._get_feat_extract_output_lengths(
                feature_attention_mask.sum(-1) 
            )

            # add label processing
            loss_mask, labels = self.prepare_loss_mask_and_labels(raw_ids,input_ids, audio_emb_lengths)

            # labels = input_ids.clone()
            input_ids = input_ids[:, :-1]
            # labels = labels[:, 1:]
            attention_mask = attention_mask[:, :-1]
            if self.data_aug:
                sample = {'feat': input_features, 'feature_attention_mask': feature_attention_mask}
                sample = spec_aug(sample, seed=index)
                input_features = sample['feat']
            del all_inputs
            # Placeholder tensors kept for interface compatibility.
            feature_lengths = torch.ones((input_features.shape[0]), dtype=torch.int64)
            ctc_token = torch.ones((input_features.shape[0], self.seq_len), dtype=torch.int64)
            ctc_token_lengths = torch.ones((input_features.shape[0]), dtype=torch.int64)
            if self.ret_id:
                uttid = 'comb' + str(len(anns)) + '-'.join(ann['wav'].split('/')[-2:])
                return deepcopy({
                    "uttid": uttid,
                    "input_features": input_features,
                    "input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "feature_attention_mask": feature_attention_mask,
                    "feature_lengths": feature_lengths,
                    "ctc_token": ctc_token,
                    "ctc_token_lengths": ctc_token_lengths,
                    "labels": labels,
                    "loss_mask": loss_mask
                })
            else:
                return deepcopy({
                    "input_features": input_features,
                    "input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "feature_attention_mask": feature_attention_mask,
                    "feature_lengths": feature_lengths,
                    "ctc_token": ctc_token,
                    "ctc_token_lengths": ctc_token_lengths,
                    "labels": labels,
                    "loss_mask": loss_mask
                })            
        

def collate_fn(batch):
    """Merge a list of per-sample dicts into one batched dict.

    Tensor values are concatenated along dim 0 (each sample already carries
    a leading batch dimension); every other value type is gathered into a
    list. A deep copy is returned so dataloader workers do not share storage.
    """
    merged = {}
    for key, probe in batch[0].items():
        if isinstance(probe, torch.Tensor):
            merged[key] = torch.cat([sample[key] for sample in batch], dim=0)
        else:
            merged[key] = [sample[key] for sample in batch]
    return deepcopy(merged)
# Debugging entries for dataset initialization: builds the dataset from a
# hard-coded jsonl, wraps it in a Megatron random sampler + DataLoader, and
# iterates a couple of epochs as a smoke test (no assertions).
if __name__ == "__main__":
#    from pympler import tracker, muppy, summary



    ann_paths = [
        "audio_data/train_20250324_llmasr.jsonl",
        "audio_data/train_20231206_slidespeech_487h.jsonl",
        "audio_data/train_20250417_cn_slides_generated_context_all_upper.jsonl",
        "audio_data/train_20250410_cn_slides_draft_8kh_open_ocr_all_upper.jsonl"
    ]
    ann_paths = [
        "audio_data/train_ver250513/zh2.json.tr"
    ]
    # model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/Qwen-audio-whisper-tiny-qwen-0.5B"
    # NOTE(review): the assignments above are overwritten; only this last
    # ann_paths value is actually used.
    ann_paths = ['audio_data/dev_aishell1.jsonl', ]
    # ann_paths = ['audio_data/context_test/yuanbao_pst_test_long_exact_kw_wemeet.jsonl']
    # ann_paths = ["/apdcephfs_qy3/share_976139/users/adrenzhou/audio_llm/workdir_2025612_find_conseq_letter/continuous_letters.jsonl"]
    model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2-Audio-7B"

    dataset = Qwen2AudioDatasetV2(ann_paths, model_path,seq_len=2048, ret_id=True, data_aug=True, sample_merge=True)

    # DataLoader configuration
    batch_size = 2
    num_workers = 0
    from torch.utils.data import DataLoader

    # dataloader = DataLoader(
    #     dataset=dataset,
    #     batch_size=batch_size,
    #     shuffle=True,
    #     num_workers=num_workers,
    #     pin_memory=True,
    #     collate_fn=collate_fn
    # )
    # Read a few batches to smoke-test the pipeline
    from tqdm import tqdm

    batch_sampler = MegatronPretrainingRandomSampler(
        dataset=dataset,
        total_samples=len(dataset),
        consumed_samples=0,
        micro_batch_size=batch_size,
        data_parallel_rank=0,
        data_parallel_size=1,
        data_sharding=False,
    )    

    train_ds = DataLoader(
        dataset=dataset,
        batch_sampler=batch_sampler,
        num_workers=num_workers,
        pin_memory=True,
        collate_fn=collate_fn,
        # prefetch_factor=1,
        persistent_workers=True if False else False,
    )
    import gc

    # pdb.set_trace()
    for epoch in range(1,3):
        for batch_idx, batch in tqdm(enumerate(train_ds)):
            # Periodically force garbage collection while iterating.
            if batch_idx % 1000 == 0:
                gc.collect()
            # pdb.set_trace()
            # if batch['uttid'][0].startswith("comb2"):
                # pdb.set_trace()
            #print(f"epoch: {train_ds.batch_sampler.epoch}, batch_idx: {batch_idx}, uttids: {batch['uttid']}")
            #torch.save(batch, "penguins_input.pt")
            # print()
            del batch
            pass
        # pdb.set_trace()
        # if batch_idx >= 3:  # only inspect the first 3 batches
        #     break
        # print(f"Batch {batch_idx}:")
        # print(f"Batch size: {len(batch)}")
        # print(f"Data in batch: {batch}")
        # print("-" * 50)

    # # Try reading a few data samples
    # for i in range(3):
    #     sample = dataset[i]
    #     pdb.set_trace()
    #     print(f"Sample {i}: {sample}")