import gc
import gzip
import io
import json
import math
import os
import pdb
import random
import re
import struct
import subprocess
import sys
from copy import deepcopy
from multiprocessing import Process, Queue

import librosa
import numpy as np
import torch
import torch.nn.functional as F
import torchaudio
from torch.utils.data import Dataset, default_collate
from tqdm import tqdm
from transformers import AutoProcessor, Qwen2_5OmniProcessor

from megatron.training import get_args

'''
'''
from megatron.legacy.data.data_samplers import MegatronPretrainingRandomSampler


def open_or_fd(file, mode='rb'):
    """Open a file, gzipped file, pipe, or pass through an already-open fd.

    Copied from kaldi_io.  Optionally strips an 'ark:'/'scp:' rspecifier
    prefix and seeks to a trailing ':offset' in *file*.

    Args:
        file: path string (possibly with rspecifier prefix and/or ':offset'
            suffix), or an already-open file object.
        mode: open mode used for plain and gzipped files.

    Returns:
        An open file-like object, positioned at *offset* when one was given.
    """
    offset = None
    try:
        # strip 'ark:' / 'scp:' rspecifier prefix (optional),
        if re.search('^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
            (prefix, file) = file.split(':', 1)
        # separate trailing byte offset from filename (optional),
        if re.search(':[0-9]+$', file):
            (file, offset) = file.rsplit(':', 1)
        # input pipe?
        if file[-1] == '|':
            # NOTE(review): `popen` is not defined/imported in this module;
            # both pipe branches raise NameError if ever taken — they need
            # kaldi_io's custom popen helper.
            fd = popen(file[:-1], 'rb') # custom,
        # output pipe?
        elif file[0] == '|':
            fd = popen(file[1:], 'wb') # custom,
        # is it gzipped?
        elif file.split('.')[-1] == 'gz':
            fd = gzip.open(file, mode)
        # a normal file...
        else:
            fd = open(file, mode)
    except TypeError:
        # 'file' is already an opened file descriptor,
        fd = file
    # Eventually seek to the requested byte offset.
    if offset is not None:
        fd.seek(int(offset))
    return fd

def load_ark(ark: str, **kwargs):
    """Decode a waveform stored inside a Kaldi-style ark slice.

    *ark* has the form "path:offset,length"; the byte range is read from
    the file and decoded with torchaudio.

    Returns:
        tuple: (waveform, sample_rate) from torchaudio.load.
    """
    segs = ark.split(':')
    start, size = segs[1].split(',')
    with open(segs[0], 'rb') as fh:
        fh.seek(int(start))
        payload = fh.read(int(size))
    return torchaudio.load(io.BytesIO(payload), **kwargs)

def read_wav_from_fd(ark_with_idx):
    """
    Read one complete WAV file starting at the seek position encoded in
    *ark_with_idx* (a "path:offset" specifier understood by open_or_fd).

    Returns:
        tuple: (waveform, sample_rate) as produced by torchaudio.load.

    Raises:
        ValueError: if the header is truncated, not RIFF/WAVE, not PCM, or
            contains no 'data' chunk within the first 44 bytes.
    """
    fd = open_or_fd(ark_with_idx)
    # 1. Read the first 44 bytes (standard canonical WAV header size).
    header = fd.read(44)
    if len(header) < 44:
        raise ValueError("Invalid WAV header (too short)")

    # 2. Parse the RIFF container fields.
    riff, file_size, wave = struct.unpack('<4sI4s', header[:12])
    if riff != b'RIFF' or wave != b'WAVE':
        raise ValueError("Not a valid WAV file")

    # 3. Parse the fmt sub-chunk (only uncompressed PCM is accepted).
    fmt_chunk = header[12:36]
    audio_format, num_channels, sample_rate, byte_rate, block_align, bits_per_sample = \
        struct.unpack('<HHI I HH', fmt_chunk[8:24])
    if audio_format != 1:
        raise ValueError("Only PCM WAV format is supported")

    # 4. Locate the 'data' chunk and its size.
    # NOTE(review): the scan only covers the 44-byte header, so files with
    # extra chunks (e.g. LIST) before 'data' fail here; header[pos:pos+8]
    # can also come up shorter than 8 bytes, raising struct.error instead
    # of ValueError — confirm inputs are canonical 44-byte-header WAVs.
    pos = 36  # skip RIFF header + fmt sub-chunk
    while pos < len(header):
        subchunk_id, subchunk_size = struct.unpack('<4sI', header[pos:pos+8])
        if subchunk_id == b'data':
            data_size = subchunk_size
            break
        pos += 8 + subchunk_size
    else:
        raise ValueError("Could not find 'data' chunk in WAV header")

    # 5. Full WAV size = everything up to 'data' + its 8-byte header + payload.
    wav_size = pos + 8 + data_size  # total size including all sub-chunks

    # 6. Re-read the complete WAV bytes from where the header started.
    # NOTE(review): requires a seekable fd — this fails for pipe inputs.
    fd.seek(-44, io.SEEK_CUR)  # rewind to the start of the WAV
    wav_bytes = fd.read(wav_size)

    # 7. Decode with torchaudio.
    waveform, sr = torchaudio.load(io.BytesIO(wav_bytes))
    return waveform, sr



def spec_aug(sample, num_t_mask=2, num_f_mask=2, max_t=50, max_f=15, max_w=80, seed=42):
    """Apply SpecAugment-style time/frequency masking to ``sample['feat']``.

    The feature tensor is cloned first and the clone (with masked regions
    zeroed) is written back into ``sample['feat']``.

    Args:
        sample: dict with at least ``feat`` (2-D tensor) and
            ``feature_attention_mask`` entries.
        num_t_mask: how many time masks to draw.
        num_f_mask: how many frequency masks to draw.
        max_t: maximum width of each time mask.
        max_f: maximum width of each frequency mask.
        max_w: unused, kept for interface compatibility.
        seed: RNG seed so augmentation is reproducible per sample.

    Returns:
        The same ``sample`` dict with ``feat`` replaced by the masked clone.
    """
    random.seed(seed)
    assert 'feat' in sample
    feats = sample['feat']
    # number of valid frames, taken from the first row of the attention mask
    valid_frames = sample['feature_attention_mask'].sum(-1)[0].item()
    assert isinstance(feats, torch.Tensor)
    masked = feats.clone().detach()
    # max_frames = masked.size(0)
    num_bins = masked.size(1)
    # zero out random spans along the time axis
    for _ in range(num_t_mask):
        if valid_frames <= 1:
            break
        t0 = random.randint(0, valid_frames - 1)
        t1 = min(valid_frames, t0 + random.randint(1, max_t))
        masked[t0:t1, :] = 0
    # zero out random spans along the frequency axis
    for _ in range(num_f_mask):
        f0 = random.randint(0, num_bins - 1)
        f1 = min(num_bins, f0 + random.randint(1, max_f))
        masked[:, f0:f1] = 0
    sample['feat'] = masked
    del feats
    return sample

def get_audio_length_via_header(file_path):
    """Return the audio duration in seconds by reading only the file header."""
    meta = torchaudio.info(file_path)
    return meta.num_frames / meta.sample_rate

def count_string_units(s: str) -> int:
    """Count mixed-language text units in *s*.

    Each maximal run of ASCII letters (an English "word") counts as one
    unit; every other character (CJK characters, digits, punctuation,
    whitespace) counts as one unit on its own.

    Returns:
        int: the total unit count (0 for an empty string).
    """
    if not s:
        return 0
    # One match per English word or per non-letter character.
    return len(re.findall(r'[A-Za-z]+|[^A-Za-z]', s))


class Qwen2_5OmniThinkerDataset(Dataset):
    """Audio-text SFT dataset for the Qwen2.5-Omni "thinker" model.

    Loads jsonl annotation files, filters samples whose estimated token
    budget would overflow the sequence length, and (optionally) packs
    several short samples into one training sample.
    """

    def __init__(self, ann_paths, model_path, seq_len=1024, ret_id=False, data_aug=False, sample_merge=False):
        """Load jsonl annotations, filter over-long samples, set up the processor.

        Args:
            ann_paths: one jsonl path or a list of jsonl paths.
            model_path: HF hub id / local dir for the Qwen2.5-Omni processor.
            seq_len: maximum token sequence length per training sample.
            ret_id: when True, __getitem__ also returns the utterance id.
            data_aug: when True, SpecAugment is applied to audio features.
            sample_merge: when True, pack several short samples together.
        """
        super().__init__()
        self.annotation = []

        self.sil_matrix = None
        if isinstance(ann_paths, list):  # multiple json files
            for f in ann_paths:
                    # self.annotation += json.load(open(f, "r", encoding='utf-8'))['annotation']
                # self.annotation += [json.loads(line) for line in open(f, 'r', encoding='utf-8')]
                for line in open(f, 'r', encoding='utf-8'):
                    line = line.strip()
                    if not line: continue  # skip empty lines
                    try:
                        self.annotation.append(json.loads(line))
                    except json.JSONDecodeError:
                        continue  # or log it and then skip

        else:
            self.annotation = [json.loads(line) for line in open(ann_paths, 'r', encoding='utf-8')]
            # self.annotation = json.load(open(ann_paths, "r",  encoding='utf-8'))['annotation']
        
        # Rough token-budget check: ~25 audio tokens per second of audio plus
        # text units for output/context/instruction (+50 template overhead).
        def check_length(ann, seq_len,
                        min_token_num=2):
            durations = ann.get('durations', [])
            durations = sum(float(d) for d in durations)
            audio_length = durations * 25
            if 'output' not in ann or not ann['output']:
                # fall back to segment-level outputs when 'output' is absent
                lis = ann.get('segments', [])
                response_length = 0
                for seg in lis:
                    response_length += len(seg['output'])
                if not response_length:
                    # print(ann.get('uttid', ''))
                    return False # skip
            else:
                response_length = count_string_units(ann.get("output", ""))
            if 'context' not in ann or ann['context'] is None:
                context_length = 0
            else:
                context_length = count_string_units(ann.get("context", ""))
            if 'input' not in ann or ann['input'] is None:
                instruct_length = 50
            else:
                instruct_length = count_string_units(ann.get("input", "")) + 50

            all_token_len = audio_length + (response_length + context_length + instruct_length)
            # try truncate context
            if all_token_len < min_token_num:
                return False
            elif all_token_len > seq_len:
                # if audio_length > 750:
                #     print(f"{ann.get('uttid', '')}: {all_token_len} > {seq_len}   {audio_length} {response_length} {context_length} {instruct_length}")
                return False
            else:
                return True
        orig_len = len(self.annotation)
        # keep a 150-token safety margin below the model sequence length
        self.annotation = [ann for ann in self.annotation if check_length(ann, seq_len - 150)]
        print(f"After filtering, {len(self.annotation)} annotation samples from {ann_paths}")
        #random.seed(42)
        #random.shuffle(self.annotation)
        print(f"Loaded {len(self.annotation)}/{orig_len} annotation samples from {ann_paths}")
        self.ret_id = ret_id
        self.data_aug = data_aug
        self.processor = Qwen2_5OmniProcessor.from_pretrained(model_path)
        self.seq_len = seq_len
        self.sample_merge = sample_merge

        self.pad_id = 151643 # or zero? <|endoftext|>, zero: !
        self.sep_id = [151644, 77091, 198] # <im_start>assistant\n
        self.audio_id = 151644
        self.end_of_turn = 151645 # eos
        self.audio_token_index = 151646

        if len(self.annotation) > 0 and sample_merge:
            # merge short to SEQ LEN
            self.new_annotation = []
            tmp_ann = []
            cur_len = 0
            batch_max_audio_len = 0
            original_sample_num = len(self.annotation)
            for ann in tqdm(self.annotation, desc="Merging Data Samples", mininterval=1):
                all_token_len, batch_max_audio_len = self.count_sample_token_len(ann, batch_max_audio_len)
                # pack while staying 200 tokens under seq_len and under the
                # per-pack sample cap
                if all_token_len + cur_len < seq_len - 200 and len(tmp_ann) < self.sample_merge_max_batchsize():
                    tmp_ann.append(ann)
                    cur_len += all_token_len
                elif len(tmp_ann) == 0:
                    pass
                else:
                    # current pack is full: flush it and start a new one
                    self.new_annotation.append(tmp_ann)
                    cur_len = all_token_len
                    tmp_ann = [ann]
                    batch_max_audio_len = self.get_audio_len(ann)
            if tmp_ann:
                self.new_annotation.append(tmp_ann)
            print(f"Merging Data Completed! Orig Len: {original_sample_num}; Curr Len: {len(self.new_annotation)}")
            self.annotation = self.new_annotation



        print(f"Dataset Processing Done, dataset has {len(self.annotation)} samples")
    def sample_merge_max_batchsize(self):
        """Upper bound on how many short samples may be packed together."""
        max_merge = 6
        return max_merge
    def get_audio_len(self, ann):
        """Estimated audio token count: total duration (s) * 25 tokens/s."""
        total_seconds = sum(float(d) for d in ann.get('durations', []))
        return total_seconds * 25

    def count_sample_token_len(self, ann, batch_max_audio_len):
        """Estimate the token budget of one annotation during merging.

        Args:
            ann: annotation dict.
            batch_max_audio_len: max audio length seen so far in the pack.

        Returns:
            tuple: (estimated total token length, updated running max audio
            length across the pack being merged).
        """
        total_seconds = sum(float(d) for d in ann.get('durations', []))
        audio_length = total_seconds * 25  # ~25 audio tokens per second
        max_audio_length = max(audio_length, batch_max_audio_len)
        # samples that contain audio are charged the pack-wide max, since
        # padding makes every audio in the pack equally long
        if audio_length != 0:
            audio_length = max_audio_length
        response_length = count_string_units(ann.get("output", ""))
        context = ann.get('context')
        context_length = 0 if context is None else count_string_units(context)
        prompt = ann.get('input')
        instruct_length = 50 if prompt is None else count_string_units(prompt) + 50

        all_token_len = audio_length + (response_length + context_length + instruct_length)
        return all_token_len, max_audio_length

    def __len__(self):
        """Number of (possibly merged) annotation entries."""
        return len(self.annotation)

    def get_user_prompt(self, task, index):
        """Return the user-prompt template for *task*, cycling by *index*.

        Raises:
            KeyError: when *task* is not a known task name.
        """
        templates = {
            "asr": ["请将这段中文语音转换为纯文本，去掉标点符号。<|audio_placeholder|>"],
            "t2t_small_talk": [""],
            "s2t_small_talk": ["<|audio_placeholder|>"],
            "t2t_casual_qa": [""],
            "s2t_casual_qa": ["<|audio_placeholder|>"],
            "t2t_formal_qa": [""],
            "s2t_formal_qa": ["<|audio_placeholder|>"],
        }
        options = templates[task]
        # deterministic round-robin selection instead of random.choice
        return options[index % len(options)]
    
    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers and the output length of the audio encoder
        """
        input_lengths = (input_lengths - 1) // 2 + 1
        output_lengths = (input_lengths - 2) // 2 + 1
        return input_lengths, output_lengths
    
    def prepare_loss_mask_and_labels(self, raw_ids, input_ids, num_audio_tokens):
        """
        Build the training loss mask and next-token labels.

        Positions between each sep_id pattern ("<im_start>assistant\\n") and
        the following end_of_turn token are marked 1 in the loss mask; mask
        and labels are then re-indexed so every audio placeholder token is
        expanded to its number of audio-embedding frames, and finally
        shifted by one for next-token prediction.

        Args:
            raw_ids (torch.Tensor | list): un-padded token IDs.
            input_ids (torch.Tensor): padded (1, seq_len + 1) token IDs.
            num_audio_tokens: per-audio embedding lengths (tensor; an empty
                list when the sample has no audio).

        Returns:
            tuple: (loss_mask, labels), each of shape (1, self.seq_len).
        """
        # Convert raw_ids to a list if it's a tensor
        if isinstance(raw_ids, torch.Tensor):
            raw_ids = raw_ids.tolist()
        
        # seq_len = len(raw_ids)
        seq_len = self.seq_len + 1
        loss_mask = torch.zeros(1, seq_len, dtype=torch.float32)
        
        # Length of separator pattern
        sep_len = len(self.sep_id)
        
        i = 0
        while i < seq_len:
            # Check if we find the separator pattern
            if i + sep_len <= seq_len and raw_ids[i:i+sep_len] == self.sep_id:
                # Skip the separator itself
                i += sep_len
                # Mark everything as 1 until we find end_of_turn
                # NOTE(review): raw_ids may be shorter than seq_len; if a
                # separator is not followed by end_of_turn before the list
                # ends, raw_ids[i] raises IndexError — confirm truncation
                # always keeps the trailing end_of_turn token.
                while i < seq_len and raw_ids[i] != self.end_of_turn:
                    loss_mask[0, i] = 1
                    i += 1
                # Mark the end_of_turn position as 1
                if i < seq_len and raw_ids[i] == self.end_of_turn:
                    loss_mask[0, i] = 1
                    i += 1
            else:
                i += 1
                
        
        # Positions of ordinary (non audio-placeholder) tokens.
        batch_indices, non_audio_indices = torch.where(
            (input_ids != self.audio_token_index)
        )        
        # Each audio placeholder expands to its embedding length; every
        # other token keeps width 1.
        token_placeholder_num = torch.zeros_like(input_ids)
        special_audio_token_mask = input_ids == self.audio_token_index
        if len(num_audio_tokens) != 0:
            zero_mask = num_audio_tokens != 0
            num_audio_tokens = num_audio_tokens[zero_mask]
            token_placeholder_num[special_audio_token_mask] = num_audio_tokens.long() - 1
        token_placeholder_num = token_placeholder_num + 1
        new_token_positions = torch.cumsum(token_placeholder_num, -1) - 1
        max_token_num = token_placeholder_num.sum(-1).max()
        text_to_overwrite = new_token_positions[batch_indices, non_audio_indices]
        # Scatter the text-token mask/labels into their post-expansion slots.
        new_loss_mask = torch.zeros(input_ids.shape[0], max(max_token_num, seq_len), dtype=torch.float)
        new_loss_mask[batch_indices, text_to_overwrite] = loss_mask[batch_indices, non_audio_indices]

        labels = torch.full((input_ids.shape[0], max(max_token_num, seq_len)), -100).to(torch.long)
        labels[batch_indices, text_to_overwrite] = input_ids[batch_indices, non_audio_indices]
        # Shift by one position for next-token prediction.
        labels = labels[:, 1:seq_len]
        loss_mask = new_loss_mask[:, 1:seq_len]
        return loss_mask, labels
        
    def load_audio_from_stream(self, command: str, output_sample_rate: int = 16000):
        """Load audio from a command's stdout while preventing resource leaks.

        Args:
            command: the shell command to execute that produces WAV output.
            output_sample_rate: desired sample rate for the returned audio.

        Returns:
            tuple: (waveform, sample_rate)

        Raises:
            RuntimeError: if the command fails, times out, or the audio
                cannot be decoded.
        """
        # Initialize variables so the finally block can always clean up.
        pipe = None
        audio_data = None

        try:
            # NOTE: the command runs through the shell; callers must not pass
            # untrusted input here.
            pipe = subprocess.Popen(
                command,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                # Prevent the subprocess from inheriting file descriptors
                close_fds=True
            )

            # Read the output with a timeout to prevent hanging
            stdout, stderr = pipe.communicate(timeout=30)

            if pipe.returncode != 0:
                error_msg = stderr.decode('utf-8', errors='replace')
                raise RuntimeError(f"Command failed with return code {pipe.returncode}: {error_msg}")

            # Keep the decoded bytes in memory for torchaudio
            audio_data = io.BytesIO(stdout)

            # Load audio with torchaudio
            waveform, sample_rate = torchaudio.load(
                audio_data,
                format='wav',  # Explicit format helps with stream processing
                normalize=True
            )

            # Resample if needed
            if sample_rate != output_sample_rate:
                transform = torchaudio.transforms.Resample(
                    orig_freq=sample_rate,
                    new_freq=output_sample_rate
                )
                waveform = transform(waveform)
                sample_rate = output_sample_rate

            return waveform, sample_rate

        except subprocess.TimeoutExpired:
            if pipe:
                pipe.kill()
                stdout, stderr = pipe.communicate()
            raise RuntimeError("Command timed out while loading audio")

        except RuntimeError:
            # Our own returncode failure above -- don't wrap it a second time.
            raise

        except Exception as e:
            # Chain the original exception so the real cause stays visible.
            raise RuntimeError(f"Error loading audio from stream: {str(e)}") from e

        finally:
            # Ensure resources are cleaned up
            if audio_data is not None:
                audio_data.close()
            if pipe is not None and pipe.poll() is None:
                pipe.terminate()
                try:
                    pipe.wait(timeout=1)
                except subprocess.TimeoutExpired:
                    pipe.kill()

    def conversation_processor(self, ann, index, audios_lens):
        """Assemble a chat-template conversation for one annotation.

        Args:
            ann: annotation dict (needs 'task', 'wavs', 'output'; optional
                'context' and 'input').
            index: sample index, used to pick the user-prompt template.
            audios_lens: how many entries of ann['wavs'] to reference.

        Returns:
            list: system / user / assistant turns in chat-template format.

        Raises:
            ValueError: if ann['task'] matches no known task family.
        """
        task = ann['task']
        if task == 'asr':
            system_text = "You are a speech recognition model."
        elif "casual_qa" in task:
            system_text = "You are a helpful assistant, capable of processing textual and auditory inputs, generating text and speech responses in a conversational, natural style."
        elif "formal_qa" in task:
            system_text = "You are a helpful assistant, capable of processing textual and auditory inputs, generating text responses in a formal style."
        elif "small_talk" in task:
            system_text = "You are a helpful assistant, capable of processing textual and auditory inputs, generating colloquial text and speech responses for informal conversations."
        else:
            raise ValueError(f"Invalid task type {task}")

        turns = [{"role": "system", "content": [{"type": "text", "text": system_text}]}]

        # User turn: audio references first, then optional context, the task
        # prompt, and any extra free-form instruction.
        user_content = [
            {"type": "audio", "audio_url": ann["wavs"][pos]} for pos in range(audios_lens)
        ]
        context = ann.get('context')
        if context not in (None, ""):
            user_content.append({
                    "type": "text",
                    "text": "###Context\n" + context.replace("\\n", "\n").replace("\\N", "\n") + "###Instruct\n"
                })
        prompt = self.get_user_prompt(task, index)
        if prompt != "":
            user_content.append({"type": "text", "text": prompt})
        extra_input = ann.get('input')
        if extra_input not in (None, ""):
            user_content.append({
                    "type": "text",
                    "text": extra_input.replace("\\n", "\n").replace("\\N", "\n")
                })

        turns.append({"role": "user", "content": user_content})
        turns.append({"role": "assistant", "content": ann['output']})

        return turns

            
    def __getitem__(self, index):
        """Load, tokenize and feature-extract one (possibly merged) sample.

        Returns:
            dict with input_features, input_ids, attention_mask,
            feature_attention_mask, labels and loss_mask (plus uttid when
            self.ret_id is set).
        """
        with torch.no_grad():
            anns = self.annotation[index]

            # After sample merging an entry is a list of annotations;
            # normalize the un-merged dict case to a one-element list.
            if type(anns) == dict:
                anns = [anns]

            all_inputs = []
            wo_audio_index = []  # indices of annotations carrying no audio
            ann_index = -1
            ann_dur_list = []
            for ann in anns:
                ann_index += 1
                audios = []
                durations = ann.get("durations", [])
                for d in durations:
                    ann_dur_list.append(float(d))
                if len(ann['wavs']) == 0:
                    # text-only sample: keep a dummy one-second silence buffer
                    sr = 16000
                    audio = np.zeros(sr, dtype=np.float32)
                    wo_audio_index.append(ann_index)
                    # audios.append(audio)
                else:
                    for wav_path in ann['wavs']:
                        if "wav-copy" in wav_path:
                            # "cmd path:offset" style entry: read WAV at offset
                            audio, sr = read_wav_from_fd(wav_path.split(" ")[1])
                            if audio.shape[0] >= 2:  # stereo, multichannel to mono
                                audio = audio[:1]
                            if sr != 16000:
                                resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)
                                audio = resampler(audio)
                                sr = 16000
                            audio = audio.squeeze(0).numpy()
                            if len(audio) < 16000:
                                audio = np.pad(audio, (0, 16000 - len(audio)))
                        elif ".ark" in wav_path:
                            audio, sr = load_ark(wav_path)
                            if audio.shape[0] >= 2:  # stereo, multichannel to mono
                                audio = audio[:1]
                            if sr != 16000:
                                resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)
                                audio = resampler(audio)
                                sr = 16000
                            audio = audio.squeeze(0).numpy()
                            if len(audio) < 16000:
                                audio = np.pad(audio, (0, 16000 - len(audio)))
                        else:
                            sr = 16000
                            audio = librosa.load(
                                wav_path,
                                sr=16000,
                            )[0]
                        audios.append(audio)

                if 'text_input_no_token' in ann:
                    text_input = ann['text_input_no_token']
                elif len(ann['wavs']) == 0:
                    conversation = self.conversation_processor(ann, index, 0)
                    text_input = self.processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
                else:
                    conversation = self.conversation_processor(ann, index, len(audios))
                    text_input = self.processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
                # NOTE(review): the comprehensions below assume text_input is
                # a list of strings; if apply_chat_template returns a single
                # str this iterates characters -- verify against the processor.
                text_input = [t.replace("<|audio_bos|><|AUDIO|><|audio_eos|>", "") for t in text_input]
                text_input = [t.replace("<|audio_placeholder|>", "<|audio_bos|><|AUDIO|><|audio_eos|>") for t in text_input]
                # print(f"Rank: {torch.distributed.get_rank()} path: {ann['wavs']} text: {ann['context']} \n conversation: {conversation} \n text_input: {text_input}")
                # Generate audio features
                if len(audios) == 0:
                    audios = None
                inputs = self.processor(audio=audios,text=text_input, return_tensors="pt", padding=True, sampling_rate=sr)
                all_inputs.append(inputs)
                del audios, audio
                # NOTE(review): 'conversation' is unbound on the
                # 'text_input_no_token' branch, so this del raises there.
                del text_input, conversation
                # del audio_parts
            # input_ids, attention_mask, input_features, feature_attention_mask

            # input_features = torch.cat([x['input_features'] for x in all_inputs], dim=0)
            # feature_attention_mask = torch.cat([x['feature_attention_mask'] for x in all_inputs], dim=0)
            
            # Concatenate audio features across the merged annotations.
            input_features = []
            feature_attention_mask = []
            for x in all_inputs:
                if 'input_features' in x and x['input_features'] is not None:
                    input_features.append(x['input_features'])
                    feature_attention_mask.append(x['feature_attention_mask'])
            if len(input_features) == 0: 
                input_features = None
                feature_attention_mask = None
            else:
                input_features = torch.cat(input_features, dim=0)
                feature_attention_mask = torch.cat(feature_attention_mask, dim=0)

            attention_mask = torch.ones(1, self.seq_len + 1, dtype=torch.int64)
            # attention_mask = torch.tril(torch.ones(1, 1, self.seq_len + 1, self.seq_len+1, dtype=torch.int64)) # all ones
                
            # Concatenate token ids of all merged segments, each terminated
            # by an end-of-turn token; stop before overflowing seq_len.
            raw_ids = []
            for inputs in all_inputs:
                if len(raw_ids) + len(inputs['input_ids'].squeeze().tolist()) + len([self.end_of_turn]) > self.seq_len:
                    ann_uttids = []
                    for ann in anns:
                        # NOTE(review): += on a str extends the list with its
                        # individual characters; .append() was likely intended.
                        ann_uttids += ann.get("uttid", "_#_")
                    print("too long data: " + "".join(ann_uttids))
                    break
                raw_ids += inputs['input_ids'].squeeze().tolist()
                raw_ids += [self.end_of_turn]
            raw_ids = torch.tensor(raw_ids, dtype=torch.int64)
            
            #keep first audio token for each audio segment
            audio_positions = torch.where(raw_ids == self.audio_token_index)[0]
            if len(audio_positions) > 0:
                is_start = torch.ones_like(audio_positions, dtype=torch.bool)
                is_start[1:] = (audio_positions[1:] - audio_positions[:-1]) > 1
                keep_mask = torch.ones_like(raw_ids, dtype=torch.bool)
                non_start_positions = audio_positions[~is_start]
                keep_mask[non_start_positions] = False
                raw_ids = raw_ids[keep_mask]
            
            # Keep only as many feature rows as audio segments that survived.
            audio_positions = torch.where(raw_ids == self.audio_token_index)[0]
            if len(audio_positions) > 0:
                input_features = input_features[:len(audio_positions)]
                feature_attention_mask = feature_attention_mask[:len(audio_positions)]


            # Right-pad token ids to a fixed length of seq_len + 1.
            input_ids = torch.full((1, self.seq_len + 1), self.pad_id, dtype=torch.int64)
            input_ids[0,:len(raw_ids)] = raw_ids
            # construct loss_mask
            
            # only answers are ones, follow labels
            if input_features is not None:
                _, audio_emb_lengths = self._get_feat_extract_output_lengths(
                    feature_attention_mask.sum(-1) 
                )
            else:
                audio_emb_lengths = []

            # # print(f"Audio Embedding Lengths: {audio_emb_lengths} {feature_attention_mask.shape}")
            # for ann_index in wo_audio_index:
            #     audio_emb_lengths[ann_index] = 0
            #     feature_attention_mask[ann_index,:] = 0

            # add label processing
            loss_mask, labels = self.prepare_loss_mask_and_labels(raw_ids,input_ids, audio_emb_lengths)


            # labels = input_ids.clone()
            input_ids = input_ids[:, :-1]
            # labels = labels[:, 1:]
            attention_mask = attention_mask[:, :-1]
            if self.data_aug:
                # SpecAugment on the feature matrix, seeded by sample index
                sample = {'feat': input_features, 'feature_attention_mask': feature_attention_mask}
                sample = spec_aug(sample, seed=index)
                input_features = sample['feat']
            del all_inputs
            if self.ret_id:
                uttid = ann['uttid'] if 'uttid' in ann else f"utt_{index}"
                return deepcopy({
                    "uttid": uttid,
                    "input_features": input_features,
                    "input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "feature_attention_mask": feature_attention_mask,
                    "labels": labels,
                    "loss_mask": loss_mask
                })
            else:
                return deepcopy({
                    "input_features": input_features,
                    "input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "feature_attention_mask": feature_attention_mask,
                    "labels": labels,
                    "loss_mask": loss_mask
                })
        

def collate_fn(batch):
    """Collate per-sample dicts into one batch dict.

    Tensor fields are concatenated along dim 0; non-tensor fields become
    lists.  Audio feature fields may be None for text-only samples and are
    concatenated across only the samples that have them.  Finally, audio
    features are truncated to the longest valid length in the batch,
    rounded up to a multiple of 4.

    Args:
        batch: list of sample dicts produced by the dataset.

    Returns:
        dict: the collated batch (deep-copied so worker-shared storage is
        not referenced by the training loop).
    """
    new_batches = {}
    for key in batch[0]:
        value = batch[0][key]
        if key == 'input_features' or key == 'feature_attention_mask':
            # Audio features can be None for text-only samples; concatenate
            # only the entries that are present.
            for idx, item in enumerate(batch):
                if idx == 0 or item[key] is None:
                    continue
                if value is None:
                    value = item[key]
                else:
                    value = torch.cat([value, item[key]], dim=0)
            final_val = value
        else:
            if isinstance(value, torch.Tensor):
                final_val = torch.cat([item[key] for item in batch], dim=0)
            else:
                final_val = [item[key] for item in batch]
        new_batches[key] = final_val

    if new_batches.get('input_features') is not None:
        input_features = new_batches['input_features']
        feature_attention_mask = new_batches['feature_attention_mask']
        max_audio_len = int(max(feature_attention_mask.sum(-1)))
        # Round UP to the nearest multiple of 4.  BUG FIX: the old formula
        # `x + (4 - x % 4)` added a full extra 4 frames when the length was
        # already aligned.
        max_audio_len = (max_audio_len + 3) // 4 * 4
        feature_attention_mask = feature_attention_mask[:, :max_audio_len]
        input_features = input_features[:, :, :max_audio_len]  # truncate to max len
        new_batches["input_features"] = input_features
        new_batches["feature_attention_mask"] = feature_attention_mask

    return deepcopy(new_batches)

# Debugging entry point for dataset initialization.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    # Sample jsonl annotation lists used for local smoke testing.
    ann_paths = [
        "audio_data/train_20250324_llmasr.jsonl",
        "audio_data/train_20231206_slidespeech_487h.jsonl",
        "audio_data/train_20250417_cn_slides_generated_context_all_upper.jsonl",
        "audio_data/train_20250410_cn_slides_draft_8kh_open_ocr_all_upper.jsonl"
    ]
    model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2-Audio-7B"

    # BUG FIX: this file defines Qwen2_5OmniThinkerDataset; the previous
    # reference to the undefined name Qwen2AudioDataset raised NameError.
    dataset = Qwen2_5OmniThinkerDataset(ann_paths, model_path, ret_id=True, data_aug=True, sample_merge=False)

    # DataLoader configuration
    batch_size = 2

    # Megatron-style random sampler over the whole dataset (single rank).
    batch_sampler = MegatronPretrainingRandomSampler(
        dataset=dataset,
        total_samples=len(dataset),
        consumed_samples=0,
        micro_batch_size=batch_size,
        data_parallel_rank=0,
        data_parallel_size=1,
        data_sharding=False,
    )

    train_ds = DataLoader(
        dataset=dataset,
        batch_sampler=batch_sampler,
        num_workers=1,
        pin_memory=True,
        collate_fn=collate_fn,
        persistent_workers=False,  # was the dead expression `True if False else False`
    )

    # Iterate a couple of epochs to smoke-test loading and memory behaviour.
    for epoch in range(1, 3):
        for batch_idx, batch in tqdm(enumerate(train_ds)):
            if batch_idx % 1000 == 0:
                gc.collect()  # periodic GC so leaks surface early
            del batch