import json
import numpy as np
import gc
import torch
from torch.utils.data import Dataset, default_collate
import torchaudio
import random
from transformers import AutoProcessor
import subprocess
import io
import pdb
import math
from tqdm import tqdm
from copy import deepcopy
from multiprocessing import Process, Queue
import json
from megatron.training import get_args
from megatron.legacy.data.data_samplers import MegatronPretrainingRandomSampler
import sys, os, re, gzip, struct
import struct

def open_or_fd(file, mode='rb'):
    """Open a file, gzipped file, or pipe, or pass through an already-open fd.

    Adapted from kaldi_io. Strips an optional 'ark:'/'scp:' rxfilename prefix
    (with kaldi flags) and honours a trailing ':offset' suffix by seeking to
    that byte offset after opening.

    Args:
        file: path string — possibly carrying a kaldi prefix, a ':offset'
            suffix, or a '|' pipe marker at either end — or an already-opened
            file object, which is forwarded unchanged.
        mode: open mode, used only for regular and gzipped files.

    Returns:
        A binary file-like object, positioned at the requested offset if any.
    """
    offset = None
    try:
        # Strip 'ark:' / 'scp:' prefix (with optional kaldi flags) from the
        # rxfilename.
        if re.search(r'^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
            (prefix, file) = file.split(':', 1)
        # Separate the optional byte offset from the filename.
        if re.search(r':[0-9]+$', file):
            (file, offset) = file.rsplit(':', 1)
        if file[-1] == '|':
            # Input pipe: read from the command's stdout.
            # BUGFIX: the original called an undefined `popen` (kaldi_io's
            # private helper was never copied over); use subprocess directly.
            fd = subprocess.Popen(file[:-1], shell=True, stdout=subprocess.PIPE).stdout
        elif file[0] == '|':
            # Output pipe: write into the command's stdin.
            fd = subprocess.Popen(file[1:], shell=True, stdin=subprocess.PIPE).stdin
        elif file.split('.')[-1] == 'gz':
            # Transparently decompress gzip files.
            fd = gzip.open(file, mode)
        else:
            # A normal file.
            fd = open(file, mode)
    except TypeError:
        # 'file' is an already-opened file descriptor; forward it unchanged.
        fd = file
    # Seek to the requested offset, if one was given.
    if offset is not None:
        fd.seek(int(offset))
    return fd


def read_wav_from_fd(ark_with_idx):
    """Read one complete WAV file starting at the fd's current seek position.

    `ark_with_idx` is resolved via open_or_fd() (typically a kaldi-style
    'path:offset' string), so the WAV header is expected to begin exactly at
    that offset.

    Returns:
        (waveform, sample_rate) as produced by torchaudio.load.

    Raises:
        ValueError: if the header is truncated, not RIFF/WAVE, not PCM, or no
            'data' chunk is found within the first 44 bytes.
    """
    fd = open_or_fd(ark_with_idx)
    # 1. Read the first 44 bytes (canonical PCM WAV header size).
    header = fd.read(44)
    if len(header) < 44:
        raise ValueError("Invalid WAV header (too short)")

    # 2. Parse the RIFF container fields.
    riff, file_size, wave = struct.unpack('<4sI4s', header[:12])
    if riff != b'RIFF' or wave != b'WAVE':
        raise ValueError("Not a valid WAV file")

    # 3. Parse the fmt sub-chunk (only PCM is accepted).
    # NOTE(review): assumes the fmt chunk sits at byte 12 with a 16-byte body —
    # true for canonical PCM WAVs, but not guaranteed in general; confirm the
    # writers of these ark files always emit canonical headers.
    fmt_chunk = header[12:36]
    audio_format, num_channels, sample_rate, byte_rate, block_align, bits_per_sample = \
        struct.unpack('<HHI I HH', fmt_chunk[8:24])
    if audio_format != 1:
        raise ValueError("Only PCM WAV format is supported")

    # 4. Locate the 'data' chunk and its size within the 44-byte header.
    pos = 36  # skip RIFF header + fmt sub-chunk
    while pos < len(header):
        subchunk_id, subchunk_size = struct.unpack('<4sI', header[pos:pos+8])
        if subchunk_id == b'data':
            data_size = subchunk_size
            break
        pos += 8 + subchunk_size
    else:
        raise ValueError("Could not find 'data' chunk in WAV header")

    # 5. Total WAV size = all header/sub-chunk bytes + the data payload.
    wav_size = pos + 8 + data_size  # total size including every sub-chunk

    # 6. Re-read the complete WAV bytes from the start of the header.
    # NOTE(review): requires a seekable fd — this fails on pipe/gzip streams
    # that open_or_fd can return; confirm callers only pass plain files.
    fd.seek(-44, io.SEEK_CUR)  # rewind to the WAV start
    wav_bytes = fd.read(wav_size)

    # 7. Decode with torchaudio from an in-memory buffer.
    waveform, sr = torchaudio.load(io.BytesIO(wav_bytes))
    return waveform, sr



def spec_aug(sample, num_t_mask=2, num_f_mask=2, max_t=50, max_f=15, max_w=80, seed=42):
    """Apply SpecAugment-style time and frequency masking to sample['feat'].

    Reseeds the global `random` module with `seed` so masks are deterministic
    per sample, then replaces sample['feat'] with a masked clone.

    Args:
        sample: dict with 'feat' (torch.Tensor) and 'feature_attention_mask'.
        num_t_mask: number of time masks to draw.
        num_f_mask: number of frequency masks to draw.
        max_t: maximum width of each time mask.
        max_f: maximum width of each frequency mask.
        max_w: unused (kept for interface compatibility with time warping).
        seed: RNG seed; callers pass the sample index.

    Returns:
        The same dict, with 'feat' replaced by the masked tensor.
    """
    random.seed(seed)
    assert 'feat' in sample
    feats = sample['feat']
    assert isinstance(feats, torch.Tensor)
    # Number of valid frames is taken from the first row of the mask.
    valid_frames = sample['feature_attention_mask'].sum(-1)[0].item()
    masked = feats.clone().detach()
    num_freq_bins = masked.size(1)
    # NOTE(review): masking indexes dim 0 as time — confirm 'feat' is laid out
    # (time, freq) here and not (batch, freq, time).
    for _ in range(num_t_mask):
        t0 = random.randint(0, valid_frames - 1)
        t1 = min(valid_frames, t0 + random.randint(1, max_t))
        masked[t0:t1, :] = 0
    for _ in range(num_f_mask):
        f0 = random.randint(0, num_freq_bins - 1)
        f1 = min(num_freq_bins, f0 + random.randint(1, max_f))
        masked[:, f0:f1] = 0
    sample['feat'] = masked
    return sample

def get_audio_length_via_header(file_path):
    """Return the audio duration in seconds by reading only file metadata
    (no decode of the samples)."""
    meta = torchaudio.info(file_path)
    return meta.num_frames / meta.sample_rate

class Qwen2AudioDataset(Dataset):
    """Audio-text dataset for Qwen2-Audio style training under Megatron.

    Loads jsonl annotation files, filters/truncates samples that would not fit
    in `seq_len` tokens, and (when `sample_merge` is on) pre-packs consecutive
    short samples into groups so each item yields close to `seq_len` tokens.
    """

    def __init__(self, ann_paths, model_path, seq_len=1024, ret_id=False, data_aug=False, sample_merge=False, read_penguins=False):
        super().__init__()
        self.annotation = []
        self.read_penguins = read_penguins

        if read_penguins:
            # Silence filler used to pad Penguins feature matrices.
            self.sil_matrix = torch.zeros(1, 2048, 1600)
        else:
            self.sil_matrix = None

        # Load annotations: one JSON object per line (jsonl). Non-.jsonl
        # entries in a list are silently skipped (original behavior).
        if isinstance(ann_paths, list):  # multiple jsonl files
            for f in ann_paths:
                if f.endswith(".jsonl"):
                    self.annotation += [json.loads(line) for line in open(f, 'r', encoding='utf-8')]
        else:
            self.annotation = [json.loads(line) for line in open(ann_paths, 'r', encoding='utf-8')]

        def check_length(ann, seq_len):
            """Keep `ann` iff it can fit into seq_len tokens; truncates
            ann['context_text'] in place when that makes it fit."""
            utt2dur = ann.get('utt2dur', 30)
            # Each 30 s audio chunk is padded to 750 encoder tokens.
            audio_length = math.ceil(utt2dur / 30) * 750
            if 'text' not in ann or not ann['text']:
                segments = ann.get('segments', [])
                response_length = sum(len(seg['text']) for seg in segments)
                if not response_length:
                    print(ann.get('id', ''))
                    return False  # no usable target text: skip
            else:
                response_length = len(ann.get("text", ""))
            context_length = len(ann.get("context_text", ""))
            instruct_length = 20
            # ~1.5 characters per text token on average.
            all_token_len = audio_length + (response_length + context_length + instruct_length) / 1.5
            if all_token_len <= seq_len:
                return True
            # Too long: try shrinking the context to whatever room is left.
            rest_context_len = int(seq_len - audio_length - (response_length + instruct_length) / 1.5)
            if context_length > 0 and rest_context_len > 0:
                ann['context_text'] = ann['context_text'][:rest_context_len]
                return True
            return False

        orig_len = len(self.annotation)
        self.annotation = [ann for ann in self.annotation if check_length(ann, seq_len)]
        print(f"Loaded {len(self.annotation)}/{orig_len} annotation samples from {ann_paths}")
        self.ret_id = ret_id
        self.data_aug = data_aug
        self.processor = AutoProcessor.from_pretrained(model_path)
        self.seq_len = seq_len
        self.sample_merge = sample_merge

        # Special token ids of the Qwen2-Audio tokenizer.
        self.pad_id = 151643 # or zero? <|endoftext|>, zero: !
        self.sep_id = [151644, 77091, 198] # <im_start>assistant\n
        self.audio_id = 151644
        self.end_of_turn = 151645 # eos
        self.audio_token_index = 151646

        if len(self.annotation) > 0 and sample_merge and 'audio_length' not in self.annotation[0]:
            print(f"Computing audio_length and tokens in dataset")

            n_processes = 32
            annotation_chunks = [self.annotation[i::n_processes] for i in range(n_processes)]

            def single_process_annotation(annotation_chunk, queue):
                """Worker: annotate each sample with audio/token lengths, then
                greedily pack consecutive samples into groups < seq_len."""
                result = []
                for idx, ann in enumerate(tqdm(annotation_chunk)):
                    audio_length = get_audio_length_via_header(ann["path"])
                    estimated_frames = math.ceil(audio_length * 100)  # 100 frames/s
                    # Split the audio into parts of 30 s or less.
                    max_audio_len_in_s = 30
                    audio_parts_count = math.ceil(int(audio_length) / max_audio_len_in_s)

                    # Build the user turn: one audio entry per 30 s part,
                    # followed by the task instruction.
                    content = [{"type": "audio", "audio_url": ann["path"]} for _ in range(audio_parts_count)]
                    # BUGFIX: get_suffix_prompt requires an index argument; the
                    # original call omitted it and raised TypeError.
                    content.append({"type": "text", "text": self.get_suffix_prompt(ann["task"], idx)})
                    conversation = [{"role": "user", "content": content},
                                    {"role": "assistant", "content": ann["text"]}]

                    # Render the chat template without tokenizing, then encode.
                    text_input_no_token = self.processor.apply_chat_template(conversation, add_generation_prompt=False, tokenize=False)
                    text_input_token = self.processor.tokenizer.encode(text_input_no_token)
                    audio_inp_len = torch.tensor(estimated_frames)
                    if len(audio_inp_len.shape) == 1:
                        audio_inp_len = audio_inp_len.unsqueeze(0)
                    audio_feat_lengths, audio_output_lengths = self._get_feat_extract_output_lengths(
                        audio_inp_len
                    )
                    ann['text_input_no_token'] = text_input_no_token
                    ann["text_input_token"] = text_input_token
                    ann["audio_length"] = audio_length
                    ann["audio_output_lengths"] = audio_output_lengths.tolist()
                    ann["estimated_frames"] = int(audio_length * 100)  # frames estimated from duration
                    # Each audio part is padded to 750 encoder tokens.
                    ann['sequence_whole_length'] = len(text_input_token) + audio_parts_count * 750
                    result.append(ann)

                # Greedy packing: accumulate consecutive samples until the next
                # one would overflow seq_len. The +1 accounts for the eos token
                # appended between samples in __getitem__.
                merged_results = []
                last_anns = []
                last_sum_len = 0
                for ann in result:
                    if last_sum_len + ann['sequence_whole_length'] + 1 < seq_len:  # can be merged
                        last_anns.append(ann)
                        last_sum_len += ann['sequence_whole_length'] + 1
                    else:
                        # BUGFIX: only flush non-empty groups — the original
                        # could append an empty list when a sample was too long
                        # to fit on its own, which later broke __getitem__.
                        if last_anns:
                            merged_results.append(last_anns)
                        last_anns = []
                        last_sum_len = 0

                        if ann['sequence_whole_length'] >= seq_len:
                            # Skip: single sample longer than the sequence length.
                            pass
                        else:
                            last_anns.append(ann)
                            # BUGFIX: count the same +1 eos as the merge branch.
                            last_sum_len += ann['sequence_whole_length'] + 1

                if last_anns:
                    merged_results.append(last_anns)

                queue.put(merged_results)
                return merged_results

            processes = []
            queue = Queue()
            self.annotation = []
            for i in range(n_processes):
                p = Process(target=single_process_annotation, args=(annotation_chunks[i], queue))
                processes.append(p)
                p.start()

            # Drain the queue before join() to avoid deadlock on large payloads.
            for i in range(n_processes):
                self.annotation += queue.get()

            for p in processes:
                p.join()
        print(f"Dataset Processing Done, dataset has {len(self.annotation)} samples")
        
    def __len__(self):
        # One item per annotation entry (a merged group counts as one item).
        return len(self.annotation)

    def get_suffix_prompt(self, task, index):
        mapper = {
            "asr": ["转录上述语音内容。"],
            "asr_zh": ["转录上述语音内容。"],
            "asr_en": ["转录上述语音内容。"],
            "asr_zhen": ["转录上述语音内容。"],
            "asr_draft": ["基于给定辅助信息，转录上述语音内容。"],
            "asr_multi": ['按不同发言人，转录上述语音内容。']
        }
        # mapper = {
        #     "asr": [
        #         "转录上述语音内容。"
        #     ],
        #     "asr_zh": [
        #         "转录上述语音内容，已知语言为: <|zh|>",
        #         "转录上述语音内容，已知语言为: <|zh|>",
        #         "转录上述语音内容，已知语言为: <|zh|>",
        #         "转录上述语音内容，已知语言为: <|zh|>",
        #         "转录上述语音内容，已知语言为: <|zh|>",
        #         "转录上述语音内容，已知语言为: <|zh|>",
        #         "转录上述语音内容，已知语言为: <|zh|>",
        #         "转录上述语音内容，已知语言为: <|zh|>",
        #         "转录上述语音内容，已知语言为: <|zh|>",
        #         "前面的语音说了什么？",
        #         "请将语音中的内容写下来。",
        #         "请识别这段中文语音。",
        #         "听前面的音频，写出对方说的内容。",
        #         "写下你听到的内容。",
        #         "仔细听这段语音，记下语音中的话",
        #         "将你听到的话写下来",
        #         "请将语音转换为文字",
        #         "转录上述语音内容",
        #         "做ASR",
        #         "识别内容",
        #         "请转录音频。",
        #         "帮我听写。",
        #         "转成文字。",
        #         "写下来。",
        #         "听写内容。",
        #         "请听音频并记录。",
        #         "说了什么",
        #         "请将上述语音准确地转成文字，保持原有的语言，不要添加或遗漏任何信息。",
        #         "前面是一段语音，请准确无误地转成文字，遵循语音本身的语言，不要回复其他内容。",
        #         "请将你听到的全部内容转成文字，确保没有任何遗漏。",
        #         "请将音频中的讲话内容逐字记录下来，保持原有的语气和措辞。",
        #         "请完整地将语音内容转录为文字，不要添加任何注释或解释。",
        #         "能帮我把音频里的话写出来吗？",
        #         "听听这段录音，告诉我内容是什么。",
        #         "把刚才的声音内容记录下来。",
        #         "请把你听到的转成文字。",
        #         "麻烦把这段话的文字版写一下。",
        #         "你能把音频中的对话写出来吗？",
        #         "听一下这段音频，写下里面的话。",
        #         "请将听到的内容整理成文字。",
        #         "请将语音内容文字化。",
        #         "帮我听一下这段录音，写下内容。",
        #         "请把音频中的话记录下来。",
        #         "请把刚才的语音转换成文字。",
        #         "听完这段音频，请写出内容。",
        #         "能不能帮我把这段声音转成文字？",
        #         "听一下前面的语音，说了些什么？",
        #         "请将你听到的内容写下来。",
        #         "这段语音的内容是什么？",
        #         "请把语音内容写成文字。",
        #         "把录音里的内容抄下来。",
        #         "请将音频中的话语记录下来。",
        #         "帮我把听到的内容写成文字。",
        #         "请把这段语音内容转成文字。",
        #         "你能听一下这段语音并写下内容吗？",
        #         "请将语音转录为文字。",
        #         "请听音频并写下所听到的内容。",
        #         "麻烦把音频中的话转成文字版。",
        #         "请把音频里的话整理出来。"                
        #     ],
        #     "speaker_diarization_zh": [
        #         "依次标出每位发言人及发言内容。",
        #         "依次写下每位发言者的内容。",
        #         "请辨认每位发言者并写出对应的发言内容。",
        #         "请将每位发言者的发言内容写下来。"
        #     ],
        #     "speaker_verification": [
        #         "这两句语音是同一个人说的吗？",
        #         "请判断这两句话是否是同一个人说的。",
        #         "这两句话是同一个人说的吗？"
        #     ],
        #     "accent_classification": [
        #         "请识别音频中的方言种类。",
        #         "请识别音频中的方言，并从以下选项中选择一项：汉语普通话官话、台湾重口音、河南方言、东北重口音、广东重口音、江西重口音、四川方言、江苏重口音、福建重口音、湖南重口音、陕西方言、天津方言、山东方言、湖北方言、河北方言、山西方言、甘肃方言、宁夏方言、长沙方言、云南方言",
        #         "这句话是用什么方言说的？",
        #         "这句话用的是哪个地方的方言？"
        #     ],
        #     "multilingual_classification": [
        #         "请识别这句话是用什么语言说的。",
        #         "请识别音频中的语言，从以下选项中选择一项：英语、中文、日语、俄语。",
        #         "这句话是用什么语言说的？"
        #     ]
        # }

        prompt_list = mapper[task]
        return prompt_list[index % len(prompt_list)]
        # return random.choice(mapper[task])
    



    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers and the output length of the audio encoder
        """
        input_lengths = (input_lengths - 1) // 2 + 1
        output_lengths = (input_lengths - 2) // 2 + 1
        return input_lengths, output_lengths
    
    def prepare_loss_mask_and_labels(self, raw_ids, input_ids, num_audio_tokens):
        """Build the loss mask and labels for a packed sequence.

        First marks with 1 every position from just after each
        "<im_start>assistant\\n" separator up to and including the following
        end-of-turn token; then re-scatters both mask and token ids so that
        each audio placeholder token expands into `num_audio_tokens` slots
        (matching the audio encoder's output length).

        Args:
            raw_ids: unpadded token ids of the packed sample (tensor or list).
            input_ids: (1, seq_len + 1) padded token ids.
            num_audio_tokens: expanded length per audio placeholder.

        Returns:
            (loss_mask, labels): both shaped (1, seq_len); labels are shifted
            one position left and filled with -100 where no text token lands.
        """
        # Convert raw_ids to a list if it's a tensor
        if isinstance(raw_ids, torch.Tensor):
            raw_ids = raw_ids.tolist()
        
        # Scan over the full padded length, not just len(raw_ids).
        seq_len = self.seq_len + 1
        loss_mask = torch.zeros(1, seq_len, dtype=torch.float32)
        
        # Length of separator pattern
        sep_len = len(self.sep_id)
        
        i = 0
        while i < seq_len:
            # Check if we find the separator pattern
            if i + sep_len <= seq_len and raw_ids[i:i+sep_len] == self.sep_id:
                # Skip the separator itself
                i += sep_len
                # Mark everything as 1 until we find end_of_turn
                # NOTE(review): raw_ids[i] can IndexError if an assistant turn
                # is truncated without an end_of_turn before len(raw_ids) —
                # confirm upstream packing always appends eos.
                while i < seq_len and raw_ids[i] != self.end_of_turn:
                    loss_mask[0, i] = 1
                    i += 1
                # Mark the end_of_turn position as 1
                if i < seq_len and raw_ids[i] == self.end_of_turn:
                    loss_mask[0, i] = 1
                    i += 1
            else:
                i += 1
                
        
        # Positions that are NOT audio placeholders keep their own token.
        batch_indices, non_audio_indices = torch.where(
            (input_ids != self.audio_token_index)
        )        
        # Each audio placeholder occupies num_audio_tokens slots; every other
        # token occupies exactly one.
        token_placeholder_num = torch.zeros_like(input_ids)
        special_audio_token_mask = input_ids == self.audio_token_index
        token_placeholder_num[special_audio_token_mask] = num_audio_tokens.long() - 1
        token_placeholder_num = token_placeholder_num + 1
        # Cumulative sum gives each original token its position after expansion.
        new_token_positions = torch.cumsum(token_placeholder_num, -1) - 1
        max_token_num = token_placeholder_num.sum(-1).max()
        text_to_overwrite = new_token_positions[batch_indices, non_audio_indices]
        # Scatter the text-position loss mask into the expanded layout.
        new_loss_mask = torch.zeros(input_ids.shape[0], max(max_token_num, seq_len), dtype=torch.float)
        new_loss_mask[batch_indices, text_to_overwrite] = loss_mask[batch_indices, non_audio_indices]

        # Labels: -100 (ignore) everywhere except where a text token lands.
        labels = torch.full((input_ids.shape[0], max(max_token_num, seq_len)), -100).to(torch.long)
        labels[batch_indices, text_to_overwrite] = input_ids[batch_indices, non_audio_indices]
        # Shift by one for next-token prediction and trim to seq_len - 1.
        labels = labels[:, 1:seq_len]
        loss_mask = new_loss_mask[:, 1:seq_len]
        return loss_mask, labels
        
    def load_audio_from_stream(self, command: str, output_sample_rate: int = 16000):
        """Load audio from a command stream while preventing memory leaks.
        
        Runs `command` through the shell, decodes its stdout as WAV with
        torchaudio, and resamples to `output_sample_rate` if needed.

        SECURITY NOTE(review): shell=True executes `command` verbatim — safe
        only if annotation files are trusted; do not feed user-supplied paths.

        Args:
            command: The command to execute that produces audio output
            output_sample_rate: Desired sample rate for the output audio
            
        Returns:
            tuple: (waveform, sample_rate)
            
        Raises:
            RuntimeError: If the command fails, times out, or audio cannot be
                loaded (all failures are wrapped into RuntimeError).
        """
        # Initialize variables to ensure cleanup in finally block
        pipe = None
        audio_data = None
        
        try:
            # Use shell to execute the command
            pipe = subprocess.Popen(
                command,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                # Prevent subprocess from inheriting file descriptors
                close_fds=True
            )
            
            # Read the output with a timeout to prevent hanging
            stdout, stderr = pipe.communicate(timeout=30)
            
            if pipe.returncode != 0:
                error_msg = stderr.decode('utf-8', errors='replace')
                raise RuntimeError(f"Command failed with return code {pipe.returncode}: {error_msg}")
            
            # Create a memory-efficient BytesIO object
            audio_data = io.BytesIO(stdout)
            
            # Load audio with torchaudio
            waveform, sample_rate = torchaudio.load(
                audio_data,
                format='wav',  # Explicit format helps with stream processing
                normalize=True
            )
            
            # Resample if needed
            if sample_rate != output_sample_rate:
                transform = torchaudio.transforms.Resample(
                    orig_freq=sample_rate,
                    new_freq=output_sample_rate
                )
                waveform = transform(waveform)
                sample_rate = output_sample_rate
                
            return waveform, sample_rate
            
        except subprocess.TimeoutExpired:
            # Kill the stuck process and drain its pipes before raising.
            if pipe:
                pipe.kill()
                stdout, stderr = pipe.communicate()
            raise RuntimeError("Command timed out while loading audio")
            
        except Exception as e:
            # NOTE(review): this also re-wraps the RuntimeError raised above on
            # non-zero return codes (double wrapping) — presumably acceptable.
            raise RuntimeError(f"Error loading audio from stream: {str(e)}")
            
        finally:
            # Ensure resources are cleaned up (BytesIO is fully consumed by
            # torchaudio before we get here, so closing it is safe).
            if audio_data is not None:
                audio_data.close()
            if pipe is not None and pipe.poll() is None:
                pipe.terminate()
                try:
                    pipe.wait(timeout=1)
                except subprocess.TimeoutExpired:
                    pipe.kill()
    # def load_audio_from_stream(self, command: str, output_sample_rate: int = 16000):
    #     # 修改命令行，确保 wget 和 ffmpeg 正确管道处理
    #     #command = f"{command} | ffmpeg -i pipe:0 -ar {output_sample_rate} -f wav pipe:1"
    #     command = f"{command} "
        
    #     # 使用 subprocess 执行命令并获得音频流
    #     pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    #     stdout, stderr = pipe.communicate()
        
    #     if pipe.returncode != 0:
    #         raise RuntimeError(f"Error: {stderr.decode('utf-8')}")
        
    #     # 将标准输出的音频流包装为二进制文件对象
    #     audio_data = io.BytesIO(stdout)
        
    #     # 使用 torchaudio 读取音频数据，ffmpeg 后端应该自动识别和处理流
    #     waveform, sample_rate = torchaudio.load(audio_data, normalize=True)
        
    #     return waveform, sample_rate        
    

    def transform_segments(self, segments):
        speakers = []
        for seg in segments:
            speaker = seg['speaker']
            if speaker not in speakers:
                speakers.append(speaker)
        speakers = speakers[:99] # truncate to 100
        speakers = {speaker:f"SPK" + "%02d" % (i+1) for i,speaker in enumerate(speakers)}
        orig_start_time = segments[0]['start_time']
        to_ret = ""
        for seg in segments:
            speaker = speakers.get(seg['speaker'], "SPK")
            # do not use time first
            text = seg['text']
            to_ret += f"{speaker}: {text}\n"
        # speakers = set(speakers)
        to_ret = to_ret.rstrip("\n")
        return to_ret
    def conversation_processor(self, ann, index, audio_part_lens):
        conversation = []

        for _ in range(audio_part_lens):
            conversation.append({"type": "audio", "audio_url": ann["wav"]})
        if 'context_text' in ann: # and ann['task'] in ["asr_draft", "asr_hotword"]:
            if index % 10 == 0 and False:
                # 0.1 prob to drop
                conversation.append({"type": "text", "text": "###Context\n" + "\n###Instruct\n"})
            else:
                conversation.append({"type": "text", "text": "###Context\n" +ann['context_text'].replace("\\n", "\n") + "\n###Instruct\n"})
        conversation.append({"type": "text", "text": self.get_suffix_prompt(ann["task"], index)})
        if 'segments' in ann:
            response = self.transform_segments(ann['segments'])
        else:
            response = ann['text']
        
        conversation = [{"role": "user", "content": conversation}, {"role": "assistant", "content": response}]         
        return conversation

            
    def __getitem__(self, index):
        """Build one fixed-length training sample.

        `self.annotation[index]` is either a single annotation dict or a list
        of dicts (pre-merged by __init__ when sample_merge is on); all of them
        are packed into one sequence of self.seq_len tokens, separated by the
        end-of-turn token.

        Returns:
            dict with input_features, input_ids, attention_mask,
            feature_attention_mask, labels, loss_mask (plus 'uttid' when
            self.ret_id is set).
        """
        with torch.no_grad():
            anns = self.annotation[index]

            # Normalize the single-sample case to a one-element list.
            if type(anns) == dict:
                anns = [anns]

            all_inputs = []
            max_audio_len_in_s = 30
            for ann in anns:
                if self.read_penguins:

                    PENGUINS_RATE = 50
                    # Penguins token matrix: (1, D, T) at 50 Hz (B, D, T).
                    matrix = torch.tensor(np.load(ann['pg_npy']))
                    if matrix.size(2) < PENGUINS_RATE:
                        # Pad with silence features up to 1 s minimum.
                        sil = self.sil_matrix[:,:,:PENGUINS_RATE-matrix.size(2)].clone()
                        matrix = torch.cat((matrix, sil), dim=2)

                    # NOTE(review): this slices dim 0 while stepping over the
                    # time axis (matrix.shape[-1]) — confirm the split is meant
                    # to be along dim 2, not dim 0.
                    audio_parts = [matrix[i: i+ PENGUINS_RATE * max_audio_len_in_s] for i in range(0, matrix.shape[-1], PENGUINS_RATE * max_audio_len_in_s)]
                    


                    if 'text_input_no_token' in ann:
                        # Pre-rendered by __init__'s sample_merge pass.
                        text_input = ann['text_input_no_token']
                    else:
                        conversation = self.conversation_processor(ann, index, len(audio_parts))           
                        text_input = self.processor.apply_chat_template(conversation, add_generation_prompt=False, tokenize=False)
                    # Tokenize text only; audio features are built by hand
                    # below from the Penguins matrix.
                    inputs = self.processor(text=text_input, return_tensors="pt", padding=True)
                    # Build input_features / feature_attention_mask per part,
                    # each padded to a full 30 s (50 Hz -> max 1500 frames)
                    # using the silence filler.
                    final_input_features = []
                    final_feature_attention_mask = []
                    for part in audio_parts:
                        part_len = part.shape[-1]
                        feature_attention_mask = torch.zeros((1, PENGUINS_RATE * max_audio_len_in_s), dtype=torch.int32)
                        feature_attention_mask[:,:part_len] = 1
                        input_features = self.sil_matrix[:,:,:PENGUINS_RATE * max_audio_len_in_s].clone()
                        input_features[:,:,:part_len] = part
                        final_input_features.append(input_features)
                        final_feature_attention_mask.append(feature_attention_mask)
                    inputs['input_features'] = torch.cat(final_input_features, dim=0)
                    inputs['feature_attention_mask'] = torch.cat(final_feature_attention_mask, dim=0)

                    all_inputs.append(inputs)
                    

                else:
                    # Load and process audio. 'wav-copy' entries are
                    # "<cmd> <ark:offset>" pairs read via kaldi-style fds.
                    # NOTE(review): the bare except treats ANY load error as
                    # "wav field is a shell command" — loader bugs are hidden.
                    try:
                        if "wav-copy" in ann['wav']:
                            audio, sr = read_wav_from_fd(ann['wav'].split(" ")[1])
                        else:
                            audio, sr = torchaudio.load(ann["wav"])
                    except:
                        # Fall back: interpret the field as a command.
                        audio, sr = self.load_audio_from_stream(ann['wav'])

                    if sr != 16000:
                        resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)
                        audio = resampler(audio)
                        sr = 16000

                    if audio.shape[0] >= 2:  # stereo/multichannel to mono
                        # Keep only the first channel (no downmix averaging).
                        audio = audio[:1]
                    audio = audio.squeeze(0)
                    if audio.size(0) < sr:  # pad audio to at least 1s
                        sil = torch.zeros(sr - audio.size(0))
                        audio = torch.cat((audio, sil), dim=0)

                    # Split audio into parts of 30s or less
                    
                    audio_parts = [audio[i:i + sr * max_audio_len_in_s] for i in range(0, len(audio), sr * max_audio_len_in_s)]
                    audios = [part.numpy() for part in audio_parts]
                    if 'text_input_no_token' in ann:
                        text_input = ann['text_input_no_token']
                    else:
                        conversation = self.conversation_processor(ann, index, len(audio_parts))           
                        text_input = self.processor.apply_chat_template(conversation, add_generation_prompt=False, tokenize=False)
                    # Generate audio features and token ids together.
                    inputs = self.processor(audios=audios,text=text_input, return_tensors="pt", padding=True, sampling_rate=sr)
                    all_inputs.append(inputs)
                    # Free the raw waveforms promptly to limit worker RSS.
                    del audios
                    del audio
                    del audio_parts

            # Concatenate per-annotation features along the batch dim.
            input_features = torch.cat([x['input_features'] for x in all_inputs], dim=0)
            feature_attention_mask = torch.cat([x['feature_attention_mask'] for x in all_inputs], dim=0)
            attention_mask = torch.ones(1, self.seq_len + 1, dtype=torch.int64)
                
            # Flatten all token ids, separating samples with end_of_turn.
            raw_ids = []
            for inputs in all_inputs:
                raw_ids += inputs['input_ids'].squeeze().tolist()
                raw_ids += [self.end_of_turn]

            raw_ids = torch.tensor(raw_ids, dtype=torch.int64)
            # Right-pad with pad_id to seq_len + 1.
            input_ids = torch.full((1, self.seq_len + 1), self.pad_id, dtype=torch.int64)
            input_ids[0,:len(raw_ids)] = raw_ids
            
            # Expanded audio lengths: Penguins features are downsampled by 2;
            # otherwise use the audio encoder's output-length formula.
            if self.read_penguins:
                audio_emb_lengths = feature_attention_mask.sum(-1) // 2
            else:
                _, audio_emb_lengths = self._get_feat_extract_output_lengths(
                    feature_attention_mask.sum(-1) 
                )

            # Loss mask covers only assistant answers (see helper).
            loss_mask, labels = self.prepare_loss_mask_and_labels(raw_ids,input_ids, audio_emb_lengths)


            # Drop the last position: inputs are [0, seq_len), labels are
            # already shifted inside the helper.
            input_ids = input_ids[:, :-1]
            attention_mask = attention_mask[:, :-1]
            if self.data_aug and not self.read_penguins:
                sample = {'feat': input_features, 'feature_attention_mask': feature_attention_mask}
                sample = spec_aug(sample, seed=index)
                input_features = sample['feat']
            del all_inputs
            if self.ret_id:
                # uttid built from the LAST annotation's path (loop variable).
                uttid = 'comb' + str(len(anns)) + '-'.join(ann['wav'].split('/')[-2:])
                return deepcopy({
                    "uttid": uttid,
                    "input_features": input_features,
                    "input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "feature_attention_mask": feature_attention_mask,
                    "labels": labels,
                    "loss_mask": loss_mask
                })
            else:
                return deepcopy({
                    "input_features": input_features,
                    "input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "feature_attention_mask": feature_attention_mask,
                    "labels": labels,
                    "loss_mask": loss_mask
                })            
        

def collate_fn(batch):
    """Collate a list of per-sample dicts into a single batch dict.

    Tensor values (each sample already carries a leading batch dim of 1,
    as produced by the dataset's __getitem__) are concatenated along
    dim 0; non-tensor values are gathered into a plain list.

    Args:
        batch: non-empty list of dicts, all sharing the same keys.

    Returns:
        One dict with the same keys. The result is deep-copied so
        DataLoader workers do not keep references to per-sample tensors
        (mirrors the deepcopy in the dataset's __getitem__).
    """
    collated = {}
    for key, first_val in batch[0].items():
        # isinstance (not `type(...) ==`) so tensor subclasses are
        # concatenated as tensors too.
        if isinstance(first_val, torch.Tensor):
            collated[key] = torch.cat([sample[key] for sample in batch], dim=0)
        else:
            collated[key] = [sample[key] for sample in batch]
    return deepcopy(collated)
# Debugging entry for dataset initialization: iterate a couple of epochs
# through the Megatron random sampler + DataLoader to exercise the full
# pipeline and watch for memory growth.
if __name__ == "__main__":
    from pympler import tracker, muppy, summary
    from torch.utils.data import DataLoader

    # NOTE(review): paths below are environment-specific; adjust locally.
    ann_paths = [
        "audio_data/train_20250324_llmasr.jsonl",
        "audio_data/train_20231206_slidespeech_487h.jsonl",
        "audio_data/train_20250417_cn_slides_generated_context_all_upper.jsonl",
        "audio_data/train_20250410_cn_slides_draft_8kh_open_ocr_all_upper.jsonl",
    ]
    model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2-Audio-7B"

    dataset = Qwen2AudioDataset(
        ann_paths,
        model_path,
        ret_id=True,
        data_aug=True,
        sample_merge=False,
        read_penguins=False,
    )

    # DataLoader configuration
    batch_size = 2

    batch_sampler = MegatronPretrainingRandomSampler(
        dataset=dataset,
        total_samples=len(dataset),
        consumed_samples=0,
        micro_batch_size=batch_size,
        data_parallel_rank=0,
        data_parallel_size=1,
        data_sharding=False,
    )

    train_ds = DataLoader(
        dataset=dataset,
        batch_sampler=batch_sampler,
        num_workers=1,
        pin_memory=True,
        collate_fn=collate_fn,
        # was `True if False else False` — a dead ternary that always
        # evaluated to False; spelled out explicitly here.
        persistent_workers=False,
    )

    # Interactive debug hook: drop into pdb before the training loop.
    pdb.set_trace()
    for epoch in range(1, 3):
        for batch_idx, batch in tqdm(enumerate(train_ds)):
            # Periodically force a GC cycle to keep memory bounded while
            # scanning the whole dataset.
            if batch_idx % 1000 == 0:
                gc.collect()
            del batch