import json
import numpy as np
import gc
import torch
from torch.utils.data import Dataset, default_collate
import torchaudio
import random
from transformers import AutoProcessor
import subprocess
import io
import pdb
import math
from tqdm import tqdm
from copy import deepcopy
from multiprocessing import Process, Queue
import pdb
import json
from megatron.training import get_args
from megatron.legacy.data.data_samplers import MegatronPretrainingRandomSampler
# from megatron.core.datasets import Qwen2AudioDataset
from megatron.core.datasets.qwen2_audio_dataset_v1 import Qwen2AudioDataset, collate_fn
import sys, os, re, gzip, struct
import struct

def open_or_fd(file, mode='rb'):
    """ fd = open_or_fd(file)
     adapted from kaldi_io
     Open a file, gzipped file, shell pipe, or forward an already-open file object.
     Accepted forms:
       * 'ark:...' / 'scp:...' prefixes (with kaldi option flags) are stripped;
       * a trailing ':<offset>' suffix seeks to that byte offset after opening;
       * 'cmd |' opens a read pipe, '| cmd' opens a write pipe;
       * '*.gz' is opened via gzip; anything else via plain open();
       * a non-string (opened file descriptor) is returned unchanged.
    """
    offset = None
    try:
        # strip 'ark:' prefix from r{x,w}filename (optional),
        if re.search('^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
            (prefix, file) = file.split(':', 1)
        # separate offset from filename (optional),
        if re.search(':[0-9]+$', file):
            (file, offset) = file.rsplit(':', 1)
        # input pipe?
        if file[-1] == '|':
            # BUG FIX: the original called an undefined `popen` (NameError on
            # any pipe rxfilename). Kaldi pipe specifiers are shell command
            # lines by design, hence shell=True is intentional here — the
            # rxfilename comes from trusted config, not untrusted user input.
            fd = subprocess.Popen(file[:-1], shell=True, stdout=subprocess.PIPE).stdout
        # output pipe?
        elif file[0] == '|':
            fd = subprocess.Popen(file[1:], shell=True, stdin=subprocess.PIPE).stdin
        # is it gzipped?
        elif file.split('.')[-1] == 'gz':
            fd = gzip.open(file, mode)
        # a normal file...
        else:
            fd = open(file, mode)
    except TypeError:
        # 'file' is an already-opened file object — forward it unchanged.
        fd = file
    # Eventually seek to the requested byte offset.
    if offset is not None:
        fd.seek(int(offset))
    return fd


def read_wav_from_fd(ark_with_idx):
    """Read one complete WAV file starting at the seek position encoded in
    ``ark_with_idx`` (a kaldi-style "path:offset" string, or an open fd).

    Returns:
        (waveform, sample_rate) as produced by torchaudio.load.

    Raises:
        ValueError: if the header is truncated, not RIFF/WAVE, not PCM, or no
            'data' chunk is found within the first 44 header bytes.

    NOTE(review): assumes a canonical 44-byte header (16-byte PCM fmt chunk
    with 'data' following immediately); WAVs carrying extra chunks (LIST,
    fact, ...) whose 'data' chunk starts past byte 36 are rejected. Also
    requires a seekable fd (seek(-44, SEEK_CUR)), so shell-pipe inputs from
    open_or_fd will fail here — confirm against callers.
    """
    fd = open_or_fd(ark_with_idx)
    # 1. Read the 44-byte canonical WAV header.
    header = fd.read(44)
    if len(header) < 44:
        raise ValueError("Invalid WAV header (too short)")

    # 2. Validate the RIFF/WAVE magic values.
    riff, file_size, wave = struct.unpack('<4sI4s', header[:12])
    if riff != b'RIFF' or wave != b'WAVE':
        raise ValueError("Not a valid WAV file")

    # 3. Parse the fmt sub-chunk and require PCM encoding (audio_format == 1).
    fmt_chunk = header[12:36]
    audio_format, num_channels, sample_rate, byte_rate, block_align, bits_per_sample = \
        struct.unpack('<HHI I HH', fmt_chunk[8:24])
    if audio_format != 1:
        raise ValueError("Only PCM WAV format is supported")

    # 4. Locate the 'data' chunk and its size within the header bytes we have.
    pos = 36  # skip RIFF header + fmt sub-chunk
    # BUG FIX: require a full 8-byte chunk header before unpacking; the
    # original compared `pos < len(header)` and could hand struct.unpack a
    # short slice, raising struct.error instead of the intended ValueError.
    while pos + 8 <= len(header):
        subchunk_id, subchunk_size = struct.unpack('<4sI', header[pos:pos+8])
        if subchunk_id == b'data':
            data_size = subchunk_size
            break
        pos += 8 + subchunk_size
    else:
        raise ValueError("Could not find 'data' chunk in WAV header")

    # 5. Total size of the WAV blob: every chunk up to and including 'data'.
    wav_size = pos + 8 + data_size

    # 6. Rewind to the start of the header and read the whole blob.
    fd.seek(-44, io.SEEK_CUR)
    wav_bytes = fd.read(wav_size)

    # 7. Decode with torchaudio.
    waveform, sr = torchaudio.load(io.BytesIO(wav_bytes))
    return waveform, sr



def spec_aug(sample, num_t_mask=2, num_f_mask=2, max_t=50, max_f=10, max_w=80, seed=42):
    """Apply SpecAugment-style time and frequency masking to sample['feat'].

    NOT in-place (contrary to the original docstring): the feature tensor is
    cloned and the masked clone is written back into sample['feat'], so the
    caller's original tensor is untouched.

        Args:
            sample: dict with 'feat' (torch.Tensor, frames x freq-bins) and,
                optionally, 'feature_attention_mask' (1 = valid frame) or
                'feature_lengths' (valid frame counts) to bound time masks.
            num_t_mask: number of time masks to apply
            num_f_mask: number of freq masks to apply
            max_t: max width of each time mask
            max_f: max width of each freq mask
            max_w: max width of time warp (currently unused; kept for
                interface compatibility)
            seed: seeds the module-level RNG, making the masking deterministic

        Returns
            the same dict, with sample['feat'] replaced by the masked clone
    """
    random.seed(seed)
    assert 'feat' in sample
    x = sample['feat']
    assert isinstance(x, torch.Tensor)
    # Bound time masks by the number of valid (unpadded) frames.
    if 'feature_attention_mask' in sample:
        max_frames = sample['feature_attention_mask'].sum(-1)[0].item()
    elif 'feature_lengths' in sample:
        # BUG FIX: the in-file caller (data_aug path of __getitem__) passes
        # 'feature_lengths'; the original unconditionally read
        # 'feature_attention_mask' and crashed with KeyError.
        max_frames = int(sample['feature_lengths'].reshape(-1)[0].item())
    else:
        max_frames = x.size(0)
    y = x.clone().detach()
    max_freq = y.size(1)
    # time masks
    for _ in range(num_t_mask):
        start = random.randint(0, max_frames - 1)
        length = random.randint(1, max_t)
        end = min(max_frames, start + length)
        y[start:end, :] = 0
    # freq masks
    for _ in range(num_f_mask):
        start = random.randint(0, max_freq - 1)
        length = random.randint(1, max_f)
        end = min(max_freq, start + length)
        y[:, start:end] = 0
    sample['feat'] = y
    return sample

def get_audio_length_via_header(file_path):
    """Return the audio duration in seconds using file metadata only
    (header inspection; the waveform is never decoded)."""
    meta = torchaudio.info(file_path)
    return meta.num_frames / meta.sample_rate

class KaldiFbankAudioDataset(Qwen2AudioDataset):
    """Qwen2-audio dataset variant that feeds 80-dim Kaldi fbank features
    (25 ms window / 10 ms shift) instead of the HF processor's features.

    Each item tokenizes the chat-formatted transcript, splits the audio into
    chunks of at most 30 s, extracts fbanks per chunk, and returns
    input_ids/labels/loss_mask padded to ``seq_len`` plus the stacked fbanks.
    """

    def __init__(self, ann_paths, model_path, seq_len=1024, ret_id=False, data_aug=False, sample_merge=False, read_penguins=False):
        # BUG FIX: the original forwarded hard-coded defaults (seq_len=1024,
        # ret_id=False, data_aug=False, ...) to the parent constructor,
        # silently discarding every keyword the caller supplied (e.g.
        # ret_id=True / data_aug=True in the __main__ block had no effect).
        super().__init__(ann_paths, model_path, seq_len=seq_len, ret_id=ret_id,
                         data_aug=data_aug, sample_merge=sample_merge,
                         read_penguins=read_penguins)
        # Placeholder token id marking audio positions in the token stream.
        self.audio_token_index = 151665

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """Map fbank frame counts to audio-encoder output lengths.

        The encoder downsamples by 6x: (n - 5) // 6. Both returned values are
        identical here; the pair is kept for interface compatibility with the
        original whisper-style two-conv formula.
        """
        input_lengths = (input_lengths - 5) // 6
        output_lengths = input_lengths
        return input_lengths, output_lengths

    def make_fbank(self, wav_data):
        """Compute an 80-bin Kaldi fbank from a 1-D float waveform.

        The (1 << 15) scale maps [-1, 1] float samples to int16 range, which
        is what kaldi's fbank implementation expects.
        """
        return torchaudio.compliance.kaldi.fbank(
            wav_data.unsqueeze(0) * (1 << 15), num_mel_bins=80,
            frame_length=25, frame_shift=10, sample_frequency=16000)

    def pad_audio_to_max(self, wav_data, max_length_in_s=30, sr=16000):
        """Zero-pad a 1-D waveform to max_length_in_s seconds, plus 0.08 s of
        slack so the final fbank frame has a full analysis window."""
        padded = torch.zeros(max_length_in_s * sr + int(0.08 * sr)).to(wav_data.dtype)
        padded[:wav_data.shape[0]] = wav_data
        return padded

    def conversation_processor(self, ann, index, audio_part_lens):
        """Build the chat-format conversation for one annotation.

        Layout of the user turn: optional ###Context text, then one audio
        placeholder per 30 s chunk, then the task-specific suffix prompt.
        The assistant turn is the transcript (segment-formatted when the
        annotation carries 'segments').
        """
        conversation = []

        if 'context_text' in ann:
            # NOTE: the original had a 10% context-dropout branch guarded by
            # `and False`, i.e. permanently disabled — only the full-context
            # path is kept; the emitted string is unchanged.
            conversation.append({"type": "text", "text": "###Context\n" + ann['context_text'].replace("\\n", "\n") + "\n###Instruct\n"})
        # Order: context first, then audio placeholders, then the prompt.
        for _ in range(audio_part_lens):
            conversation.append({"type": "audio", "audio_url": ann["wav"]})

        conversation.append({"type": "text", "text": self.get_suffix_prompt(ann["task"], index)})
        if 'segments' in ann:
            response = self.transform_segments(ann['segments'])
        else:
            response = ann['text']

        conversation = [{"role": "user", "content": conversation},
                        {"role": "assistant", "content": response}]
        return conversation

    def __getitem__(self, index):
        with torch.no_grad():
            anns = self.annotation[index]
            # sample_merge mode stores a list of annotations per index.
            if isinstance(anns, dict):
                anns = [anns]

            all_inputs = []
            max_audio_len_in_s = 30
            for ann in anns:
                # ---- load audio: kaldi ark range, plain file, or stream ----
                try:
                    if "wav-copy" in ann['wav']:
                        # "wav-copy <path:offset>" entries point into an ark.
                        audio, sr = read_wav_from_fd(ann['wav'].split(" ")[1])
                    else:
                        audio, sr = torchaudio.load(ann["wav"])
                except Exception:
                    # Fall back to treating 'wav' as a shell command/stream.
                    # (Was a bare except:, which also swallowed KeyboardInterrupt.)
                    audio, sr = self.load_audio_from_stream(ann['wav'])

                if sr != 16000:
                    resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)
                    audio = resampler(audio)
                    sr = 16000

                if audio.shape[0] >= 2:  # multichannel: keep first channel only
                    audio = audio[:1]
                audio = audio.squeeze(0)
                if audio.size(0) < sr:  # pad audio to at least 1 s
                    sil = torch.zeros(sr - audio.size(0))
                    audio = torch.cat((audio, sil), dim=0)

                # ---- split into chunks of <= 30 s and extract fbanks ----
                audio_parts = [audio[i:i + sr * max_audio_len_in_s]
                               for i in range(0, len(audio), sr * max_audio_len_in_s)]
                # fbank frame rate is 100 frames/s (10 ms shift)
                fbank_lengths = [math.ceil(part.shape[0] / sr * 100) for part in audio_parts]

                padded_audios = [self.pad_audio_to_max(part, max_audio_len_in_s, sr) for part in audio_parts]
                fbanks = [self.make_fbank(part) for part in padded_audios]
                if 'text_input_no_token' in ann:
                    text_input = ann['text_input_no_token']
                else:
                    conversation = self.conversation_processor(ann, index, 1)
                    text_input = self.processor.apply_chat_template(
                        conversation, add_generation_prompt=False, tokenize=False)

                fbank_lengths = torch.tensor(fbank_lengths, dtype=torch.int64)
                # Tokenize text only; fbanks replace the processor's features.
                inputs = self.processor(text=text_input, return_tensors="pt", padding=True, sampling_rate=sr)
                inputs['input_features'] = torch.stack(fbanks).contiguous()
                inputs['feature_lengths'] = fbank_lengths
                all_inputs.append(inputs)
                del audio, audio_parts

            # Concatenate chunk features from all (possibly merged) annotations.
            input_features = torch.cat([x['input_features'] for x in all_inputs], dim=0)
            feature_lengths = torch.cat([x['feature_lengths'] for x in all_inputs], dim=0)
            attention_mask = torch.ones(1, self.seq_len + 1, dtype=torch.int64)

            # Concatenate token ids of every sample, with end_of_turn between.
            raw_ids = []
            for inputs in all_inputs:
                raw_ids += inputs['input_ids'].squeeze().tolist()
                raw_ids += [self.end_of_turn]

            raw_ids = torch.tensor(raw_ids, dtype=torch.int64)
            input_ids = torch.full((1, self.seq_len + 1), self.pad_id, dtype=torch.int64)
            input_ids[0, :len(raw_ids)] = raw_ids

            if self.read_penguins:
                # BUG FIX: the original read an undefined name
                # (`feature_attention_mask`, left behind after its builder was
                # commented out) and raised NameError. A full-length attention
                # mask sums to the feature length, so length // 2 is the
                # equivalent computation.
                audio_emb_lengths = feature_lengths // 2
            else:
                _, audio_emb_lengths = self._get_feat_extract_output_lengths(
                    feature_lengths
                )
            # Loss mask/labels: only assistant answers contribute to the loss.
            loss_mask, labels = self.prepare_loss_mask_and_labels(raw_ids, input_ids, audio_emb_lengths)

            input_ids = input_ids[:, :-1]
            attention_mask = attention_mask[:, :-1]
            if self.data_aug and not self.read_penguins:
                # BUG FIX: spec_aug reads 'feature_attention_mask' to find the
                # valid frame count; build it from the per-chunk fbank lengths
                # (the original passed only 'feature_lengths' here, which
                # spec_aug did not accept, crashing with KeyError).
                num_frames = input_features.shape[1]
                feat_mask = (torch.arange(num_frames).unsqueeze(0)
                             < feature_lengths.unsqueeze(-1)).to(torch.int64)
                sample = {'feat': input_features,
                          'feature_attention_mask': feat_mask,
                          'feature_lengths': feature_lengths}
                sample = spec_aug(sample, seed=index)
                input_features = sample['feat']
            del all_inputs

            out = {
                "input_features": input_features,
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                # NOTE(review): all-zeros, shaped (chunks, num_mel_bins) —
                # kept byte-for-byte as the original for collate
                # compatibility; verify what downstream expects here.
                "feature_attention_mask": torch.zeros((input_features.shape[0], input_features.shape[-1]), dtype=torch.int32),
                "feature_lengths": feature_lengths,
                "labels": labels,
                "loss_mask": loss_mask,
            }
            if self.ret_id:
                # Utterance id: merge count + last two path components.
                out["uttid"] = 'comb' + str(len(anns)) + '-'.join(ann['wav'].split('/')[-2:])
            return deepcopy(out)
# Debugging entry point: build the dataset and iterate a couple of epochs to
# smoke-test loading, sampling, and collation.
if __name__ == "__main__":
    # from pympler import tracker, muppy, summary


    # ann_paths = ["audio_data/train_20240322_cn_slides_1100h.jsonl","audio_data/train_20231206_slidespeech_487h.jsonl","audio_data/train_sub_asr_zhen_train_20240322_cn_slides_asr_multi.jsonl"]
    # ann_paths = ["/teaspeech_ceph/share_976139/users/dennswxiao/data/PenguinsASR_Tokens/Baseline/train_20240322_cn_slides_1100h.jsonl"]
    # model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2-Audio-7B-Instruct"
    # NOTE(review): this first ann_paths assignment is immediately overwritten
    # by the one below; kept here only as a path reference.
    ann_paths = [
#        "/teaspeech_ceph/share_976139/users/dennswxiao/PenguinsLLM/wp3_PenguinsTokens/Model01_Lite/utils/json_asr_new/valid_20240322_cn_slides_1100h.jsonl"
        "/apdcephfs_qy3/share_976139/users/adrenzhou/share/penguins_data/train_20250318_cn_slides_1100h.jsonl"
    ]

    ann_paths = [
        # "audio_data/train_20250324_llmasr.jsonl",
        "audio_data/train_20231206_slidespeech_487h.jsonl",
        "audio_data/train_20240322_cn_slides_1100h.jsonl"  
        # "audio_data/train_20250417_cn_slides_generated_context_all_upper.jsonl",
        # "audio_data/train_20250410_cn_slides_draft_8kh_open_ocr_all_upper.jsonl"
    ]
    # model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/Qwen-audio-whisper-tiny-qwen-0.5B"
    model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/Qwen2.5-7b-for-audio"

    dataset = KaldiFbankAudioDataset(ann_paths, model_path, ret_id=True, data_aug=True, sample_merge=False, read_penguins=False)

    # Configure the DataLoader.
    batch_size = 2
    num_workers = 0
    from torch.utils.data import DataLoader
    # dataloader = DataLoader(
    #     dataset=dataset,
    #     batch_size=batch_size,
    #     shuffle=True,
    #     num_workers=num_workers,
    #     pin_memory=True,
    #     collate_fn=collate_fn
    # )
    # Read a few batches to verify the pipeline end-to-end.
    from tqdm import tqdm

    # Megatron's random sampler (single data-parallel rank here) replaces the
    # plain DataLoader shuffling above.
    batch_sampler = MegatronPretrainingRandomSampler(
        dataset=dataset,
        total_samples=len(dataset),
        consumed_samples=0,
        micro_batch_size=batch_size,
        data_parallel_rank=0,
        data_parallel_size=1,
        data_sharding=False,
    )    

    train_ds = DataLoader(
        dataset=dataset,
        batch_sampler=batch_sampler,
        num_workers=0,
        pin_memory=True,
        collate_fn=collate_fn,
        # prefetch_factor=1,
        persistent_workers=True if False else False,  # effectively False; edit the condition to toggle
    )
    import gc
    # pdb.set_trace()
    for epoch in range(1,3):

        for batch_idx, batch in tqdm(enumerate(train_ds)):
            if batch_idx % 1000 == 0:
                gc.collect()  # periodic collection to keep memory bounded during the soak test
            # pdb.set_trace()
            # pdb.set_trace()
            #print(f"epoch: {train_ds.batch_sampler.epoch}, batch_idx: {batch_idx}, uttids: {batch['uttid']}")
            #torch.save(batch, "penguins_input.pt")
            # print()
            del batch
            pass

        # pdb.set_trace()
        # if batch_idx >= 3:  # only inspect the first 3 batches
        #     break
        # print(f"Batch {batch_idx}:")
        # print(f"Batch size: {len(batch)}")
        # print(f"Data in batch: {batch}")
        # print("-" * 50)

    # # Try reading a few data samples
    # for i in range(3):
    #     sample = dataset[i]
    #     pdb.set_trace()
    #     print(f"Sample {i}: {sample}")