import json
import gc
import torch
from torch.utils.data import Dataset
import torchaudio
import random
from transformers import WhisperFeatureExtractor

class SimpleTokenizer:
    """Character-level tokenizer backed by a plain-text vocabulary file.

    The vocabulary file has one ``<token> <id>`` pair per line. Blank lines
    are skipped, and only the *last* whitespace-separated field is treated
    as the id, so tokens containing internal whitespace survive parsing
    (the original ``split()`` crashed on both cases).
    """

    def __init__(self, vocab_file):
        self.token_to_id = {}
        self.id_to_token = {}
        with open(vocab_file, "r", encoding="utf-8") as f:
            for line in f:
                line = line.rstrip("\n")
                if not line.strip():
                    continue  # tolerate blank/whitespace-only lines
                token, idx = line.rsplit(maxsplit=1)
                idx = int(idx)
                self.token_to_id[token] = idx
                self.id_to_token[idx] = token

    def encode(self, text):
        """Map each character of *text* to its id; unknown chars get the
        ``<unk>`` id (raises KeyError if the vocab has no ``<unk>``)."""
        unk = self.token_to_id["<unk>"]  # hoisted: original looked this up per char
        return [self.token_to_id.get(token, unk) for token in text]

    def decode(self, token_ids):
        """Inverse of :meth:`encode`; unknown ids render as literal ``<unk>``."""
        return "".join(self.id_to_token.get(idx, "<unk>") for idx in token_ids)

def spec_aug(sample, num_t_mask=2, num_f_mask=2, max_t=50, max_f=15, max_w=80):
    """Apply SpecAugment-style time and frequency masking to sample['feat'].

    The feature tensor itself is NOT modified: masking is done on a clone,
    which is then written back into the dict — so only the *dict* is
    mutated in place, not the input tensor.

    Args:
        sample: dict containing at least 'feat', a (time, freq) torch.Tensor.
        num_t_mask: number of time masks to apply.
        num_f_mask: number of frequency masks to apply.
        max_t: maximum width (frames) of each time mask.
        max_f: maximum width (bins) of each frequency mask.
        max_w: maximum width of time warp — accepted for API compatibility
            but currently UNUSED (no time warping is implemented).

    Returns:
        The same ``sample`` dict, with 'feat' replaced by the masked clone.
    """
    assert 'feat' in sample
    x = sample['feat']
    assert isinstance(x, torch.Tensor)
    y = x.clone().detach()
    max_frames = y.size(0)
    max_freq = y.size(1)
    # Time masks: each zeroes 1..max_t consecutive frames (clamped at the end).
    for _ in range(num_t_mask):
        start = random.randint(0, max_frames - 1)
        length = random.randint(1, max_t)
        end = min(max_frames, start + length)
        y[start:end, :] = 0
    # Frequency masks: each zeroes 1..max_f consecutive bins (clamped at the end).
    for _ in range(num_f_mask):
        start = random.randint(0, max_freq - 1)
        length = random.randint(1, max_f)
        end = min(max_freq, start + length)
        y[:, start:end] = 0
    sample['feat'] = y
    return sample

class AudioLLMDataset(Dataset):
    """Dataset pairing Whisper log-mel spectrograms with tokenized text.

    Args:
        ann_paths: path to a JSON annotation file, or a list of such paths.
            Each file must contain ``{"annotation": [...]}`` where each item
            holds at least a ``path`` (wav file) and ``text`` key, and
            optionally ``expand_wav`` (extra wavs to concatenate).
        whisper_path: model id / local path for WhisperFeatureExtractor.
        vocab_file: "<token> <id>" vocabulary file for SimpleTokenizer.
        ret_id: if True, each sample additionally carries an "uttid" key.
        data_aug: if True, apply SpecAugment-style masking to the features.
    """

    def __init__(self, ann_paths, whisper_path, vocab_file, ret_id=False, data_aug=False):
        super().__init__()

        self.annotation = []
        if isinstance(ann_paths, list):  # multiple json files
            for path in ann_paths:
                if path.endswith(".json"):
                    # `with` closes the handle (original leaked it via json.load(open(...)))
                    with open(path, "r", encoding="utf-8") as f:
                        self.annotation += json.load(f)["annotation"]
        else:
            with open(ann_paths, "r", encoding="utf-8") as f:
                self.annotation = json.load(f)["annotation"]
        print(f"Loaded {len(self.annotation)} annotation samples from {ann_paths}")
        self.ret_id = ret_id
        self.data_aug = data_aug
        self.wav_processor = WhisperFeatureExtractor.from_pretrained(whisper_path)
        self.tokenizer = SimpleTokenizer(vocab_file)

    def __len__(self):
        return len(self.annotation)

    @staticmethod
    def _load_mono_16k(path):
        """Load *path*, resample to 16 kHz, and downmix to a 1-D mono tensor."""
        audio, sr = torchaudio.load(path)
        if sr != 16000:
            audio = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(audio)
        if audio.shape[0] > 1:  # downmix any multi-channel audio (original: stereo only)
            audio = torch.mean(audio, dim=0)
        return audio.squeeze(0)

    def __getitem__(self, index):
        ann = self.annotation[index]
        sr = 16000  # everything below operates at 16 kHz

        audio = self._load_mono_16k(ann["path"])
        # Optionally append extra wav files, separated by 0.1 s of silence.
        for p in ann.get("expand_wav", []):
            sil = torch.zeros(1600)  # 0.1 s at 16 kHz
            audio = torch.cat((audio, sil, self._load_mono_16k(p)), dim=0)

        if audio.size(0) < sr:  # pad audio to at least 1 s
            audio = torch.cat((audio, torch.zeros(sr - audio.size(0))), dim=0)
        audio = audio[: sr * 30]  # truncate audio to at most 30 s
        audio_len = int(audio.size(0) / sr * 100)  # length in 10 ms frames (100 fps)

        spectrogram = self.wav_processor(
            audio.numpy(), sampling_rate=sr, return_tensors="pt"
        )["input_features"].squeeze()
        # NOTE: the original called gc.collect() here per item; removed — the
        # audio tensor is freed by refcounting, and a full GC pass per sample
        # cripples dataloader throughput.

        encoded_text = self.tokenizer.encode(ann["text"])
        text_length = len(encoded_text)
        assert text_length <= 1500, "Encoded text length exceeds the maximum allowed length of 1500"
        encoded_text = torch.tensor(encoded_text, dtype=torch.int64)
        # Right-pad to the fixed length with -1 (ignore index for the loss).
        encoded_text = torch.nn.functional.pad(encoded_text, (0, 1500 - text_length), value=-1)

        # Frame-validity mask over the spectrogram: True = valid, False = padding.
        max_seq_length = 1500  # 30 s at 100 fps after Whisper's 2x downsampling
        padding_mask = torch.ones(1, 1, max_seq_length, dtype=torch.bool)
        valid_length = min(audio_len // 2, max_seq_length)
        padding_mask[:, :, valid_length:] = False

        if self.data_aug:
            spectrogram = spec_aug({'feat': spectrogram})['feat']

        item = {
            "spectrogram": spectrogram,
            "text": encoded_text,
            "padding_mask": padding_mask,
        }
        if self.ret_id:
            # e.g. ".../speaker1/utt3.wav" -> "speaker1-utt3.wav"
            item["uttid"] = '-'.join(ann['path'].split('/')[-2:])
        return item