
import torch.nn as nn
import torchaudio
import random
import torch
from transformers import EncodecModel, AutoProcessor
from my_py_toolkit.mllms.utils import modality_tokens_ids_to_string, modality_ids_to_tokens

"""
music、audio to tokens
"""


# Registry mapping short codec names to their HuggingFace model classes.
# `MusicTokenizerGeneral` resolves string names through this dict.
model_cls_name_mapping = {
    'encodec_32khz': EncodecModel
}

# Registry mapping short codec names to their HuggingFace processor classes.
processor_cls_name_mapping = {
    'encodec_32khz': AutoProcessor
}


def load_audio(audio_path, sample_rate, segment_duration=5, one_channel=True, start_from_begin=False):
    """Load a segment of an audio file and resample it to ``sample_rate``.

    Args:
        audio_path: path to the audio file.
        sample_rate: target sample rate (Hz) to resample to.
        segment_duration: segment length in seconds; -1 loads the whole file.
        one_channel: if True, down-mix multi-channel audio to mono by averaging.
        start_from_begin: if True read from frame 0, otherwise from a random
            valid offset so the segment still fits inside the file.

    Returns:
        A waveform tensor of shape (channels, frames); (1, frames) when
        ``one_channel`` is True and the source had multiple channels.
    """
    info = torchaudio.info(audio_path)
    total_frames = info.num_frames
    src_rate = info.sample_rate
    # Segment length is expressed in *source* frames, since we slice before resampling.
    if segment_duration == -1:
        segment_frames = total_frames
    else:
        segment_frames = segment_duration * src_rate

    # Choose where to start reading: the beginning, or a random valid offset.
    offset = 0
    if not start_from_begin:
        offset = random.randint(0, max(0, total_frames - segment_frames))

    waveform, loaded_rate = torchaudio.load(
        audio_path,
        frame_offset=offset,
        num_frames=segment_frames,
    )

    # Resample only when the source rate differs from the requested one.
    if loaded_rate != sample_rate:
        resample = torchaudio.transforms.Resample(orig_freq=loaded_rate, new_freq=sample_rate)
        waveform = resample(waveform)

    # Down-mix multi-channel audio to a single mono channel by averaging.
    if one_channel and waveform.shape[0] >= 2:
        waveform = waveform.mean(dim=0, keepdim=True)

    return waveform

class MusicTokenizerGeneral(nn.Module):
    """Tokenize music/audio with a pretrained neural codec (e.g. Encodec).

    Wraps a HuggingFace codec model + processor pair and exposes helpers to
    encode audio into discrete code ids (`encode_music2token_ids`), modality
    token strings (`encode_music2token`), or plain text (`encode_music2text`).
    """

    def __init__(self, model_path, model_cls_or_name, processor_cls_or_name,
                 music_sample_rate=32000, music_segment_duration=20, device='cuda'):
        """
        Args:
            model_path: HuggingFace model id or local path, e.g. "facebook/encodec_32khz".
            model_cls_or_name: model class, or a string key of ``model_cls_name_mapping``.
            processor_cls_or_name: processor class, or a string key of
                ``processor_cls_name_mapping``.
            music_sample_rate: sample rate (Hz) the codec expects its input at.
            music_segment_duration: seconds of audio to read from each file; -1 reads all.
            device: device the codec model is moved to and run on.
        """
        super().__init__()

        # Resolve string names to classes via the module-level registries.
        if isinstance(model_cls_or_name, str):
            model_cls_or_name = model_cls_name_mapping[model_cls_or_name]
        if isinstance(processor_cls_or_name, str):
            processor_cls_or_name = processor_cls_name_mapping[processor_cls_or_name]

        self.device = device
        self.music_tokenizer = model_cls_or_name.from_pretrained(model_path)
        self.music_tokenizer.eval()  # inference only; never trained here
        self.music_tokenizer.to(device=self.device)
        self.music_processor = processor_cls_or_name.from_pretrained(model_path)
        self.music_sample_rate = music_sample_rate
        self.music_segment_duration = music_segment_duration

    def encode_music2token_ids(self, audio_torch_or_path, one_channel=True, start_from_begin=True):
        """Encode audio into discrete codec code ids.

        Args:
            audio_torch_or_path: a file path, a list/tuple of file paths, or a
                waveform already at ``self.music_sample_rate``.
            one_channel: down-mix multi-channel audio to mono before encoding.
            start_from_begin: read files from frame 0 instead of a random offset
                (only relevant for path inputs).

        Returns:
            ``encoder_outputs.audio_codes`` from the codec model.
        """
        # Load the audio as tensors/arrays the processor can consume.
        if isinstance(audio_torch_or_path, str):
            # BUGFIX: start_from_begin was previously not forwarded here, so a
            # single-path call silently used load_audio's default (False) —
            # inconsistent with this method's default (True) and the list branch.
            waveform = load_audio(audio_torch_or_path, self.music_sample_rate,
                                  self.music_segment_duration, one_channel,
                                  start_from_begin).squeeze(0)
        elif isinstance(audio_torch_or_path, (list, tuple)):
            waveform = [load_audio(p, self.music_sample_rate, self.music_segment_duration,
                                   one_channel, start_from_begin).numpy()
                        for p in audio_torch_or_path]
        else:
            waveform = audio_torch_or_path
        inputs = self.music_processor(raw_audio=waveform, sampling_rate=self.music_sample_rate, return_tensors="pt")
        with torch.no_grad():
            encoder_outputs = self.music_tokenizer.encode(
                inputs["input_values"].to(self.device),
                inputs["padding_mask"].to(self.device),
            )
        return encoder_outputs.audio_codes

    def encode_music2token(self, audio_torch_or_path, one_channel=True, start_from_begin=True):
        """Encode audio and map the code ids to modality token strings."""
        token_ids = self.encode_music2token_ids(audio_torch_or_path, one_channel, start_from_begin)
        tokens = modality_ids_to_tokens(token_ids, 'music')

        # Unwrap the singleton case so a single input yields a single result.
        if len(tokens) == 1:
            tokens = tokens[0]

        return tokens

    def encode_music2text(self, audio_torch_or_path, one_channel=True, start_from_begin=True):
        """Encode audio and render the code ids as text.

        NOTE(review): a single input yields codes of shape (1, 1, codebooks, frames);
        batched inputs have been observed to fail in the processor with
        "Expected mono audio but example has 1 channels" — confirm upstream
        before relying on batch_size > 1 here.
        """
        token_ids = self.encode_music2token_ids(audio_torch_or_path, one_channel, start_from_begin)
        # Drop the leading singleton dims: (1, 1, C, T) -> (C, T).
        token_ids = token_ids.squeeze(1).squeeze(0)
        texts = modality_tokens_ids_to_string(token_ids, 'music')

        # Unwrap the singleton case so a single input yields a single result.
        if len(texts) == 1:
            texts = texts[0]

        return texts




if __name__ == '__main__':
    # Smoke test: build the Encodec-32kHz tokenizer and print the text
    # encoding of a single local clip.
    pretrained_id = "facebook/encodec_32khz"
    registry_key = 'encodec_32khz'
    music_tokenizer = MusicTokenizerGeneral(pretrained_id, registry_key, registry_key)
    sample_wav = '/home/centos/.cache/huggingface/hub/datasets--fnlp--AnyInstruct/snapshots/a77e5ac74151672c44e96954f75081d0893e206d/part1/music/32c45916_0.wav'
    # sample_wav = [sample_wav, sample_wav]  # batched input (see NOTE in encode_music2text)
    print(music_tokenizer.encode_music2text(sample_wav))


