import torch.nn as nn

import torch, torchaudio
from soundstorm_speechtokenizer import SoundStorm, ConformerWrapper
from speechtokenizer import SpeechTokenizer
from einops import rearrange
import torch
import os
import re
import json
from my_py_toolkit.mllms.utils import modality_tokens_ids_to_string, modality_ids_to_tokens

# ======================== clone voice
def load_soundstorm(model_path):
    """Construct the SoundStorm acoustic generator and load its weights.

    Builds a Conformer backbone (1024-entry codebook, 7 quantizer levels),
    wraps it in a SoundStorm model with matching semantic/pad token ids,
    then restores the checkpoint found at ``model_path``.

    Args:
        model_path: path to the SoundStorm checkpoint file.

    Returns:
        A SoundStorm instance with weights loaded (not moved to any device).
    """
    backbone = ConformerWrapper(
        codebook_size=1024,
        num_quantizers=7,
        conformer={
            'dim': 1024,
            'depth': 12,
            'heads': 8,
            'dim_head': 128,
            'attn_flash': False,
        },
    )
    model = SoundStorm(
        net=backbone,
        num_semantic_token_ids=1024,
        semantic_pad_id=1024,
        pad_id=1024,
        schedule='cosine',
    )
    model.load(model_path)
    return model


def semantic2acoustic(semantic_tokens, prompt_tokens, soundstorm, tokenizer, steps = 1, greedy = True):
    """Render semantic token ids into waveforms via SoundStorm + codec decode.

    Unprompted generation is also supported — simply pass
    ``prompt_tokens=None``.

    Args:
        semantic_tokens: batch of semantic token ids, shape (b, n).
        prompt_tokens: acoustic prompt tokens for voice cloning, or None.
        soundstorm: SoundStorm model used to generate acoustic codes.
        tokenizer: codec (SpeechTokenizer) whose ``decode`` turns codes
            into audio.
        steps: number of SoundStorm generation steps.
        greedy: whether to decode greedily.

    Returns:
        Waveform tensor of shape (b, 1, t).
    """
    acoustic_codes = soundstorm.generate(
        semantic_tokens=semantic_tokens,
        prompt_tokens=prompt_tokens,
        steps=steps,
        greedy=greedy,
    )
    # The codec expects codes laid out as (q, b, n).
    batch = semantic_tokens.size(0)
    return tokenizer.decode(rearrange(acoustic_codes, 'b n q -> q b n', b=batch))


# Registry of string aliases to tokenizer classes; lets callers select a
# backend by name instead of importing the class directly.
model_name_cls_mapping = {
    'speech_tokenizer': SpeechTokenizer
}

# ========================  speech tokenizer

class SpeechTokenizerGeneral(nn.Module):
    """Speech codec wrapper: SpeechTokenizer for encoding audio to discrete
    tokens, plus SoundStorm for rendering tokens back into waveforms.

    The ``encode*`` methods map audio (a file path or a tensor) to discrete
    token ids / token strings; ``decode`` maps a string of token ids back to
    a waveform, optionally voice-cloning from a prompt wav.
    """

    def __init__(self, model_dir, model_cls_or_name='speech_tokenizer', device='cuda', **kwargs):
        """Load both models from a fixed checkpoint layout under ``model_dir``.

        Args:
            model_dir: root directory containing ``speechtokenizer/`` and
                ``soundstorm/`` checkpoint subdirectories.
            model_cls_or_name: a tokenizer class exposing
                ``load_from_checkpoint``, or a string alias registered in
                ``model_name_cls_mapping``.
            device: device both models are moved to (default ``'cuda'``).
            **kwargs: unused; accepted for interface compatibility.
        """
        super().__init__()

        # Model checkpoint paths (layout is fixed relative to model_dir)
        tokenizer_path = os.path.join(model_dir, 'speechtokenizer/ckpt.dev')
        tokenizer_config_path = os.path.join(model_dir, 'speechtokenizer/config.json')
        soundstorm_path = os.path.join(model_dir, 'soundstorm/speechtokenizer_soundstorm_mls.pt')


        # Resolve a string alias to its class; a class passed directly is used as-is.
        if isinstance(model_cls_or_name, str) and model_cls_or_name in model_name_cls_mapping:
            model_cls_or_name = model_name_cls_mapping[model_cls_or_name]

        self.device = device
        self.speech_tokenizer = model_cls_or_name.load_from_checkpoint(tokenizer_config_path, tokenizer_path)
        self.speech_tokenizer.eval()
        self.speech_tokenizer.to(device=self.device)
        self.soundstorm = load_soundstorm(soundstorm_path)
        self.soundstorm.eval()
        self.soundstorm.to(device=self.device)

    def encode(self, speech_tensor, sr=None):
        """Encode audio into discrete codec tokens.

        Args:
            speech_tensor: either a path to an audio file, or a waveform
                tensor (assumed already batched and at the tokenizer's sample
                rate — only the path branch resamples; TODO confirm).
            sr: sample rate hint. NOTE(review): currently unused — the path
                branch overwrites it with torchaudio's value and the tensor
                branch never reads it; confirm intended contract.

        Returns:
            1-D tensor of token ids: ``codes[0, 0, :]`` keeps only the first
            quantizer level of the first batch item.
        """
        if isinstance(speech_tensor, str):
            wav, sr = torchaudio.load(speech_tensor)
            # monophonic checking: keep only the first channel
            if wav.shape[0] > 1:
                wav = wav[:1, ]
            if sr != self.speech_tokenizer.sample_rate:
                wav = torchaudio.functional.resample(wav, sr, self.speech_tokenizer.sample_rate)
            # add batch dimension: (1, 1, samples)
            speech_tensor = wav.unsqueeze(0)

        speech_tensor = speech_tensor.to(self.device)
        # Extract discrete codes from SpeechTokenizer
        with torch.no_grad():
            codes = self.speech_tokenizer.encode(speech_tensor) # codes: (n_q, B, T)
        return codes[0, 0, :]
    
    
    def encode2token(self, speech_tensor, sr=None):
        """Encode audio and convert the resulting ids to modality tokens.

        NOTE(review): ``encode`` returns a 1-D tensor (T,), so ``batch_size``
        below is actually the time dimension and ``token_ids[i]`` a scalar —
        confirm this matches ``modality_ids_to_tokens``'s expected input.

        Returns a single token list when one item is produced, else a list
        of them.
        """
        tokens = []
        token_ids = self.encode(speech_tensor, sr)
        batch_size, *_ = token_ids.shape
        for i in range(batch_size):
            tokens.append(modality_ids_to_tokens(token_ids[i], 'speech'))

        # Unwrap single-item results for caller convenience.
        if len(tokens) == 1:
            tokens = tokens[0]

        return tokens

    
    def encode2text(self, speech_tensor, sr=None):
        """Encode audio and convert the resulting ids to a token string.

        NOTE(review): same batch/time-axis concern as ``encode2token`` —
        ``encode`` returns a 1-D tensor, so this loop iterates over time
        steps, not batch items; verify against
        ``modality_tokens_ids_to_string``.
        """
        texts = []
        token_ids = self.encode(speech_tensor, sr)
        batch_size, *_ = token_ids.shape
        for i in range(batch_size):
            texts.append(modality_tokens_ids_to_string(token_ids[i], 'speech'))

        # Unwrap single-item results for caller convenience.
        if len(texts) == 1:
            texts = texts[0]
        return texts
        
        
    def decode(self, content, prompt_path=None, vc_steps=3):
        """Synthesize a waveform from a string of semantic token ids.

        Args:
            content: text containing integer token ids; every run of digits
                in it is parsed as one id.
            prompt_path: optional audio file used as a voice-cloning prompt;
                None for unprompted generation.
            vc_steps: SoundStorm generation steps.

        Returns:
            Waveform tensor on CPU, shape (1, t) after the squeeze below.
        """
        if prompt_path:
            # get tokens of prompt
            prompt_wav, sr = torchaudio.load(prompt_path)
            prompt_wav = prompt_wav.to(self.device)
            if sr != self.speech_tokenizer.sample_rate:
                prompt_wav = torchaudio.functional.resample(prompt_wav, sr, self.speech_tokenizer.sample_rate)
            # If it is stereo, take the average to mono
            # NOTE(review): only handles exactly 2 channels; >2-channel input
            # passes through unchanged — confirm whether that can occur.
            if prompt_wav.shape[0] == 2:
                prompt_wav = prompt_wav.mean(dim=0).unsqueeze(0)
            prompt_tokens = rearrange(self.speech_tokenizer.encode(prompt_wav.unsqueeze(0)), 'q b n -> b n q')
        else:
            prompt_tokens = None
        # print(prompt_tokens)
        # codes.shape: (1, 1, n) — all digit runs in `content` become token ids
        semantic_codes = [[int(num) for num in re.findall(r'\d+', content)]]
        # wav: (b, 1, t)
        wav = semantic2acoustic(torch.Tensor(semantic_codes).int().to(self.device), prompt_tokens, 
                                self.soundstorm, self.speech_tokenizer, steps=vc_steps)
        wav = wav.squeeze(0).detach().cpu()
        return wav

if __name__ == '__main__':
    # Smoke-test: encode one AnyInstruct sample mp3 to a speech-token string.
    # Paths are hard-coded to a local HuggingFace cache layout.
    model_dir = '/home/centos/.cache/huggingface/hub/models--fnlp--AnyGPT-speech-modules/snapshots/47320f954f02f108d0359fb80cd481038bba6be9'
    audio_path = '/home/centos/.cache/huggingface/hub/datasets--fnlp--AnyInstruct/snapshots/a77e5ac74151672c44e96954f75081d0893e206d/speech_conv/speech/chunk_00002/0001.mp3'
    tokenizer = SpeechTokenizerGeneral(model_dir, 'speech_tokenizer')
    print(tokenizer.encode2text(audio_path))



