from hyperpyyaml import load_hyperpyyaml
import os 
import sys
from pathlib import Path
# Make the project root (four directory levels above this file) importable so
# sibling packages resolve when this script is executed directly.
sys.path.append(str(Path(os.path.abspath(__file__)).parent.parent.parent.parent.absolute()))
# print(sys.path)

import torch
from collections import OrderedDict
import torchaudio
from typing import Union, Dict
import tqdm

import fire

def load_pretrained_modules(model, ckpt_path):
    """Load the weights stored under 'model_state_dict' in *ckpt_path* into *model*.

    Checkpoint keys are rewritten before loading: any 'module.' prefix
    (e.g. left over from DataParallel wrapping — presumed, confirm against
    the training script) is stripped, and the legacy 'convolution_' naming
    is mapped to 'convolution_module.'.
    """
    checkpoint = torch.load(ckpt_path, map_location='cpu')
    renamed = OrderedDict(
        (key.replace("module.", "").replace("convolution_", "convolution_module."), tensor)
        for key, tensor in checkpoint['model_state_dict'].items()
    )
    model.load_state_dict(renamed)



def read_2column_text(path: Union[Path, str]) -> Dict[str, str]:
    """Parse a 2-column text file into a key -> value mapping.

    Each line is split once on whitespace: the first field is the key and
    the remainder of the line is the value.  A line holding only a key maps
    to the empty string.  A duplicated key raises RuntimeError.

    Examples:
        wav.scp:
            key1 /some/path/a.wav
            key2 /some/path/b.wav

        >>> read_2column_text('wav.scp')
        {'key1': '/some/path/a.wav', 'key2': '/some/path/b.wav'}

    """
    mapping: Dict[str, str] = {}
    with Path(path).open("r", encoding="utf-8") as fin:
        for lineno, raw in enumerate(fin, 1):
            fields = raw.rstrip().split(maxsplit=1)
            # Pad so a key-only line unpacks with an empty value.
            key, value = (fields + [""])[:2]
            if key in mapping:
                raise RuntimeError(f"{key} is duplicated ({path}:{lineno})")
            mapping[key] = value
    return mapping


@torch.no_grad()
def main(config: str, model_ckpt: str, mix_scp: str, ref_scp: str, output_dir: str, ref_audio_max_ds: Union[bool, int] = 5):
    """Run the "MaskNet" model over every mixture in *mix_scp* and save the outputs.

    Arguments:
        config: path to a hyperpyyaml file declaring a "MaskNet" entry.
        model_ckpt: checkpoint holding 'model_state_dict' (loaded via
            load_pretrained_modules, which rewrites legacy key names).
        mix_scp: 2-column scp mapping utterance key -> mixture wav path.
        ref_scp: 2-column scp mapping the same keys -> reference wav path.
        output_dir: directory the separated wavs are written to (created if absent).
        ref_audio_max_ds: maximum duration seconds of reference audio;
            pass False to keep the full reference.
    """
    # Bug fix: the `config` argument was previously ignored in favour of a
    # hard-coded "config.yaml" in the current working directory.
    with open(config, "r") as f:
        hparams = load_hyperpyyaml(f)
    model = hparams.get("MaskNet")
    load_pretrained_modules(model, model_ckpt)
    model.cuda()
    # NOTE(review): model.eval() is never called, so dropout/batch-norm layers
    # stay in training mode during inference — confirm this is intended.

    mix_dict = read_2column_text(mix_scp)
    ref_dict = read_2column_text(ref_scp)

    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    for key, mix_path in tqdm.tqdm(mix_dict.items()):
        # A KeyError here means ref_scp is missing a key present in mix_scp.
        ref_path = ref_dict[key]
        mix_audio, sr = torchaudio.load(mix_path)
        ref_audio, sr = torchaudio.load(ref_path)
        if ref_audio_max_ds is not False:
            # Truncate the reference to at most ref_audio_max_ds seconds
            # (assumes both files share the same sample rate — TODO confirm).
            ref_audio = ref_audio[:, :sr * ref_audio_max_ds]
        mix_audio, ref_audio = mix_audio.cuda(), ref_audio.cuda()
        output = model(mix_audio, ref_audio)
        torchaudio.save(str(output_dir / f"{Path(mix_path).stem}.wav"), output.cpu(), sample_rate=sr)


if __name__ == "__main__":
    # Expose `main` as a command-line interface via python-fire.
    fire.Fire(main)