# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import argparse
import time
from multiprocessing import Process, Queue

import numpy as np
import torch
import torchaudio

from constants import *
from punctuator import PunctuationModel
from utils import compute_fbank, logger, load_yaml, check_config, normalize
from encoder import OnlineEncoder
from decoder import RescoringDecoder
from vad import VADModel


def check_asr_input(data_dict):
    """Validate a raw ASR input dict and unpack it.

    Args:
        data_dict: expected to be a dict with keys 'waveform' (a float32
            numpy array of shape (1, x)) and 'listen_start' (bool-like flag
            marking the start of a new session).

    Returns:
        The (waveform, listen_start) pair taken from the dict.

    Raises:
        ValueError: if the dict is missing keys or the waveform has the
            wrong type, shape, or dtype.
    """
    has_keys = isinstance(data_dict, dict) and 'waveform' in data_dict and 'listen_start' in data_dict
    if not has_keys:
        raise ValueError('Invalid input for ASR module. '
                         'The input requires a dict with keys: waveform, listen_start')

    waveform = data_dict['waveform']
    listen_start = data_dict['listen_start']

    shape_ok = isinstance(waveform, np.ndarray) and len(waveform.shape) == 2 and waveform.shape[0] == 1
    if not shape_ok:
        raise ValueError('Invalid input for ASR module. waveform should be a numpy array with shape (1, x)')

    if waveform.dtype != 'float32':
        raise ValueError('Invalid input for ASR module. waveform should be a numpy array with dtype float32')

    return waveform, listen_start


def vad_consumer(in_queue, out_queue, asr_out_queue, msg_queue, cfg):
    """Voice-activity gate: forward voiced audio downstream, report silence.

    Reads input dicts from ``in_queue`` forever. While waiting for speech it
    accumulates silence time; once it exceeds the configured standby time the
    session is closed with a silence result on ``asr_out_queue``. Once speech
    is detected, all further audio of the session is forwarded to
    ``out_queue`` without re-running the VAD gate.
    """
    model = VADModel(cfg)
    msg_queue.put(1)  # signal to the parent that the model is loaded

    silence_time = 0
    session_ended, session_running = True, False
    while True:
        waveform, listen_start = check_asr_input(in_queue.get())

        if not listen_start:
            if session_ended:
                # Session already closed and no new one started: drop the chunk.
                continue
            if session_running:
                # Speech already detected for this session: pass straight through.
                out_queue.put({
                    'waveform': waveform,
                    'listen_start': False,
                })
                continue
        else:
            # A new session begins: reset state and start monitoring speech.
            session_running = False
            session_ended = False
            silence_time = 0
            model.reset()

        speech_prob = model(waveform)
        if speech_prob >= cfg.get('vad_start_threshold'):
            # Speech detected: switch the session into running mode and
            # forward this chunk as the session start for downstream modules.
            session_running = True
            out_queue.put({
                'waveform': waveform,
                'listen_start': True,
            })
        else:
            # Still silent: accumulate standby time and, past the threshold,
            # end the session with an empty (silence) result.
            silence_time += waveform.shape[1] / ASR_SAMPLE_RATE
            if silence_time >= cfg.get('silence_standby_time'):
                logger.debug(f'Current silence standby time: {silence_time:.2f} s. ')
                asr_out_queue.put({
                    'silence': True,
                    'text': '',
                })
                session_ended = True

def preprocess_audio(in_queue, out_queue):
    """Normalize incoming waveforms and convert them to fbank feature chunks.

    Keeps a rolling buffer of samples that the feature frontend has not yet
    consumed; the buffer is cleared whenever a new session starts. Emits
    (raw_waveform, features, listen_start) tuples on ``out_queue``.
    """
    pending = None  # samples carried over between chunks of one session
    while True:
        data_dict = in_queue.get()
        valid = isinstance(data_dict, dict) and 'waveform' in data_dict and 'listen_start' in data_dict
        if not valid:
            raise ValueError('Invalid input for ASR preprocessing module. '
                             'The input requires a dict with keys: waveform, listen_start')

        waveform = data_dict['waveform']
        listen_start = data_dict['listen_start']
        if listen_start:
            # New session: discard leftovers from the previous one.
            pending = None

        chunk = torch.from_numpy(normalize(waveform))
        if pending is None:
            # Prepend leading silence to the first chunk to improve accuracy.
            pending = torch.cat([torch.zeros(1, ASR_SAMPLE_RATE), chunk], dim=1)
        else:
            pending = torch.cat([pending, chunk], dim=1)

        # Compute audio features over everything buffered so far.
        feats = compute_fbank(pending, ASR_SAMPLE_RATE)

        # Drop the samples consumed by feature extraction
        # (160 samples per frame — presumably a 10 ms hop at 16 kHz; confirm in compute_fbank).
        pending = pending[:, 160 * feats.shape[0]:]
        out_queue.put((waveform, feats, listen_start))


def encoder_consumer(in_queue, out_queue, asr_out_queue, msg_queue, cfg):
    """Run the streaming encoder over feature chunks and detect session end.

    Consumes (waveform, chunk_xs, listen_start) tuples from ``in_queue`` (as
    produced by ``preprocess_audio``). A secondary VAD instance scores the
    *next* chunk so the encoder can be flushed with ``is_end=True`` on the
    final chunk of a session. Finished hypotheses plus stacked encoder
    features go to ``out_queue`` for rescoring; sessions that produced no
    tokens are reported as silence on ``asr_out_queue``.
    """
    def finish_cur_session():
        """Finish the current session and send the ASR result to the next module."""
        nonlocal encoder, asr_out_queue, hyps, end_cur_session
        end_cur_session = True
        # Token count of the best hypothesis; 0 means nothing was recognized.
        token_len = len(hyps[0][1]) if hyps else 0
        if token_len > 0:
            # Stack the per-chunk encoder outputs along the time axis for the rescoring decoder.
            encoder_features = np.concatenate(encoder.encoder_outs, axis=1)
            out_queue.put((hyps, encoder_features))
        else:
            # No tokens produced: report the whole session as silence.
            asr_out_queue.put({
                'silence': True,
                'text': '',
            })

    encoder = OnlineEncoder(cfg)
    vad_model = VADModel(cfg)
    msg_queue.put(1)  # signal to the parent that the models are loaded

    end_cur_session = True
    waveform, chunk_xs, listen_start = in_queue.get()
    while True:
        if listen_start:
            # A new session starts with this chunk: reset both models.
            vad_model.reset()
            encoder.reset()
            end_cur_session = False
            # Explicitly process the first voice chunk
            # to improve the accuracy of VAD for the following chunks.
            vad_model(waveform)
        elif end_cur_session:
            # Session already finished: discard chunks until a new one starts.
            waveform, chunk_xs, listen_start = in_queue.get()
            continue

        # Retrieve the next audio data to check whether the current session is ended.
        waveform_next, chunk_xs_next, listen_start = in_queue.get()
        speech_prob = vad_model(waveform_next)

        if encoder.num_chunks >= MAX_CHUNKS - 1:
            # Hard cap on session length: flush the encoder and end the session.
            hyps = encoder(chunk_xs, is_end=True)
            logger.warning(f'Exceed the maximum number of chunks: {MAX_CHUNKS}. Prepare to end the current session.')
            finish_cur_session()
        elif speech_prob < cfg.get('vad_end_threshold'):
            # The upcoming chunk looks silent: treat the current chunk as the last one.
            logger.debug(f'Current speech probability is lower than the threshold: {speech_prob:.2f}. '
                         f'Prepare to end the current session.')
            hyps = encoder(chunk_xs, is_end=True)
            finish_cur_session()
        else:
            # Mid-session chunk: run the encoder in streaming mode.
            hyps = encoder(chunk_xs, is_end=False)

        waveform, chunk_xs = waveform_next, chunk_xs_next


def decoder_consumer(in_queue, out_queue, msg_queue, cfg):
    """Rescore encoder hypotheses into final text, one utterance at a time."""
    decoder = RescoringDecoder(cfg)
    msg_queue.put(1)  # signal to the parent that the model is loaded
    while True:
        hyps, encoder_features = in_queue.get()
        out_queue.put(decoder(hyps, encoder_features))


def punctuation_consumer(in_queue, out_queue, msg_queue, cfg):
    """Attach punctuation to raw ASR text and emit the final result dict."""
    punctuator = PunctuationModel(cfg)
    msg_queue.put(1)  # signal to the parent that the model is loaded
    while True:
        raw_text = in_queue.get()
        out_queue.put({
            'silence': False,
            'text': punctuator(raw_text),
        })


def start_asr_inference(asr_in_queue, asr_out_queue, init_done_event):
    """API for ASR inference.

    Args:
        asr_in_queue(multiprocessing.Queue): asr input, e.g.
            {
                'waveform': waveform data with shape (1, x), the sample rate must be 16k
                'listen_start': True,   # Signify the start of a new session
            }

        asr_out_queue(multiprocessing.Queue): asr output, there are two types of output:
            1. When the ASR module is ready, it will send a message to asr_out_queue, e.g.
                {
                    'asr_ready': True,
                }
            2. When the ASR module finishes the inference, it will send the ASR result to asr_out_queue, e.g.
                {
                    'silence': False,  # If True, the text is empty
                    'text': '你好，世界。',
                }
        init_done_event: a placeholder for passing signal to the server
    """
    cfg = load_yaml('./config.yaml')  # change this path to the absolute path of config.yaml during deployment
    check_config(cfg)

    vad_out_queue = Queue()
    feat_queue = Queue()
    rescore_queue = Queue()
    raw_text_queue = Queue()
    msg_queue = Queue()

    p_vad = Process(target=vad_consumer, args=(asr_in_queue, vad_out_queue, asr_out_queue, msg_queue, cfg))
    p_preprocess = Process(target=preprocess_audio, args=(vad_out_queue, feat_queue))
    p_encoder = Process(target=encoder_consumer, args=(feat_queue, rescore_queue, asr_out_queue, msg_queue, cfg))
    p_decoder = Process(target=decoder_consumer, args=(rescore_queue, raw_text_queue, msg_queue, cfg))
    p_punctuation = Process(target=punctuation_consumer, args=(raw_text_queue, asr_out_queue, msg_queue, cfg))

    asr_processes = [p_vad, p_preprocess, p_encoder, p_decoder, p_punctuation]
    # preprocess_audio loads no model, so it never reports on msg_queue.
    num_model_processes = len(asr_processes) - 1

    try:
        for p in asr_processes:
            p.start()

        # Wait for each model-loading process to report readiness. A blocking
        # get() per process replaces the previous qsize() polling loop:
        # Queue.qsize() is unreliable and raises NotImplementedError on some
        # platforms (e.g. macOS), and this also drains msg_queue in one pass.
        for _ in range(num_model_processes):
            msg_queue.get()

        init_done_event.set()
        asr_out_queue.put({
            'asr_ready': True,
        })
    except KeyboardInterrupt:
        print("Shutting down the server and saving any final audio data.")
        # Join every pipeline process, including p_vad, which the previous
        # shutdown list omitted.
        for p in asr_processes:
            p.join()


def test_wav(audio_dir, asr_in_queue, asr_out_queue):
    """Feed every .wav file in audio_dir to the ASR pipeline and log results.

    Args:
        audio_dir: directory containing .wav files, fed in sorted filename order.
        asr_in_queue: pipeline input queue (see start_asr_inference).
        asr_out_queue: pipeline output queue; results are logged forever.
    """
    start_e2e = time.time()

    for filename in sorted(os.listdir(audio_dir)):
        if filename.endswith('.wav'):
            # The sample rate is ignored; the pipeline assumes 16 kHz input.
            audio_data, _sample_rate = torchaudio.load(os.path.join(audio_dir, filename))
            asr_in_queue.put({
                'waveform': audio_data.numpy(),
                # Files with '001' in the name mark the start of a new session.
                'listen_start': '001' in filename,
            })

    # Log pipeline outputs forever; results arrive asynchronously.
    while True:
        asr_out = asr_out_queue.get()
        logger.info(f'E2E inference time: {(time.time() - start_e2e) * 1000:.2f} ms')
        logger.info(f'================= ASR output: {asr_out} =================')


class Temp:
    """No-op stand-in for the server's init-done event.

    Only set() is ever called on it; see start_asr_inference.
    """

    def set(self):
        """Do nothing; a real event object would signal init completion."""
        return None


def parse_args():
    """Parse command-line options for the standalone test driver.

    Returns:
        argparse.Namespace with a single attribute ``audio_dir``.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--audio_dir', help='audio dir')
    return arg_parser.parse_args()


if __name__ == '__main__':
    in_queue = Queue()
    out_queue = Queue()
    start_asr_inference(in_queue, out_queue, Temp())

    # Block until the pipeline announces readiness; other messages are
    # discarded while waiting.
    while True:
        msg = out_queue.get()
        if 'asr_ready' in msg:
            logger.info('================ ASR ready ===================')
            break
        time.sleep(0.1)

    args = parse_args()
    test_wav(args.audio_dir, in_queue, out_queue)
