#!/usr/bin/env python3

import argparse
import logging
import queue
import sys
import threading
import time

import numpy as np
import sherpa_onnx
import soundfile as sf
import pyaudio
import struct
import os


# buffer holds chunks of generated audio samples awaiting playback.
# Producer: generated_audio_callback (called from the TTS engine).
# Consumer: play_audio_callback (PyAudio stream callback).
buffer = queue.Queue()

# started is set to True once generated_audio_callback is called.
started = False

# stopped is set to True once all the text has been processed
stopped = False

# killed is set to True once ctrl + C is pressed
killed = False

# Note: When started is True, and stopped is True, and buffer is empty,
# we will exit the program since all audio samples have been played.

# Signalled when playback should end (all audio drained, or killed).
event = threading.Event()

# Wall-clock time when the first audio chunk arrived from the engine;
# None until the first callback fires.  Used to report first-chunk latency.
first_message_time = None

def generated_audio_callback(samples: np.ndarray, progress: float):
    """Receive one chunk of synthesized audio from the TTS engine.

    This function is handed to C++ and invoked from C++ each time
    max_num_sentences sentences have been processed.  The chunk is
    queued onto the shared playback buffer.

    Args:
      samples:
        A 1-D np.float32 array containing audio samples.
      progress:
        Generation progress reported by the engine (unused here).

    Returns:
      1 to keep generating, 0 to stop (after Ctrl + C / kill()).
    """
    global first_message_time, started

    # Record when the very first chunk arrived so tts() can report latency.
    if first_message_time is None:
        first_message_time = time.time()

    buffer.put(samples)

    if not started:
        logging.info("Start playing ...")
    started = True

    # Contract with the engine: 1 = keep generating, 0 = stop generating.
    return 0 if killed else 1


def play_audio_callback(
    in_data, frame_count, time_info, status
):
    """PyAudio output-stream callback: fill one buffer from the sample queue.

    Pulls float32 chunks off the shared `buffer` queue, concatenating and
    splitting them as needed to produce exactly `frame_count` frames of
    mono audio.  Any shortfall is padded with silence.  Also signals
    `event` once playback should end (killed, or generation finished and
    the queue is drained).

    Returns:
      (samples, pyaudio.paContinue) where samples is a 1-D float32 array
      of length frame_count.
    """
    # Fix: honor the frame count PyAudio actually requests.  The original
    # hard-coded 1024, which only worked because frames_per_buffer happened
    # to be 1024 in play().
    frames = frame_count
    # Zero-initialized, so unfilled frames are already silence.
    outdata = np.zeros([frames, 1], dtype=np.float32)

    if killed or (started and stopped and buffer.empty()):
        event.set()

    n = 0
    while n < frames and not buffer.empty():
        remaining = frames - n
        # Peek at the chunk at the head of the queue without removing it.
        head = buffer.queue[0]
        k = head.shape[0]

        if remaining <= k:
            # Head chunk fills (or overfills) the buffer: take what we
            # need and leave the remainder at the head of the queue.
            outdata[n:, 0] = head[:remaining]
            buffer.queue[0] = head[remaining:]
            if buffer.queue[0].shape[0] == 0:
                buffer.get()
            n = frames
            break

        # Head chunk fits entirely; consume it and keep filling.
        outdata[n : n + k, 0] = buffer.get()
        n += k

    return (outdata[:, 0], pyaudio.paContinue)


class SpeechSynthesizer:
    """Offline Chinese TTS based on a sherpa-onnx vits-piper model.

    Audio is played through PyAudio *while* it is being generated (via the
    module-level callbacks and shared buffer) and, on success, is also
    saved to ``self.out_path`` as a 16-bit PCM wav file.
    """

    def __init__(self):
        self.busy = False
        # Fix: tts() reads self.out_path but it was never assigned anywhere,
        # raising AttributeError after every successful generation.
        # Callers may override this before calling tts().
        self.out_path = "/tmp/tts_output.wav"
        # Fix: previously only set when a wm8960 card was found, causing an
        # AttributeError in play() otherwise.  None = PyAudio default device.
        self.input_device_index = None

        model_dir = os.path.join(
            os.path.expanduser("~"),
            "Lepi_Data/ros/tts/vits-piper-zh_CN-huayan-medium",
        )
        vits_model = os.path.join(model_dir, "zh_CN-huayan-medium.onnx")
        # Fix: the lexicon was set to the .onnx model path, which is wrong.
        # Piper voices use espeak-ng data (data_dir) and take no lexicon.
        vits_lexicon = ""
        vits_data_dir = os.path.join(model_dir, "espeak-ng-data")
        vits_tokens = os.path.join(model_dir, "tokens.txt")

        tts_config = sherpa_onnx.OfflineTtsConfig(
            model=sherpa_onnx.OfflineTtsModelConfig(
                vits=sherpa_onnx.OfflineTtsVitsModelConfig(
                    model=vits_model,
                    lexicon=vits_lexicon,
                    data_dir=vits_data_dir,
                    tokens=vits_tokens,
                ),
                num_threads=2,
            ),
            # Emit audio one sentence at a time so playback can start early.
            max_num_sentences=1,
        )

        if not tts_config.validate():
            raise ValueError("Please check your config")

        self.list_audio_devices()
        logging.info("Loading model ...")
        self.model = sherpa_onnx.OfflineTts(tts_config)
        logging.info("Loading model done.")

    def list_audio_devices(self):
        """Scan output devices; remember the wm8960 sound card index if present."""
        p = pyaudio.PyAudio()
        try:
            for i in range(p.get_device_count()):
                info = p.get_device_info_by_index(i)
                if 'wm8960' in info["name"]:
                    self.input_device_index = info["index"]
                    print("Device {} = {}".format(info["index"], info["name"]))
                    print((i, info['maxInputChannels'], info['maxOutputChannels']))
        finally:
            # Fix: the PyAudio handle was previously never released.
            p.terminate()

    def tts(self, text):
        """Synthesize ``text``, play it while generating, then save a wav.

        Calls made while a previous synthesis is still running are ignored.
        """
        global started
        global stopped
        global killed
        global buffer

        if self.busy:
            return
        self.busy = True
        started = False
        stopped = False
        killed = False

        # Fresh queue so stale samples from a previous run are never played.
        buffer = queue.Queue()
        play_back_thread = threading.Thread(target=self.play)
        play_back_thread.start()

        logging.info("Start generating ...")
        start_time = time.time()
        audio = self.model.generate(
            text,
            sid=0,
            speed=1,
            callback=generated_audio_callback,
        )
        end_time = time.time()
        logging.info("Finished generating!")
        stopped = True

        if len(audio.samples) == 0:
            print("Error in generating audios. Please read previous error messages.")
            killed = True
            play_back_thread.join()
            # Fix: busy was left True on this path, silently disabling all
            # subsequent tts() calls.
            self.busy = False
            return

        elapsed_seconds = end_time - start_time
        audio_duration = len(audio.samples) / audio.sample_rate
        real_time_factor = elapsed_seconds / audio_duration
        output_filename = self.out_path

        logging.info(f"The text is '{text}'")
        logging.info(
            "Time in seconds to receive the first "
            f"message: {first_message_time-start_time:.3f}"
        )
        logging.info(f"Elapsed seconds: {elapsed_seconds:.3f}")
        logging.info(f"Audio duration in seconds: {audio_duration:.3f}")
        logging.info(
            f"RTF: {elapsed_seconds:.3f}/{audio_duration:.3f} = {real_time_factor:.3f}"
        )

        logging.info(f"***  Saved to {output_filename} ***")
        print(f"***  Saved to {output_filename} ***")

        play_back_thread.join()
        self.busy = False

        try:
            sf.write(
                output_filename,
                audio.samples,
                samplerate=audio.sample_rate,
                subtype="PCM_16",
            )
        except Exception as e:
            # Best-effort save: playback already happened, so just report.
            print(e)

    def play(self):
        """Open a PyAudio output stream and block until playback finishes."""
        chunk = 1024  # frames per buffer handed to the stream callback
        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paFloat32,
                        channels=1,
                        rate=self.model.sample_rate,
                        output_device_index=self.input_device_index,
                        frames_per_buffer=chunk,
                        stream_callback=play_audio_callback,
                        output=True)

        # Poll until either we are killed or generation has finished and
        # the playback queue has been fully drained.
        while True:
            if killed or (started and buffer.empty() and stopped):
                event.set()
                break
            time.sleep(chunk / self.model.sample_rate)

        self.busy = False
        stream.stop_stream()
        stream.close()
        p.terminate()

    def kill(self):
        """Request generation/playback to stop (same effect as Ctrl + C)."""
        global killed
        killed = True

if __name__ == "__main__":
    # Timestamped log lines with the emitting file and line number.
    log_format = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    logging.basicConfig(format=log_format, level=logging.INFO)

    try:
        demo_text = (
            '春眠不觉晓，处处闻啼鸟。夜来风雨声，花落知多少。'
            '赏析这首诗是诗人隐居在鹿门山时所做，意境十分优美。'
            '诗人抓住春天的早晨刚刚醒来时的一瞬间展开描写和联想'
        )
        engine = SpeechSynthesizer()
        engine.tts(demo_text)
        engine.tts("中文可以的亲。")
    except KeyboardInterrupt:
        print("\nCaught Ctrl + C. Exiting")
        # Ask the generation/playback callbacks to stop before exiting.
        killed = True
        sys.exit(0)