
import soundfile as sf
import sherpa_onnx
import time
import os
import pyaudio
import wave
import importlib

class SpeechSynthesizer:
    """Offline Chinese text-to-speech based on a sherpa-onnx Piper VITS model.

    Synthesized audio is written to a temporary WAV file (``self.out_path``)
    and can be played back through PyAudio, preferring a wm8960 sound card
    when one is present.
    """

    def __init__(self):
        # Model files live under the user's Lepi_Data directory.
        self.model_dir = os.path.expanduser(
            "~")+"/Lepi_Data/ros/tts/vits-piper-zh_CN-huayan-medium/"
        self.out_path = "/tmp/out.wav"
        # Default to None so PyAudio falls back to the system default output
        # device when list_audio_devices() finds no wm8960 card. The original
        # left this attribute unset in that case, making play() crash with
        # AttributeError.
        self.input_device_index = None
        vits_model = os.path.join(self.model_dir, "zh_CN-huayan-medium.onnx")
        vits_data_dir = os.path.join(self.model_dir, "espeak-ng-data")
        vits_tokens = os.path.join(self.model_dir, "tokens.txt")
        tts_config = sherpa_onnx.OfflineTtsConfig(
            model=sherpa_onnx.OfflineTtsModelConfig(
                vits=sherpa_onnx.OfflineTtsVitsModelConfig(
                    model=vits_model,
                    # BUG FIX: the lexicon was previously set to the .onnx
                    # model path (copy-paste error). Piper models use the
                    # espeak-ng data dir instead of a lexicon, so leave it
                    # empty.
                    lexicon="",
                    data_dir=vits_data_dir,
                    tokens=vits_tokens,
                ),
                provider='cpu',
                debug=False,
                num_threads=4,
            ),
            # rule_fsts=args.tts_rule_fsts,
            max_num_sentences=2,
        )
        self.model = sherpa_onnx.OfflineTts(tts_config)
        self.list_audio_devices()

    def list_audio_devices(self):
        """Scan PyAudio devices and remember the index of a wm8960 card.

        Leaves ``self.input_device_index`` untouched (None by default) when
        no wm8960 device is present.
        """
        p = pyaudio.PyAudio()
        try:
            for i in range(p.get_device_count()):
                info = p.get_device_info_by_index(i)
                if 'wm8960' in info["name"]:
                    self.input_device_index = info["index"]
                    print("Device {} = {}".format(info["index"], info["name"]))
                    print((i, info['maxInputChannels'], info['maxOutputChannels']))
        finally:
            # The original leaked this PyAudio instance.
            p.terminate()

    def tts(self, text):
        """Synthesize *text* into a 16-bit PCM WAV file at ``self.out_path``.

        Prints timing statistics (elapsed time, audio duration, real-time
        factor). Returns None; on generation failure it prints an error and
        leaves any existing output file untouched.
        """
        start = time.time()
        audio = self.model.generate(text, sid=0, speed=1)
        end = time.time()

        if len(audio.samples) == 0:
            print("Error in generating audios. Please read previous error messages.")
            return

        elapsed_seconds = end - start
        audio_duration = len(audio.samples) / audio.sample_rate
        real_time_factor = elapsed_seconds / audio_duration

        sf.write(
            self.out_path,
            audio.samples,
            samplerate=audio.sample_rate,
            subtype="PCM_16",
        )
        print(f"Saved to {self.out_path}")
        print(f"The text is '{text}'")
        print(f"Elapsed seconds: {elapsed_seconds:.3f}")
        print(f"Audio duration in seconds: {audio_duration:.3f}")
        print(
            f"RTF: {elapsed_seconds:.3f}/{audio_duration:.3f} = {real_time_factor:.3f}")

    def play(self, file):
        """Stream a WAV *file* to the selected output device in 1024-frame chunks."""
        chunk = 1024
        p = pyaudio.PyAudio()
        try:
            # BUG FIX: the wave file and stream were never closed on error in
            # the original; use context management / finally for cleanup.
            with wave.open(file, "rb") as f:
                print(f.getnchannels())
                stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
                                channels=f.getnchannels(),
                                rate=f.getframerate(),
                                output_device_index=self.input_device_index,
                                output=True)
                try:
                    data = f.readframes(chunk)
                    while len(data) > 0:
                        stream.write(data)
                        data = f.readframes(chunk)
                finally:
                    stream.stop_stream()
                    stream.close()
        finally:
            p.terminate()

if __name__ == "__main__":
    # Quick manual check: synthesize a short Chinese sentence to /tmp/out.wav.
    tts_engine = SpeechSynthesizer()
    tts_engine.tts("中文可以的亲。")
