# -*-coding:utf-8-*-

import os
import sys
import time
import wave

import numpy as np
from pyaudio import PyAudio, paInt16


# NOTE: using Log.log() here would create a circular import with LogUtils
from AutoWool.main.base.Singleton import Singleton


class AudioUtils(Singleton):
    """Utility for recording short audio clips, saving them as WAV files,
    playing them back, and detecting whether a recording contains sound.

    All state lives in class attributes; every method is a ``@staticmethod``
    operating on them, so the class acts as a namespaced singleton.
    """

    audio_path = "./record.wav"  # default output file for recorded audio
    num_samples = 2000  # pyaudio internal buffer size (samples per read)
    sampling_rate = 8000  # sampling frequency in Hz
    level = 1500  # amplitude threshold above which a sample counts as "sound"
    count_num = 20  # a chunk triggers recording when > count_num samples exceed `level`
    save_length = 8  # minimum recording length: save_length * num_samples samples
    voice_string = []  # raw frame chunks (bytes) captured by the last recording

    @staticmethod
    def _save_wav(file_path: str) -> None:
        """Write the frames collected in ``AudioUtils.voice_string`` to a
        mono, 16-bit (2-byte) WAV file at *file_path*."""
        with wave.open(file_path, 'wb') as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)
            wf.setframerate(AudioUtils.sampling_rate)
            # voice_string is a list of raw byte chunks: join them directly.
            # The original np.array(...).tostring() used an API removed in
            # NumPy >= 1.23 and would null-pad unequal-length chunks.
            wf.writeframes(b''.join(AudioUtils.voice_string))
        print('保存音频文件成功，FilePath=' + file_path)

    @staticmethod
    def _start_record_audio(time_count) -> bool:
        """Record from the default input device for at most *time_count* seconds.

        Audio is read in chunks of ``num_samples``. A chunk is kept once more
        than ``count_num`` of its samples exceed ``level``; after a trigger at
        least ``save_length`` chunks are kept. On success the chunks are stored
        in ``AudioUtils.voice_string`` and True is returned; False means
        nothing above the threshold was heard within *time_count* seconds.
        """
        print('开始录制频')
        pa = PyAudio()
        stream = pa.open(format=paInt16, channels=1, rate=AudioUtils.sampling_rate, input=True,
                         frames_per_buffer=AudioUtils.num_samples)

        save_count = 0
        save_buffer = []
        pre_time_stamp = time.time()
        print('pre_time_stamp=' + str(pre_time_stamp))

        try:
            while True:
                # Read num_samples samples as raw bytes.
                string_audio_data = stream.read(AudioUtils.num_samples)
                # np.frombuffer replaces the removed np.fromstring API.
                audio_data = np.frombuffer(string_audio_data, dtype=np.short)
                # Count the samples that exceed the loudness threshold.
                large_sample_count = np.sum(audio_data > AudioUtils.level)

                # Loud chunk: (re)arm the recorder for save_length more chunks.
                if large_sample_count > AudioUtils.count_num:
                    save_count = AudioUtils.save_length
                else:
                    save_count -= 1
                if save_count < 0:
                    save_count = 0

                if save_count > 0:
                    save_buffer.append(string_audio_data)
                elif save_buffer:
                    # Recording window just ended: publish the captured frames.
                    AudioUtils.voice_string = save_buffer
                    print("录制音频成功！ return True")
                    return True

                # Give up once the time budget is exhausted.
                if time.time() - pre_time_stamp > time_count:
                    if save_buffer:
                        AudioUtils.voice_string = save_buffer
                        print("录制音频成功！inner_time_stamp  return True")
                        return True
                    print("录制音频失败！len(save_buffer) =0")
                    return False
        finally:
            # Always release the audio device (the original leaked the stream
            # and never terminated the PyAudio instance).
            stream.stop_stream()
            stream.close()
            pa.terminate()

    @staticmethod
    def _read_audio(file_path) -> None:
        """Play the WAV file at *file_path* through the default output device."""
        wf = wave.open(file_path, 'rb')
        print('开始读取音频,音频文件路径为' + file_path)
        p = PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)

        print('开始播放音频')
        try:
            while True:
                # Read 1000 frames at a time; readframes returns bytes and an
                # empty result at end-of-file.
                data = wf.readframes(1000)
                if not data:
                    print('读取不到 frames，停止读取')
                    break
                # Push the frames to the sound card.
                stream.write(data)
        finally:
            # Release playback resources and the file handle (the original
            # never closed wf).
            stream.stop_stream()
            stream.close()
            p.terminate()
            wf.close()
        print('读取音频文件完毕，播放完毕')

    @staticmethod
    def _raw_judge_has_sound(file_path) -> bool:
        """Return True if the WAV file at *file_path* contains any frames.

        Bug fix: the original initialised the flag to True but unconditionally
        set it to False on reaching end-of-file — which every read loop does —
        so it always returned False. Any non-empty read now marks the file as
        having sound. The unused PyAudio instance is also gone: reading a WAV
        file does not need PortAudio.
        """
        print('开始检测音频是否有声音')
        wf = wave.open(file_path, 'rb')
        print('开始读取音频,音频文件路径为' + file_path)
        is_has_sound = False

        try:
            while True:
                data = wf.readframes(1000)
                if not data:
                    print('读取不到 frames，停止读取')
                    break
                is_has_sound = True
        finally:
            wf.close()

        if not is_has_sound:
            print('没有读取到声音，开始删除' + AudioUtils.audio_path)
            # os.remove(AudioUtils.audio_path)
        print('读取音频文件完毕，检测完毕')
        return is_has_sound

    @staticmethod
    def record_judge_has_sound() -> bool:
        """Record for up to 3 seconds, save the clip, and report whether the
        saved file contains sound.

        Returns False immediately when the recording captured nothing, instead
        of saving stale/empty ``voice_string`` data as the original did.
        """
        if not AudioUtils._start_record_audio(3):
            return False
        AudioUtils._save_wav(AudioUtils.audio_path)
        return AudioUtils._raw_judge_has_sound(AudioUtils.audio_path)

# AudioUtils.record_judge_has_sound()
