import pyaudio
import time
import threading
import wave
import winsound
import matplotlib.pyplot as pl
import numpy as np
from pydub import AudioSegment
import os
from PyQt5.QtCore import QObject, pyqtSignal


class Recorder(QObject):
    """Microphone recorder.

    Captures 16-bit PCM audio on a background thread, saves the capture as a
    WAV file, converts it to MP3 (emitting ``generated_mp3``), and can play
    back / plot the waveform of the last saved recording.
    """

    # Emitted with the absolute-ish path of the MP3 produced by save().
    generated_mp3 = pyqtSignal(str)

    def __init__(self, chunk=256, channels=1, rate=16000, data_dir="."):
        """Initialize the recorder.

        Args:
            chunk: frames read per buffer from the input stream.
            channels: number of input channels (1 = mono).
            rate: sample rate in Hz.
            data_dir: directory where WAV/MP3 files are written.
        """
        super().__init__()

        self.CHUNK = chunk
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = channels
        self.RATE = rate
        self._running = False
        self._frames = []
        self._thread = None          # handle to the worker, so save() can join it
        self.data_dir = data_dir
        self.mp3FileName = None      # path of the last MP3 written by save()
        self.waveFileName = None     # path of the last WAV written by save()

    def __recording(self):
        """Worker loop: read chunks from the microphone until stopped."""
        if self._running:
            # Already recording — do not start a second capture loop.
            return

        self._running = True
        print('start recording')

        self._frames = []
        p = pyaudio.PyAudio()

        stream = p.open(format=self.FORMAT,
                        channels=self.CHANNELS,
                        rate=self.RATE,
                        input=True,
                        frames_per_buffer=self.CHUNK)

        try:
            while self._running:
                self._frames.append(stream.read(self.CHUNK))
        finally:
            # BUG FIX: release the audio device even if read() raises.
            stream.stop_stream()
            stream.close()
            p.terminate()

    def start(self):
        """Start recording on a background daemon thread (non-blocking)."""
        # BUG FIX: threading._start_new_thread is a private alias of
        # _thread.start_new_thread; use the public Thread API and keep the
        # handle so save() can wait for the worker to finish.
        self._thread = threading.Thread(target=self.__recording, daemon=True)
        self._thread.start()

    def stop(self):
        """Signal the recording loop to stop (the worker exits shortly after)."""
        self._running = False

    def save(self):
        """Stop recording, write the frames to a WAV file, then convert to MP3.

        Emits ``generated_mp3`` with the MP3 path and updates
        ``waveFileName`` / ``mp3FileName``.
        """
        self._running = False
        # BUG FIX: wait for the worker so _frames is complete and the input
        # device is closed before we serialize the data.
        if self._thread is not None and self._thread.is_alive():
            self._thread.join()

        p = pyaudio.PyAudio()
        wavefile = "{0}.{1}".format(int(time.time()), "wav")
        # BUG FIX: plain concatenation with the default data_dir="." produced
        # hidden files like ".1694....wav"; join paths properly.
        wave_path = os.path.join(self.data_dir, wavefile)

        try:
            wf = wave.open(wave_path, 'wb')
            try:
                wf.setnchannels(self.CHANNELS)
                wf.setsampwidth(p.get_sample_size(self.FORMAT))
                wf.setframerate(self.RATE)
                wf.writeframes(b''.join(self._frames))
            finally:
                wf.close()
        finally:
            # BUG FIX: the PyAudio instance was never terminated (leak).
            p.terminate()

        self.waveFileName = wave_path
        mp3_file = self.trans2_mp3(wavefile)

        print("Saved")
        self.generated_mp3.emit(mp3_file)
        self.mp3FileName = mp3_file

    def play(self):
        """Play the last saved WAV (blocking), then plot its waveform."""
        winsound.PlaySound(self.waveFileName, winsound.SND_FILENAME)

        # Read the WAV back for plotting.
        file = wave.open(self.waveFileName, "rb")
        try:
            # (nchannels, sampwidth, framerate, nframes, comptype, compname)
            params = file.getparams()
            nchannels, sampwidth, framerate, nframes = params[:4]
            print(params)
            str_data = file.readframes(nframes)
        finally:
            file.close()

        # BUG FIX: np.short was removed in NumPy 2.0; int16 matches paInt16.
        wave_data = np.frombuffer(str_data, dtype=np.int16)

        # Normalize to [-1, 1]; guard against an all-silence recording
        # (the original divided by zero in that case).
        peak = np.abs(wave_data).max()
        if peak:
            wave_data = wave_data / peak
        else:
            wave_data = wave_data.astype(float)

        # One row per channel: row i holds all samples of channel i.
        wave_data = np.reshape(wave_data, [nframes, nchannels]).T
        print(wave_data)

        # BUG FIX (shadowing): the original named this local `time`,
        # hiding the `time` module. t = n / fs.
        t = np.arange(0, nframes) * (1.0 / framerate)

        pl.plot(t, wave_data[0, :nframes], c="b")

        pl.xlabel("time(seconds)")
        pl.ylabel("amplitude")
        pl.title("original wave")

        pl.show()

    def trans2_mp3(self, file):
        """Convert a WAV file inside ``data_dir`` to MP3.

        Args:
            file: file name (not path) of the WAV inside ``data_dir``.

        Returns:
            Path of the exported MP3 file.
        """
        # BUG FIX: join paths instead of concatenating strings.
        src_path = os.path.join(self.data_dir, file)
        temp = AudioSegment.from_file(src_path)
        mp3file = "{0}.{1}".format(os.path.splitext(file)[0], "mp3")
        mp3_path = os.path.join(self.data_dir, mp3file)
        temp.export(mp3_path, format="mp3")
        return mp3_path