{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import wave\n",
    "\n",
    "class Wav_FFT(object):\n",
    "    '''\n",
    "    Run an FFT over WAV audio data, then decide whether the signal\n",
    "    looks like a human voice based on spectral energy in the\n",
    "    200-1100 Hz band.\n",
    "    '''\n",
    "    def __init__(self, wav_path=''):\n",
    "        # Path of the WAV file to analyse.\n",
    "        self.path = wav_path\n",
    "\n",
    "    def read_wav(self):\n",
    "        '''Read the WAV file: store nchannels, sampwidth, framerate and\n",
    "        nframes, and keep the waveform as a (nchannels, nframes) int16\n",
    "        array in self.wave_data.'''\n",
    "        with wave.open(self.path, \"rb\") as f:\n",
    "            parameters = f.getparams()\n",
    "            self.nchannels, self.sampwidth, self.framerate, self.nframes = parameters[:4]\n",
    "            self.time_len = self.nframes * 1.0 / self.framerate  # duration in seconds\n",
    "            print(\"声道数: \", self.nchannels)  # channel count: mono or stereo\n",
    "            print(\"量化位数[byte]: \", self.sampwidth)  # bytes collected per sample\n",
    "            print(\"采样频率[Hz]: \", self.framerate)  # samples per second, e.g. 8k/16k/32k/48k/11.025k/22.05k/44.1k Hz\n",
    "            print(\"采样点数: \", self.nframes)  # total number of frames\n",
    "            print(\"声音时长[s]: \", round(self.time_len, 3))  # duration in seconds\n",
    "            # Read the raw frames and decode them as 16-bit signed integers.\n",
    "            # np.frombuffer replaces np.fromstring, which is deprecated and\n",
    "            # removed in modern NumPy.\n",
    "            str_data = f.readframes(self.nframes)\n",
    "            wave_data = np.frombuffer(str_data, dtype=np.short).reshape(-1, self.nchannels)\n",
    "            self.wave_data = wave_data.T\n",
    "\n",
    "    def FFT(self):\n",
    "        '''Compute the per-channel amplitude spectrum of self.wave_data and\n",
    "        store the first nframes//2 bins in self.yf_amplitude (the spectrum\n",
    "        of a real signal is symmetric, so only half is meaningful).'''\n",
    "        yf = np.fft.fft(self.wave_data, self.nframes)\n",
    "        bias = (yf[:, 0] / self.nframes).real  # DC component takes no factor of 2\n",
    "        yf_amplitude = np.abs(yf) * (2.0 / self.nframes)\n",
    "        yf_amplitude[:, 0] = bias  # correct the DC bin (0 Hz)\n",
    "        self.yf_amplitude = yf_amplitude[:, 0:self.nframes // 2]  # keep the meaningful half\n",
    "\n",
    "    def plot(self):\n",
    "        '''Classify the first channel's spectrum: count bins in the human\n",
    "        voice band (200-1100 Hz) with amplitude >= 10 and return 'people'\n",
    "        when at least 1600 such bins are found, else 'not people'.'''\n",
    "        start = int(200 / (self.framerate / self.nframes))  # bin of the lowest human voice frequency\n",
    "        end = int(1100 / (self.framerate / self.nframes))   # bin of the highest human voice frequency\n",
    "        human_rate = self.yf_amplitude[0][start:end]\n",
    "        # Count spectral bins with significant amplitude in the voice band.\n",
    "        x = sum(1 for amp in human_rate if amp >= 10.0)\n",
    "        if x >= 1600.0:\n",
    "            label = 'people'\n",
    "        else:\n",
    "            label = 'not people'\n",
    "        return label\n",
    "  \n",
    "        \n",
    "if __name__ == \"__main__\":\n",
    "    # Analyse one sample recording and report the classification.\n",
    "    analyzer = Wav_FFT(wav_path='audio_wav_2/linkunling2/linkunling21583301748.wav')\n",
    "    analyzer.read_wav()\n",
    "    analyzer.FFT()\n",
    "    print(analyzer.plot())"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
