#!/usr/bin/env python
# coding: utf-8

# In[3]:


import wave
import matplotlib.pyplot as plt
import numpy as np
import os


# In[4]:


def plot_energy(audio):
    """Plot the time-domain waveform (amplitude vs. time) of a WAV file.

    Parameters
    ----------
    audio : str
        Path to a WAV file. Samples are assumed to be 16-bit PCM
        (sampwidth == 2) -- TODO confirm for all input files.

    Side effects: prints raw byte/sample counts and the min/max amplitude,
    and draws onto the current matplotlib figure.
    """
    # Context manager guarantees the reader is closed even on error
    # (the original leaked the open wave object).
    with wave.open(audio, 'rb') as f:
        nchannels, sampwidth, framerate, nframes = f.getparams()[:4]
        strData = f.readframes(nframes)  # raw audio as bytes
    # np.fromstring is deprecated (removed for binary input in recent
    # NumPy); frombuffer is the supported zero-copy replacement.
    waveData = np.frombuffer(strData, dtype=np.int16)
    print(len(strData), len( waveData))
    # Samples are interleaved per channel; keep only channel 0 so the
    # data length matches the time axis (one point per frame).  For mono
    # files this is a no-op.
    waveData = waveData.reshape(-1, nchannels)[:, 0]
    # plot the wave
    time = np.arange(0, nframes) * (1.0 / framerate)
    print(max(waveData), min(waveData))
    plt.plot(time, waveData)
    plt.xlabel("Time(s)")
    plt.ylabel("Amplitude")
    plt.title("Single channel wavedata")
    plt.grid(True)  # modern matplotlib requires a bool, not the string 'on'


# In[5]:


def plot_yuputu(audio):
    """Plot the spectrogram (frequency vs. time) of a WAV file.

    Parameters
    ----------
    audio : str
        Path to a WAV file. Samples are assumed to be 16-bit PCM
        (sampwidth == 2) -- TODO confirm for all input files.

    Side effects: prints progress and the last segment time in ms, and
    draws a spectrogram onto the current matplotlib figure.
    """
    # Close the reader deterministically (the original leaked it).
    with wave.open(audio, 'rb') as f:
        nchannels, sampwidth, framerate, nframes = f.getparams()[:4]
        strData = f.readframes(nframes)  # raw audio as bytes
    # frombuffer replaces the deprecated np.fromstring for binary data.
    waveData = np.frombuffer(strData, dtype=np.int16)

    print("plotting spectrogram...")
    framelength = 0.025  # analysis frame length; 20-30 ms is typical
    # Points per frame: N = t * fs.  Use the file's actual sample rate
    # instead of the previously hard-coded 16000 so non-16 kHz files get
    # the intended 25 ms frames.
    framesize = framelength * framerate

    # Snap to the nearest power of two so framesize can double as NFFT
    # (an un-padded FFT wants a power-of-two length).
    framesize = min([32, 64, 128, 256, 512, 1024],
                    key=lambda p: abs(framesize - p))

    NFFT = framesize  # NFFT equals the frame size, i.e. no zero padding
    # Overlap of roughly 1/3 of a frame (1/3 to 1/2 is the usual range).
    overlapSize = int(round(1.0 / 3 * framesize))
    # De-interleave into (nchannels, nframes); channel 0 is analysed.
    waveData = np.reshape(waveData, [nframes, nchannels]).T
    spectrum,freqs,ts,fig = plt.specgram(waveData[0],NFFT = NFFT,Fs =framerate,window=np.hanning(M = framesize),
                                         noverlap=overlapSize,mode='default',scale_by_freq=True,sides='default',
                                         scale='dB',xextent=None)  # draw the spectrogram
    print(np.max(ts) *1000)  # last segment centre time in milliseconds
    plt.ylabel('Frequency')
    plt.xlabel('Time(s)')
    plt.title('Spectrogram')


# In[15]:


import wave as we
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import wavfile
 
# Inspect one recording: print its WAV header fields, then plot the raw
# samples (read via scipy) against a time axis built from the frame count.
WAVE = we.open('audio_wav/surroundings/surrounding1su1578565858.wav')
print('---------声音信息------------')
for item in enumerate(WAVE.getparams()):
     print(item)
a = WAVE.getparams().nframes    # total number of frames
f = WAVE.getparams().framerate  # sample rate in Hz
WAVE.close()                    # header read; close the reader (was leaked)
sample_time = 1/f               # seconds between consecutive samples
time = a/f                      # clip duration in seconds
sample_frequency, audio_sequence = wavfile.read('audio_wav/surroundings/surrounding1su1578565858.wav')
print(audio_sequence)           # per-frame sample values
# Build the axis from the integer frame count.  The previous
# np.arange(0, time, sample_time) used a float step, which can yield one
# point too many or too few and make plt.plot fail on a length mismatch.
x_seq = np.arange(a) * sample_time

plt.plot(x_seq,audio_sequence,'blue')
plt.xlabel("time (s)")


# In[10]:


# Batch visualisation: one notebook cell per recording, each drawing a
# waveform (plot_energy) and/or spectrogram (plot_yuputu) figure.
# Paths suggest recordings grouped per speaker/environment -- confirm
# against the data directory layout.
plot_energy('audio_wav/surroundings/surrounding1su1578565858.wav')


# In[26]:


plot_yuputu('audio_wav_2/surroundings2/surroundings21583303827.wav')


# In[27]:


#plot_energy('audio_wav_2/surroundings2/surroundings21583303614.wav')
plot_yuputu('audio_wav_2/surroundings2/surroundings21583303614.wav')


# In[28]:


#plot_energy('audio_wav_2/surroundings2/surroundings21583303731.wav')
plot_yuputu('audio_wav_2/surroundings2/surroundings21583303731.wav')


# In[29]:


#plot_energy('audio_wav_2/surroundings2/surroundings21583303935.wav')
plot_yuputu('audio_wav_2/surroundings2/surroundings21583303935.wav')


# In[30]:


#plot_energy('audio_wav_2/surroundings2/surroundings21583304000.wav')
plot_yuputu('audio_wav_2/surroundings2/surroundings21583304000.wav')


# In[31]:


#plot_energy('audio_wav_2/linkunling2/linkunling21583301748.wav')
plot_yuputu('audio_wav_2/linkunling2/linkunling21583301748.wav')


# In[32]:


#plot_energy('audio_wav_2/linkunling2/linkunling21583301987.wav')
plot_yuputu('audio_wav_2/linkunling2/linkunling21583301987.wav')


# In[33]:


#plot_energy('audio_wav_2/linkunling2/linkunling21583302280.wav')
plot_yuputu('audio_wav_2/linkunling2/linkunling21583302280.wav')


# In[34]:


#plot_energy('audio_wav_2/suboss2/suboss21583308823.wav')
plot_yuputu('audio_wav_2/suboss2/suboss21583308823.wav')


# In[35]:


#plot_energy('audio_wav_2/suboss2/suboss21583308959.wav')
plot_yuputu('audio_wav_2/suboss2/suboss21583308959.wav')


# In[15]:


#plot_energy('audio_wav_2/xiefei/xiefei1583305609.wav')
plot_yuputu('audio_wav_2/xiefei/xiefei1583305609.wav')


# In[56]:


plot_energy('audio_wav_2/xiefei/xiefei1583305694.wav')
plot_yuputu('audio_wav_2/xiefei/xiefei1583305694.wav')


# In[58]:


plot_energy('audio_wav_2/xiefei/xiefei1583305854.wav')
plot_yuputu('audio_wav_2/xiefei/xiefei1583305854.wav')


# In[ ]:




