{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████████████████████████████████████████████████████████████████████████| 1980/1980 [06:16<00:00,  5.25it/s]\n",
      "100%|██████████████████████████████████████████████████████████████████████████████████| 20/20 [00:04<00:00,  4.72it/s]\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import random\n",
    "\n",
    "import librosa\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tqdm import tqdm\n",
    "\n",
    "\n",
    "# 获取浮点数组\n",
    "def _float_feature(value):\n",
    "    if not isinstance(value, list):\n",
    "        value = [value]\n",
    "    return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n",
    "\n",
    "\n",
    "# 获取整型数据\n",
    "def _int64_feature(value):\n",
    "    if not isinstance(value, list):\n",
    "        value = [value]\n",
    "    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n",
    "\n",
    "\n",
    "# 把数据添加到TFRecord中\n",
    "def data_example(data, label):\n",
    "    feature = {\n",
    "        'data': _float_feature(data),\n",
    "        'label': _int64_feature(label),\n",
    "    }\n",
    "    return tf.train.Example(features=tf.train.Features(feature=feature))\n",
    "\n",
    "\n",
    "# 开始创建tfrecord数据\n",
    "def create_data_tfrecord(data_list_path, save_path):\n",
    "    with open(data_list_path, 'r') as f:\n",
    "        data = f.readlines()\n",
    "    with tf.io.TFRecordWriter(save_path) as writer:\n",
    "        for d in tqdm(data):\n",
    "            try:\n",
    "                path, label = d.replace('\\n', '').split('\\t')\n",
    "                wav, sr = librosa.load(path, sr=16000)\n",
    "                intervals = librosa.effects.split(wav, top_db=80)\n",
    "                wav_output = []\n",
    "                # [可能需要修改参数] 音频长度 16000 * 秒数\n",
    "                wav_len = int(16000 * 2.04)\n",
    "                for sliced in intervals:\n",
    "                    wav_output.extend(wav[sliced[0]:sliced[1]])\n",
    "                for i in range(5):\n",
    "                    # 裁剪过长的音频，过短的补0\n",
    "                    if len(wav_output) > wav_len:\n",
    "                        l = len(wav_output) - wav_len\n",
    "                        r = random.randint(0, l)\n",
    "                        wav_output = wav_output[r:wav_len + r]\n",
    "                    else:\n",
    "                        wav_output.extend(np.zeros(shape=[wav_len - len(wav_output)], dtype=np.float32))\n",
    "                    wav_output = np.array(wav_output)\n",
    "                    # 转成梅尔频谱\n",
    "                    ps = librosa.feature.melspectrogram(y=wav_output, sr=sr, hop_length=256).reshape(-1).tolist()\n",
    "                    # [可能需要修改参数] 梅尔频谱shape ，librosa.feature.melspectrogram(y=wav_output, sr=sr, hop_length=256).shape\n",
    "                    if len(ps) != 128 * 128: continue\n",
    "                    tf_example = data_example(ps, int(label))\n",
    "                    writer.write(tf_example.SerializeToString())\n",
    "                    if len(wav_output) <= wav_len:\n",
    "                        break\n",
    "            except Exception as e:\n",
    "                print(e)\n",
    "\n",
    "\n",
    "# 生成数据列表\n",
    "def get_data_list(audio_path, list_path):\n",
    "    sound_sum = 0\n",
    "    audios = os.listdir(audio_path)\n",
    "\n",
    "    f_train = open(os.path.join(list_path, 'train_list.txt'), 'w')\n",
    "    f_test = open(os.path.join(list_path, 'test_list.txt'), 'w')\n",
    "\n",
    "    for i in range(len(audios)):\n",
    "#         sounds = os.listdir(os.path.join(audio_path, audios[i]))\n",
    "#         for sound in sounds:\n",
    "        lb=audios[i].split('-')[3].split('.')[0]\n",
    "        sound_path = os.path.join(audio_path, audios[i])\n",
    "        t = librosa.get_duration(filename=sound_path)\n",
    "            # [可能需要修改参数] 过滤小于2.1秒的音频\n",
    "        if t >= 2.1:\n",
    "            if sound_sum % 100 == 0:\n",
    "                f_test.write('%s\\t%s\\n' % (sound_path, lb))\n",
    "            else:\n",
    "                f_train.write('%s\\t%s\\n' % (sound_path, lb))\n",
    "            sound_sum += 1\n",
    "#         print(\"Audio：%d/%d\" % (i + 1, len(audios)))\n",
    "\n",
    "    f_test.close()\n",
    "    f_train.close()\n",
    "\n",
    "\n",
    "# 创建UrbanSound8K数据列表\n",
    "def get_urbansound8k_list(path, urbansound8k_cvs_path):\n",
    "    data_list = []\n",
    "    data = pd.read_csv(urbansound8k_cvs_path)\n",
    "    # 过滤掉长度少于3秒的音频\n",
    "    valid_data = data[['slice_file_name', 'fold', 'classID', 'class']][data['end'] - data['start'] >= 3]\n",
    "    valid_data['path'] = 'fold' + valid_data['fold'].astype('str') + os.sep + valid_data['slice_file_name'].astype('str')\n",
    "    for row in valid_data.itertuples():\n",
    "        data_list.append([row.path, row.classID])\n",
    "\n",
    "    f_train = open(os.path.join(path, 'train_list.txt'), 'w')\n",
    "    f_test = open(os.path.join(path, 'test_list.txt'), 'w')\n",
    "\n",
    "    for i, data in enumerate(data_list):\n",
    "        sound_path = os.path.join('dataset/UrbanSound8K/audio/', data[0])\n",
    "        if i % 100 == 0:\n",
    "            f_test.write('%s\\t%d\\n' % (sound_path, data[1]))\n",
    "        else:\n",
    "            f_train.write('%s\\t%d\\n' % (sound_path, data[1]))\n",
    "\n",
    "    f_test.close()\n",
    "    f_train.close()\n",
    "\n",
    "\n",
     "if __name__ == '__main__':\n",
     "    # Optional: build lists for UrbanSound8K instead of the custom dataset.\n",
     "    # get_urbansound8k_list('dataset', 'dataset/UrbanSound8K/metadata/UrbanSound8K.csv')\n",
     "    # Step 1: scan dataset/audio and write train/test list files.\n",
     "    get_data_list('dataset'+os.sep+'audio', 'dataset')\n",
     "    # Step 2: convert each list into a TFRecord file.\n",
     "    create_data_tfrecord('dataset'+os.sep+'train_list.txt', 'dataset'+os.sep+'train.tfrecord')\n",
     "    create_data_tfrecord('dataset'+os.sep+'test_list.txt', 'dataset'+os.sep+'test.tfrecord')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "resnet50v2 (Functional)      (None, 4, None, 2048)     23558528  \n",
      "_________________________________________________________________\n",
      "activity_regularization (Act (None, 4, None, 2048)     0         \n",
      "_________________________________________________________________\n",
      "dropout (Dropout)            (None, 4, None, 2048)     0         \n",
      "_________________________________________________________________\n",
      "global_max_pooling2d (Global (None, 2048)              0         \n",
      "_________________________________________________________________\n",
      "dense (Dense)                (None, 50)                102450    \n",
      "=================================================================\n",
      "Total params: 23,660,978\n",
      "Trainable params: 23,615,538\n",
      "Non-trainable params: 45,440\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
     "import tensorflow as tf\n",
     "import reader\n",
     "import numpy as np\n",
     "import os\n",
     "\n",
     "# Training configuration.\n",
     "class_dim = 50     # number of output classes\n",
     "EPOCHS = 100\n",
     "BATCH_SIZE = 32\n",
     "init_model = None  # path to pretrained weights, or None to train from scratch\n",
     "\n",
     "# ResNet50V2 backbone (no ImageNet weights) over (128, time, 1)\n",
     "# mel-spectrogram input, followed by pooling and a softmax classifier.\n",
     "model = tf.keras.models.Sequential([\n",
     "    tf.keras.applications.ResNet50V2(include_top=False, weights=None, input_shape=(128, None, 1)),\n",
     "    tf.keras.layers.ActivityRegularization(l2=0.5),\n",
     "    tf.keras.layers.Dropout(rate=0.5),\n",
     "    tf.keras.layers.GlobalMaxPooling2D(),\n",
     "    tf.keras.layers.Dense(units=class_dim, activation=tf.nn.softmax)\n",
     "])\n",
     "\n",
     "model.summary()\n",
     "\n",
     "\n",
     "# Optimizer.\n",
     "optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n",
     "\n",
     "# TFRecord datasets via the project-local `reader` module.\n",
     "train_dataset = reader.train_reader_tfrecord('dataset'+os.sep+'train.tfrecord', EPOCHS, batch_size=BATCH_SIZE)\n",
     "test_dataset = reader.test_reader_tfrecord('dataset'+os.sep+'test.tfrecord', batch_size=BATCH_SIZE)\n",
     "\n",
     "if init_model:\n",
     "    model.load_weights(init_model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    " "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'models\\\\200resnet50.h5'"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# NOTE(review): scratch cell — `batch_id` is only defined by the training\n",
     "# loop in a later cell, so this fails on Restart & Run All.\n",
     "'models'+os.sep+str(batch_id)+'resnet50.h5'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Batch 0, Loss 3.533340, Accuracy 0.093750\n",
      "Batch 40, Loss 2.951904, Accuracy 0.156250\n",
      "Batch 80, Loss 3.566919, Accuracy 0.062500\n",
      "Batch 120, Loss 2.893398, Accuracy 0.218750\n",
      "Batch 160, Loss 2.666479, Accuracy 0.218750\n",
      "Batch 200, Loss 2.581537, Accuracy 0.281250\n",
      "=================================================\n",
      "Test, Loss 2.734011, Accuracy 0.200000\n",
      "=================================================\n",
      "Batch 240, Loss 2.732410, Accuracy 0.250000\n",
      "Batch 280, Loss 1.995521, Accuracy 0.343750\n",
      "Batch 320, Loss 1.820781, Accuracy 0.500000\n",
      "Batch 360, Loss 2.255757, Accuracy 0.375000\n",
      "Batch 400, Loss 1.992220, Accuracy 0.437500\n",
      "=================================================\n",
      "Test, Loss 3.128675, Accuracy 0.200000\n",
      "=================================================\n",
      "Batch 440, Loss 1.561840, Accuracy 0.593750\n",
      "Batch 480, Loss 1.726945, Accuracy 0.531250\n",
      "Batch 520, Loss 1.198723, Accuracy 0.656250\n",
      "Batch 560, Loss 0.968574, Accuracy 0.687500\n",
      "Batch 600, Loss 1.028559, Accuracy 0.687500\n",
      "=================================================\n",
      "Test, Loss 2.508014, Accuracy 0.300000\n",
      "=================================================\n",
      "Batch 640, Loss 0.732929, Accuracy 0.750000\n",
      "Batch 680, Loss 1.020443, Accuracy 0.687500\n",
      "Batch 720, Loss 0.988706, Accuracy 0.718750\n",
      "Batch 760, Loss 0.534858, Accuracy 0.875000\n",
      "Batch 800, Loss 0.936994, Accuracy 0.781250\n",
      "=================================================\n",
      "Test, Loss 2.414377, Accuracy 0.400000\n",
      "=================================================\n",
      "Batch 840, Loss 0.741856, Accuracy 0.750000\n",
      "Batch 880, Loss 0.491015, Accuracy 0.812500\n",
      "Batch 920, Loss 0.731383, Accuracy 0.750000\n",
      "Batch 960, Loss 0.417791, Accuracy 0.843750\n",
      "Batch 1000, Loss 0.457134, Accuracy 0.812500\n",
      "=================================================\n",
      "Test, Loss 2.626027, Accuracy 0.400000\n",
      "=================================================\n",
      "Batch 1040, Loss 0.443513, Accuracy 0.843750\n",
      "Batch 1080, Loss 0.319935, Accuracy 0.937500\n",
      "Batch 1120, Loss 0.338265, Accuracy 0.906250\n",
      "Batch 1160, Loss 0.478958, Accuracy 0.875000\n",
      "Batch 1200, Loss 0.500433, Accuracy 0.875000\n",
      "=================================================\n",
      "Test, Loss 3.449042, Accuracy 0.350000\n",
      "=================================================\n",
      "Batch 1240, Loss 0.356545, Accuracy 0.843750\n",
      "Batch 1280, Loss 0.317320, Accuracy 0.906250\n",
      "Batch 1320, Loss 0.368176, Accuracy 0.906250\n",
      "Batch 1360, Loss 0.431702, Accuracy 0.875000\n",
      "Batch 1400, Loss 0.190795, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 3.225308, Accuracy 0.450000\n",
      "=================================================\n",
      "Batch 1440, Loss 0.182751, Accuracy 0.968750\n",
      "Batch 1480, Loss 0.318278, Accuracy 0.875000\n",
      "Batch 1520, Loss 0.630856, Accuracy 0.843750\n",
      "Batch 1560, Loss 0.251629, Accuracy 0.937500\n",
      "Batch 1600, Loss 0.280315, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 3.056355, Accuracy 0.450000\n",
      "=================================================\n",
      "Batch 1640, Loss 0.327205, Accuracy 0.937500\n",
      "Batch 1680, Loss 0.152419, Accuracy 0.937500\n",
      "Batch 1720, Loss 0.300738, Accuracy 0.906250\n",
      "Batch 1760, Loss 0.160329, Accuracy 0.968750\n",
      "Batch 1800, Loss 0.189739, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 3.854344, Accuracy 0.450000\n",
      "=================================================\n",
      "Batch 1840, Loss 0.004416, Accuracy 1.000000\n",
      "Batch 1880, Loss 0.222920, Accuracy 0.937500\n",
      "Batch 1920, Loss 0.094991, Accuracy 0.968750\n",
      "Batch 1960, Loss 0.291883, Accuracy 0.937500\n",
      "Batch 2000, Loss 0.017321, Accuracy 1.000000\n",
      "=================================================\n",
      "Test, Loss 4.377239, Accuracy 0.450000\n",
      "=================================================\n",
      "Batch 2040, Loss 0.120003, Accuracy 0.968750\n",
      "Batch 2080, Loss 0.233875, Accuracy 0.937500\n",
      "Batch 2120, Loss 0.259312, Accuracy 0.937500\n",
      "Batch 2160, Loss 0.172207, Accuracy 0.937500\n",
      "Batch 2200, Loss 0.168171, Accuracy 0.875000\n",
      "=================================================\n",
      "Test, Loss 5.994026, Accuracy 0.400000\n",
      "=================================================\n",
      "Batch 2240, Loss 0.381432, Accuracy 0.781250\n",
      "Batch 2280, Loss 0.355187, Accuracy 0.843750\n",
      "Batch 2320, Loss 0.265028, Accuracy 0.875000\n",
      "Batch 2360, Loss 0.974807, Accuracy 0.843750\n",
      "Batch 2400, Loss 0.390142, Accuracy 0.875000\n",
      "=================================================\n",
      "Test, Loss 4.191075, Accuracy 0.400000\n",
      "=================================================\n",
      "Batch 2440, Loss 0.376469, Accuracy 0.906250\n",
      "Batch 2480, Loss 0.042721, Accuracy 1.000000\n",
      "Batch 2520, Loss 0.279509, Accuracy 0.906250\n",
      "Batch 2560, Loss 0.150098, Accuracy 0.968750\n",
      "Batch 2600, Loss 0.127768, Accuracy 0.968750\n",
      "=================================================\n",
      "Test, Loss 4.804163, Accuracy 0.350000\n",
      "=================================================\n",
      "Batch 2640, Loss 0.052123, Accuracy 1.000000\n",
      "Batch 2680, Loss 0.210315, Accuracy 0.937500\n",
      "Batch 2720, Loss 0.232213, Accuracy 0.875000\n",
      "Batch 2760, Loss 0.163480, Accuracy 0.906250\n",
      "Batch 2800, Loss 0.169015, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 4.767446, Accuracy 0.350000\n",
      "=================================================\n",
      "Batch 2840, Loss 0.151670, Accuracy 0.968750\n",
      "Batch 2880, Loss 0.016436, Accuracy 1.000000\n",
      "Batch 2920, Loss 0.074167, Accuracy 0.968750\n",
      "Batch 2960, Loss 0.037970, Accuracy 0.968750\n",
      "Batch 3000, Loss 0.094486, Accuracy 0.968750\n",
      "=================================================\n",
      "Test, Loss 4.636966, Accuracy 0.400000\n",
      "=================================================\n",
      "Batch 3040, Loss 0.076989, Accuracy 0.968750\n",
      "Batch 3080, Loss 0.376846, Accuracy 0.968750\n",
      "Batch 3120, Loss 0.011723, Accuracy 1.000000\n",
      "Batch 3160, Loss 0.043861, Accuracy 1.000000\n",
      "Batch 3200, Loss 0.732259, Accuracy 0.906250\n",
      "=================================================\n",
      "Test, Loss 4.366788, Accuracy 0.550000\n",
      "=================================================\n",
      "Batch 3240, Loss 0.040385, Accuracy 0.968750\n",
      "Batch 3280, Loss 0.028031, Accuracy 1.000000\n",
      "Batch 3320, Loss 0.014159, Accuracy 1.000000\n",
      "Batch 3360, Loss 0.014766, Accuracy 1.000000\n",
      "Batch 3400, Loss 0.218708, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 3.335043, Accuracy 0.500000\n",
      "=================================================\n",
      "Batch 3440, Loss 0.141225, Accuracy 0.937500\n",
      "Batch 3480, Loss 0.002641, Accuracy 1.000000\n",
      "Batch 3520, Loss 0.007849, Accuracy 1.000000\n",
      "Batch 3560, Loss 0.000828, Accuracy 1.000000\n",
      "Batch 3600, Loss 0.195873, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 4.472800, Accuracy 0.350000\n",
      "=================================================\n",
      "Batch 3640, Loss 0.089819, Accuracy 0.968750\n",
      "Batch 3680, Loss 0.070358, Accuracy 0.968750\n",
      "Batch 3720, Loss 0.166913, Accuracy 0.937500\n",
      "Batch 3760, Loss 0.464217, Accuracy 0.906250\n",
      "Batch 3800, Loss 0.285482, Accuracy 0.906250\n",
      "=================================================\n",
      "Test, Loss 4.292542, Accuracy 0.400000\n",
      "=================================================\n",
      "Batch 3840, Loss 0.150707, Accuracy 0.937500\n",
      "Batch 3880, Loss 0.165344, Accuracy 0.968750\n",
      "Batch 3920, Loss 0.113518, Accuracy 0.968750\n",
      "Batch 3960, Loss 0.332440, Accuracy 0.906250\n",
      "Batch 4000, Loss 0.094820, Accuracy 0.968750\n",
      "=================================================\n",
      "Test, Loss 2.685768, Accuracy 0.650000\n",
      "=================================================\n",
      "Batch 4040, Loss 0.417459, Accuracy 0.875000\n",
      "Batch 4080, Loss 0.036540, Accuracy 0.968750\n",
      "Batch 4120, Loss 0.107030, Accuracy 0.968750\n",
      "Batch 4160, Loss 0.213118, Accuracy 0.937500\n",
      "Batch 4200, Loss 0.139211, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 2.947766, Accuracy 0.600000\n",
      "=================================================\n",
      "Batch 4240, Loss 0.102454, Accuracy 0.968750\n",
      "Batch 4280, Loss 0.045823, Accuracy 0.968750\n",
      "Batch 4320, Loss 0.142689, Accuracy 0.968750\n",
      "Batch 4360, Loss 0.350027, Accuracy 0.937500\n",
      "Batch 4400, Loss 0.036202, Accuracy 0.968750\n",
      "=================================================\n",
      "Test, Loss 2.917231, Accuracy 0.450000\n",
      "=================================================\n",
      "Batch 4440, Loss 0.311403, Accuracy 0.937500\n",
      "Batch 4480, Loss 0.215830, Accuracy 0.937500\n",
      "Batch 4520, Loss 0.138663, Accuracy 0.968750\n",
      "Batch 4560, Loss 0.125071, Accuracy 0.968750\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Batch 4600, Loss 0.136515, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 3.405659, Accuracy 0.350000\n",
      "=================================================\n",
      "Batch 4640, Loss 0.074587, Accuracy 0.968750\n",
      "Batch 4680, Loss 0.209796, Accuracy 0.937500\n",
      "Batch 4720, Loss 0.070937, Accuracy 0.968750\n",
      "Batch 4760, Loss 0.015959, Accuracy 1.000000\n",
      "Batch 4800, Loss 0.003111, Accuracy 1.000000\n",
      "=================================================\n",
      "Test, Loss 4.438962, Accuracy 0.450000\n",
      "=================================================\n",
      "Batch 4840, Loss 0.427853, Accuracy 0.875000\n",
      "Batch 4880, Loss 0.244768, Accuracy 0.906250\n",
      "Batch 4920, Loss 0.452443, Accuracy 0.875000\n",
      "Batch 4960, Loss 0.058182, Accuracy 1.000000\n",
      "Batch 5000, Loss 0.003656, Accuracy 1.000000\n",
      "=================================================\n",
      "Test, Loss 3.982904, Accuracy 0.350000\n",
      "=================================================\n",
      "Batch 5040, Loss 0.024565, Accuracy 1.000000\n",
      "Batch 5080, Loss 0.001842, Accuracy 1.000000\n",
      "Batch 5120, Loss 0.162433, Accuracy 0.937500\n",
      "Batch 5160, Loss 0.004116, Accuracy 1.000000\n",
      "Batch 5200, Loss 0.000518, Accuracy 1.000000\n",
      "=================================================\n",
      "Test, Loss 3.270356, Accuracy 0.550000\n",
      "=================================================\n",
      "Batch 5240, Loss 0.024480, Accuracy 1.000000\n",
      "Batch 5280, Loss 0.066724, Accuracy 0.968750\n",
      "Batch 5320, Loss 0.277925, Accuracy 0.937500\n",
      "Batch 5360, Loss 0.089248, Accuracy 0.968750\n",
      "Batch 5400, Loss 0.372049, Accuracy 0.906250\n",
      "=================================================\n",
      "Test, Loss 4.418758, Accuracy 0.300000\n",
      "=================================================\n",
      "Batch 5440, Loss 0.362740, Accuracy 0.937500\n",
      "Batch 5480, Loss 0.120977, Accuracy 0.968750\n",
      "Batch 5520, Loss 0.089523, Accuracy 1.000000\n",
      "Batch 5560, Loss 0.068706, Accuracy 0.968750\n",
      "Batch 5600, Loss 0.349158, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 3.676811, Accuracy 0.300000\n",
      "=================================================\n",
      "Batch 5640, Loss 0.015712, Accuracy 1.000000\n",
      "Batch 5680, Loss 0.028808, Accuracy 0.968750\n",
      "Batch 5720, Loss 0.009855, Accuracy 1.000000\n",
      "Batch 5760, Loss 0.021660, Accuracy 1.000000\n",
      "Batch 5800, Loss 0.182896, Accuracy 0.937500\n",
      "=================================================\n",
      "Test, Loss 4.840262, Accuracy 0.400000\n",
      "=================================================\n",
      "Batch 5840, Loss 0.000051, Accuracy 1.000000\n",
      "Batch 5880, Loss 0.126197, Accuracy 0.968750\n",
      "Batch 5920, Loss 0.056769, Accuracy 1.000000\n",
      "Batch 5960, Loss 0.067689, Accuracy 0.968750\n",
      "Batch 6000, Loss 0.000031, Accuracy 1.000000\n",
      "=================================================\n",
      "Test, Loss 4.553424, Accuracy 0.450000\n",
      "=================================================\n",
      "Batch 6040, Loss 0.000007, Accuracy 1.000000\n",
      "Batch 6080, Loss 0.060921, Accuracy 0.968750\n",
      "Batch 6120, Loss 0.326197, Accuracy 0.937500\n",
      "Batch 6160, Loss 0.000173, Accuracy 1.000000\n"
     ]
    }
   ],
   "source": [
     "\n",
     "\n",
     "\n",
     "\n",
     "# Custom training loop: per batch, compute loss/accuracy under a gradient\n",
     "# tape, apply gradients, and every 200 batches evaluate on the test set\n",
     "# and checkpoint the model.\n",
     "for batch_id, data in enumerate(train_dataset):\n",
     "    # [tunable] mel-spectrogram shape — assumes 128x128 features;\n",
     "    # TODO confirm it matches the shape written by create_data_tfrecord.\n",
     "    sounds = data['data'].numpy().reshape((-1, 128, 128, 1))\n",
     "    labels = data['label']\n",
     "    # Training step.\n",
     "    with tf.GradientTape() as tape:\n",
     "        predictions = model(sounds)\n",
     "        # Mean batch loss.\n",
     "        train_loss = tf.keras.losses.sparse_categorical_crossentropy(labels, predictions)\n",
     "        train_loss = tf.reduce_mean(train_loss)\n",
     "        # Batch accuracy.\n",
     "        train_accuracy = tf.keras.metrics.sparse_categorical_accuracy(labels, predictions)\n",
     "        train_accuracy = np.sum(train_accuracy.numpy()) / len(train_accuracy.numpy())\n",
     "\n",
     "    # Apply gradients.\n",
     "    gradients = tape.gradient(train_loss, model.trainable_variables)\n",
     "    optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n",
     "\n",
     "    if batch_id % 40 == 0:\n",
     "        print(\"Batch %d, Loss %f, Accuracy %f\" % (batch_id, train_loss.numpy(), train_accuracy))\n",
     "\n",
     "    # Evaluate and checkpoint every 200 batches.\n",
     "    if batch_id % 200 == 0 and batch_id != 0:\n",
     "        test_losses = list()\n",
     "        test_accuracies = list()\n",
     "        for d in test_dataset:\n",
     "            # [tunable] mel-spectrogram shape (same assumption as above)\n",
     "            test_sounds = d['data'].numpy().reshape((-1, 128, 128, 1))\n",
     "            test_labels = d['label']\n",
     "\n",
     "            test_result = model(test_sounds)\n",
     "            # Batch loss.\n",
     "            test_loss = tf.keras.losses.sparse_categorical_crossentropy(test_labels, test_result)\n",
     "            test_loss = tf.reduce_mean(test_loss)\n",
     "            test_losses.append(test_loss)\n",
     "            # Batch accuracy.\n",
     "            test_accuracy = tf.keras.metrics.sparse_categorical_accuracy(test_labels, test_result)\n",
     "            test_accuracy = np.sum(test_accuracy.numpy()) / len(test_accuracy.numpy())\n",
     "            test_accuracies.append(test_accuracy)\n",
     "\n",
     "        print('=================================================')\n",
     "        print(\"Test, Loss %f, Accuracy %f\" % (\n",
     "            sum(test_losses) / len(test_losses), sum(test_accuracies) / len(test_accuracies)))\n",
     "        print('=================================================')\n",
     "\n",
     "        # Save full model and weights checkpoints.\n",
     "        model.save(filepath='models'+os.sep+str(batch_id)+'resnet50.h5')\n",
     "        model.save_weights(filepath='models'+os.sep+str(batch_id)+'model_weights.h5')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in the save file, so the model was *not* compiled. Compile it manually.\n",
      "WARNING:tensorflow:11 out of the last 11 calls to <function Model.make_predict_function.<locals>.predict_function at 0x000002020101EC18> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.\n",
      "音频：dataset\\20211105_214759.wav 的预测结果标签为：7\n"
     ]
    }
   ],
   "source": [
    "import librosa\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import os\n",
    "\n",
    "\n",
    "model = tf.keras.models.load_model('models/2800resnet50.h5')\n",
    "\n",
    "\n",
     "# Load an audio file and convert it to a mel-spectrogram batch of shape\n",
     "# (1, n_mels, time, 1). NOTE(review): top_db=20 here differs from top_db=80\n",
     "# used when building the training data — confirm this is intentional.\n",
     "def load_data(data_path):\n",
     "    wav, sr = librosa.load(data_path, sr=16000)\n",
     "    intervals = librosa.effects.split(wav, top_db=20)\n",
     "    wav_output = []\n",
     "    # Concatenate the non-silent intervals.\n",
     "    for sliced in intervals:\n",
     "        wav_output.extend(wav[sliced[0]:sliced[1]])\n",
     "    assert len(wav_output) >= 8000, \"有效音频小于0.5s\"\n",
     "    wav_output = np.array(wav_output)\n",
     "    ps = librosa.feature.melspectrogram(y=wav_output, sr=sr, hop_length=256).astype(np.float32)\n",
     "    # Add batch and channel dimensions.\n",
     "    ps = ps[np.newaxis, ..., np.newaxis]\n",
     "    return ps\n",
    "\n",
    "\n",
    "def infer(audio_path):\n",
    "    data = load_data(audio_path)\n",
    "    result = model.predict(data)\n",
    "    lab = tf.argmax(result, 1)\n",
    "    return lab\n",
    "\n",
    "\n",
     "if __name__ == '__main__':\n",
     "    # Audio file to run prediction on.\n",
     "    path = 'dataset'+os.sep+'20211105_214759.wav'\n",
     "    label = infer(path)\n",
     "    # NOTE(review): `label` is a length-1 tensor; %d relies on implicit\n",
     "    # integer conversion — consider int(label[0]) for clarity.\n",
     "    print('音频：%s 的预测结果标签为：%d' % (path, label))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Compile with defaults — presumably to address the 'model was *not*\n",
     "# compiled' warning after load_model; not required for predict(). TODO confirm.\n",
     "model.compile()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "音频：dataset\\20211105_214759.wav 的预测结果标签为：30\n"
     ]
    }
   ],
   "source": [
     "# NOTE(review): this cell duplicates the body of infer(); kept as an\n",
     "# inline re-run against the currently loaded model.\n",
     "path = 'dataset'+os.sep+'20211105_214759.wav'\n",
     "data = load_data(path)\n",
     "result = model.predict(data)\n",
     "lab = tf.argmax(result, 1)\n",
     "print('音频：%s 的预测结果标签为：%d' % (path, lab))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
