{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "amateur-gallery",
   "metadata": {},
   "outputs": [],
   "source": [
    "import glob\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "from tensorflow.keras import layers\n",
    "import os\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "measured-generator",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['Dataset\\\\Hazardous_waste\\\\haz_bet1.jpeg',\n",
       " 'Dataset\\\\Hazardous_waste\\\\haz_bet10.jpeg',\n",
       " 'Dataset\\\\Hazardous_waste\\\\haz_bet100.jpeg']"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Collect every image file path; layout is Dataset/<class_name>/<image_file>.\n",
    "image_path = glob.glob('Dataset/*/*')\n",
    "image_path[:3]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "beneficial-assist",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Hazardous_waste' 'Kitchen_waste' 'Other_trash' 'Recyclable_trash']\n"
     ]
    }
   ],
   "source": [
    "# Extract the class-name directory from each path, e.g.\n",
    "# 'Dataset/Hazardous_waste/x.jpeg' -> 'Hazardous_waste'.\n",
    "# os.path keeps this portable (splitting on '\\\\' only works on Windows).\n",
    "all_label_name = [os.path.basename(os.path.dirname(img_p)) for img_p in image_path]\n",
    "label_name = np.unique(all_label_name)\n",
    "print(label_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "funky-connectivity",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'Hazardous_waste': 0,\n",
       " 'Kitchen_waste': 1,\n",
       " 'Other_trash': 2,\n",
       " 'Recyclable_trash': 3}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Map each English class name to an integer label index.\n",
    "label_to_index = {name: i for i, name in enumerate(label_name)}\n",
    "label_to_index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "nasty-uncertainty",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{0: 'Hazardous_waste',\n",
       " 1: 'Kitchen_waste',\n",
       " 2: 'Other_trash',\n",
       " 3: 'Recyclable_trash'}"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Reverse mapping: integer label index back to class name.\n",
    "index_to_label = {v: k for k, v in label_to_index.items()}\n",
    "index_to_label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "consistent-cookbook",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[3, 3, 3, 3, 3]"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Convert every per-image class name to its integer label.\n",
    "all_index = list(map(label_to_index.get, all_label_name))\n",
    "all_index[-5:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "manufactured-cycle",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shuffle image paths and labels with one shared permutation so each\n",
    "# path keeps its own label; seeded for reproducibility.\n",
    "np.random.seed(2021)\n",
    "random_index =np.random.permutation(len(image_path))\n",
    "image_path = np.array(image_path)[random_index]\n",
    "all_index = np.array(all_index)[random_index]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "residential-circular",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 80/20 train/validation split of the pre-shuffled arrays.\n",
    "num = int(len(image_path) * 0.8)\n",
    "image_train, image_val = image_path[:num], image_path[num:]\n",
    "index_train, index_val = all_index[:num], all_index[num:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "latest-trigger",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(1899, 1) (475, 1)\n"
     ]
    }
   ],
   "source": [
    "# Give labels an explicit trailing axis -> shape (N, 1); the print below\n",
    "# shows the resulting train/val label shapes.\n",
    "index_train = tf.expand_dims(index_train, -1)\n",
    "index_val = tf.expand_dims(index_val, -1)\n",
    "print(index_train.shape, index_val.shape)\n",
    "\n",
    "# Build tf.data pipelines pairing each image path with its label.\n",
    "dataset_train = tf.data.Dataset.from_tensor_slices((image_train, index_train))\n",
    "dataset_val = tf.data.Dataset.from_tensor_slices((image_val, index_val))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "classical-hobby",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_image(path, label, augment=True):\n",
    "    \"\"\"Read one JPEG, resize to 64x64 and scale pixels to [0, 1].\n",
    "\n",
    "    augment=True (the default, backward compatible) also applies random\n",
    "    horizontal/vertical flips; pass augment=False for evaluation data so\n",
    "    validation images stay deterministic.\n",
    "    \"\"\"\n",
    "    image = tf.io.read_file(path)\n",
    "    image = tf.image.decode_jpeg(image, channels=3)\n",
    "    image = tf.image.resize(image, [64, 64])\n",
    "    if augment:\n",
    "        # Data augmentation (random flips) -- intended for training only.\n",
    "        image = tf.image.random_flip_left_right(image)\n",
    "        image = tf.image.random_flip_up_down(image)\n",
    "    image = tf.cast(image, tf.float32) / 255\n",
    "    # tf.data already passes tensors, so no explicit convert_to_tensor needed.\n",
    "    return image, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "pointed-simon",
   "metadata": {},
   "outputs": [],
   "source": [
    "BATCH_SIZE = 16\n",
    "AUTOTUNE = tf.data.experimental.AUTOTUNE  # let tf.data pick the parallelism\n",
    "# NOTE(review): load_image applies random flips, so the validation set is\n",
    "# augmented too -- map it with augmentation disabled. TODO confirm intent.\n",
    "dataset_train = dataset_train.map(load_image, num_parallel_calls=AUTOTUNE)\n",
    "dataset_train = dataset_train.batch(BATCH_SIZE).prefetch(AUTOTUNE)  # overlap I/O with training\n",
    "dataset_val = dataset_val.map(load_image, num_parallel_calls=AUTOTUNE)\n",
    "dataset_val = dataset_val.batch(BATCH_SIZE).prefetch(AUTOTUNE)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "distributed-lobby",
   "metadata": {},
   "source": [
    "训练部分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "democratic-deposit",
   "metadata": {},
   "outputs": [],
   "source": [
    "# create CNN\n",
    "def CNNmodel(input_shape, filters=64, kernel=(3,3), size=4, dropout=0.2, **kwargs):\n",
    "    \"\"\"Build a small CNN classifier that ends in a 4-way softmax.\"\"\"\n",
    "\n",
    "    def conv_block(t, n_filters, k, strides, block_id):\n",
    "        # Conv -> BatchNorm -> ReLU6, all layers named after the block id.\n",
    "        t = layers.Conv2D(n_filters, k, padding='same', use_bias=False, strides=strides, name='conv_%d' % block_id)(t)\n",
    "        t = layers.BatchNormalization(axis=-1, name='conv_%d_bn' % block_id)(t)\n",
    "        return layers.ReLU(6., name='conv_%d_relu' % block_id)(t)\n",
    "\n",
    "    _inputs = layers.Input(shape=input_shape)\n",
    "    x = conv_block(_inputs, 8, (3, 3), (2, 2), 0)\n",
    "\n",
    "    x = layers.AveragePooling2D(name='avg_1')(x)\n",
    "    x = layers.Dropout(dropout, name='dropout_1')(x)\n",
    "\n",
    "    # `size` same-resolution conv blocks, ids 2 .. size+1.\n",
    "    for block_id in range(2, size + 2):\n",
    "        x = conv_block(x, filters, kernel, (1, 1), block_id)\n",
    "\n",
    "    # Strided downsampling conv (no BN/ReLU on this one), then pooling head.\n",
    "    x = layers.Conv2D(64, (3, 3), padding='same', use_bias=False, strides=(2, 2), name='conv_1')(x)\n",
    "    x = layers.AveragePooling2D()(x)\n",
    "    x = layers.Dropout(dropout, name='dropout_2')(x)\n",
    "    x = layers.GlobalAveragePooling2D(name='avg_2')(x)\n",
    "    x = layers.Dropout(dropout, name='dropout_0')(x)\n",
    "    x = layers.Dense(4)(x)\n",
    "    x = layers.Softmax()(x)\n",
    "    return keras.Model(inputs=_inputs, outputs=x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "breathing-google",
   "metadata": {},
   "outputs": [],
   "source": [
    "# LR schedule: halve the learning rate when training accuracy plateaus.\n",
    "reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='accuracy', factor=0.5, patience=4, min_lr=0.0001,verbose=1)\n",
    "# Early stopping on validation accuracy (attach via callbacks= in fit()).\n",
    "earlystop = keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=8,verbose=1)\n",
    "\n",
    "model = CNNmodel(input_shape=(64,64,3),filters=64, kernel=(3,3),size=3)\n",
    "model.compile(optimizer='SGD',loss='sparse_categorical_crossentropy',metrics=['accuracy'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "leading-occasions",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "119/119 - 4s - loss: 1.0877 - accuracy: 0.5271 - val_loss: 1.4423 - val_accuracy: 0.2358 - lr: 0.0100\n"
     ]
    }
   ],
   "source": [
    "# Train; also attach `earlystop`, which was defined above but never used.\n",
    "history = model.fit(dataset_train,validation_data=dataset_val,callbacks=[reduce_lr, earlystop],verbose=2,epochs=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "organizational-public",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the validation-accuracy curve recorded during training.\n",
    "fig, ax = plt.subplots()\n",
    "ax.plot(history.history['val_accuracy'], label='val_acc')\n",
    "ax.set_xlabel('Epochs')\n",
    "ax.set_ylabel('Acc')\n",
    "ax.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "temporal-object",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the trained model to disk for later reloading.\n",
    "model.save('model.h5')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "opened-civilization",
   "metadata": {},
   "source": [
    "模型读取+验证"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eligible-treasure",
   "metadata": {},
   "outputs": [],
   "source": [
    "import cv2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "responsible-escape",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload the saved model from disk to validate the stored artifact.\n",
    "keras_file = 'model.h5'\n",
    "model_read = tf.keras.models.load_model(keras_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "excited-caution",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate the reloaded model on held-out Recyclable_trash test images.\n",
    "test_path = glob.glob('Dataset-test/Recyclable_trash/*')\n",
    "n_test = min(30, len(test_path))  # was a hard-coded 30\n",
    "num = 0  # count of correct (recyclable) predictions\n",
    "for n in range(n_test):\n",
    "    print(test_path[n])\n",
    "    image = cv2.imread(test_path[n])\n",
    "    image = cv2.resize(image, (64, 64))\n",
    "    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; model saw RGB\n",
    "    image_bn = image.astype(\"float32\") / 255.0  # same scaling as training\n",
    "    image_bn = np.expand_dims(image_bn, axis=0)  # add batch axis\n",
    "    pred = model_read.predict(image_bn)  # fix: use the model reloaded from disk\n",
    "    max_index = np.argmax(pred)\n",
    "    print(max_index)\n",
    "    if max_index == 1:\n",
    "        print('厨余垃圾')\n",
    "    elif max_index == 3:\n",
    "        print('可回收垃圾')\n",
    "        num += 1  # fix: count hits on the recyclable class actually under test\n",
    "    elif max_index == 2:\n",
    "        print('其他垃圾')\n",
    "    else:\n",
    "        print('有害垃圾')\n",
    "print(num / n_test if n_test else 'no test images found')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "successful-directory",
   "metadata": {},
   "source": [
    "数据集重命名"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "faced-access",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "path = 'Dataset/Recyclable_trash'\n",
    "\n",
    "# Rename every file in the folder to rec_gla<N>.jpeg (1-based), using\n",
    "# enumerate instead of a manual counter plus fileList[n] indexing.\n",
    "for n, old_base in enumerate(os.listdir(path), start=1):\n",
    "    oldname = os.path.join(path, old_base)\n",
    "    newname = os.path.join(path, 'rec_gla' + str(n) + '.jpeg')\n",
    "    # NOTE(review): the extension is forced to .jpeg whatever the real\n",
    "    # format is -- confirm that is intended before running on mixed data.\n",
    "    os.rename(oldname, newname)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "theoretical-enough",
   "metadata": {},
   "source": [
    "文件异常检测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dated-crash",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scan the dataset folder for files whose first bytes are the BMP magic\n",
    "# 'BM' -- i.e. bitmaps mislabeled with a .jpeg extension, which make the\n",
    "# JPEG decoder raise format errors. Adjust data_dir / magic as needed.\n",
    "import os\n",
    "data_dir = 'Dataset/Recyclable_trash/'  # renamed: `dir` shadowed the builtin\n",
    "for i, filename in enumerate(os.listdir(data_dir)):\n",
    "    filepath = data_dir + filename\n",
    "    with open(filepath, 'rb') as image_file:\n",
    "        # Two bytes suffice for the magic-number check (the original read\n",
    "        # the whole file just to test its prefix).\n",
    "        if image_file.read(2) == b'BM':\n",
    "            # fix: report the offending path (was a literal '(unknown)')\n",
    "            print(f\"{i}: {filepath} - found!\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
