{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import time\n",
    "import tensorflow as tf\n",
    "import matplotlib.pyplot as plt\n",
    "from loss import *\n",
    "from Unet import Unet\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "gpus = tf.config.experimental.list_physical_devices('GPU')\n",
    "tf.config.experimental.set_memory_growth(gpus[0], True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "###########################train_set\n",
    "\n",
    "def trainGenerator(batch_size):\n",
    "\n",
    "    aug_dict = dict(horizontal_flip=True,\n",
    "                        fill_mode='nearest')\n",
    "    aug_dict = dict(horizontal_flip=True,\n",
    "                        fill_mode='nearest')\n",
    "\n",
    "    image_datagen = ImageDataGenerator(**aug_dict)\n",
    "    mask_datagen = ImageDataGenerator(**aug_dict)\n",
    "    image_generator = image_datagen.flow_from_directory(\n",
    "        \"F:/dataset/0_stone\",\n",
    "        classes=[\"image\"],\n",
    "        color_mode = \"rgb\",\n",
    "        target_size = (480, 640),\n",
    "        class_mode = None,\n",
    "        batch_size = batch_size, seed=1)\n",
    "\n",
    "    mask_generator = mask_datagen.flow_from_directory(\n",
    "        \"F:/dataset/0_stone\",\n",
    "        classes=[\"label\"],\n",
    "        color_mode = \"grayscale\",\n",
    "        target_size = (480, 640),\n",
    "        class_mode = None,\n",
    "        batch_size = batch_size, seed=1)\n",
    "\n",
    "    train_generator = zip(image_generator, mask_generator)\n",
    "    for (img,mask) in train_generator:\n",
    "        img = img / 255.\n",
    "        mask = mask / 255.\n",
    "        mask[mask > 0.1] = 1\n",
    "        mask[mask <= 0.1] = 0  \n",
    "        mask = tf.cast(mask,dtype = tf.int32)\n",
    "        mask = tf.squeeze(mask)\n",
    "        mask = tf.one_hot(mask,depth = 2, axis = 3)\n",
    "        yield (img,mask)\n",
    "\n",
    "trainset = trainGenerator(batch_size=2)\n",
    "\n",
    "###########################test_set\n",
    "\n",
    "def testGenerator(batch_size):\n",
    "\n",
    "    aug_dict = dict(horizontal_flip=True,\n",
    "                        fill_mode='nearest')\n",
    "    aug_dict = dict(horizontal_flip=True,\n",
    "                        fill_mode='nearest')\n",
    "\n",
    "    tset_image_datagen = ImageDataGenerator(**aug_dict)\n",
    "    tset_mask_datagen = ImageDataGenerator(**aug_dict)\n",
    "    tset_image_generator = tset_image_datagen.flow_from_directory(\n",
    "        \"F:/dataset/0_stone\",\n",
    "        classes=[\"img_test\"],\n",
    "        color_mode = \"rgb\",\n",
    "        target_size = (480, 640),\n",
    "        class_mode = None,\n",
    "        batch_size = batch_size, seed=2)\n",
    "\n",
    "    tset_mask_generator = tset_mask_datagen.flow_from_directory(\n",
    "        \"F:/dataset/0_stone\",\n",
    "        classes=[\"label_test\"],\n",
    "        color_mode = \"grayscale\",\n",
    "        target_size = (480, 640),\n",
    "        class_mode = None,\n",
    "        batch_size = batch_size, seed=2)\n",
    "\n",
    "    test_generator = zip(tset_image_generator, tset_mask_generator)\n",
    "    for (img,mask) in test_generator:\n",
    "        img = img / 255.\n",
    "        mask = mask / 255.\n",
    "        mask[mask > 0.1] = 1\n",
    "        mask[mask <= 0.1] = 0  \n",
    "        mask = tf.cast(mask,dtype = tf.int32)\n",
    "        mask = tf.squeeze(mask,axis = 3)\n",
    "        mask = tf.one_hot(mask,depth = 2, axis = 3)\n",
    "        yield (img,mask)\n",
    "\n",
    "testset = testGenerator(batch_size=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 2600 images belonging to 1 classes.\n",
      "Found 2600 images belonging to 1 classes.\n",
      "WARNING:tensorflow:From C:\\Users\\ZZK\\Anaconda3\\envs\\learn\\lib\\site-packages\\tensorflow\\python\\ops\\math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.where in 2.0, which has the same broadcast rule as np.where\n",
      "1299/1300 [============================>.] - ETA: 0s - loss: 0.2141 - accuracy: 0.9089 - mean_io_u: 0.2731Found 100 images belonging to 1 classes.\n",
      "Found 100 images belonging to 1 classes.\n",
      "1300/1300 [==============================] - 424s 326ms/step - loss: 0.2139 - accuracy: 0.9089 - mean_io_u: 0.2730 - val_loss: 0.4524 - val_accuracy: 0.7336 - val_mean_io_u: 0.2500\n",
      "1300/1300 [==============================] - 415s 319ms/step - loss: 0.1693 - accuracy: 0.9346 - mean_io_u: 0.2782 - val_loss: 0.4653 - val_accuracy: 0.7251 - val_mean_io_u: 0.2500\n",
      "1300/1300 [==============================] - 414s 318ms/step - loss: 0.1561 - accuracy: 0.9447 - mean_io_u: 0.2926 - val_loss: 0.6928 - val_accuracy: 0.7285 - val_mean_io_u: 0.2577\n",
      "1300/1300 [==============================] - 412s 317ms/step - loss: 0.1516 - accuracy: 0.9486 - mean_io_u: 0.2958 - val_loss: 0.5840 - val_accuracy: 0.7016 - val_mean_io_u: 0.2566\n",
      "1300/1300 [==============================] - 411s 316ms/step - loss: 0.1334 - accuracy: 0.9556 - mean_io_u: 0.3009 - val_loss: 0.6170 - val_accuracy: 0.7515 - val_mean_io_u: 0.2612\n",
      "1300/1300 [==============================] - 412s 317ms/step - loss: 0.1138 - accuracy: 0.9648 - mean_io_u: 0.3111 - val_loss: 0.5311 - val_accuracy: 0.8071 - val_mean_io_u: 0.2510\n",
      "1300/1300 [==============================] - 412s 317ms/step - loss: 0.0998 - accuracy: 0.9711 - mean_io_u: 0.3334 - val_loss: 0.8255 - val_accuracy: 0.8006 - val_mean_io_u: 0.2835\n",
      "1300/1300 [==============================] - 412s 317ms/step - loss: 0.0795 - accuracy: 0.9773 - mean_io_u: 0.3374 - val_loss: 0.5580 - val_accuracy: 0.8514 - val_mean_io_u: 0.3096\n",
      "1300/1300 [==============================] - 412s 317ms/step - loss: 0.0856 - accuracy: 0.9779 - mean_io_u: 0.3450 - val_loss: 1.1428 - val_accuracy: 0.7863 - val_mean_io_u: 0.3094\n",
      "1300/1300 [==============================] - 412s 317ms/step - loss: 0.0659 - accuracy: 0.9821 - mean_io_u: 0.3567 - val_loss: 1.0757 - val_accuracy: 0.8114 - val_mean_io_u: 0.3382\n"
     ]
    }
   ],
   "source": [
     "\n",
     "###########################Train\n",
     "\n",
     "# Build a 2-class U-Net for 480x640 inputs (Unet comes from the local\n",
     "# Unet module; the W/H keyword convention is its constructor's).\n",
     "model = Unet(2, W = 480, H = 640)\n",
     "# NOTE(review): MeanIoU expects integer class predictions, not softmax\n",
     "# probabilities; the ~0.25 val_mean_io_u in the logged output suggests\n",
     "# it is being fed raw network output -- confirm and argmax first if so.\n",
     "model.compile(optimizer = Adam(lr = 2e-4), loss = 'binary_crossentropy', metrics = ['accuracy',tf.keras.metrics.MeanIoU(num_classes=2)])\n",
     "# model.summary()\n",
     "# model.load_weights(\"Unet.h5\")\n",
     "\n",
     "# Train for 10 epochs, one fit_generator call per epoch, saving a\n",
     "# separate weights checkpoint after each (Unet0.h5 .. Unet9.h5).\n",
     "# NOTE(review): fit_generator is deprecated in newer TF; model.fit\n",
     "# accepts generators directly -- confirm before migrating TF versions.\n",
     "for epoch in range(10):\n",
     "    history = model.fit_generator(trainset,steps_per_epoch=1300,epochs=1,validation_data=testset,validation_steps=100, validation_freq=1)\n",
     "    model.save_weights(\"Unet\"+str(epoch)+\".h5\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "images,mask = next(trainset)\n",
    "# model.load_weights(\"model3.h5\")\n",
    "total_time = 0\n",
    "for ti in range(10):\n",
    "    start_time = time.time()\n",
    "    pred = model.predict(images)\n",
    "    total_time += (time.time() - start_time)\n",
    "print('480x640平均预测时间：',total_time/20)\n",
    "pred[pred > 0.5] = 1\n",
    "pred[pred <= 0.5] = 0 \n",
    "\n",
    "for i in range(2):\n",
    "    fig = plt.figure(figsize = (8,16))\n",
    "    plt.axis('off')\n",
    "    plt.imshow(images[i])\n",
    "    plt.show()\n",
    "    fig = plt.figure(figsize = (8,16))\n",
    "    plt.axis('off')\n",
    "    plt.imshow(tf.squeeze(pred[i]))\n",
    "    plt.show()\n",
    "#     o = 0\n",
    "#     m = tf.keras.metrics.MeanIoU(num_classes=2)\n",
    "#     for i in range(100):\n",
    "#         image,mask = next(testset)\n",
    "#         out = model.predict(image)\n",
    "#         _ = m.update_state(tf.squeeze(out),tf.squeeze(mask))\n",
    "#         o += m.result().numpy()\n",
    "#     print('epoch:',epoch,'m miou:',o/100.)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
