{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "FallDetection_Pose_Based_Models.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": [
        "BrNBIjvKP-85"
      ],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/ryankemmer/FallDetection/blob/master/FallDetection_Pose_Based_Models.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "2hv7AU1iyGXL",
        "colab_type": "text"
      },
      "source": [
        "#**Initialize Runtime**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "R8wtmVy9ZGS3",
        "colab_type": "code",
        "outputId": "375cfe4f-96e7-4589-afa9-c906e5ad31cb",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 122
        }
      },
      "source": [
        "# Mount Google Drive so the dataset folders are reachable under /content/drive.\n",
        "from google.colab import drive\n",
        "drive.mount('/content/drive', force_remount=True)\n",
        "\n",
        "#initialize array of categories (dataset folders) to be included in data\n",
        "categories = ['Coffee_room_01','Home_01','validation','test']"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=email%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdocs.test%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.photos.readonly%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fpeopleapi.readonly&response_type=code\n",
            "\n",
            "Enter your authorization code:\n",
            "··········\n",
            "Mounted at /content/drive\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "kF0E86YT1TmJ",
        "colab_type": "text"
      },
      "source": [
        "# Build Openpose (Run Once)"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "iRsH4LzqNO7y",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Clone and build CMU OpenPose from source (run once per Colab session).\n",
        "import os\n",
        "from os.path import exists, join, basename, splitext\n",
        "\n",
        "git_repo_url = 'https://github.com/CMU-Perceptual-Computing-Lab/openpose.git'\n",
        "project_name = splitext(basename(git_repo_url))[0]\n",
        "if not exists(project_name):\n",
        "  # see: https://github.com/CMU-Perceptual-Computing-Lab/openpose/issues/949\n",
        "  # install new CMake because of CUDA10\n",
        "  !wget -q https://cmake.org/files/v3.13/cmake-3.13.0-Linux-x86_64.tar.gz\n",
        "  !tar xfz cmake-3.13.0-Linux-x86_64.tar.gz --strip-components=1 -C /usr/local\n",
        "  # clone openpose\n",
        "  !git clone -q --depth 1 $git_repo_url\n",
        "  # pin the bundled Caffe submodule to a known-good commit instead of 'master'\n",
        "  !sed -i 's/execute_process(COMMAND git checkout master WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\\/3rdparty\\/caffe)/execute_process(COMMAND git checkout f019d0dfe86f49d1140961f8c7dec22130c83154 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\\/3rdparty\\/caffe)/g' openpose/CMakeLists.txt\n",
        "  # install system dependencies\n",
        "  !apt-get -qq install -y libatlas-base-dev libprotobuf-dev libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler libgflags-dev libgoogle-glog-dev liblmdb-dev opencl-headers ocl-icd-opencl-dev libviennacl-dev\n",
        "  # install python dependencies\n",
        "  !pip install -q youtube-dl\n",
        "  # build openpose\n",
        "  !cd openpose && rm -rf build || true && mkdir build && cd build && cmake .. && make -j`nproc`\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "BrNBIjvKP-85",
        "colab_type": "text"
      },
      "source": [
        "#Preprocess Data (Run Once)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "RbAPXiBuKlHx",
        "colab_type": "text"
      },
      "source": [
        "**Determine categories to be preprocessed (optional)**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Gs3LPaJKImfo",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "categories = ['test2']"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "E4avRIShzNhh",
        "colab_type": "text"
      },
      "source": [
        "**Convert videos to images (run once)**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "fZGYHNO8zIik",
        "colab_type": "code",
        "outputId": "3270c9c1-bd3d-4456-c64c-3c570fbf34c7",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 187
        }
      },
      "source": [
        "# Split every .avi video of each category into individual JPEG frames\n",
        "# under <category>/testdata/video<j>/frame<k>.jpg.\n",
        "import os\n",
        "import cv2\n",
        "import math \n",
        "import glob\n",
        "\n",
        "#Move to fall detection folder\n",
        "os.chdir('/content/drive/My Drive/Fall Detection')\n",
        "\n",
        "#make testdata directory for each category\n",
        "\n",
        "for i in categories:\n",
        "  os.mkdir(os.path.join(str(i),'testdata'))\n",
        "  \n",
        "#for every category, loop through videos and turn them into pictures\n",
        "for i in categories:\n",
        "\n",
        "  path = str(i) + '/Videos/'\n",
        "  video_files = os.listdir(path)  # renamed from 'list' to avoid shadowing the builtin\n",
        "  numvideos = len(video_files)\n",
        "  j = 0\n",
        "  while (j < numvideos):\n",
        "    print(\"Writing Video: \" + str(j))\n",
        "    \n",
        "    # only the first 10 validation videos are converted\n",
        "    if(str(i) == 'validation' and j == 10):\n",
        "      break\n",
        "    \n",
        "    os.mkdir(str(i) + '/testdata' + '/video' + str(j)) # make new directory for each video\n",
        "    vidcap = cv2.VideoCapture(str(i) + '/Videos/' +'video (' + str(j+1) + ').avi')\n",
        "    success,image = vidcap.read()\n",
        "    count = 0\n",
        "    path2 = str(i) + '/testdata' + '/video' + str(j)\n",
        "    # Write frames until the capture runs dry. Do NOT force success=True here:\n",
        "    # if the video failed to open, the first read returned (False, None) and a\n",
        "    # forced loop entry would crash cv2.imwrite on a None image.\n",
        "    while success:\n",
        "      cv2.imwrite(os.path.join(path2 , \"frame%d.jpg\" % count), image)\n",
        "      success,image = vidcap.read()\n",
        "      count += 1\n",
        "    vidcap.release()  # free the decoder handle before the next video\n",
        "    j+=1"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Writing Video: 0\n",
            "Writing Video: 1\n",
            "Writing Video: 2\n",
            "Writing Video: 3\n",
            "Writing Video: 4\n",
            "Writing Video: 5\n",
            "Writing Video: 6\n",
            "Writing Video: 7\n",
            "Writing Video: 8\n",
            "Writing Video: 9\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "14OIcr-DIQ6j",
        "colab_type": "text"
      },
      "source": [
        "**Build openpose library to process images**\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "iOJxp1k8ye87",
        "colab_type": "text"
      },
      "source": [
        "**Run help reference to ensure libraries are correctly installed**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "vGgNH7SF_vEC",
        "colab_type": "code",
        "outputId": "4cf241e7-ad98-46e6-bcf9-ca0c7429ad20",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 88
        }
      },
      "source": [
        "!cd openpose && ./build/examples/openpose/openpose.bin --help"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "shell-init: error retrieving current directory: getcwd: cannot access parent directories: Transport endpoint is not connected\n",
            "chdir: error retrieving current directory: getcwd: cannot access parent directories: Transport endpoint is not connected\n",
            "/bin/bash: line 0: cd: openpose: Transport endpoint is not connected\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "1E8j0bjvIgEw",
        "colab_type": "text"
      },
      "source": [
        "**Turn images into json files with pose data (20 videos per category) (run once)**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "UvG0kiNGWY6x",
        "colab_type": "code",
        "outputId": "46b1f167-1284-4a96-8e1e-22771f81d5ea",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Run OpenPose over every frame directory, writing one keypoint JSON per frame\n",
        "# into <category>/jsondata/video<j>/.\n",
        "import os\n",
        "import cv2\n",
        "import math \n",
        "import glob\n",
        "\n",
        "for i in categories:\n",
        "     \n",
        "  print(\"Entering folder: \" + str(i))\n",
        "  os.chdir('/content/drive/My Drive/Fall Detection')\n",
        "  os.mkdir(os.path.join(str(i),'jsondata'))\n",
        "  # plain-string copies so the shell magic below can interpolate them via $var\n",
        "  category = str(i)\n",
        "  testpath = str(i) + '/testdata/'\n",
        "  list = os.listdir(testpath) \n",
        "  numvideos = len(list)\n",
        "  j = 0\n",
        "  while (j < numvideos):\n",
        "    print(\"Rendering video: \" + str(j))\n",
        "    os.chdir('/content/drive/My Drive/Fall Detection')\n",
        "    os.mkdir(str(i) + '/jsondata' + '/video' + str(j)) # make new directory for each video\n",
        "    video = str(j)\n",
        "    # openpose was built under /content/openpose; --render_pose 0 and --display 0\n",
        "    # skip visualization for speed; -keypoint_scale 3 presumably normalizes the\n",
        "    # output coordinates -- confirm against the OpenPose flags documentation\n",
        "    os.chdir('/content')\n",
        "    !cd openpose && ./build/examples/openpose/openpose.bin -image_dir ../drive/My\\ Drive/Fall\\ Detection/$category/testdata/video$video  --render_pose 0 -keypoint_scale 3 --display 0 -write_json ../drive/My\\ Drive/Fall\\ Detection/$category/jsondata/video$video\n",
        "    j += 1\n"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Entering folder: test2\n",
            "Rendering video: 0\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 22.633570 seconds.\n",
            "Rendering video: 1\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 33.126619 seconds.\n",
            "Rendering video: 2\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 39.459350 seconds.\n",
            "Rendering video: 3\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 26.926901 seconds.\n",
            "Rendering video: 4\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 26.066601 seconds.\n",
            "Rendering video: 5\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 27.563977 seconds.\n",
            "Rendering video: 6\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 16.203610 seconds.\n",
            "Rendering video: 7\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 27.925220 seconds.\n",
            "Rendering video: 8\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 15.415627 seconds.\n",
            "Rendering video: 9\n",
            "Starting OpenPose demo...\n",
            "Configuring OpenPose...\n",
            "Starting thread(s)...\n",
            "Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.\n",
            "OpenPose demo successfully finished. Total time: 19.153592 seconds.\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KAGtDDtFFE3t",
        "colab_type": "text"
      },
      "source": [
        "**Fix json files to be read correctly (only run once)**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "6eTfnY_0_uJZ",
        "colab_type": "code",
        "outputId": "775aa149-b4ed-4570-8e2f-9b419f1b90ca",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 187
        }
      },
      "source": [
        "# Wrap each per-frame OpenPose JSON object in '[' ... ']' so json.loads\n",
        "# yields a list (the loader below indexes data[0]).\n",
        "# WARNING: this is NOT idempotent -- running it twice double-wraps the files,\n",
        "# hence the 'only run once' in the heading.\n",
        "os.chdir('/content/drive/My Drive/Fall Detection')\n",
        "\n",
        "for i in categories:\n",
        "  \n",
        "  testpath = str(i) + '/testdata/'\n",
        "  list = os.listdir(testpath) \n",
        "  numvideos = len(list)\n",
        "  j = 0\n",
        "  while(j < numvideos):\n",
        "    print(\"Rewriting json datafrom folder:\" + str(i) + \"Video: \" + str(j))\n",
        "    # stop after 11 validation videos -- presumably only those were rendered;\n",
        "    # TODO confirm this matches the rendering step's cutoff\n",
        "    if(str(i) == 'validation' and j == 11):\n",
        "      break\n",
        "      \n",
        "    testpath = '/content/drive/My Drive/Fall Detection/' + str(i) + '/jsondata/video' + str(j)\n",
        "    list = os.listdir(testpath) \n",
        "    numframes = len(list)\n",
        "    k = 0 \n",
        "    while(k < numframes):\n",
        "      # append the closing bracket...\n",
        "      with open('/content/drive/My Drive/Fall Detection/' + str(i) + '/jsondata/video' + str(j) + '/frame' + str(k) + '_keypoints.json', \"a\") as f:\n",
        "        f.write(\"\\n]\")\n",
        "      # ...then prepend the opening bracket by rewriting the whole file\n",
        "      with open('/content/drive/My Drive/Fall Detection/' + str(i) + '/jsondata/video' + str(j) + '/frame' + str(k) + '_keypoints.json', \"r+\") as f:\n",
        "        content = f.read()\n",
        "        f.seek(0, 0)\n",
        "        f.write('[\\n' + content)     \n",
        "      k += 1  \n",
        "    j += 1\n",
        "\n",
        "        "
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Rewriting json datafrom folder:test2Video: 0\n",
            "Rewriting json datafrom folder:test2Video: 1\n",
            "Rewriting json datafrom folder:test2Video: 2\n",
            "Rewriting json datafrom folder:test2Video: 3\n",
            "Rewriting json datafrom folder:test2Video: 4\n",
            "Rewriting json datafrom folder:test2Video: 5\n",
            "Rewriting json datafrom folder:test2Video: 6\n",
            "Rewriting json datafrom folder:test2Video: 7\n",
            "Rewriting json datafrom folder:test2Video: 8\n",
            "Rewriting json datafrom folder:test2Video: 9\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "LUlm3i1oQY_R",
        "colab_type": "text"
      },
      "source": [
        "# **Load in Training Data**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "kzwO0vW7T1fb",
        "colab_type": "code",
        "outputId": "1ede41ce-76c3-4900-ef37-2e9ebf4139cc",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Build LSTM-ready arrays from the OpenPose JSON output:\n",
        "# X is (samples, B timesteps, 75 keypoint values), Y is the label\n",
        "# (0 = fall window, 1 = walking window).\n",
        "import matplotlib.pyplot as plt    # for plotting the images\n",
        "%matplotlib inline\n",
        "import numpy as np\n",
        "from PIL import Image\n",
        "from keras.utils import np_utils\n",
        "from skimage.transform import resize\n",
        "import pandas as pd\n",
        "import os\n",
        "import glob\n",
        "import numpy as np\n",
        "from keras.utils import np_utils\n",
        "from keras.utils import to_categorical\n",
        "import json\n",
        "\n",
        "#count successful vs. missing pose detections\n",
        "nullposes = 0\n",
        "successfulposes = 0\n",
        "\n",
        "#total videos\n",
        "TotalTrain = 40\n",
        "TotalTest = 10\n",
        "\n",
        "#number of windows sampled per activity (falling, walking) per video\n",
        "A = 20\n",
        "#of frames per sample (timesteps)\n",
        "B = 30\n",
        "\n",
        "X_train = np.zeros((TotalTrain*A*2,B,75)) #creating an empty array for image json data\n",
        "X_test = np.zeros((TotalTest*A*2,B,75))\n",
        "\n",
        "Y_train = np.zeros((TotalTrain*A*2,1)) #create empty array for y data\n",
        "Y_test = np.zeros((TotalTest*A*2,1))\n",
        "\n",
        "#initialize counters\n",
        "trainframecount = 0\n",
        "testframecount = 0\n",
        "\n",
        "#loop through categories\n",
        "for i in categories:\n",
        "  \n",
        "  #skip the held-out 'test' category here (presumably handled elsewhere)\n",
        "  if(str(i) == 'test'):\n",
        "    break\n",
        "  \n",
        "  #count the number of videos in category\n",
        "  path = '/content/drive/My Drive/Fall Detection/' + str(i) + '/Annotation_files'\n",
        "  list = os.listdir(path)\n",
        "  numvideos = len(list)\n",
        "  \n",
        "  #change into annotations file folder\n",
        "  print(\"Entering folder \" + str(i) + \", which has \" + str(numvideos) + \" videos to process\")\n",
        "  os.chdir(path)\n",
        "  \n",
        "  #validation videos go into the test arrays, everything else into train\n",
        "  if (str(i) == 'validation'):\n",
        "     checker = True\n",
        "  else:\n",
        "     checker = False   \n",
        "    \n",
        "  #loop through each category\n",
        "  j = 0\n",
        "  while (j < 20):\n",
        "    \n",
        "    #stop after the first 10 videos of the validation set\n",
        "    if(j > 9 and checker == True):\n",
        "      break\n",
        "    \n",
        "    #count the number of frames in each video\n",
        "    path = '/content/drive/My Drive/Fall Detection/' + str(i) + '/testdata/video' + str(j)\n",
        "    list = os.listdir(path) \n",
        "    numframes = len(list)\n",
        "    \n",
        "    #read in frames and determine beginning and ending frames of the fall\n",
        "    filename = '/content/drive/My Drive/Fall Detection/' + str(i) + '/Annotation_files/video (' + str(j+1) + ').txt'\n",
        "    dummydata = pd.read_csv(filename, sep=\" \", header=None, nrows=2)\n",
        "    frame1 = dummydata[0][0] \n",
        "    frame2 = dummydata[0][1] \n",
        "    #pad the annotated fall window: start 5 frames early, end 30 frames late\n",
        "    frame1 = int(frame1) - 5\n",
        "    frame2 = int(frame2) + 30\n",
        "    \n",
        "    #change directory to jsondata folder\n",
        "    path = '/content/drive/My Drive/Fall Detection/' + str(i) + \"/jsondata/video\" + str(j)\n",
        "    os.chdir(path)\n",
        "    print(\"Video \" + str(j) + \": Processing \" + str(numframes) + \" frames.\")\n",
        "    \n",
        "    \n",
        "    #add all frames into array\n",
        "    k = 0\n",
        "    vidarray = []\n",
        "    \n",
        "    while (k < numframes):\n",
        "      with open('frame' + str(k) + '_keypoints.json') as f:  \n",
        "        try:\n",
        "          data = json.loads(f.read())\n",
        "          list = data[0]['people'][0]['pose_keypoints_2d']\n",
        "          successfulposes +=1\n",
        "        except:\n",
        "          #frame with no detected person (or unreadable json): substitute 75 zeros\n",
        "          #NOTE(review): the bare except also hides real errors -- consider\n",
        "          #catching (IndexError, KeyError, json.JSONDecodeError) explicitly\n",
        "          list = np.zeros(75)\n",
        "          nullposes +=1\n",
        "      vidarray.append(list)\n",
        "      k += 1\n",
        "      \n",
        "    videoX = np.array(vidarray)\n",
        "    \n",
        "    #split video array into sets for fall, walking, and afterfall\n",
        "    FallSet = videoX[frame1:frame2,:]\n",
        "    WalkSet = videoX[0:frame1-1,:]\n",
        "    AfterFallSet = videoX[frame2+1:len(videoX),:]  \n",
        " \n",
        "    #print(len(FallSet))\n",
        "    #print(len(WalkSet))\n",
        "    #print(len(AfterFallSet))\n",
        "    \n",
        "    #capture falls: A random B-frame windows per video, label 0\n",
        "    for _ in range(A):\n",
        "      idx = np.random.randint(0,len(FallSet)-B)\n",
        "      if (checker == False):\n",
        "        X_train[trainframecount,:,:] = FallSet[idx:idx+B,:]\n",
        "        Y_train[trainframecount] = 0\n",
        "        trainframecount += 1\n",
        "      else:\n",
        "        X_test[testframecount,:,:] = FallSet[idx:idx+B,:]\n",
        "        Y_test[testframecount] = 0\n",
        "        testframecount += 1\n",
        "      \n",
        "    #capture walking: A random B-frame windows per video, label 1\n",
        "    for _ in range(A):\n",
        "      idx = np.random.randint(0,len(WalkSet)-B)\n",
        "      if (checker == False):\n",
        "        X_train[trainframecount,:,:] = WalkSet[idx:idx+B,:]\n",
        "        Y_train[trainframecount] = 1\n",
        "        trainframecount += 1\n",
        "      else:\n",
        "        X_test[testframecount,:,:] = WalkSet[idx:idx+B,:]\n",
        "        Y_test[testframecount] = 1\n",
        "        testframecount += 1 \n",
        "    \n",
        "    j += 1   \n",
        "\n",
        "#reshape Y data to categorical (one-hot, 2 columns)\n",
        "Y_train = to_categorical(Y_train)\n",
        "Y_test = to_categorical(Y_test)\n",
        "\n",
        "#normalize input\n",
        "#NOTE(review): divides by a single global max -- fine if all keypoint values\n",
        "#are non-negative; train and test use different divisors -- TODO confirm intended\n",
        "X_train = X_train/X_train.max()\n",
        "X_test = X_test/X_test.max()\n",
        "\n",
        "print(\"Finished creating input array.\")\n",
        "print(\"Total train inputs: \" + str(trainframecount))\n",
        "print(\"Total test inputs: \" +  str(testframecount))\n",
        "\n",
        "print(\"X train: \" + str(X_train.shape))\n",
        "print(\"Y train: \" + str(Y_train.shape))\n",
        "print(\"X test: \" + str(X_test.shape))\n",
        "print(\"Y test: \" + str(Y_test.shape))\n",
        "print(\"null poses: \" + str(nullposes))\n",
        "print(\"successful poses:\" + str(successfulposes))"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Entering folder Coffee_room_01, which has 20 videos to process\n",
            "Video 0: Processing 157 frames.\n",
            "Video 1: Processing 306 frames.\n",
            "Video 2: Processing 304 frames.\n",
            "Video 3: Processing 207 frames.\n",
            "Video 4: Processing 181 frames.\n",
            "Video 5: Processing 239 frames.\n",
            "Video 6: Processing 174 frames.\n",
            "Video 7: Processing 258 frames.\n",
            "Video 8: Processing 206 frames.\n",
            "Video 9: Processing 362 frames.\n",
            "Video 10: Processing 483 frames.\n",
            "Video 11: Processing 182 frames.\n",
            "Video 12: Processing 244 frames.\n",
            "Video 13: Processing 176 frames.\n",
            "Video 14: Processing 140 frames.\n",
            "Video 15: Processing 177 frames.\n",
            "Video 16: Processing 269 frames.\n",
            "Video 17: Processing 213 frames.\n",
            "Video 18: Processing 444 frames.\n",
            "Video 19: Processing 182 frames.\n",
            "Entering folder Home_01, which has 25 videos to process\n",
            "Video 0: Processing 264 frames.\n",
            "Video 1: Processing 240 frames.\n",
            "Video 2: Processing 240 frames.\n",
            "Video 3: Processing 240 frames.\n",
            "Video 4: Processing 192 frames.\n",
            "Video 5: Processing 192 frames.\n",
            "Video 6: Processing 216 frames.\n",
            "Video 7: Processing 285 frames.\n",
            "Video 8: Processing 336 frames.\n",
            "Video 9: Processing 312 frames.\n",
            "Video 10: Processing 312 frames.\n",
            "Video 11: Processing 216 frames.\n",
            "Video 12: Processing 240 frames.\n",
            "Video 13: Processing 288 frames.\n",
            "Video 14: Processing 238 frames.\n",
            "Video 15: Processing 264 frames.\n",
            "Video 16: Processing 240 frames.\n",
            "Video 17: Processing 216 frames.\n",
            "Video 18: Processing 192 frames.\n",
            "Video 19: Processing 216 frames.\n",
            "Entering folder validation, which has 15 videos to process\n",
            "Video 0: Processing 216 frames.\n",
            "Video 1: Processing 240 frames.\n",
            "Video 2: Processing 216 frames.\n",
            "Video 3: Processing 271 frames.\n",
            "Video 4: Processing 192 frames.\n",
            "Video 5: Processing 382 frames.\n",
            "Video 6: Processing 159 frames.\n",
            "Video 7: Processing 718 frames.\n",
            "Video 8: Processing 133 frames.\n",
            "Video 9: Processing 215 frames.\n",
            "Finished creating input array.\n",
            "Total train inputs: 1600\n",
            "Total test inputs: 400\n",
            "X train: (1600, 30, 75)\n",
            "Y train: (1600, 2)\n",
            "X test: (400, 30, 75)\n",
            "Y test: (400, 2)\n",
            "null poses: 1011\n",
            "successful poses:11574\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "EIyluW1fQks_",
        "colab_type": "text"
      },
      "source": [
        "# Train Model1, LSTM Network "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "I-VEvtKwLzBP",
        "colab_type": "code",
        "outputId": "e4d55a68-3768-4711-d690-b2b79afedd75",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "from keras.models import Sequential\n",
        "from keras.applications.vgg16 import VGG16\n",
        "from keras.layers import Dense, InputLayer, Dropout, Flatten, Activation\n",
        "from keras.layers.recurrent import LSTM\n",
        "from keras.layers import TimeDistributed\n",
        "from keras.layers.convolutional import Conv1D, Conv2D\n",
        "from keras.layers.convolutional import MaxPooling1D, MaxPooling2D\n",
        "from tensorflow import keras\n",
        "from keras.utils import to_categorical\n",
        "\n",
        "# Model 1: single LSTM layer over (B timesteps x 75 keypoint values) windows,\n",
        "# then a dense head with dropout and a 2-way softmax (fall vs. walking).\n",
        "model1 = Sequential()\n",
        "model1.add(LSTM(100, input_shape=(B,75)))\n",
        "model1.add(Dense(100, activation='relu'))\n",
        "model1.add(Dropout(0.5))\n",
        "model1.add(Dense(2, activation='softmax'))\n",
        "print(model1.summary())\n",
        "model1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
        "\n",
        "history = model1.fit(X_train, Y_train, epochs=100, validation_data=(X_test, Y_test))\n",
        "\n",
        "# summarize history for accuracy\n",
        "# NOTE(review): 'acc'/'val_acc' are the legacy keras history keys; newer\n",
        "# versions use 'accuracy'/'val_accuracy' -- confirm against the installed keras\n",
        "plt.plot(history.history['acc'])\n",
        "plt.plot(history.history['val_acc'])\n",
        "plt.title('model accuracy')\n",
        "plt.ylabel('accuracy')\n",
        "plt.xlabel('epoch')\n",
        "plt.legend(['train', 'test'], loc='upper left')\n",
        "plt.show()\n",
        "# summarize history for loss\n",
        "plt.plot(history.history['loss'])\n",
        "plt.plot(history.history['val_loss'])\n",
        "plt.title('model loss')\n",
        "plt.ylabel('loss')\n",
        "plt.xlabel('epoch')\n",
        "plt.legend(['train', 'test'], loc='upper left')\n",
        "plt.show()\n"
      ],
      "execution_count": 8,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "E0711 08:09:28.051169 140174518364032 ultratb.py:152] Internal Python error in the inspect module.\n",
            "Below is the traceback from this internal error.\n",
            "\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "_________________________________________________________________\n",
            "Layer (type)                 Output Shape              Param #   \n",
            "=================================================================\n",
            "lstm_4 (LSTM)                (None, 100)               70400     \n",
            "_________________________________________________________________\n",
            "dense_6 (Dense)              (None, 100)               10100     \n",
            "_________________________________________________________________\n",
            "dropout_5 (Dropout)          (None, 100)               0         \n",
            "_________________________________________________________________\n",
            "dense_7 (Dense)              (None, 2)                 202       \n",
            "=================================================================\n",
            "Total params: 80,702\n",
            "Trainable params: 80,702\n",
            "Non-trainable params: 0\n",
            "_________________________________________________________________\n",
            "None\n",
            "Traceback (most recent call last):\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py\", line 2882, in run_code\n",
            "    exec(code_obj, self.user_global_ns, self.user_ns)\n",
            "  File \"<ipython-input-8-de2408e3074b>\", line 19, in <module>\n",
            "    history = model1.fit(X_train, Y_train, epochs=100, validation_data=(X_test, Y_test))\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/keras/engine/training.py\", line 952, in fit\n",
            "    batch_size=batch_size)\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/keras/engine/training.py\", line 751, in _standardize_user_data\n",
            "    exception_prefix='input')\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py\", line 128, in standardize_input_data\n",
            "    'with shape ' + str(data_shape))\n",
            "ValueError: Error when checking input: expected lstm_4_input to have 3 dimensions, but got array with shape (1600, 5, 6, 75)\n",
            "\n",
            "During handling of the above exception, another exception occurred:\n",
            "\n",
            "Traceback (most recent call last):\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py\", line 1823, in showtraceback\n",
            "    stb = value._render_traceback_()\n",
            "AttributeError: 'ValueError' object has no attribute '_render_traceback_'\n",
            "\n",
            "During handling of the above exception, another exception occurred:\n",
            "\n",
            "Traceback (most recent call last):\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/ultratb.py\", line 1132, in get_records\n",
            "    return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset)\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/ultratb.py\", line 313, in wrapped\n",
            "    return f(*args, **kwargs)\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/ultratb.py\", line 358, in _fixed_getinnerframes\n",
            "    records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 1490, in getinnerframes\n",
            "    frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 1448, in getframeinfo\n",
            "    filename = getsourcefile(frame) or getfile(frame)\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 696, in getsourcefile\n",
            "    if getattr(getmodule(object, filename), '__loader__', None) is not None:\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 725, in getmodule\n",
            "    file = getabsfile(object, _filename)\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 709, in getabsfile\n",
            "    return os.path.normcase(os.path.abspath(_filename))\n",
            "  File \"/usr/lib/python3.6/posixpath.py\", line 383, in abspath\n",
            "    cwd = os.getcwd()\n",
            "OSError: [Errno 107] Transport endpoint is not connected\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "error",
          "ename": "ValueError",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "YrMT6LjWaIe3",
        "colab_type": "text"
      },
      "source": [
        "#Train Model2, Conv1D + LSTM Network"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "wl75WbRWl5ep",
        "colab_type": "code",
        "outputId": "b33ac5e8-d9e6-4403-e969-c5f0057112a2",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "from keras.models import Sequential\n",
        "from keras.applications.vgg16 import VGG16\n",
        "from keras.layers import Dense, InputLayer, Dropout, Flatten, Activation\n",
        "from keras.layers.recurrent import LSTM\n",
        "from keras.layers import TimeDistributed\n",
        "from keras.layers.convolutional import Conv1D\n",
        "from keras.layers.convolutional import MaxPooling1D, MaxPooling2D\n",
        "from tensorflow import keras\n",
        "from keras.utils import to_categorical\n",
        "\n",
        "# Reshape each (30, 75) pose sequence into 5 sub-sequences of 6 frames so the\n",
        "# TimeDistributed Conv1D can scan within every sub-sequence before the LSTM.\n",
        "X_train = X_train.reshape(TotalTrain*A*2,5,6,75)\n",
        "X_test = X_test.reshape(TotalTest*A*2,5,6,75)\n",
        "\n",
        "model2 = Sequential()\n",
        "\n",
        "model2.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu'), input_shape=(5,6,75)))\n",
        "model2.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu')))\n",
        "model2.add(TimeDistributed(Dropout(0.5)))\n",
        "model2.add(TimeDistributed(MaxPooling1D(pool_size=1)))\n",
        "model2.add(TimeDistributed(Flatten()))\n",
        "\n",
        "model2.add(LSTM(100))\n",
        "model2.add(Dropout(0.5))\n",
        "model2.add(Dense(100, activation='relu'))\n",
        "model2.add(Dropout(0.5))\n",
        "model2.add(Dense(100, activation='relu'))\n",
        "model2.add(Dense(2, activation='softmax'))\n",
        "print(model2.summary())\n",
        "# BUG FIX: this compile call was commented out, so model2.fit raised\n",
        "# 'RuntimeError: You must compile a model before training/testing.'\n",
        "model2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
        "\n",
        "history = model2.fit(X_train, Y_train, epochs=100, validation_data=(X_test, Y_test))\n",
        "\n",
        "# summarize history for accuracy\n",
        "plt.plot(history.history['acc'])\n",
        "plt.plot(history.history['val_acc'])\n",
        "plt.title('model accuracy')\n",
        "plt.ylabel('accuracy')\n",
        "plt.xlabel('epoch')\n",
        "plt.legend(['train', 'test'], loc='upper left')\n",
        "plt.show()\n",
        "# summarize history for loss\n",
        "plt.plot(history.history['loss'])\n",
        "plt.plot(history.history['val_loss'])\n",
        "plt.title('model loss')\n",
        "plt.ylabel('loss')\n",
        "plt.xlabel('epoch')\n",
        "plt.legend(['train', 'test'], loc='upper left')\n",
        "plt.show()"
      ],
      "execution_count": 11,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "E0711 08:10:57.493880 140174518364032 ultratb.py:152] Internal Python error in the inspect module.\n",
            "Below is the traceback from this internal error.\n",
            "\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "_________________________________________________________________\n",
            "Layer (type)                 Output Shape              Param #   \n",
            "=================================================================\n",
            "time_distributed_16 (TimeDis (None, 5, 4, 64)          14464     \n",
            "_________________________________________________________________\n",
            "time_distributed_17 (TimeDis (None, 5, 2, 64)          12352     \n",
            "_________________________________________________________________\n",
            "time_distributed_18 (TimeDis (None, 5, 2, 64)          0         \n",
            "_________________________________________________________________\n",
            "time_distributed_19 (TimeDis (None, 5, 2, 64)          0         \n",
            "_________________________________________________________________\n",
            "time_distributed_20 (TimeDis (None, 5, 128)            0         \n",
            "_________________________________________________________________\n",
            "lstm_7 (LSTM)                (None, 100)               91600     \n",
            "_________________________________________________________________\n",
            "dropout_13 (Dropout)         (None, 100)               0         \n",
            "_________________________________________________________________\n",
            "dense_14 (Dense)             (None, 100)               10100     \n",
            "_________________________________________________________________\n",
            "dropout_14 (Dropout)         (None, 100)               0         \n",
            "_________________________________________________________________\n",
            "dense_15 (Dense)             (None, 100)               10100     \n",
            "_________________________________________________________________\n",
            "dense_16 (Dense)             (None, 2)                 202       \n",
            "=================================================================\n",
            "Total params: 138,818\n",
            "Trainable params: 138,818\n",
            "Non-trainable params: 0\n",
            "_________________________________________________________________\n",
            "None\n",
            "Traceback (most recent call last):\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py\", line 2882, in run_code\n",
            "    exec(code_obj, self.user_global_ns, self.user_ns)\n",
            "  File \"<ipython-input-11-7a7f3de35795>\", line 31, in <module>\n",
            "    history = model2.fit(X_train, Y_train, epochs=100, validation_data=(X_test, Y_test))\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/keras/engine/training.py\", line 952, in fit\n",
            "    batch_size=batch_size)\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/keras/engine/training.py\", line 681, in _standardize_user_data\n",
            "    raise RuntimeError('You must compile a model before '\n",
            "RuntimeError: You must compile a model before training/testing. Use `model.compile(optimizer, loss)`.\n",
            "\n",
            "During handling of the above exception, another exception occurred:\n",
            "\n",
            "Traceback (most recent call last):\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py\", line 1823, in showtraceback\n",
            "    stb = value._render_traceback_()\n",
            "AttributeError: 'RuntimeError' object has no attribute '_render_traceback_'\n",
            "\n",
            "During handling of the above exception, another exception occurred:\n",
            "\n",
            "Traceback (most recent call last):\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/ultratb.py\", line 1132, in get_records\n",
            "    return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset)\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/ultratb.py\", line 313, in wrapped\n",
            "    return f(*args, **kwargs)\n",
            "  File \"/usr/local/lib/python3.6/dist-packages/IPython/core/ultratb.py\", line 358, in _fixed_getinnerframes\n",
            "    records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 1490, in getinnerframes\n",
            "    frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 1448, in getframeinfo\n",
            "    filename = getsourcefile(frame) or getfile(frame)\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 696, in getsourcefile\n",
            "    if getattr(getmodule(object, filename), '__loader__', None) is not None:\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 725, in getmodule\n",
            "    file = getabsfile(object, _filename)\n",
            "  File \"/usr/lib/python3.6/inspect.py\", line 709, in getabsfile\n",
            "    return os.path.normcase(os.path.abspath(_filename))\n",
            "  File \"/usr/lib/python3.6/posixpath.py\", line 383, in abspath\n",
            "    cwd = os.getcwd()\n",
            "OSError: [Errno 107] Transport endpoint is not connected\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "error",
          "ename": "RuntimeError",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wzCXBf5O6sPm",
        "colab_type": "text"
      },
      "source": [
        "# **Test Models**\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sQqpXksEPu-X",
        "colab_type": "text"
      },
      "source": [
        "**Load in Test Data and test accuracy of both models**\n",
        "\n",
        "\n",
        "\n",
        "---\n",
        "\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XRrKbSjYK10T",
        "colab_type": "code",
        "outputId": "f8b514fb-0924-4ca9-99f8-f24ddf647626",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 572
        }
      },
      "source": [
        "def loaddata(testnumber):\n",
        "  \"\"\"Load pose-keypoint evaluation samples for one test split.\n",
        "\n",
        "  Args:\n",
        "    testnumber: name of the split folder under 'Fall Detection' (e.g. 'test').\n",
        "\n",
        "  Returns:\n",
        "    X: array of shape (Videos*A*2, B, 75), scaled by its max value.\n",
        "    Y: one-hot labels of shape (Videos*A*2, 2); class 0 = fall, 1 = walking.\n",
        "  \"\"\"\n",
        "\n",
        "  os.chdir('/content/drive/My Drive/Fall Detection')\n",
        "\n",
        "  #count successful poses\n",
        "  nullposes = 0\n",
        "  successfulposes = 0\n",
        "\n",
        "  #total videos\n",
        "  Videos = 10\n",
        "\n",
        "  #number of samples per activity (walking, falling, laying down)\n",
        "  A = 20\n",
        "  #of frames per sample (timesteps)\n",
        "  B = 30\n",
        "\n",
        "  X = np.zeros((Videos*A*2,B,75)) #creating an empty array for image json data\n",
        "  Y = np.zeros((Videos*A*2,1)) #create empty array for y data\n",
        "\n",
        "  #initialize counter\n",
        "  testframecount = 0\n",
        "\n",
        "  i = str(testnumber)\n",
        "\n",
        "  #count the number of videos in this split\n",
        "  #BUG FIX: this path was hard-coded to 'test', so loaddata('test2') counted\n",
        "  #the wrong folder; use the requested split instead\n",
        "  path = '/content/drive/My Drive/Fall Detection/' + i + '/Annotation_files'\n",
        "  numvideos = len(os.listdir(path))  #avoid shadowing the builtin 'list'\n",
        "\n",
        "  #change into annotations file folder\n",
        "  os.chdir(path)\n",
        "  \n",
        "  #loop through each video in the split\n",
        "  j = 0\n",
        "  while (j < numvideos):\n",
        "    \n",
        "    #count the number of frames in each video\n",
        "    path = '/content/drive/My Drive/Fall Detection/' + i + '/testdata/video' + str(j)\n",
        "    numframes = len(os.listdir(path))\n",
        "    \n",
        "    #read in frames and determine beginning and ending frames of the fall;\n",
        "    #pad the annotated window: start 5 frames early, end 30 frames late\n",
        "    filename = '/content/drive/My Drive/Fall Detection/' + i + '/Annotation_files/video (' + str(j+1) + ').txt'\n",
        "    dummydata = pd.read_csv(filename, sep=\" \", header=None, nrows=2)\n",
        "    frame1 = int(dummydata[0][0]) - 5\n",
        "    frame2 = int(dummydata[0][1]) + 30\n",
        "    \n",
        "    #change directory to jsondata folder\n",
        "    path = '/content/drive/My Drive/Fall Detection/' + i + \"/jsondata/video\" + str(j)\n",
        "    os.chdir(path)\n",
        "    \n",
        "    #add all frames into array\n",
        "    vidarray = []\n",
        "    for k in range(numframes):\n",
        "      with open('frame' + str(k) + '_keypoints.json') as f:  \n",
        "        try:\n",
        "          data = json.loads(f.read())\n",
        "          keypoints = data[0]['people'][0]['pose_keypoints_2d']\n",
        "          successfulposes += 1\n",
        "        except (ValueError, KeyError, IndexError):\n",
        "          #no person detected (or malformed json): substitute an all-zero pose\n",
        "          keypoints = np.zeros(75)\n",
        "          nullposes += 1\n",
        "      vidarray.append(keypoints)\n",
        "      \n",
        "    videoX = np.array(vidarray)\n",
        "    \n",
        "    #split video array into sets for fall, walking, and afterfall\n",
        "    FallSet = videoX[frame1:frame2,:]\n",
        "    WalkSet = videoX[0:frame1-1,:]\n",
        "    AfterFallSet = videoX[frame2+1:len(videoX),:]  #NOTE: currently unused\n",
        " \n",
        "    #capture falls (label 0): random B-frame windows inside the fall span\n",
        "    for _ in range(A):\n",
        "      idx = np.random.randint(0,len(FallSet)-B)\n",
        "      X[testframecount,:,:] = FallSet[idx:idx+B,:]\n",
        "      Y[testframecount,:] = 0\n",
        "      testframecount += 1\n",
        "      \n",
        "    #capture walking (label 1)\n",
        "    for _ in range(A):\n",
        "      idx = np.random.randint(0,len(WalkSet)-B)\n",
        "      X[testframecount,:,:] = WalkSet[idx:idx+B,:]\n",
        "      Y[testframecount,:] = 1\n",
        "      testframecount += 1 \n",
        "    \n",
        "    j += 1   \n",
        "  \n",
        "  #reshape Y data to categorical\n",
        "  Y = to_categorical(Y)\n",
        "\n",
        "  #normalize input\n",
        "  X = X / X.max()\n",
        "  \n",
        "  return X, Y\n",
        "\n",
        "from numpy import mean\n",
        "from numpy import std\n",
        "\n",
        "# repeat experiment: 20 resampled evaluations per model per split\n",
        "scores1 = []\n",
        "scores2 = []\n",
        "scores3 = []\n",
        "scores4 = []\n",
        "\n",
        "for r in range(20):\n",
        "  X,Y = loaddata('test')\n",
        "  accuracy1 = model1.evaluate(X, Y, verbose = 0)\n",
        "  accuracy1 = accuracy1[1] * 100.0000\n",
        "  print(accuracy1)\n",
        "  scores1.append(accuracy1)\n",
        "\n",
        "for r in range(20):\n",
        "  X,Y = loaddata('test')\n",
        "  #BUG FIX: 'Videos' and 'A' are locals of loaddata, so 'Videos*A*2' raised a\n",
        "  #NameError here (see recorded traceback); let numpy infer the sample count\n",
        "  X = X.reshape(-1,5,6,75)\n",
        "  accuracy2 = model2.evaluate(X, Y, verbose = 0)\n",
        "  accuracy2 = accuracy2[1] * 100.0000\n",
        "  print(accuracy2)\n",
        "  scores2.append(accuracy2)\n",
        "  \n",
        "for r in range(20):\n",
        "  X,Y = loaddata('test2')\n",
        "  accuracy3 = model1.evaluate(X, Y, verbose = 0)\n",
        "  accuracy3 = accuracy3[1] * 100.0000\n",
        "  print(accuracy3)\n",
        "  scores3.append(accuracy3)\n",
        "\n",
        "for r in range(20):\n",
        "  X,Y = loaddata('test2')\n",
        "  X = X.reshape(-1,5,6,75)\n",
        "  accuracy4 = model2.evaluate(X, Y, verbose = 0)\n",
        "  accuracy4 = accuracy4[1] * 100.0000\n",
        "  print(accuracy4)\n",
        "  scores4.append(accuracy4)\n",
        "\n",
        "m1, s1 = mean(scores1), std(scores1)\n",
        "m2, s2 = mean(scores2), std(scores2)\n",
        "m3, s3 = mean(scores3), std(scores3)\n",
        "m4, s4 = mean(scores4), std(scores4)\n",
        "print('Accuracy 1, familiar data: %.3f%% (+/-%.3f)' % (m1, s1))\n",
        "print('Accuracy 2, familiar data: %.3f%% (+/-%.3f)' % (m2, s2))\n",
        "print('Accuracy 1, nonfamiliar data: %.3f%% (+/-%.3f)' % (m3, s3))\n",
        "print('Accuracy 2, nonfamiliar data: %.3f%% (+/-%.3f)' % (m4, s4))"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "97.25\n",
            "95.5\n",
            "95.25\n",
            "96.5\n",
            "96.25\n",
            "95.75\n",
            "95.5\n",
            "96.0\n",
            "96.75\n",
            "96.5\n",
            "96.25\n",
            "96.75\n",
            "95.5\n",
            "95.75\n",
            "96.5\n",
            "95.5\n",
            "97.0\n",
            "97.25\n",
            "96.5\n",
            "95.25\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "error",
          "ename": "NameError",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-7-257c34df47bb>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m    134\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mr\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m20\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    135\u001b[0m   \u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mY\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaddata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'test'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 136\u001b[0;31m   \u001b[0mX\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mTest\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mA\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m6\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m75\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    137\u001b[0m   \u001b[0maccuracy2\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mverbose\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    138\u001b[0m   \u001b[0maccuracy2\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maccuracy2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0;36m100.0000\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;31mNameError\u001b[0m: name 'Test' is not defined"
          ]
        }
      ]
    }
  ]
}