{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import os\n",
    "import random\n",
    "import PIL.Image as Image\n",
    "import cv2\n",
    "import copy\n",
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Global hyper-parameters / configuration constants.\n",
     "NUM_CLASS=3  # number of output classes\n",
     "CROP_SIZE=160  # spatial size (height=width) of each cropped frame\n",
     "NUM_FRAMES_PER_CLIP=16  # temporal length of one input clip\n",
     "BATCH_SIZE=12  # clips per batch\n",
     "RGB_CHANNEL=3  # input channels per frame\n",
     "IS_TRAIN=True  # toggles batch-norm/dropout behaviour; set False for testing\n",
     "BLOCK_EXPANSION=4  # channel expansion factor of each bottleneck block\n",
     "IS_DA=True  # True if you are using data augmentation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "def get_conv_weight(name,kshape,wd=0.0005):\n",
     "    \"\"\"Create a conv kernel variable on the CPU, with optional L2 decay.\n",
     "\n",
     "    Args:\n",
     "        name: variable name.\n",
     "        kshape: kernel shape list passed to tf.get_variable.\n",
     "        wd: L2 weight-decay coefficient; 0 disables the decay term.\n",
     "    Returns:\n",
     "        The created tf.Variable.\n",
     "    \"\"\"\n",
     "    # Variables are pinned to the CPU so they can be shared across devices.\n",
     "    with tf.device('/cpu:0'):\n",
     "        var=tf.get_variable(name,shape=kshape,initializer=tf.contrib.layers.xavier_initializer())\n",
     "    if wd!=0:\n",
     "        # Collect the decay terms; compute_loss later sums this collection.\n",
     "        weight_decay = tf.nn.l2_loss(var)*wd\n",
     "        tf.add_to_collection('weightdecay_losses', weight_decay)\n",
     "    return var"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def convS(name,l_input,in_channels,out_channels):\n",
    "    return tf.nn.bias_add(tf.nn.conv3d(l_input,get_conv_weight(name=name,\n",
    "                                                               kshape=[1,3,3,in_channels,out_channels]),\n",
    "                                                               strides=[1,1,1,1,1],padding='SAME'),\n",
    "                                              get_conv_weight(name+'_bias',[out_channels],0))\n",
    "def convT(name,l_input,in_channels,out_channels):\n",
    "    return tf.nn.bias_add(tf.nn.conv3d(l_input,get_conv_weight(name=name,\n",
    "                                                               kshape=[3,1,1,in_channels,out_channels]),\n",
    "                                                               strides=[1,1,1,1,1],padding='SAME'),\n",
    "                                              get_conv_weight(name+'_bias',[out_channels],0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "class Bottleneck():\n",
     "    \"\"\"One P3D bottleneck block.\n",
     "\n",
     "    The sub-structure type cycles A -> B -> C with the block index n_s;\n",
     "    blocks with n_s >= depth_3d are built as plain 2D bottlenecks (by\n",
     "    then the temporal axis has been folded into the batch dimension).\n",
     "    \"\"\"\n",
     "    def __init__(self,l_input,inplanes,planes,stride=1,downsample='',n_s=0,depth_3d=47):\n",
     "        \n",
     "        self.X_input=l_input\n",
     "        self.downsample=downsample\n",
     "        self.planes=planes\n",
     "        self.inplanes=inplanes\n",
     "        self.depth_3d=depth_3d\n",
     "        self.ST_struc=('A','B','C')\n",
     "        self.len_ST=len(self.ST_struc)\n",
     "        self.id=n_s\n",
     "        self.n_s=n_s\n",
     "        # Cycle through the three sub-structure types by block index.\n",
     "        self.ST=list(self.ST_struc)[self.id % self.len_ST]\n",
     "        self.stride_p=[1,1,1,1,1]\n",
     "       \n",
     "        # Stride for the first 1x1 conv: downsample spatially when this\n",
     "        # block has a projection shortcut; the first 2D block\n",
     "        # (n_s == depth_3d) downsamples every remaining dimension.\n",
     "        if self.downsample!='':\n",
     "            self.stride_p=[1,1,2,2,1]\n",
     "        if n_s<self.depth_3d:\n",
     "            if n_s==0:\n",
     "                self.stride_p=[1,1,1,1,1]\n",
     "        else:\n",
     "            if n_s==self.depth_3d:\n",
     "                self.stride_p=[1,2,2,2,1]\n",
     "            else:\n",
     "                self.stride_p=[1,1,1,1,1]\n",
     "    #P3D has three types of bottleneck sub-structures.\n",
     "    def ST_A(self,name,x):\n",
     "        # P3D-A: spatial conv followed in series by temporal conv.\n",
     "        x=convS(name+'_S',x,self.planes,self.planes)\n",
     "        x=tf.layers.batch_normalization(x,training=IS_TRAIN)\n",
     "        x=tf.nn.relu(x)\n",
     "        x=convT(name+'_T',x,self.planes,self.planes)\n",
     "        x=tf.layers.batch_normalization(x,training=IS_TRAIN)\n",
     "        x=tf.nn.relu(x)\n",
     "        return x\n",
     "    \n",
     "    def ST_B(self,name,x):\n",
     "        # P3D-B: spatial and temporal convs on parallel paths, summed.\n",
     "        tmp_x=convS(name+'_S',x,self.planes,self.planes)\n",
     "        tmp_x=tf.layers.batch_normalization(tmp_x,training=IS_TRAIN)\n",
     "        tmp_x=tf.nn.relu(tmp_x)\n",
     "        x=convT(name+'_T',x,self.planes,self.planes)\n",
     "        x=tf.layers.batch_normalization(x,training=IS_TRAIN)\n",
     "        x=tf.nn.relu(x)\n",
     "        return x+tmp_x\n",
     "    \n",
     "    def ST_C(self,name,x):\n",
     "        # P3D-C: spatial conv, plus a temporal residual branch on top.\n",
     "        x=convS(name+'_S',x,self.planes,self.planes)\n",
     "        x=tf.layers.batch_normalization(x,training=IS_TRAIN)\n",
     "        x=tf.nn.relu(x)\n",
     "        tmp_x=convT(name+'_T',x,self.planes,self.planes)\n",
     "        tmp_x=tf.layers.batch_normalization(tmp_x,training=IS_TRAIN)\n",
     "        tmp_x=tf.nn.relu(tmp_x)\n",
     "        return x+tmp_x\n",
     "    \n",
     "    def infer(self):\n",
     "        \"\"\"Builds the block graph and returns its output tensor.\"\"\"\n",
     "        residual=self.X_input\n",
     "        # 1x1 reduce conv: 3D while n_s < depth_3d, 2D afterwards.\n",
     "        if self.n_s<self.depth_3d:\n",
     "            out=tf.nn.conv3d(self.X_input,get_conv_weight('conv3_{}_1'.format(self.id),[1,1,1,self.inplanes,self.planes]),\n",
     "                             strides=self.stride_p,padding='SAME')\n",
     "            out=tf.layers.batch_normalization(out,training=IS_TRAIN)\n",
     "            \n",
     "        else:\n",
     "            param=self.stride_p\n",
     "            # NOTE(review): pop(1) mutates self.stride_p in place (param is\n",
     "            # an alias); harmless only because infer() is called once per\n",
     "            # instance.\n",
     "            param.pop(1)\n",
     "            out=tf.nn.conv2d(self.X_input,get_conv_weight('conv2_{}_1'.format(self.id),[1,1,self.inplanes,self.planes]),\n",
     "                             strides=param,padding='SAME')\n",
     "            out=tf.layers.batch_normalization(out,training=IS_TRAIN)\n",
     "    \n",
     "        out=tf.nn.relu(out)    \n",
     "        # Middle conv: one of the P3D variants in the 3D regime, a plain\n",
     "        # 3x3 conv in the 2D regime.\n",
     "        if self.id<self.depth_3d:\n",
     "            if self.ST=='A':\n",
     "                out=self.ST_A('STA_{}_2'.format(self.id),out)\n",
     "            elif self.ST=='B':\n",
     "                out=self.ST_B('STB_{}_2'.format(self.id),out)\n",
     "            elif self.ST=='C':\n",
     "                out=self.ST_C('STC_{}_2'.format(self.id),out)\n",
     "        else:\n",
     "            out=tf.nn.conv2d(out,get_conv_weight('conv2_{}_2'.format(self.id),[3,3,self.planes,self.planes]),\n",
     "                                  strides=[1,1,1,1],padding='SAME')\n",
     "            out=tf.layers.batch_normalization(out,training=IS_TRAIN)\n",
     "            out=tf.nn.relu(out)\n",
     "\n",
     "        # 1x1 expand conv back to planes*BLOCK_EXPANSION channels.\n",
     "        if self.n_s<self.depth_3d:\n",
     "            out=tf.nn.conv3d(out,get_conv_weight('conv3_{}_3'.format(self.id),[1,1,1,self.planes,self.planes*BLOCK_EXPANSION]),\n",
     "                             strides=[1,1,1,1,1],padding='SAME')\n",
     "            out=tf.layers.batch_normalization(out,training=IS_TRAIN)\n",
     "        else:\n",
     "            out=tf.nn.conv2d(out,get_conv_weight('conv2_{}_3'.format(self.id),[1,1,self.planes,self.planes*BLOCK_EXPANSION]),\n",
     "                             strides=[1,1,1,1],padding='SAME')\n",
     "            out=tf.layers.batch_normalization(out,training=IS_TRAIN)\n",
     "           \n",
     "        # Projection shortcut when the shape changes: a downsample list of\n",
     "        # length 1 means 2D regime, length 2 means 3D (downsample[1] holds\n",
     "        # the 3D strides).\n",
     "        if len(self.downsample)==1:\n",
     "            residual=tf.nn.conv2d(residual,get_conv_weight('dw2d_{}'.format(self.id),[1,1,self.inplanes,self.planes*BLOCK_EXPANSION]),\n",
     "                                  strides=[1,2,2,1],padding='SAME')\n",
     "            residual=tf.layers.batch_normalization(residual,training=IS_TRAIN)\n",
     "        elif len(self.downsample)==2:\n",
     "            residual=tf.nn.conv3d(residual,get_conv_weight('dw3d_{}'.format(self.id),[1,1,1,self.inplanes,self.planes*BLOCK_EXPANSION]),\n",
     "                                  strides=self.downsample[1],padding='SAME')\n",
     "            residual=tf.layers.batch_normalization(residual,training=IS_TRAIN)\n",
     "        out+=residual\n",
     "        out=tf.nn.relu(out)\n",
     "        \n",
     "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class make_block():\n",
    "    def __init__(self,_X,planes,num,inplanes,cnt,depth_3d=47,stride=1):\n",
    "        self.input=_X\n",
    "        self.planes=planes\n",
    "        self.inplanes=inplanes\n",
    "        self.num=num\n",
    "        self.cnt=cnt\n",
    "        self.depth_3d=depth_3d\n",
    "        self.stride=stride\n",
    "        if self.cnt<depth_3d:\n",
    "            if self.cnt==0:\n",
    "                stride_p=[1,1,1,1,1]\n",
    "            else:\n",
    "                stride_p=[1,1,2,2,1]\n",
    "            if stride!=1 or inplanes!=planes*BLOCK_EXPANSION:\n",
    "                self.downsample=['3d',stride_p]\n",
    "        else:\n",
    "            if stride!=1 or inplanes!=planes*BLOCK_EXPANSION:\n",
    "                self.downsample=['2d']\n",
    "    def infer(self):\n",
    "        x=Bottleneck(self.input,self.inplanes,self.planes,self.stride,self.downsample,n_s=self.cnt,depth_3d=self.depth_3d).infer()\n",
    "        self.cnt+=1\n",
    "        self.inplanes=BLOCK_EXPANSION*self.planes\n",
    "        for i in range(1,self.num):\n",
    "            x=Bottleneck(x,self.inplanes,self.planes,n_s=self.cnt,depth_3d=self.depth_3d).infer()\n",
    "            self.cnt+=1\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "def inference_p3d(_X,_dropout,BATCH_SIZE):\n",
     "    \"\"\"Builds the P3D network; returns logits of shape [batch, NUM_CLASS].\n",
     "\n",
     "    NOTE(review): _dropout and BATCH_SIZE are unused -- the dropout\n",
     "    keep_prob below is hard-coded from the global IS_TRAIN flag.\n",
     "    \"\"\"\n",
     "    cnt=0\n",
     "    # Stem: 1x7x7 conv over each frame, stride 2 spatially.\n",
     "    conv1_custom=tf.nn.conv3d(_X,get_conv_weight('firstconv1',[1,7,7,RGB_CHANNEL,64]),strides=[1,1,2,2,1],padding='SAME')\n",
     "    conv1_custom_bn=tf.layers.batch_normalization(conv1_custom,training=IS_TRAIN)\n",
     "    conv1_custom_bn_relu=tf.nn.relu(conv1_custom_bn)\n",
     "    x=tf.nn.max_pool3d(conv1_custom_bn_relu,[1,2,3,3,1],strides=[1,2,2,2,1],padding='SAME')\n",
     "    # Stage 1: 3 bottlenecks at 64 planes.\n",
     "    b1=make_block(x,64,3,64,cnt)\n",
     "    x=b1.infer()\n",
     "    cnt=b1.cnt\n",
     "   \n",
     "    x=tf.nn.max_pool3d(x,[1,2,1,1,1],strides=[1,2,1,1,1],padding='SAME')\n",
     "    \n",
     "    # Stage 2: 8 bottlenecks at 128 planes.\n",
     "    b2=make_block(x,128,8,256,cnt,stride=2)\n",
     "    x=b2.infer()\n",
     "    cnt=b2.cnt\n",
     "    x=tf.nn.max_pool3d(x,[1,2,1,1,1],strides=[1,2,1,1,1],padding='SAME')\n",
     "    \n",
     "    # Stage 3: 36 bottlenecks at 256 planes.\n",
     "    b3=make_block(x,256,36,512,cnt,stride=2)\n",
     "    x=b3.infer()\n",
     "    cnt=b3.cnt\n",
     "    x=tf.nn.max_pool3d(x,[1,2,1,1,1],strides=[1,2,1,1,1],padding='SAME')\n",
     "    \n",
     "    # Fold the temporal axis into the batch dimension so stage 4 can run\n",
     "    # as 2D convolutions.\n",
     "    shape=x.shape.as_list()\n",
     "    x=tf.reshape(x,shape=[-1,shape[2],shape[3],shape[4]])\n",
     "    \n",
     "    x=make_block(x,512,3,1024,cnt,stride=2).infer()\n",
     "    \n",
     "    #Caution:make sure avgpool on the input which has the same shape as kernelsize has been setted padding='VALID'\n",
     "    x=tf.nn.avg_pool(x,[1,5,5,1],strides=[1,1,1,1],padding='VALID')\n",
     "    \n",
     "    x=tf.reshape(x,shape=[-1,2048])\n",
     "    if(IS_TRAIN):\n",
     "        x=tf.nn.dropout(x,keep_prob=0.5)\n",
     "    else:\n",
     "        x=tf.nn.dropout(x,keep_prob=1)\n",
     "    \n",
     "    x=tf.layers.dense(x,NUM_CLASS)\n",
     "    \n",
     "    return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def compute_loss(name_scope,logit,labels):\n",
    "    cross_entropy_mean=tf.reduce_mean(\n",
    "                    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,logits=logit))\n",
    "    tf.summary.scalar(name_scope+'_cross_entropy',\n",
    "                     cross_entropy_mean\n",
    "                     )\n",
    "    weight_decay_loss=tf.get_collection('weightdecay_losses')\n",
    "    tf.summary.scalar(name_scope+'_weight_decay_loss',tf.reduce_mean(weight_decay_loss))\n",
    "    total_loss=cross_entropy_mean+weight_decay_loss\n",
    "    tf.summary.scalar(name_scope+'_total_loss',tf.reduce_mean(total_loss))\n",
    "    return total_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def compute_accuracy(logit,labels):\n",
    "    correct=tf.equal(tf.argmax(logit,1),labels)\n",
    "    acc=tf.reduce_mean(tf.cast(correct,tf.float32))\n",
    "    return acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class DataAugmenter():\n",
    "    def __init__(self,isFlip=True,isShift=True,isScale=True,isBrightness=True,isHistogram_eq=True):\n",
    "        #flipcode depends on your own dataset.\n",
    "        self.tp=0\n",
    "        self.angle_val=np.random.uniform(6,10)\n",
    "        self.angle=np.random.choice([-self.angle_val,self.angle_val])\n",
    "        self.scale=np.random.uniform(1.0,1.1)\n",
    "        self.random_br=np.random.uniform(0.5,2.0)\n",
    "        self.x_shift=None\n",
    "        self.y_shift=None\n",
    "    def Flip(self):\n",
    "        if self.tp==0:\n",
    "            self.input=cv2.flip(self.input,1)\n",
    "        else:\n",
    "            self.input=cv2.flip(self.input,0)\n",
    "    def Shift(self):\n",
    "        M=np.float32([[1,0,self.x_shift],[0,1,self.y_shift]])  \n",
    "        self.input=cv2.warpAffine(self.input,M,(self.cols,self.rows))\n",
    "        \n",
    "    def Scale(self):\n",
    "        M=cv2.getRotationMatrix2D((self.cols/2,self.rows/2),0,self.scale)\n",
    "        self.input=cv2.warpAffine(self.input, M, (self.cols, self.rows))\n",
    "        \n",
    "    def Brightness(self):\n",
    "        hsv=cv2.cvtColor(self.input,cv2.COLOR_RGB2HSV)\n",
    "        mask=hsv[:,:,2] * self.random_br >255\n",
    "        v_channel=np.where(mask,255,hsv[:,:,2] * self.random_br)\n",
    "        hsv[:,:,2]=v_channel\n",
    "        self.input=cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)\n",
    "        \n",
    "    def Histogram(self):\n",
    "        tpimg=cv2.cvtColor(self.input,cv2.COLOR_RGB2HSV)\n",
    "        tpimg[:,:,2] = cv2.equalizeHist(tpimg[:,:,2])\n",
    "        self.input=cv2.cvtColor(tpimg,cv2.COLOR_HSV2RGB)\n",
    "    \n",
    "    def Rotate(self): \n",
    "        M=cv2.getRotationMatrix2D((self.cols/2,self.rows/2),self.angle,1)\n",
    "        self.input=cv2.warpAffine(self.input, M, (self.cols, self.rows)) \n",
    "        \n",
    "    def show(self):\n",
    "        self.input=Image.fromarray(self.input)\n",
    "        self.input.show()\n",
    "        self.input=np.array(self.input)\n",
    "    \n",
    "    def Apply(self,_Clip):\n",
    "        res=[]\n",
    "        #set the possibility of all measures of DA:\n",
    "        # 50 % possibility for Flip , Rotate , Scale , Brightness changing , Histogram-equal ops.\n",
    "        pro_flip=random.choice([0,1])\n",
    "        pro_rotate=random.choice([0,1])\n",
    "        #pro_shift=random.choice([0,1])\n",
    "        pro_scale=random.choice([0,1])\n",
    "        pro_bri=random.choice([0,1])\n",
    "        pro_his=random.choice([0,1])\n",
    "        # DO NOT USE SHIFT\n",
    "        pro_shift=0 \n",
    "        #reset new property of DA\n",
    "        self.__init__()\n",
    "        for pic in _Clip:\n",
    "            self.input=pic\n",
    "            self.rows,self.cols,_=pic.shape\n",
    "            if pro_his:\n",
    "                self.Histogram()   \n",
    "                \n",
    "            if pro_bri:\n",
    "                self.Brightness()\n",
    "                \n",
    "            if pro_flip:  \n",
    "                self.Flip()\n",
    "            if pro_rotate:\n",
    "                self.Rotate()\n",
    "            if pro_shift:\n",
    "                if self.x_shift==None:     \n",
    "                    x=np.random.randint(self.cols/20,self.cols/16)\n",
    "                    y=np.random.randint(self.rows/20,self.rows/16)\n",
    "                    x_shift=random.choice([-x,x])\n",
    "                    y_shift=random.choice([-y,y])\n",
    "                    self.x_shift=x_shift\n",
    "                    self.y_shift=y_shift\n",
    "                self.Shift()\n",
    "            if pro_scale:\n",
    "                self.Scale()\n",
    "            res.append(self.input)\n",
    "        return res "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#Utils function for extracting frames of each video to compose a clip.\n",
    "DA=DataAugmenter()\n",
    "\n",
    "def get_frames_data(filename,num_frames_per_clip=16,is_da=False):\n",
    "    \n",
    "    ret_arr=[]\n",
    "    s_index=0\n",
    "    for parent, dirnames,filenames in os.walk(filename):\n",
    "        if(len(filenames)<num_frames_per_clip):\n",
    "            print(\"Get invaild data!\")\n",
    "            return [],s_index\n",
    "        filenames=sorted(filenames)\n",
    "        s_index=random.randint(0,len(filenames)-num_frames_per_clip)\n",
    "        for i in range(s_index,s_index+num_frames_per_clip):\n",
    "            image_name=str(filename)+'/'+str(filenames[i])\n",
    "            img=Image.open(image_name)\n",
    "            img_data=np.array(img)\n",
    "            ret_arr.append(img_data)\n",
    "    if is_da:\n",
    "        return DA.Apply(ret_arr),s_index\n",
    "    else:\n",
    "        return ret_arr,s_index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class DataGenerator:\n",
    "    def __init__(self,filename,batch_size,num_frames_per_clip,shuffle=True,crop_size=160,is_da=IS_DA):\n",
    "        \n",
    "        self.index=0\n",
    "        self.lines=open(filename,'r')\n",
    "        self.lines=list(self.lines)\n",
    "        self.len=len(self.lines)\n",
    "        self.batch_size=batch_size\n",
    "        self.num_frames_per_clip=num_frames_per_clip\n",
    "        self.indexlist=[]\n",
    "        self.crop_size=crop_size\n",
    "        self.is_da=is_da\n",
    "        if shuffle:\n",
    "            self.video_indices=range(len(self.lines))\n",
    "            random.seed(time.time())\n",
    "            random.shuffle(self.video_indices)\n",
    "        else:\n",
    "            self.video_indices=range(0,len(self.lines))\n",
    "    def next_batch(self):\n",
    "        data=[]\n",
    "        labels=[]\n",
    "        crop_size=self.crop_size\n",
    "        self.indexlist=[]\n",
    "        \n",
    "        if self.index + self.batch_size > self.len:\n",
    "            self.index=0\n",
    "        for index in self.video_indices[self.index:self.index+self.batch_size]:\n",
    "            self.indexlist.append(index)\n",
    "            line=self.lines[index].strip('\\n').split()\n",
    "            dirname=line[0]\n",
    "            label=line[1]\n",
    "    \n",
    "            tmp_data,_=get_frames_data(dirname,self.num_frames_per_clip,self.is_da)\n",
    "            img_datas=[]\n",
    "            if(len(tmp_data)!=0):\n",
    "#                 first=True    \n",
    "                for j in xrange(len(tmp_data)):\n",
    "                    \n",
    "                    img=Image.fromarray(tmp_data[j].astype(np.uint8))  \n",
    "                        \n",
    "                    if(img.width>img.height):\n",
    "                        scale=float(crop_size)/float(img.height)\n",
    "                        img=np.array(cv2.resize(np.array(img),(int(img.width*scale+1),crop_size))).astype(np.float32)      \n",
    "                    else:                    \n",
    "                        scale=float(crop_size)/float(img.width)\n",
    "                        img=np.array(cv2.resize(np.array(img),(crop_size,int(img.height*scale+1)))).astype(np.float32)\n",
    "                    crop_x=int((img.shape[0]-crop_size)/2)\n",
    "                    crop_y=int((img.shape[1]-crop_size)/2)\n",
    "                    img=img[crop_x:crop_x+crop_size,crop_y:crop_y+crop_size,:] #-np_mean[j] \n",
    "                    img_datas.append(img)\n",
    "#                     if first:\n",
    "#                         t_img=img.astype(np.uint8)\n",
    "#                         Image.fromarray(t_img).show()\n",
    "#                         first=False\n",
    "                data.append(img_datas)\n",
    "                labels.append(int(label))\n",
    "               \n",
    "        self.index+=self.batch_size          \n",
    "        return np.array(data).astype(np.float32),np.array(labels).astype(np.int64),self.indexlist"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "#Create dataloader to fetch each batch data.\n",
     "#NOTE: is_da=False here disables augmentation for training regardless of the global IS_DA flag.\n",
     "dataloader=DataGenerator(filename='train.list',\n",
     "                                batch_size=BATCH_SIZE,\n",
     "                                num_frames_per_clip=NUM_FRAMES_PER_CLIP,\n",
     "                                shuffle=True,is_da=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "MOVING_AVERAGE_DECAY=0.9  # decay for the ExponentialMovingAverage of weights\n",
     "MODEL_PATH=''  # checkpoint to restore when USE_PRETRAIN is True\n",
     "USE_PRETRAIN=False  # True: restore MODEL_PATH; False: train from scratch\n",
     "MAX_STEPS=36000  # total number of training steps"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# #Test Result for Singe Batch :\n",
    "# IS_TRAIN=False\n",
    "\n",
    "# input_placeholder=tf.placeholder(tf.float32,shape=(BATCH_SIZE,NUM_FRAMES_PER_CLIP,CROP_SIZE,CROP_SIZE,RGB_CHANNEL))\n",
    "# label_placeholder=tf.placeholder(tf.int64,shape=(BATCH_SIZE))\n",
    "# logit=inference_p3d(input_placeholder,1,BATCH_SIZE)\n",
    "# acc=compute_accuracy(logit,label_placeholder)\n",
    "# loss=compute_loss('default_loss',logit,label_placeholder)\n",
    "# saver=tf.train.Saver(tf.global_variables())\n",
    "# init=tf.global_variables_initializer()\n",
    "# sess=tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n",
    "# if USE_PRETRAIN:\n",
    "#     saver.restore(sess,MODEL_PATH)\n",
    "#     print('checkpoint reloaded.')\n",
    "# else:\n",
    "#     print('train from sratch.')\n",
    "# train_images,train_labels,_=dataloader.next_batch()\n",
    "# curacc,curloss,curlogit=sess.run([acc,loss,logit],feed_dict={\n",
    "#                         input_placeholder:train_images,\n",
    "#                         label_placeholder:train_labels})\n",
    "# print(curacc,np.mean(curloss))\n",
    "# print(curlogit)\n",
    "# print(train_labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Make sure IS_TRAIN==True before training.\n",
     "IS_TRAIN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#Cell for Training.\n",
    "\n",
    "with tf.Graph().as_default():\n",
    "    global_step=tf.get_variable('global_step',[],initializer=tf.constant_initializer(0),trainable=False)\n",
    "    \n",
    "    input_placeholder=tf.placeholder(tf.float32,shape=(BATCH_SIZE,NUM_FRAMES_PER_CLIP,CROP_SIZE,CROP_SIZE,RGB_CHANNEL))\n",
    "    label_placeholder=tf.placeholder(tf.int64,shape=(BATCH_SIZE))\n",
    "    \n",
    "    #set dropout to 0.5\n",
    "    logit=inference_p3d(input_placeholder,0.5,BATCH_SIZE)\n",
    "    acc=compute_accuracy(logit,label_placeholder)\n",
    "    tf.summary.scalar('accuracy',acc)\n",
    "    loss=compute_loss('default_loss',logit,label_placeholder)\n",
    "    \n",
    "    \n",
    "    varlist1=[]\n",
    "    varlist2=[]\n",
    "    for param in tf.trainable_variables():\n",
    "        if param.name!='dense/bias:0' and param.name!='dense/kernel:0':\n",
    "            varlist1.append(param)\n",
    "        else:\n",
    "            varlist2.append(param)\n",
    "    \n",
    "    learning_rate_stable = tf.train.exponential_decay(0.0005,\n",
    "                                               global_step,decay_steps=2100,decay_rate=0.6,staircase=True)\n",
    "    learning_rate_finetune = tf.train.exponential_decay(0.0005,\n",
    "                                               global_step,decay_steps=2100,decay_rate=0.6,staircase=True)\n",
    "    \n",
    "    opt_stable=tf.train.AdamOptimizer(learning_rate_stable)\n",
    "    opt_finetuning=tf.train.AdamOptimizer(learning_rate_finetune)\n",
    "    \n",
    "    #when using BN,this dependecy must be built.\n",
    "    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) \n",
    "    optim_op1=opt_stable.minimize(loss,var_list=varlist1)\n",
    "    optim_op2=opt_finetuning.minimize(loss,var_list=varlist2,global_step=global_step)\n",
    "    \n",
    "    with tf.control_dependencies(update_ops):\n",
    "        optim_op_group=tf.group(optim_op1,optim_op2)\n",
    "        \n",
    "    \n",
    "    \n",
    "    variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,num_updates=global_step)\n",
    "    variable_averages_op=variable_averages.apply(tf.trainable_variables())\n",
    "    \n",
    "    train_op=tf.group(optim_op_group,variable_averages_op)\n",
    "    \n",
    "    #when using BN,only store trainable parameters is not enough,cause MEAN and VARIANCE for BN is not\n",
    "    #trainable but necessary for test stage.\n",
    "    saver=tf.train.Saver(tf.global_variables())\n",
    "    init=tf.global_variables_initializer()\n",
    "    sess=tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n",
    "    sess.run(init)\n",
    "    if USE_PRETRAIN:\n",
    "        saver.restore(sess,MODEL_PATH)\n",
    "        print('checkpoint reloaded.')\n",
    "    else:\n",
    "        print('train from sratch.')\n",
    "    merged=tf.summary.merge_all()\n",
    "    train_writer=tf.summary.FileWriter('./visual_logs/train',sess.graph)\n",
    "    test_writer=tf.summary.FileWriter('./visual_logs/test',sess.graph)\n",
    "    duration=0\n",
    "    print('Start training.')\n",
    "    for step in xrange(1,MAX_STEPS):\n",
    "        sess.graph.finalize()\n",
    "        start_time=time.time()\n",
    "        train_images,train_labels,_=dataloader.next_batch()\n",
    "        sess.run(train_op,feed_dict={\n",
    "                        input_placeholder:train_images,\n",
    "                        label_placeholder:train_labels})\n",
    "        duration+=time.time()-start_time\n",
    "        \n",
    "        \n",
    "        if step!=0 and step % 10==0:\n",
    "            curacc,curloss=sess.run([acc,loss],feed_dict={\n",
    "                        input_placeholder:train_images,\n",
    "                        label_placeholder:train_labels})\n",
    "            print('Step %d: %.2f sec -->loss : %.4f =====acc : %.2f' % (step, duration,np.mean(curloss),curacc))\n",
    "            duration=0\n",
    "        if step!=0 and step % 50==0:\n",
    "            mer=sess.run(merged,feed_dict={\n",
    "                        input_placeholder:train_images,\n",
    "                        label_placeholder:train_labels})\n",
    "            train_writer.add_summary(mer, step)\n",
    "        if step >7000 and step % 800==0 or (step+1)==MAX_STEPS:\n",
    "            saver.save(sess,'./TFCHKP_{}'.format(step),global_step=step)\n",
    "        \n",
    "    print('done')   \n",
    "            \n",
    "        \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "#Cell for Testing\n",
     "MOVING_AVERAGE_DECAY=0.99\n",
     "tf.reset_default_graph()\n",
     "#when testing ,make sure IS_TRAIN==False,or you will get bad result for testing.\n",
     "IS_TRAIN=False\n",
     "final_acc=0\n",
     "IS_DA=False\n",
     "testloader=DataGenerator(filename='testrr.list',\n",
     "                                batch_size=BATCH_SIZE,\n",
     "                                num_frames_per_clip=NUM_FRAMES_PER_CLIP,\n",
     "                                shuffle=False,is_da=IS_DA)\n",
     "\n",
     "c=0\n",
     "with tf.Graph().as_default():\n",
     "    \n",
     "    global_step=tf.get_variable('global_step',[],initializer=tf.constant_initializer(0),trainable=False)\n",
     "    input_placeholder=tf.placeholder(tf.float32,shape=(BATCH_SIZE,NUM_FRAMES_PER_CLIP,CROP_SIZE,CROP_SIZE,RGB_CHANNEL))\n",
     "    label_placeholder=tf.placeholder(tf.int64,shape=(BATCH_SIZE))\n",
     "    \n",
     "    #when testing,make sure dropout=1.0(keep_prob)\n",
     "    logit=inference_p3d(input_placeholder,1,BATCH_SIZE)\n",
     "    variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,num_updates=global_step)\n",
     "    variable_averages_op=variable_averages.apply(tf.trainable_variables())\n",
     "    acc=compute_accuracy(logit,label_placeholder)\n",
     "    init=tf.global_variables_initializer()\n",
     "    variable_avg_restore=variable_averages.variables_to_restore()\n",
     "    \n",
     "    avglist=[]\n",
     "\n",
     "    saver=tf.train.Saver(tf.global_variables())\n",
     "    # You can also restore the moving-average parameters, like this:\n",
     "    # saver=tf.train.Saver(variable_avg_restore)\n",
     "    \n",
     "    sess=tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n",
     "    sess.run(init)\n",
     "    #restore your checkpoint file\n",
     "    saver.restore(sess,'da_tfckp_it_33600-33600')\n",
     "\n",
     "    # NOTE(review): the '/' below relies on Python 2 integer division;\n",
     "    # under Python 3 use '//' or range() raises TypeError.\n",
     "    for step in range(testloader.len/BATCH_SIZE):\n",
     "        image,label,_=testloader.next_batch()\n",
     "        accuracy=sess.run(acc,feed_dict={input_placeholder:image,\n",
     "                                    label_placeholder:label})\n",
     "        print('->',accuracy)\n",
     "        final_acc+=accuracy\n",
     "        c+=1\n",
     "    print(final_acc/c)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "IS_DA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "#Cell for Testing: using the same checkpoint file, you can continue testing without restoring the checkpoint again.\n",
     "# NOTE(review): this cell reuses sess, acc and the placeholders built in\n",
     "# the previous testing cell; the tf.Graph() context below creates a new\n",
     "# graph that sess is not bound to, so it appears to have no effect here.\n",
     "testloader=DataGenerator(filename='res.list',\n",
     "                                batch_size=BATCH_SIZE,\n",
     "                                num_frames_per_clip=NUM_FRAMES_PER_CLIP,\n",
     "                                shuffle=False,is_da=IS_DA)\n",
     "final_acc=0\n",
     "c=0\n",
     "with tf.Graph().as_default():\n",
     "    # NOTE(review): '/' assumes Python 2 integer division; use '//' on\n",
     "    # Python 3.\n",
     "    for step in range(testloader.len/BATCH_SIZE):\n",
     "        image,label,_=testloader.next_batch()\n",
     "        accuracy=sess.run(acc,feed_dict={input_placeholder:image,\n",
     "                                    label_placeholder:label})\n",
     "        print('->',accuracy)\n",
     "        final_acc+=accuracy\n",
     "        c+=1\n",
     "    print(final_acc/c)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
