{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# % matplotlib inline\n",
    "# import matplotlib.pyplot as plt\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import os\n",
    "import random\n",
    "import glob\n",
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# # check our data\n",
    "# test_file_dir = './imageData//楷书/丐/张浚张浚0.jpg'\n",
    "# test_img = plt.imread(test_file_dir)\n",
    "# print \"test image shape is : \\n\"\n",
    "# print test_img.shape\n",
    "# plt.imshow(test_img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Return the non-hidden entries under `path` (glob skips dotfiles).\n",
    "def listdir_nohidden(path):\n",
    "    return glob.glob(os.path.join(path, '*'))\n",
    "\n",
    "# Build parallel lists of image file paths and integer style labels;\n",
    "# label i is the index of the style in `char_styles`.\n",
    "def readFileList():\n",
    "    char_styles = ['篆书','隶书','楷书','行书','草书']\n",
    "    fileNameList = []\n",
    "    fileLabelList = []\n",
    "    # iterate over every calligraphy style directory\n",
    "    for style in char_styles:\n",
    "        print('start iterate: %s' % style)\n",
    "        # iterate over every character directory under this style\n",
    "        for chars in listdir_nohidden('./imageData/' + style):\n",
    "            font_items = listdir_nohidden(chars)\n",
    "            # fixed: the original inner for/if block was mis-indented\n",
    "            # (IndentationError); keep only the first font image per char\n",
    "            if len(font_items) > 0:\n",
    "                char_item = font_items[0]\n",
    "                print('saving : ' + char_item)\n",
    "                fileNameList.append(char_item)\n",
    "                fileLabelList.append(char_styles.index(style))\n",
    "            else:\n",
    "                print('there is no img under dir: ' + chars)\n",
    "\n",
    "    return fileNameList, fileLabelList"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Decode one (filename, label) pair taken from the input queue.\n",
    "# Returns a CHW float tensor ([channels, heights, weights]) plus its label.\n",
    "def image_operation(input_queue, grayscale, heights, weights):\n",
    "    label = input_queue[1]\n",
    "    contents = tf.read_file(input_queue[0])\n",
    "    # fixed: force 3 channels so rgb_to_grayscale always sees RGB input\n",
    "    image = tf.image.decode_jpeg(contents, channels=3)\n",
    "    # resize to a fixed spatial size (resize_images returns float32)\n",
    "    image = tf.image.resize_images(image, [heights, weights])\n",
    "    # optional grayscale conversion: HxWx3 -> HxWx1\n",
    "    image = tf.image.rgb_to_grayscale(image) if grayscale else image\n",
    "    # fixed: HWC -> CHW via transpose; the original reshape hardcoded a\n",
    "    # single channel (failed for grayscale=False) and a reshape does not\n",
    "    # reorder axes anyway.  For the 1-channel path the result is identical.\n",
    "    image = tf.transpose(image, perm=[2, 0, 1])\n",
    "    return image, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# We build the ConvNet from the layer helpers below.\n",
    "\n",
    "# Convolution layer: SAME padding, stride 1, NCHW layout.\n",
    "# Kernel and bias are drawn from a (default) truncated normal and their\n",
    "# histograms are logged to TensorBoard; ReLU unless use_relu=False.\n",
    "def conv_layer(name,input_tensor,filter_h,filter_w,input_channels,output_channels,use_relu=True):\n",
    "    with tf.name_scope(name):\n",
    "        kernel_shape = [filter_h, filter_w, input_channels, output_channels]\n",
    "        kernel = tf.Variable(tf.truncated_normal(shape=kernel_shape), name=\"conv_w\")\n",
    "        bias = tf.Variable(tf.truncated_normal(shape=[output_channels]), name=\"conv_b\")\n",
    "\n",
    "        tf.summary.histogram('conv_w', kernel)\n",
    "        tf.summary.histogram('conv_b', bias)\n",
    "\n",
    "        out = tf.nn.conv2d(input=input_tensor,\n",
    "                           filter=kernel,\n",
    "                           strides=[1, 1, 1, 1],\n",
    "                           padding=\"SAME\",\n",
    "                           name=\"conv_op\",\n",
    "                           data_format=\"NCHW\")\n",
    "        out = tf.nn.bias_add(out, bias, name=\"conv_add\", data_format=\"NCHW\")\n",
    "        if use_relu:\n",
    "            out = tf.nn.relu(out, name=\"conv_relu\")\n",
    "        return out\n",
    "\n",
    "# 2x2 max pooling with stride 2 (NCHW): halves the spatial dimensions.\n",
    "def pool_layer(name,input_value):\n",
    "    with tf.name_scope(name):\n",
    "        return tf.nn.max_pool(value=input_value,\n",
    "                              ksize=[1, 1, 2, 2],\n",
    "                              strides=[1, 1, 2, 2],\n",
    "                              padding=\"SAME\",\n",
    "                              name=\"pool_op\",\n",
    "                              data_format=\"NCHW\")\n",
    "\n",
    "# Fully connected layer: input @ W + b, with parameter histograms logged.\n",
    "def fl_layer(name,input_tensor,input_channels,output_channels):\n",
    "    with tf.name_scope(name):\n",
    "        w = tf.Variable(tf.truncated_normal(shape=[input_channels, output_channels]), name=\"fl_w\")\n",
    "        b = tf.Variable(tf.truncated_normal(shape=[output_channels]), name=\"fl_b\")\n",
    "        tf.summary.histogram('fl_w', w)\n",
    "        tf.summary.histogram('fl_b', b)\n",
    "        return tf.add(tf.matmul(input_tensor, w), b)\n",
    "# Build shuffled train/test input queues from the file/label lists and\n",
    "# return batched (image, label) tensors for each split.\n",
    "# `partitions` is a 0/1 flag per sample (1 = test).  When omitted it\n",
    "# falls back to the notebook-global `partitions`, matching the original\n",
    "# (implicit-global) behavior while making the dependency explicit.\n",
    "def generate_batch(fileNameList,fileLabelList,partitions=None):\n",
    "    if partitions is None:\n",
    "        # fixed: the original silently depended on a module-level global\n",
    "        # that is only defined by a later cell\n",
    "        partitions = globals()['partitions']\n",
    "    with tf.name_scope('convert_to_tensor'):\n",
    "        # convert python lists to tensors\n",
    "        fileNameList_tensor = tf.convert_to_tensor(value=fileNameList,\n",
    "                                                   dtype=tf.string)\n",
    "        fileLabelList_tensor = tf.convert_to_tensor(value=fileLabelList,\n",
    "                                                    dtype=tf.int64)\n",
    "    with tf.name_scope('label_one_hot_encoding'):\n",
    "        # one-hot encode the 5 style classes\n",
    "        fileLabelList_tensor = tf.one_hot(indices=fileLabelList_tensor,\n",
    "                                          depth=5,\n",
    "                                          on_value=1,\n",
    "                                          off_value=0,\n",
    "                                          axis=-1)\n",
    "    with tf.name_scope('dynamic_partition'):\n",
    "        # partition 0 -> train, partition 1 -> test\n",
    "        trainFnameList, testFnameList = tf.dynamic_partition(data=fileNameList_tensor,\n",
    "                                                             partitions=partitions,\n",
    "                                                             num_partitions=2)\n",
    "        trainLabelList, testLabelList = tf.dynamic_partition(data=fileLabelList_tensor,\n",
    "                                                             partitions=partitions,\n",
    "                                                             num_partitions=2)\n",
    "\n",
    "    with tf.name_scope('set_queues'):\n",
    "        # feed the tensor lists into shuffling input queues\n",
    "        train_input_queue = tf.train.slice_input_producer(tensor_list=[trainFnameList, trainLabelList],\n",
    "                                                          shuffle=True)\n",
    "        test_input_queue = tf.train.slice_input_producer(tensor_list=[testFnameList, testLabelList],\n",
    "                                                         shuffle=True)\n",
    "\n",
    "    with tf.name_scope('image_operation'):\n",
    "        # target image size after resize\n",
    "        resize_heights = 128\n",
    "        resize_widths = 128\n",
    "        train_images, train_labels = image_operation(train_input_queue,\n",
    "                                                     grayscale=True,\n",
    "                                                     heights=resize_heights,\n",
    "                                                     weights=resize_widths)\n",
    "        test_images, test_labels = image_operation(test_input_queue,\n",
    "                                                   grayscale=True,\n",
    "                                                   heights=resize_heights,\n",
    "                                                   weights=resize_widths)\n",
    "    with tf.name_scope('set_batch'):\n",
    "        # 100 images per training step; one 500-sample evaluation batch\n",
    "        batch_size = 100\n",
    "        threads_count = 4\n",
    "        train_batch = tf.train.batch(tensors=[train_images, train_labels],\n",
    "                                     batch_size=batch_size,\n",
    "                                     num_threads=threads_count)\n",
    "        test_batch = tf.train.batch(tensors=[test_images, test_labels],\n",
    "                                    batch_size=500,\n",
    "                                    num_threads=threads_count)\n",
    "    return train_batch, test_batch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Build the ConvNet graph, then train it while logging TensorBoard\n",
    "# summaries to ./op_log/.\n",
    "# NOTE(review): reads the notebook-global `start_time` (set by the driver\n",
    "# cell) for elapsed-time reports -- confirm the driver cell ran first.\n",
    "def cnn_model(lr=1e-3,fileNameList=None,fileLabelList=None,partitions=None,iteration=3000):\n",
    "\n",
    "    # start from a clean graph on every call\n",
    "    tf.reset_default_graph()\n",
    "\n",
    "    resize_heights = 128\n",
    "    resize_widths = 128\n",
    "\n",
    "    train_batch, test_batch = generate_batch(fileNameList, fileLabelList)\n",
    "\n",
    "    # input x in NCHW layout: [batch, 1, 128, 128]\n",
    "    x = tf.placeholder(dtype=tf.float32,\n",
    "                       shape=[None, 1, resize_heights, resize_widths],\n",
    "                       name='input_x')\n",
    "    # summary.image expects NHWC, so transpose a few samples for logging\n",
    "    tf.summary.image(\"input_images\", tensor=tf.transpose(a=x[:3], perm=[0, 2, 3, 1]))\n",
    "    # one-hot encoded labels for the 5 styles\n",
    "    y_true = tf.placeholder(dtype=tf.int16, shape=[None, 5])\n",
    "\n",
    "    # conv 1 -> [batch, 32, 128, 128]\n",
    "    conv_1 = conv_layer(\"conv_1\", x, 3, 3, 1, 32, use_relu=True)\n",
    "    # pool 1 -> [batch, 32, 64, 64]\n",
    "    pool_1 = pool_layer(\"pool_1\", conv_1)\n",
    "    # conv 2 -> [batch, 64, 64, 64]\n",
    "    conv_2 = conv_layer(\"conv_2\", pool_1, 5, 5, 32, 64, use_relu=True)\n",
    "    # pool 2 -> [batch, 64, 32, 32]\n",
    "    pool_2 = pool_layer(\"pool_2\", conv_2)\n",
    "    # conv 3 -> [batch, 128, 32, 32]\n",
    "    conv_3 = conv_layer(\"conv_3\", pool_2, 7, 7, 64, 128, use_relu=True)\n",
    "    # pool 3 -> [batch, 128, 16, 16]\n",
    "    pool_3 = pool_layer(\"pool_3\", conv_3)\n",
    "    # conv 4 -> [batch, 256, 16, 16]\n",
    "    conv_4 = conv_layer(\"conv_4\", pool_3, 9, 9, 128, 256, use_relu=True)\n",
    "    # pool 4 -> [batch, 256, 8, 8]\n",
    "    pool_4 = pool_layer(\"pool_4\", conv_4)\n",
    "\n",
    "    # flatten -> [batch, 8*8*256]  (comments fixed: old ones said 16*16*128)\n",
    "    flat_layer = tf.reshape(pool_4, shape=[-1, 8 * 8 * 256])\n",
    "    # fl_1 -> [batch, 128]\n",
    "    # NOTE(review): no ReLU between fl_1 and fl_2, as in the original design\n",
    "    fl_1 = fl_layer(\"fl_1\", flat_layer, 8 * 8 * 256, 128)\n",
    "    # fl_2 -> [batch, 5] logits\n",
    "    fl_2 = fl_layer(\"fl_2\", fl_1, 128, 5)\n",
    "\n",
    "    with tf.name_scope(\"cost\"):\n",
    "        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=fl_2, labels=y_true, name=\"cross_entropy\")\n",
    "        cost = tf.reduce_mean(cross_entropy)\n",
    "        tf.summary.scalar('cost', cost)\n",
    "\n",
    "    with tf.name_scope(\"optimizer\"):\n",
    "        optimizer = tf.train.AdamOptimizer(learning_rate=lr, name=\"AdamOptimizer\").minimize(cost)\n",
    "\n",
    "    with tf.name_scope(\"accuracy\"):\n",
    "        # fixed: tf.arg_max is deprecated in favour of tf.argmax\n",
    "        y_pred_cls = tf.argmax(fl_2, 1)\n",
    "        y_true_cls = tf.argmax(y_true, 1)\n",
    "\n",
    "        whether_equals = tf.equal(y_pred_cls, y_true_cls)\n",
    "        # cast to float32 so the mean is a fraction, not an int\n",
    "        accuracy = tf.reduce_mean(tf.cast(whether_equals, dtype=tf.float32))\n",
    "        # fixed: was logged under the duplicate tag 'cost'\n",
    "        tf.summary.scalar('accuracy', accuracy)\n",
    "\n",
    "    session = tf.Session()\n",
    "\n",
    "    # initialize global and local variables together\n",
    "    ini_group = tf.group(tf.global_variables_initializer(),\n",
    "                         tf.local_variables_initializer())\n",
    "    session.run(ini_group)\n",
    "\n",
    "    summary_merge = tf.summary.merge_all()\n",
    "\n",
    "    # write graph and summaries to the train writer\n",
    "    train_writer = tf.summary.FileWriter('./op_log/train_lr_' + str(lr), graph=session.graph)\n",
    "    # write summaries only to the test writer\n",
    "    test_writer = tf.summary.FileWriter('./op_log/test_lr_' + str(lr))\n",
    "\n",
    "    # start the input-queue threads\n",
    "    coordinator = tf.train.Coordinator()\n",
    "    threads = tf.train.start_queue_runners(sess=session,\n",
    "                                           coord=coordinator)\n",
    "    try:\n",
    "        # one fixed test batch, evaluated periodically\n",
    "        test_x, test_y = session.run(test_batch)\n",
    "        feed_dict_test = {\n",
    "            x: test_x,\n",
    "            y_true: test_y\n",
    "        }\n",
    "\n",
    "        for i in range(iteration):\n",
    "            train_x, train_y = session.run(train_batch)\n",
    "            feed_dict_train = {\n",
    "                x: train_x,\n",
    "                y_true: train_y\n",
    "            }\n",
    "            print('training step:' + str(i))\n",
    "            if i % 5 == 0:\n",
    "                acc, sm = session.run([accuracy, summary_merge], feed_dict=feed_dict_train)\n",
    "                train_writer.add_summary(sm, i)\n",
    "                temp_time = time.time()\n",
    "                tem_str = ', time used : %0.2f s' % (temp_time - start_time)\n",
    "                print('train accuracy:{:0.1%}'.format(acc) + tem_str)\n",
    "\n",
    "            if i % 100 == 0:\n",
    "                acc, sm = session.run([accuracy, summary_merge], feed_dict=feed_dict_test)\n",
    "                test_writer.add_summary(sm, i)\n",
    "                temp_time = time.time()\n",
    "                tem_str = ', time used : %0.2f s' % (temp_time - start_time)\n",
    "                print(\"Test acc:{:0.1%}\".format(acc) + tem_str)\n",
    "\n",
    "            session.run(optimizer, feed_dict=feed_dict_train)\n",
    "        end_time = time.time()\n",
    "        print('total time : %0.2f s' % (end_time - start_time))\n",
    "    finally:\n",
    "        # fixed: stop queue threads, flush writers, release the session\n",
    "        # (the original leaked all of these)\n",
    "        coordinator.request_stop()\n",
    "        coordinator.join(threads)\n",
    "        train_writer.close()\n",
    "        test_writer.close()\n",
    "        session.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Driver cell: build file/label lists, split train/test, then train.\n",
    "fileNameList, fileLabelList = readFileList()\n",
    "\n",
    "# partition flag per sample: 0 = train, 1 = test\n",
    "all_data_counts = len(fileLabelList)\n",
    "partitions = [0] * all_data_counts\n",
    "\n",
    "# hold out ~1/4 of the data for testing\n",
    "# fixed: // keeps this an integer division under Python 2 and 3 alike\n",
    "test_size = all_data_counts // 4\n",
    "partitions[:test_size] = [1] * test_size\n",
    "# shuffle so the held-out samples are spread across all styles\n",
    "random.shuffle(partitions)\n",
    "\n",
    "print('all data is %s,train data is %s ,test data is %s' % (all_data_counts, (all_data_counts - test_size), test_size))\n",
    "\n",
    "# global start time, read by cnn_model for progress reports\n",
    "start_time = time.time()\n",
    "\n",
    "for lr in [1e-3]:\n",
    "    cnn_model(lr, fileNameList, fileLabelList, partitions, iteration=50000)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
