{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# % matplotlib inline\n",
    "# import matplotlib.pyplot as plt\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import os\n",
    "import random\n",
    "import glob\n",
    "from skimage.feature import hog\n",
    "from PIL import Image\n",
    "# import cv2\n",
    "from sklearn.model_selection import train_test_split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Scratch cell: one-off experiment visualising the HOG features of a single\n",
    "# sample image (kept commented out; requires matplotlib and cv2 imports above).\n",
    "# test_file_dir = './imageData/楷书/丐/张浚张浚0.jpg'\n",
    "# img = Image.open(test_file_dir).resize((100,100)).convert('L')\n",
    "# a = list(img.getdata())\n",
    "# b = np.reshape(a,(100,100))\n",
    "\n",
    "# features,img = hog(b, orientations=4, pixels_per_cell=(6,6),cells_per_block=(1,1),visualise=True)\n",
    "# plt.imshow(img,cmap='gray')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def listdir_nohidden(path):\n",
    "    \"\"\"Return paths of all non-hidden entries directly under `path`.\n",
    "\n",
    "    glob('*') skips dot-files, unlike os.listdir.\n",
    "    \"\"\"\n",
    "    return glob.glob(os.path.join(path, '*'))\n",
    "\n",
    "\n",
    "def image_reader(file_name, new_size):\n",
    "    \"\"\"Read an image file, resize it and convert it to 8-bit grayscale.\n",
    "\n",
    "    file_name : path of the image to load.\n",
    "    new_size  : (width, height) pair, as expected by PIL's resize().\n",
    "\n",
    "    Returns a numpy array of shape (height, width) with the pixel data.\n",
    "    \"\"\"\n",
    "    img = Image.open(file_name).resize(new_size).convert('L')\n",
    "    pixels = list(img.getdata())\n",
    "    # FIX: the reshape was hard-coded to (100, 100); honour new_size so the\n",
    "    # function works for any requested size. getdata() is row-major, hence\n",
    "    # the shape is (height, width) = (new_size[1], new_size[0]).\n",
    "    return np.reshape(pixels, (new_size[1], new_size[0]))\n",
    "\n",
    "\n",
    "def get_file_hog_label_list_from_disk():\n",
    "    \"\"\"Build HOG feature vectors and style labels from ./imageData.\n",
    "\n",
    "    Scans ./imageData/<style>/<char>/ and, for every character that has at\n",
    "    least one image, computes the HOG descriptor of the FIRST image only.\n",
    "\n",
    "    Returns (fileFeaturesList, fileLabelList) where each label is the index\n",
    "    of the character's style in char_styles.\n",
    "    \"\"\"\n",
    "    char_styles = ['篆书','隶书','楷书','行书','草书']\n",
    "    fileFeaturesList = []\n",
    "    fileLabelList = []\n",
    "    for style in char_styles:\n",
    "        print 'start iterate: %s'% style\n",
    "        for chars in listdir_nohidden('./imageData/'+ style):\n",
    "            # Hoisted: list the directory once instead of twice.\n",
    "            char_items = listdir_nohidden(chars)\n",
    "            if len(char_items) > 0:\n",
    "                # Only the first font image of this character is used.\n",
    "                char_item = char_items[0]\n",
    "                img = image_reader(char_item, (100, 100))\n",
    "                features = hog(img, orientations=4, pixels_per_cell=(6, 6),\n",
    "                               cells_per_block=(1, 1))\n",
    "                print 'saving : ' + char_item\n",
    "                fileFeaturesList.append(list(features))\n",
    "                fileLabelList.append(char_styles.index(style))\n",
    "            else:\n",
    "                print 'there is no img under dir: ' + chars\n",
    "    return fileFeaturesList, fileLabelList"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# get file hog features and labels list\n",
    "fileFeaturesList,fileLabelList = get_file_hog_label_list_from_disk()\n",
    "\n",
    "\n",
    "# split data into train and test data\n",
    "X_train, X_test, y_train, y_test = train_test_split(fileFeaturesList, fileLabelList,\n",
    "                                                    test_size=0.25, random_state=42)\n",
    "\n",
    "# convert python list to tensor list\n",
    "X_train = tf.convert_to_tensor(value=X_train,dtype=tf.float32)\n",
    "X_test = tf.convert_to_tensor(value=X_test,dtype=tf.float32)\n",
    "\n",
    "y_train = tf.convert_to_tensor(value=y_train,dtype=tf.int64)\n",
    "y_test = tf.convert_to_tensor(value=y_test,dtype=tf.int64)\n",
    "\n",
    "\n",
    "# one-hot encode\n",
    "# NOTE(review): on_value/off_value are ints, so these one-hot tensors are\n",
    "# integer-typed; they are only fed into the float32 y_true placeholder via\n",
    "# feed_dict later, where numpy performs the cast -- confirm this is intended.\n",
    "y_train_onehot = tf.one_hot(indices = y_train,\n",
    "                            depth = 5,\n",
    "                            on_value = 1,\n",
    "                            off_value = 0,\n",
    "                            axis = -1)\n",
    "y_test_onehot = tf.one_hot(indices = y_test,\n",
    "                                      depth = 5,\n",
    "                                      on_value = 1,\n",
    "                                      off_value = 0,\n",
    "                                      axis = -1)\n",
    "\n",
    "# put tensorlist to queues\n",
    "# NOTE(review): slice_input_producer / tf.train.batch are TF1 queue-runner\n",
    "# APIs (removed in TF2); they require tf.train.start_queue_runners, which\n",
    "# is called in a later cell, before any batch can be fetched.\n",
    "train_input_queue = tf.train.slice_input_producer(tensor_list=[X_train,y_train_onehot],\n",
    "                                                      shuffle=True)\n",
    "test_input_queue = tf.train.slice_input_producer(tensor_list=[X_test,y_test_onehot],\n",
    "                                                    shuffle=True)\n",
    "# new image size\n",
    "# NOTE(review): resize_heights / resize_widths appear unused anywhere in\n",
    "# this notebook -- candidates for removal.\n",
    "resize_heights = 128\n",
    "resize_widths = 128\n",
    "\n",
    "# Each queue element is a (features, one-hot label) pair.\n",
    "train_images,train_labels =train_input_queue[0],train_input_queue[1]\n",
    "\n",
    "test_images,test_labels = test_input_queue[0],test_input_queue[1]\n",
    "\n",
    "# set batch\n",
    "bat_size = 100\n",
    "threads_cout = 4\n",
    "train_batch = tf.train.batch(tensors=[train_images,train_labels],\n",
    "                             batch_size=bat_size,\n",
    "                             num_threads=threads_cout)\n",
    "# The test batch size equals the size of the whole test split, so one\n",
    "# session.run(test_batch) yields the entire test set at once.\n",
    "test_batch = tf.train.batch(tensors=[test_images,test_labels],\n",
    "                                 batch_size=y_test.shape[0],\n",
    "                                 num_threads=threads_cout,)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Input: flattened HOG feature vectors.\n",
    "# 1024 = (100 // 6)**2 cells * 4 orientations from the HOG step above\n",
    "# (pixels_per_cell=(6,6), cells_per_block=(1,1) on 100x100 images).\n",
    "x = tf.placeholder(dtype=tf.float32,shape=[None,1024],name=\"input_x\")\n",
    "\n",
    "# weights & biases of a single-layer softmax classifier (5 styles)\n",
    "w = tf.Variable(tf.truncated_normal(shape=[1024,5]),name=\"w\")\n",
    "b = tf.Variable(tf.truncated_normal(shape=[5]),name=\"b\")\n",
    "\n",
    "tf.summary.histogram(\"w\",w)\n",
    "tf.summary.histogram(\"b\",b)\n",
    "\n",
    "# Unnormalized class scores. (Op name \"muti\" is kept as-is: it is a\n",
    "# runtime graph name that existing logs/checkpoints may reference.)\n",
    "logits = tf.add(tf.matmul(x,w,name=\"muti\"),b,name=\"add\")\n",
    "\n",
    "# One-hot ground-truth labels.\n",
    "y_true = tf.placeholder(dtype=tf.float32,shape=[None,5],name=\"y_true\")\n",
    "\n",
    "with tf.name_scope(\"cost\"):\n",
    "    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y_true,name=\"cross_entropy\")\n",
    "    cost = tf.reduce_mean(cross_entropy)\n",
    "    tf.summary.scalar('cost',cost)\n",
    "\n",
    "with tf.name_scope(\"optimizer\"):\n",
    "    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3).minimize(cost)\n",
    "\n",
    "with tf.name_scope(\"accuracy\"):\n",
    "    # FIX: tf.arg_max is deprecated in TF 1.x; tf.argmax is the supported\n",
    "    # spelling and takes the axis as the same positional argument.\n",
    "    y_pred_cls = tf.argmax(logits,1)\n",
    "    y_true_cls = tf.argmax(y_true,1)\n",
    "    \n",
    "    whether_equals = tf.equal(y_true_cls,y_pred_cls,name=\"w_equals\")\n",
    "    accuracy = tf.reduce_mean(tf.cast(whether_equals,dtype=tf.float32,name=\"cast\"),name=\"accuracy_mean\")\n",
    "    \n",
    "    tf.summary.scalar(\"accuracy\",accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Create the TF1 session used by all following cells.\n",
    "session = tf.Session()\n",
    "init_group = tf.group(tf.global_variables_initializer(),\n",
    "                      tf.local_variables_initializer())\n",
    "\n",
    "# initial variables\n",
    "session.run(init_group)\n",
    "# merge summary\n",
    "summary_merged = tf.summary.merge_all()\n",
    "\n",
    "# train and test writer\n",
    "# Only the train writer records the graph; both log under ./log for TensorBoard.\n",
    "train_writer = tf.summary.FileWriter(logdir='./log/train',graph=session.graph)\n",
    "test_writer = tf.summary.FileWriter(logdir='./log/test')\n",
    "\n",
    "# start queue\n",
    "# Required by the slice_input_producer/batch queues built earlier; without\n",
    "# queue runners, session.run on a batch tensor would block forever.\n",
    "coordinator = tf.train.Coordinator()\n",
    "threads = tf.train.start_queue_runners(sess=session,coord=coordinator)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# iterations steps\n",
    "num_iterations = 200000\n",
    "# Fetch the entire test split once (test batch_size == test-set size); the\n",
    "# same feed dict is reused for every test-accuracy evaluation below.\n",
    "x_test,y_test = session.run(test_batch)\n",
    "feed_dict_test = {\n",
    "    x:x_test,\n",
    "    y_true:y_test\n",
    "}\n",
    "\n",
    "for i in range(num_iterations):\n",
    "    # Pull the next shuffled training batch from the input queue.\n",
    "    x_batch,y_batch = session.run(train_batch)\n",
    "    \n",
    "    feed_dict_train = {\n",
    "        x:x_batch,\n",
    "        y_true:y_batch\n",
    "    }\n",
    "    \n",
    "    print \"start %s\" % i\n",
    "    if i % 5 ==0:\n",
    "        # print and summary train accuracy\n",
    "        s,acc = session.run([summary_merged,accuracy],feed_dict=feed_dict_train)\n",
    "        train_writer.add_summary(summary=s,global_step=i)\n",
    "        print \"Train accuracy:{:0.1%}\".format(acc)\n",
    "    if i % 10 == 0:\n",
    "        # print and summary test accuracy \n",
    "        s,acc = session.run([summary_merged,accuracy],feed_dict=feed_dict_test)\n",
    "        test_writer.add_summary(summary=s,global_step=i)\n",
    "        print \"Test accuracy:{:0.1%}\".format(acc)\n",
    "        \n",
    "        \n",
    "    # Gradient-descent step. It runs AFTER the logging above, so the metrics\n",
    "    # reported at step i reflect the weights before this update.\n",
    "    session.run(optimizer,feed_dict=feed_dict_train)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
