#coding:utf-8
#Author:zhengqings
#file: myocr.py
from __future__ import absolute_import
from __future__ import print_function
import os
from PIL import Image
import numpy as np
import h5py
import cv2
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten

from keras.layers.convolutional import MaxPooling2D
from keras.layers.convolutional import Conv2D
from keras import backend as K

from keras.optimizers import SGD, Adadelta, Adagrad
from keras.utils import np_utils, generic_utils
import random
# --- Training / data-layout configuration -----------------------------------
data_augmentation = True   # train via ImageDataGenerator instead of plain fit()
# imagelen = 42005
vallen = 4206              # number of samples reserved as the validation slice
imagewidth = 28            # raw sample image width in pixels
imageheight = 28           # raw sample image height in pixels
labellen = 82              # number of character classes
batch_size = 128
# imagepaths = '/alidata/cnn/testmodel/'
imagepaths = '/work/ocr/resource/charSamples/ModelImg'
imagetestpath = '/work/ocr/resource/charSamples/ModelImg/gray_SamS.28.jpg'


def load_label(paths):
    """Build the label-index -> character mapping from sample filenames.

    Walks *paths* recursively and parses every filename of the form
    ``<8-char prefix><char>.<index>.<ext>`` (e.g. ``gray_SamA.3.jpg``):
    everything after the fixed 8-character prefix of the stem is the
    character class, and the second dot-separated field is its numeric
    label index.

    Args:
        paths: root directory to walk.

    Returns:
        dict mapping label index (str) to character class (str).
    """
    labels = {}
    for root, _sub_dirs, files in os.walk(paths):
        for filename in files:
            # Skip macOS Finder metadata files.
            if filename.startswith(".DS_Store"):
                continue
            parts = filename.split(".")
            if len(parts) < 2:
                # Filename does not follow the <name>.<index>.<ext> scheme.
                continue
            char_class = parts[0][8:]  # text after the fixed 8-char prefix
            labels[parts[1]] = char_class
    return labels

# Walk every file in all subdirectories of the given directory.
def load_data(paths):
    """Load every ``*.jpg`` under *paths* into CNN-ready arrays.

    Filenames are expected as ``<name>.<index>.<ext>``; the numeric
    ``<index>`` field becomes the sample's class label.

    Args:
        paths: root directory to walk for sample images.

    Returns:
        (data, label): float32 array of shape (N, 1, 22, 14) holding the
        pixel data and a uint8 vector of the N class indices.
    """
    # Collect the matching files first so the arrays can be sized to the
    # actual sample count, instead of the previous hard-coded length of 82
    # (which crashed or left garbage rows when the directory changed).
    samples = []
    for root, _sub_dirs, files in os.walk(paths):
        for filename in files:
            # Skip macOS Finder metadata files.
            if filename.startswith(".DS_Store"):
                continue
            if filename.endswith('.jpg'):
                samples.append((os.path.join(root, filename),
                                filename.split(".")[1]))
    print('filelength=', len(samples))
    data = np.empty((len(samples), 1, 22, 14), dtype="float32")
    label = np.empty((len(samples),), dtype="uint8")
    for i, (filepath, class_index) in enumerate(samples):
        img = Image.open(filepath)
        # NOTE(review): images are assumed to already be 22x14 grayscale;
        # the 2-D array broadcasts into the (1, 22, 14) slot. Confirm the
        # preprocessing pipeline guarantees this size.
        data[i, :, :, :] = np.asarray(img, dtype="float32")
        label[i] = class_index
    return data, label

def xunlian():
    """Train the character-recognition CNN and save it to disk.

    Loads the sample images, builds a small Conv/tanh network, trains it
    (with on-the-fly augmentation when ``data_augmentation`` is set), then
    writes the architecture to ``my_model_architecture.json`` and the
    weights to ``my_model_weights.h5``.
    """
    data, Label = load_data(imagepaths)
    # NOTE(review): the "test" set is loaded from the same directory as the
    # training set, so the final score measures training fit, not
    # generalization — confirm whether a separate test path was intended.
    testdata, testLabel = load_data(imagepaths)
    img_rows, img_cols = 22, 14

    # Theano-style vs TensorFlow-style backends use different channel
    # ordering; reshape accordingly. Single-channel grayscale images.
    if K.image_data_format() == 'channels_first':
        data = data.reshape(data.shape[0], 1, img_rows, img_cols)
        testdata = testdata.reshape(testdata.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        data = data.reshape(data.shape[0], img_rows, img_cols, 1)
        testdata = testdata.reshape(testdata.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    print('X_train shape:', data.shape)
    print(data.shape[0], 'train samples')

    # NOTE(review): len(Label) is the number of SAMPLES; it only works as
    # the class count because there is one sample per class here (82 == 82).
    label = np_utils.to_categorical(Label, len(Label))
    testlabel = np_utils.to_categorical(testLabel, len(Label))

    # Build the training model: three small Conv/tanh stages followed by a
    # dense layer and a softmax over all character classes.
    model = Sequential()

    model.add(Conv2D(16, (6, 4), padding='valid', input_shape=input_shape))
    model.add(Activation('tanh'))

    model.add(Conv2D(8, (3, 2), padding='valid'))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(16, (3, 2), padding='valid'))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(128, init='normal'))
    model.add(Activation('tanh'))

    # Softmax classification over one output per class.
    model.add(Dense(len(Label), init='normal'))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    if not data_augmentation:
        print('type=model.fit')
        model.fit(data, label, batch_size=100, nb_epoch=10, shuffle=True,
                  verbose=1, validation_split=0.2)
    else:
        print('type=data_generator')
        datagen = ImageDataGenerator(
            featurewise_center=False,
            samplewise_center=False,
            featurewise_std_normalization=False,
            samplewise_std_normalization=False,
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0.2,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)
        datagen.fit(data)
        model.fit_generator(datagen.flow(data, label,
                                         batch_size=100),
                            samples_per_epoch=data.shape[0],
                            nb_epoch=200,
                            validation_data=(data, label))

    # Persist the model; use a context manager so the file is closed even
    # if the write fails (the original leaked the handle).
    with open('my_model_architecture.json', 'w') as f:
        f.write(model.to_json())
    model.save_weights('my_model_weights.h5')

    # Report the test metric.
    score = model.evaluate(testdata, testlabel, verbose=0)
    print('test score', score)


def predict(images):
    """Classify a list of character images with the saved CNN.

    Args:
        images: list of arrays, each reshapeable to (22, 14).

    Returns:
        str: the predicted characters concatenated in input order.
    """
    labels = load_label(imagepaths)
    # Load the offline-trained CNN; close the JSON file deterministically
    # (the original left the handle open).
    with open('my_model_architecture.json') as f:
        model = model_from_json(f.read())
    model.load_weights('my_model_weights.h5')
    result = ''
    for image in images:
        # NOTE(review): channels_last layout is assumed here regardless of
        # the backend check done at training time — confirm on a
        # channels_first backend.
        batch = image.reshape(1, 22, 14, 1)
        predicted = model.predict_classes(batch, batch_size=32, verbose=1)
        result += labels[str(predicted[0])]
    return result



def test():
    """Smoke-test: load the sample image and run it through predict().

    NOTE(review): predict() reshapes each image to 22x14, while this
    sample appears to be 28x28 per the module constants — confirm the
    test image's actual dimensions.
    """
    bgr = cv2.imread(imagetestpath, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    predict([gray])


#test();
xunlian();