from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Activation, Dense, Flatten, BatchNormalization
from keras.callbacks import ModelCheckpoint
from keras.utils import multi_gpu_model
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelEncoder, OneHotEncoder

from utils import MyImageDataGenerator
import keras.backend as K
import tensorflow as tf
import pandas as pd
import os
import cv2
import numpy as np
import pickle as pkl

def toGray(img):
    """Convert a BGR image to single-channel grayscale of shape (H, W, 1).

    The trailing channel axis is kept so the result matches the network's
    Conv2D input_shape=(600, 1050, 1).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # BUG FIX: removed stray debug print('aha') left over from development,
    # and replaced the in-place ndarray.resize with an equivalent reshape.
    return gray.reshape(gray.shape[0], gray.shape[1], 1)

class Classifier(object):
    """VGG-style CNN classifier for grayscale 600x1050 images.

    Trains on 2 GPUs via ``multi_gpu_model`` (Keras 2 / TF1-era API) and
    predicts the top-5 class labels per sample.  Labels are encoded with a
    LabelEncoder + OneHotEncoder pair fitted in :meth:`fit`.
    """

    def __init__(self, lenOutput):
        """Build the network.

        lenOutput: number of distinct classes (size of the softmax layer).
        """
        # Training hyper-parameters.
        self.batSZ = 32
        self.epochs = 1000
        self.lenOutput = lenOutput

        # Cap per-process GPU memory so other processes can share the card.
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.4
        K.set_session(tf.Session(config = config))

        # Four conv blocks (64 -> 128 -> 256 -> 512 filters), each followed
        # by batch-norm and 2x2 max-pooling, then two dense layers.
        self.m = Sequential()
        self.m.add(Conv2D(64, 3, strides = 3,activation = 'relu',padding = 'same',input_shape = (600,1050,1)))
        self.m.add(Conv2D(64, 3, activation = 'relu',padding = 'same'))
        self.m.add(BatchNormalization())
        self.m.add(MaxPool2D((2,2),strides=(2,2),padding = 'same'))

        self.m.add(Conv2D(128, 3, strides = 3,activation = 'relu',padding = 'same'))
        self.m.add(Conv2D(128, 3, strides = 3,activation = 'relu',padding = 'same'))
        self.m.add(BatchNormalization())
        self.m.add(MaxPool2D((2,2),strides=(2,2),padding = 'same'))

        self.m.add(Conv2D(256, 3, strides = 3,activation = 'relu',padding = 'same'))
        self.m.add(Conv2D(256, 3, strides = 3,activation = 'relu',padding = 'same'))
        self.m.add(BatchNormalization())
        self.m.add(MaxPool2D((2,2),strides=(2,2),padding = 'same'))

        self.m.add(Conv2D(512, 3, strides = 3,activation = 'relu', padding = 'same'))
        self.m.add(Conv2D(512, 3, strides = 3,activation = 'relu', padding = 'same'))
        self.m.add(BatchNormalization())
        self.m.add(MaxPool2D((2,2),strides=(2,2),padding = 'same'))

        self.m.add(Flatten())
        self.m.add(Dense(4096,activation = 'relu'))
        self.m.add(Dense(self.lenOutput,activation = 'softmax'))

        # The multi-GPU wrapper shares weights with self.m, so predicting
        # and saving through self.m sees the trained weights.
        self.MultiGPUModel = multi_gpu_model(self.m, gpus = 2)
        self.MultiGPUModel.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['categorical_accuracy'])

        self.le = LabelEncoder()
        # NOTE(review): `sparse=False` was renamed `sparse_output` in
        # scikit-learn 1.2+ — confirm the pinned sklearn version.
        self.ohe = OneHotEncoder(sparse = False)

    def fit(self, trainList):
        """Train on trainList, an (N, 2) array of [image_path, label] rows."""
        lindex = self.le.fit_transform(trainList[:,1])
        onehot = self.ohe.fit_transform( lindex.reshape((-1,1)) )
        ig = MyImageDataGenerator(rotation_range=45,width_shift_range=0.1,height_shift_range=0.1,fill_mode='constant',cval=0,horizontal_flip=True,vertical_flip=True)
        # BUG FIX: Python 3 '/' produces a float; fit_generator expects an
        # integer number of steps per epoch.
        steps = len(trainList) // self.batSZ + 1
        self.MultiGPUModel.fit_generator(ig.flow_from_list(trainList[:,0],onehot, reshape=(1050,600),batch_size=self.batSZ,flags = cv2.IMREAD_GRAYSCALE),
                             steps_per_epoch = steps,
                             epochs = self.epochs,
                             callbacks = [ModelCheckpoint('w_{epoch:02d}-{categorical_accuracy:.2f}.h5',monitor='categorical_accuracy',save_best_only=True,save_weights_only=True)],
                             verbose = 1,
                             workers = 4)

    def predict(self, testX):
        """Return, per sample, the top-5 predicted labels joined by spaces."""
        p = self.m.predict(testX, verbose = 1)
        # Indices of the 5 highest-probability classes, best first.
        lindex = np.flip(p.argsort(axis = 1), axis = 1)[:,:5]
        # BUG FIX: LabelEncoder.inverse_transform requires a 1-D array in
        # modern scikit-learn; flatten, decode, then restore the shape.
        tmp = self.le.inverse_transform(lindex.ravel()).reshape(lindex.shape)
        return np.array([' '.join(row) for row in tmp])

    def load(self, filename):
        """Load model weights from `filename`."""
        self.m.load_weights(filename)

    def save(self, filename):
        """Save model weights to `filename`."""
        self.m.save_weights(filename)

def getTrainDataset():
    """Load train.csv, oversample rare classes, and return shuffled data.

    Classes with fewer than 3 images get 10 extra rows sampled with
    replacement.  Returns a DataFrame with columns ['Image', 'Id'] where
    Image paths are prefixed with './train/'.
    """
    f = pd.read_csv('train.csv')

    cnt = f.groupby('Id').size().reset_index(name='count')
    # BUG FIX: DataFrame.append returns a NEW frame; the original discarded
    # the result, so rare classes were never actually oversampled.  Collect
    # the resampled rows and concatenate once at the end.
    extras = []
    for whale_id, n in cnt.values:
        if n < 3:
            extras.append(f[f['Id'] == whale_id].sample(n=10, replace=True))
    if extras:
        f = pd.concat([f] + extras, ignore_index=True)

    f['Image'] = './train/' + f['Image']

    # BUG FIX: sklearn.utils.shuffle returns a shuffled COPY; the original
    # discarded it, so the data was never shuffled.  Shuffle via pandas.
    f = f.sample(frac=1).reset_index(drop=True)

    return f[['Image', 'Id']]


if __name__ == "__main__":
    import sys

    # Train a classifier over all unique whale IDs in the training set.
    trainDataSet = getTrainDataset()
    cl = Classifier(len(trainDataSet['Id'].unique()))
    try:
        cl.fit(trainDataSet.values)
    except KeyboardInterrupt:
        # Save a checkpoint on Ctrl-C so training progress is not lost.
        # BUG FIX: typo 'currupt.h5' -> 'corrupt.h5'; also use sys.exit()
        # instead of the bare exit() builtin, which is only guaranteed to
        # exist in interactive sessions (provided by the site module).
        cl.save('corrupt.h5')
        sys.exit(0)
    cl.save('finish.h5')
