# coding: utf-8
import argparse
import math
import os
import time
from scipy.ndimage import filters
import cv2
import numpy as np
from numpy import *
from scipy import misc
from sklearn.externals import joblib
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC,LinearSVC

# --- Runtime setup -----------------------------------------------------------
# Silence TensorFlow INFO/WARNING logs.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

flags = argparse.ArgumentParser()
# FIX: a GPU index is an integer.  With type=float the value formats as
# e.g. "0.0", which is not a valid CUDA_VISIBLE_DEVICES entry.
flags.add_argument('--gpu', default=0, type=int, help='gpu number')
FLAGS = flags.parse_args()

# Pin the visible GPU before any CUDA-aware library initializes.
gpun = FLAGS.gpu
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpun)

from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img

# Source data directories, one per class (OTHER points at the OTHERBAK dump).
original_dataset_ONE_dir = '/data/station/photobase/ONE/'
original_dataset_TWO_dir = '/data/station/photobase/TWO/'
original_dataset_THREE_dir = '/data/station/photobase/THREE/'
original_dataset_FOUR_dir = '/data/station/photobase/FOUR/'
original_dataset_FIVE_dir = '/data/station/photobase/FIVE/'
original_dataset_OTHER_dir = '/data/station/photobase/OTHERBAK/'

print('original ONE images:', len(os.listdir(original_dataset_ONE_dir)))
print('original TWO images:', len(os.listdir(original_dataset_TWO_dir)))
print('original THREE images:', len(os.listdir(original_dataset_THREE_dir)))
print('original FOUR images:', len(os.listdir(original_dataset_FOUR_dir)))
print('original FIVE images:', len(os.listdir(original_dataset_FIVE_dir)))
print('original OTHER images:', len(os.listdir(original_dataset_OTHER_dir)))


# Short aliases for the source directories.
DIR1 = original_dataset_ONE_dir
DIR2 = original_dataset_TWO_dir
DIR3 = original_dataset_THREE_dir
DIR4 = original_dataset_FOUR_dir
DIR5 = original_dataset_FIVE_dir
DIR6 = original_dataset_OTHER_dir

# Absolute path of every original image, grouped per class.
data_ONE = [(DIR1 + i) for i in os.listdir(DIR1)]
data_TWO = [(DIR2 + i) for i in os.listdir(DIR2)]
data_THREE = [(DIR3 + i) for i in os.listdir(DIR3)]
data_FOUR = [(DIR4 + i) for i in os.listdir(DIR4)]
data_FIVE = [(DIR5 + i) for i in os.listdir(DIR5)]
data_OTHER = [(DIR6 + i) for i in os.listdir(DIR6)]



# --- Augmented-data configuration --------------------------------------------
# The ImageDataGenerator below was used offline to produce the augmented
# images consumed later in this script; kept (disabled) for reference.
'''
datagen = ImageDataGenerator(
    rotation_range=40,
    fill_mode="constant",
    cval=0,
   # width_shift_range=0.2,
   # height_shift_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    #zoom_range = 0.3,
    channel_shift_range=50,
   # rescale=1/255
)
'''

# Directories holding the pre-generated augmented images.
#base_dir = '/data/station/augument_data/'
base_dir = '/data/station/'
Augument_dataset_ONE_dir = os.path.join(base_dir, 'ONE/')
Augument_dataset_TWO_dir = os.path.join(base_dir, 'TWO/')
Augument_dataset_THREE_dir = os.path.join(base_dir, 'THREE/')
Augument_dataset_FOUR_dir = os.path.join(base_dir, 'FOUR/')
Augument_dataset_FIVE_dir = os.path.join(base_dir, 'FIVE/')
Augument_dataset_OTHER_dir = os.path.join(base_dir, 'OTHER/')


# Uniform image geometry every sample is resized to.
ROWS = 297
COLS = 396
CHANNELS = 3

# Original per-class counts — informational only, unused below.
original_ONE_len = len(os.listdir(original_dataset_ONE_dir))
original_TWO_len = len(os.listdir(original_dataset_TWO_dir))
original_THREE_len = len(os.listdir(original_dataset_THREE_dir))
original_FOUR_len = len(os.listdir(original_dataset_FOUR_dir))
original_FIVE_len = len(os.listdir(original_dataset_FIVE_dir))

print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))

# Short aliases for the augmented-data directories.
DIR11 = Augument_dataset_ONE_dir
DIR22 = Augument_dataset_TWO_dir
DIR33 = Augument_dataset_THREE_dir
DIR44 = Augument_dataset_FOUR_dir
DIR55 = Augument_dataset_FIVE_dir
DIR66 = Augument_dataset_OTHER_dir


def _list_files(directory):
    # Absolute path of every entry in `directory` (directory ends with '/',
    # so plain concatenation matches os.listdir order exactly).
    return [directory + name for name in os.listdir(directory)]


augu_ONE = _list_files(DIR11)
augu_TWO = _list_files(DIR22)
augu_THREE = _list_files(DIR33)
augu_FOUR = _list_files(DIR44)
augu_FIVE = _list_files(DIR55)
augu_OTHER = _list_files(DIR66)


# Combine original + augmented file lists; classes labelled 0..5
# (ONE..FIVE, OTHER).
train = (data_ONE + augu_ONE + data_TWO + augu_TWO + data_THREE + augu_THREE
         + data_FOUR + augu_FOUR + data_FIVE + augu_FIVE
         + data_OTHER + augu_OTHER)
target = np.concatenate((
    np.repeat(0., len(data_ONE) + len(augu_ONE)),
    np.repeat(1., len(data_TWO) + len(augu_TWO)),
    np.repeat(2., len(data_THREE) + len(augu_THREE)),
    np.repeat(3., len(data_FOUR) + len(augu_FOUR)),
    np.repeat(4., len(data_FIVE) + len(augu_FIVE)),
    # FIX: use a float label like every other class so the label dtype is
    # uniform by construction (previously the int 5 relied on upcasting).
    np.repeat(5., len(data_OTHER) + len(augu_OTHER)),
))

train_X, test_X, train_y, test_y = train_test_split(train,
                                                    target,
                                                    test_size=0.02,
                                                    random_state=0)

# Pair each file path with its integer label (replaces index-based loops).
train_images = [(path, int(label)) for path, label in zip(train_X, train_y)]
test_images = [(path, int(label)) for path, label in zip(test_X, test_y)]


def read_image(tuple_set):
    """Load one sample and return ``(image, label)``.

    tuple_set: ``(file_path, int_label)`` pair as built in ``train_images`` /
    ``test_images``.  The image is resized to the global (ROWS, COLS) target
    so ``prep_data`` can stack samples into one uniform array.
    """
    file_path, label = tuple_set
    # NOTE(review): scipy.misc.imread was removed in scipy >= 1.2 —
    # consider cv2.imread / imageio when upgrading (mind BGR vs RGB order).
    img = misc.imread(file_path)
    # FIX: compare both spatial dims against the target size instead of
    # hard-coding height 297 and ignoring the width entirely.
    if img.shape[:2] != (ROWS, COLS):
        img = cv2.resize(img, dsize=(COLS, ROWS))
    return img, label

def prep_data(images):
    """Stack ``(path, label)`` pairs into a uint8 image tensor.

    Returns ``(data, labels)`` where ``data`` has shape
    ``(len(images), ROWS, COLS, CHANNELS)`` and ``labels`` is a plain list.
    """
    count = len(images)
    data = np.ndarray((count, ROWS, COLS, CHANNELS), dtype=np.uint8)
    labels = []
    for idx, pair in enumerate(images):
        img, lbl = read_image(pair)
        data[idx] = img
        labels.append(lbl)
    return data, labels


# Materialize the train/test images and labels as arrays/lists.
# (NOTE(review): "y_shit" is the original author's name for the test labels;
# kept unchanged because it is referenced throughout the script.)
x_train, y_train = prep_data(train_images)
x_test, y_shit = prep_data(test_images)


train_length = len(x_train)
test_length = len(x_test)

# LBP-style parameters (radius / sampling points).  n_point appears unused
# in this file — presumably leftover from an LBP feature extractor.
radius = 1
n_point = radius * 8
    
def do_crop(img):
    """Crop the largest accepted contour bounding box out of ``img``.

    The image is converted to grayscale, adaptively thresholded, and the
    contour bounding boxes are scanned; the largest box passing the size
    filter is cropped from the grayscale image and resized to 123x126.

    Returns the resized grayscale crop, or ``None`` when no usable region
    is found (a message is printed in that case).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Binary image for contour detection.
    th2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY, 9, 2)
    # Two-value unpacking assumes the OpenCV 4.x findContours signature.
    contours, hierarchy = cv2.findContours(th2, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    maxarea = -1
    roci = (0, 0, 0, 0)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        # NOTE(review): this compares box height h against the y COORDINATE
        # (abs(h - y)); the original comment ("ratio > 0.2") suggests an
        # aspect-ratio test was intended.  Preserved as-is to keep the
        # trained model's behavior — confirm the intent.
        if (not abs(h - y) < 5) and not (abs(h - y) > 50):
            tarea = w * h
            if maxarea < tarea:
                maxarea = tarea
                roci = (x, y, w, h)
    x, y, w, h = roci
    # FIX: an empty ROI previously fell through to cv2.resize on a
    # zero-size slice and relied on a bare `except:` to recover.  Guard it
    # explicitly and only catch cv2.error for genuine resize failures.
    if w == 0 or h == 0:
        print("size is error")
        return None
    try:
        img__ = cv2.resize(gray[y:y + h, x:x + w], dsize=(123, 126))
    except cv2.error:
        print("size is error")
        img__ = None
    return img__



def sobel_extract(img):
    """Return the Sobel gradient-magnitude map of the cropped ``img``.

    ``img`` is cropped via ``do_crop`` (grayscale, 123x126); returns a 2-D
    float array of the same size, or ``None`` when no crop was found.
    """
    im = do_crop(img)
    if im is None:
        return None

    # Sobel derivative filters: x- and y-direction first derivatives.
    imx = np.zeros(im.shape)
    filters.sobel(im, 1, imx)
    imy = np.zeros(im.shape)
    filters.sobel(im, 0, imy)

    magnitude = np.sqrt(imx ** 2 + imy ** 2)

    # Contrast stretch: clamp weak responses to 0 and strong ones to 255.
    # FIX: the original routed this through np.apply_along_axis with a
    # helper that mutated each row view in place and discarded the returned
    # array — it only worked because apply_along_axis hands out views.
    # Vectorized boolean masking is equivalent, explicit, and faster.
    magnitude[magnitude < 50] = 0
    magnitude[magnitude > 200] = 255
    return magnitude


def batch_deal(x_train, y_train):
    """Extract flattened Sobel features for every image.

    Images whose crop fails (``sobel_extract`` returns ``None``) are
    skipped along with their labels.  Returns ``(feature_matrix, labels)``
    where the matrix rows are the raveled gradient maps.
    """
    features = []
    kept_labels = []
    for image, label in zip(x_train, y_train):
        extracted = sobel_extract(image)
        if extracted is None:
            continue
        features.append(extracted.ravel())
        kept_labels.append(label)
    return np.asarray(features), kept_labels

# --- Feature extraction, training, evaluation, model dump --------------------
x_train, y_train = batch_deal(x_train, y_train)
x_test, y_shit = batch_deal(x_test, y_shit)

print("x_train", type(x_train), len(x_train), x_train.shape)
print("xtest", len(x_test))
# Linear-kernel SVM with probability estimates (gamma has no effect for the
# linear kernel; kept for interface compatibility with earlier runs).
svr_rbf = SVC(kernel="linear", probability=True, C=1e3, gamma=0.1)
# FIX: pass n_jobs by keyword — it is keyword-only in recent scikit-learn,
# so the positional `-1` would raise a TypeError there.
Rest_c = OneVsRestClassifier(svr_rbf, n_jobs=-1)

Rest_c.fit(x_train, y_train)
score = Rest_c.score(x_test, y_shit)

print("test score: {0}".format(score))
print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))

# NOTE(review): sklearn.externals.joblib was removed in scikit-learn >= 0.23;
# switch the top-of-file import to `import joblib` when upgrading.
joblib.dump(Rest_c,"/hadoop/station_photo_determine/model/newmodel/ws6_sobel_svm_other_line_model_1101.h5")

print("dump done!")
