import keras
from keras.applications import mobilenet_v2 as mv2
import keras.backend as K
from keras.models import Model
import cv2
from keras import optimizers
from keras import utils
import random
import tensorflow as tf
from matplotlib import pyplot as plt
import time
from keras.utils import np_utils
from keras.models import Sequential
from keras.regularizers import L1L2
from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.layers import Convolution2D, Activation, MaxPooling2D, Flatten, Dense, Dropout, Input, Reshape, Lambda, \
    ZeroPadding2D,GlobalAveragePooling1D
import os
import numpy as np
plt.switch_backend('agg')  # headless backend: script runs on a server with no display

# Pin training to GPU 2 only.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
load_size = (140, 260)  # (height, width) images are resized to before random cropping
H = 120  # network input height
W = 240  # network input width
C = 3  # channels (cv2.imread yields BGR)
classes = 2  # binary task; the model's head is a single sigmoid unit
rotates = [-5, 20]  # rotation-angle range in degrees for augmentation
# NOTE(review): these are the standard ImageNet channel means in RGB order,
# but cv2.imread returns BGR — confirm the intended channel order.
mean = [0.485, 0.456, 0.406]
batch = 16  # mini-batch size used by the Dse sequence
imgroot = "/data/soft/javad/threeone/data/1"  # root dir: one subdirectory per class
nb_epoch = 300


def sign_sqrt(x):
    """Signed square root: sqrt(|x| + eps), with the sign of x restored."""
    eps = 1e-10  # keeps the gradient finite at zero
    return K.sqrt(K.abs(x) + eps) * K.sign(x)


def l2_norm(x):
    """L2-normalize a tensor along its final dimension."""
    normalized = K.l2_normalize(x, axis=-1)
    return normalized


def batch_dot(cnn_ab):
    """Bilinear-pooling step: batched dot of the two feature tensors over axis 1."""
    left, right = cnn_ab
    return K.batch_dot(left, right, axes=[1, 1])

def multi_category_focal_loss1(alpha, gamma=2.0):
    """
    Focal loss for multi-class or multi-label problems.

    alpha weights each class/label; its size must match the number of
    classes, and the matmul below expects it shaped as a column vector
    (one row per class, e.g. [[1], [2], [3], [2]]) — TODO confirm.
    Try this loss when the dataset's classes/labels are imbalanced.
    gamma is the standard focal-loss exponent down-weighting easy examples.
    Usage:
     model.compile(loss=[multi_category_focal_loss1(alpha=[1,2,3,2], gamma=2)], metrics=["accuracy"], optimizer=adam)
    """
    epsilon = 1.e-7  # clip bound that keeps log() finite
    alpha = tf.constant(alpha, dtype=tf.float32)
    #alpha = tf.constant([[1],[1],[1],[1],[1]], dtype=tf.float32)
    #alpha = tf.constant_initializer(alpha)
    gamma = float(gamma)
    def multi_category_focal_loss1_fixed(y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.clip_by_value(y_pred, epsilon, 1. - epsilon)
        # y_t is p_t: the model's probability assigned to the true outcome per slot
        y_t = tf.multiply(y_true, y_pred) + tf.multiply(1-y_true, 1-y_pred)
        # NOTE(review): tf.log is TF1.x API (tf.math.log in TF2) — this file
        # appears to target TF1-era TensorFlow; verify before upgrading.
        ce = -tf.log(y_t)
        weight = tf.pow(tf.subtract(1., y_t), gamma)  # (1 - p_t)^gamma modulating factor
        fl = tf.matmul(tf.multiply(weight, ce), alpha)  # apply per-class alpha weights
        loss = tf.reduce_mean(fl)
        return loss
    return multi_category_focal_loss1_fixed

def get_model():
    """Build and compile a bilinear-pooling binary classifier on MobileNetV2.

    The backbone's spatial feature map is flattened to (H*W, C), bilinearly
    pooled against itself (batch dot), passed through signed-sqrt and L2
    normalization, then dropout and a single sigmoid unit.
    """
    backbone: keras.Model = mv2.MobileNetV2(weights=None, include_top=False, input_shape=(H, W, C))
    fmap = backbone.output
    spatial = int(fmap.shape[1]) * int(fmap.shape[2])
    channels = int(fmap.shape[3])
    flat_map = Reshape([spatial, channels])(fmap)
    bilinear = Lambda(batch_dot, name="bdot")([flat_map, flat_map])
    signed = Lambda(sign_sqrt, name="ssqrt")(bilinear)
    normed = Lambda(l2_norm, name="lnorm")(signed)
    dropped = Dropout(0.5)(Flatten()(normed))
    prob = Dense(1, activation='sigmoid')(dropped)

    model = Model(backbone.input, prob)
    sgd = optimizers.SGD(lr=1e-3, momentum=0.9, decay=1e-6)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
    # new_m = keras.Model()


# get_model()

def crop(img):
    """Resize to `load_size`, then take a random H x W crop (augmentation)."""
    resized = cv2.resize(img, (load_size[1], load_size[0]), interpolation=cv2.INTER_LINEAR)

    h, w, _ = resized.shape
    # Offsets stay in bounds because load_size strictly exceeds (H, W).
    top = int(random.uniform(0, h - H))
    left = int(random.uniform(0, w - W))

    return resized.copy()[top:top + H, left:left + W, :]


def cutout(img: np.ndarray, mask_color=(90, 90, 90)):
    """Cutout augmentation: paint a random 70x70 patch of `img` with `mask_color`.

    Mutates `img` in place and returns it; the patch lies fully inside the
    image, so the image must be strictly larger than 70x70 in both dimensions.

    Bug fixed: the original `assert h > mh, w > mw` checked only the height —
    the second comparison was the assert *message*, so an image too narrow
    slipped through. Both dimensions are now validated. The mutable-list
    default was also replaced with a tuple (shared-default pitfall).
    """
    h, w, _ = img.shape
    mh, mw = 70, 70  # patch size (the original built a zero mask just for its shape)
    assert h > mh and w > mw, "image must be larger than the 70x70 cutout patch"
    top = np.random.randint(0, h - mh)
    left = np.random.randint(0, w - mw)

    img[top:top + mh, left:left + mw] = mask_color
    return img

def rotate(image, angle):
    """Rotate `image` by `angle` degrees about its center, keeping the same size."""
    h, w, _ = image.shape
    center = (w / 2, h / 2)
    transform = cv2.getRotationMatrix2D(center, angle, 1)
    return cv2.warpAffine(image, transform, (w, h), flags=cv2.INTER_LINEAR)


def enhance(img):
    """Photometric augmentation: random brightness, contrast, saturation and hue jitter.

    Operates on a copy of `img` (BGR uint8); each distortion is applied
    independently with probability 1/2. Returns the augmented image.
    """
    def _convert(image, alpha=1, beta=0):
        # In-place linear transform: image = clip(image * alpha + beta, 0, 255).
        tmp = image.astype(float) * alpha + beta
        tmp[tmp < 0] = 0
        tmp[tmp > 255] = 255
        image[:] = tmp

    image = img.copy()
    # brightness distortion (additive)
    if random.randrange(2):
        _convert(image, beta=random.uniform(-3, 9))
    # contrast distortion (multiplicative)
    if random.randrange(2):
        _convert(image, alpha=random.uniform(0.2, 1.3))

    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # saturation distortion (scales the S channel)
    if random.randrange(2):
        _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
    # hue distortion
    if random.randrange(2):
        tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
        tmp %= 180  # OpenCV's hue channel wraps in [0, 180)
        image[:, :, 0] = tmp

    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    return image


def read_image(img_path, train):
    """Load and preprocess one image for the network.

    Training path: random crop + random rotation + photometric jitter.
    Inference path: plain resize to (W, H).
    Returns a float64 HxWxC array scaled to [0, 1] with `mean` subtracted.

    Bug fixed: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    replaced with the identical dtype `np.float64`.
    """
    img = cv2.imread(img_path)
    if train:
        img = crop(img)
        # Uniform angle in [rotates[0], rotates[1]].
        angle = rotates[0] + (rotates[1] - rotates[0]) * random.random()
        img = rotate(img, angle)
        img = enhance(img)
    else:
        img = cv2.resize(img, (W, H), interpolation=cv2.INTER_LINEAR)
    img = img.astype(np.float64)
    img /= 255.
    # NOTE(review): `mean` holds ImageNet means in RGB order but the image is
    # BGR from cv2 — confirm intended channel order.
    img -= mean
    return img


class Dse(utils.Sequence):
    """Keras Sequence yielding (images, labels) batches from a class-per-folder tree.

    `images_root` contains one subdirectory per class; the index of each
    subdirectory in sorted order becomes that class's integer label.

    Bug fixed: `np.float` (removed in NumPy 1.24) replaced with the
    identical dtype `np.float64` in `__getitem__`.
    """

    def __init__(self, images_root, shuffle=True, train=True):
        self.images_root = images_root
        # Pair every relative image path with its class label; shuffled once
        # up front unconditionally (matching the original behavior).
        self.pair = list(zip(*self.init()))
        random.shuffle(self.pair)
        self.shuffle = shuffle  # whether to reshuffle at each epoch end
        self.train = train  # selects augmented vs. plain preprocessing

    def init(self):
        """Scan the directory tree; return parallel lists (relative paths, labels)."""
        imgs = []
        labels = []
        dirs = os.listdir(self.images_root)
        dirs.sort()  # deterministic class ordering across runs
        for i, dir in enumerate(dirs):
            cdir = os.path.join(self.images_root, dir)
            for img in os.listdir(cdir):
                imgs.append(os.path.join(dir, img))
                labels.append(i)
        return imgs, labels

    def on_epoch_end(self):
        if self.shuffle:
            random.shuffle(self.pair)

    def __len__(self):
        # Number of full batches; a trailing partial batch is dropped.
        return len(self.pair) // batch

    def __getitem__(self, item):
        pairs = self.pair[item * batch:(item + 1) * batch]
        images, labels = list(zip(*pairs))
        images = [os.path.join(self.images_root, im) for im in images]
        images = [read_image(impath, self.train) for impath in images]

        return np.array(images, np.float64), np.array(labels, dtype=np.uint8)


class LossHistory(Callback):
    """Keras callback that records per-epoch loss/accuracy into plain lists.

    Fixes: the mutable default `logs={}` (shared-default pitfall) is replaced
    with `None`, and the epoch-index parameter is named `epoch` instead of
    the misleading `batch` (Keras passes the epoch number here).
    """

    def on_train_begin(self, logs=None):
        self.losses = []  # training loss per epoch
        self.val_losses = []  # validation loss per epoch
        self.acces = []  # training accuracy per epoch
        self.val_acces = []  # kept for interface compatibility; never filled here

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}  # Keras always passes a dict, but stay defensive
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.acces.append(logs.get('acc'))
        # self.val_acces.append(logs.get('val_acc'))


# --- training setup and fit (top-level script statements) ---
model_save_dir = './model/1/'
if not os.path.exists(model_save_dir):
    os.makedirs(model_save_dir)
# NOTE(review): the checkpoint monitors 'loss' but the filename formats
# {val_loss} — confirm whether 'val_loss' was the intended monitor.
ckpt = ModelCheckpoint(
    os.path.join(model_save_dir, "three0_{epoch:02d}_loss-{val_loss:.4f}_.h5"),
    monitor='loss', verbose=1, save_best_only=True, save_weights_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')
history = LossHistory()
reduce_lr2 = ReduceLROnPlateau(monitor='val_loss',patience=6)
model = get_model()
# NOTE(review): train and validation generators read the same directory, so
# the validation set is not held out — verify this is intentional.
train_gen = Dse(imgroot)
step_per_epoch = len(train_gen)
val_gen = Dse(imgroot,train=False)
# step_test = n_test // batch_size
step_val = len(val_gen)
print(step_val)
model.fit_generator(train_gen, steps_per_epoch=step_per_epoch, epochs=nb_epoch, verbose=1, shuffle=True,
                    validation_data=val_gen,
                    validation_steps=step_val,
                    callbacks=[early_stopping,history,reduce_lr2,ckpt])
# predictions = model.predict_generator(test_gen, steps=step_test)

# loss, accuracy = model.evaluate_generator(test_gen, steps=step_test)
# print('test loss: ', loss)
# print('test accuracy: ', accuracy)

# Collect the per-epoch metrics recorded by the LossHistory callback.
loss = history.losses
val_loss = history.val_losses
acc = history.acces
# val_acc = history.val_acces

print('train loss:', loss)
# print('val loss:', val_loss)
print('train acc:', acc)
# print('val acc:', val_acc)

print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))
# Timestamped name for the final full-model snapshot (weights + architecture).
DateName = "three410" + "_" + time.strftime('%Y%m%d_%H%M', time.localtime(time.time()))
# save modename
h5Model = "model/1/" + DateName + ".h5"

model.save(h5Model)


def h5_to_tflite(h5, tflite, gpun=2):
    """Convert a Keras .h5 model file to TFLite via the `tflite_convert` CLI.

    Bugs fixed: the original referenced an undefined `gpun` and never imported
    `subprocess`, so any call raised NameError. `gpun` is now a parameter
    (default 2 matches the GPU pinned at the top of this script) and the
    import is function-local.

    Args:
        h5: path to the input Keras model file.
        tflite: path for the converted output file.
        gpun: GPU index exported as CUDA_VISIBLE_DEVICES for the converter.
    """
    import subprocess
    cmd = 'CUDA_VISIBLE_DEVICES={} tflite_convert --keras_model_file={} --output_file={} --output_format=tflite' \
        .format(str(gpun), h5, tflite)
    # NOTE(review): shell=True with interpolated paths is shell-injection-prone
    # if the paths were ever untrusted; acceptable for this internal script.
    res = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    res.wait()


# Epoch indices (1-based) for the x axis of the loss plot.
x = list(range(1, len(loss) + 1))

plt.figure()
# Draw the training and validation loss curves on a single chart.
for series, tag, colour in ((loss, "train_loss", "red"), (val_loss, "val_loss", "green")):
    plt.plot(x, series, label=tag, color=colour, linewidth=2)
plt.legend()
plt.xlabel("step")
plt.ylabel("loss")
plt.title("train")
# Save the figure next to the saved model, tagged with the same timestamp.
figname = "model/1/" + DateName + ".jpg"
plt.savefig(figname)
