# coding: utf-8
from keras.applications.mobilenetv2 import MobileNetV2
from keras.layers import GlobalAveragePooling2D, Dense
from keras import utils
import itertools
import threading
from keras.utils import np_utils
from keras.models import Sequential
from keras.optimizers import RMSprop
import subprocess
import warnings
from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from scipy import misc
import numpy as np
import base64
import socket
import time
import os
import sys
from keras.models import load_model
import tensorflow as tf
import cv2
from keras import Model
import argparse
from keras import backend as K
from matplotlib import pyplot as plt
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, VARCHAR, Integer, FLOAT, String
from sqlalchemy.orm import sessionmaker
import pysequoiadb
from pysequoiadb import client
from pysequoiadb.error import SDBBaseError
from pysequoiadb.error import SDBEndOfCursor

import pandas as pd

# Use the non-interactive Agg backend so loss figures can be saved on a headless host.
plt.switch_backend('agg')
# Suppress TensorFlow C++ log output (level 3 = only FATAL messages are shown).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

warnings.filterwarnings('ignore')


def DefineArgs():
    """Parse the command-line arguments of the training job.

    Returns:
        argparse.Namespace with GPU selection, database connection info,
        hyper-parameters and model/input-shape parameters.
    """
    flags = argparse.ArgumentParser()

    flags.add_argument('-gpu', default=0, type=float, help='gpu number')
    flags.add_argument('-tflite', type=int, help='whether need to convert tflite')
    # BUGFIX: help-text typo corrected ("hether" -> "whether").
    flags.add_argument('-pb', type=int, help='whether need to convert pb')
    flags.add_argument('-name', help='name of model', required=True)
    flags.add_argument('-ip', type=str, default="10.244.171.143", help='ip of sdb that will to be checked')

    sdbGroup = flags.add_argument_group('dbse')
    # `dest` renames the attribute referenced later (FLAGS.mysql / FLAGS.dir / FLAGS.modelid).
    sdbGroup.add_argument('-msql', type=str,
                          default="mysql+pymysql://cddba:foxconn2019..@10.244.171.143:3306/aimonitor",
                          help='jdbc of mysql', dest='mysql')
    sdbGroup.add_argument('-ddir', help='the dir of data to store', dest='dir')
    sdbGroup.add_argument('-modelid', help='id of current model', dest="modelid")

    HyperParamGroup = flags.add_argument_group('HyperParam')
    HyperParamGroup.add_argument('-epoch', type=int, help='nums of iter')
    HyperParamGroup.add_argument('-batch', type=int, help='batch size')
    HyperParamGroup.add_argument('-factor', type=float, default=0.1, help='')
    HyperParamGroup.add_argument('-lr', type=float, help='learn rate', default=1e-4)

    ModelParamGroup = flags.add_argument_group('model parameter')
    ModelParamGroup.add_argument('-rows', type=int, help='height of image', dest='ROWS')
    ModelParamGroup.add_argument('-cols', type=int, help='width of image', dest='COLS')
    ModelParamGroup.add_argument('-mode', help='mode to read image', default="L")
    ModelParamGroup.add_argument('-channels', type=int, help='channel of image', dest='CHANNELS')
    ModelParamGroup.add_argument('-alpha', type=float, help='controls the width of the network')
    ModelParamGroup.add_argument('-patience', type=int,
                                 help='number of epochs with no improvement after which training will be stopped')

    FLAGS = flags.parse_args()
    return FLAGS


base = declarative_base()


class trainInfo(base):
    """ORM model for the ``traininfo_mobilenetv2`` table that tracks one
    training run: its status, artifact paths and final accuracy."""
    __tablename__ = 'traininfo_mobilenetv2'

    # Primary key: id of the model/run (matches FLAGS.modelid).
    modelid = Column(VARCHAR(40), primary_key=True)
    modelname = Column(VARCHAR(30), nullable=False)
    # Status code written by MysqlConnect (e.g. 1=started, 2=done, 3=failed
    # as used elsewhere in this script).
    status = Column(Integer, default=0, nullable=False)
    description = Column(VARCHAR(80), nullable=True)
    create_time = Column(VARCHAR(30), nullable=False)
    update_time = Column(VARCHAR(30), nullable=False)
    # Paths/names of the exported artifacts.
    tflite_path = Column(VARCHAR(100), nullable=False)
    pb_path = Column(VARCHAR(100), nullable=False)
    loss_path = Column(VARCHAR(100), nullable=False)
    # Final test accuracy of the run.
    accuracy = Column(FLOAT(), nullable=False)
    # File name of the class-index -> label mapping text file.
    label_txt = Column(VARCHAR(100))

def MysqlConnect(jdbc: str, status=0, tflite=" ", pb=" ", fig=" ", acc=0.0, txt=' '):
    """Update the ``traininfo_mobilenetv2`` row for the global FLAGS.modelid.

    Args:
        jdbc: SQLAlchemy connection URL of the MySQL database.
        status: training status code to store.
        tflite: tflite artifact name/path to store.
        pb: pb artifact name/path to store.
        fig: loss-figure name/path to store.
        acc: test accuracy to store.
        txt: label-mapping text file name to store.

    Does nothing (besides printing) when the modelid row is not found.
    """
    engine = create_engine(jdbc)
    Session = sessionmaker(engine)
    session = Session()
    # BUGFIX: the session was leaked when the query or the commit raised;
    # close it in a finally block instead of on each exit path.
    try:
        print(f'Flags.modelid,{FLAGS.modelid}')
        traininfo = session.query(trainInfo).filter(trainInfo.modelid == FLAGS.modelid).one_or_none()
        if traininfo is None:
            print('modelid not find')
            return
        print(f'update status {status}')
        print(traininfo.modelname)
        traininfo.status = status
        traininfo.update_time = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time()))
        traininfo.tflite_path = tflite
        traininfo.pb_path = pb
        traininfo.loss_path = fig
        traininfo.accuracy = acc
        traininfo.label_txt = txt
        session.add(traininfo)
        session.commit()
    finally:
        session.close()


def getGpuFromSdb(sdb_ip, gpu, sdb_port="11810", sdb_user="peter", sdb_password="Foxconn99."):
    """Check in SequoiaDB whether GPU ``gpu`` on host ``sdb_ip`` is idle.

    Reads the most recent monitor record for the host and returns 1 when both
    the gpu memory usage and the gpu utilisation for that gpu are 0.0,
    otherwise 0. Exits the process when the DB connection cannot be opened.

    Args:
        gpu: gpu index as a string key into the monitor records.
    """
    try:
        db = client(sdb_ip, sdb_port, sdb_user, sdb_password)
    except SDBBaseError as e:
        print(e)
        print("连接出错 请校验")
        sys.exit(0)
    colln = 'monitor'
    csn = 'gpucpu'
    status = 0
    # BUGFIX: if exec_sql itself raised, the old error path called
    # cursor.close() on an unbound name (UnboundLocalError).
    cursor = None
    try:
        cursor = db.exec_sql(f"select gpumem,nvidia_gpu from {colln}.{csn} "
                             f"where ip='{sdb_ip}' order by time desc limit 1")
        while True:
            try:
                record = cursor.next()
                gpumem = record.get('gpumem')
                nvidia_gpu = record.get("nvidia_gpu")
                # Pick the per-gpu dicts containing our gpu key.
                gm = None
                for g in gpumem:
                    if gpu in g.keys():
                        gm = g
                ng = None
                for g in nvidia_gpu:
                    if gpu in g.keys():
                        ng = g
                print(gm[gpu], ng[gpu])
                if gm[gpu] == 0.0 and ng[gpu] == 0.0:
                    status = 1
            except SDBEndOfCursor:
                print('sdbend')
                break
            except SDBBaseError as e:
                pysequoiadb._print(e)
                break
    except Exception as e:
        print(e)
    finally:
        # BUGFIX: also release the cursor on the success path (it was only
        # closed when an error occurred).
        if cursor is not None:
            cursor.close()
    return status
# ---- script initialisation: parse args, pin the GPU, register the run ----
FLAGS = DefineArgs()
# NOTE(review): np.random.seed() returns None, so `seed` is always None;
# seeding the global NumPy RNG is the (presumably intended) side effect.
seed = np.random.seed(seed=2)
ROWS = FLAGS.ROWS
COLS = FLAGS.COLS
CHANNELS = FLAGS.CHANNELS
ip = FLAGS.ip
print("ip", ip)
# Class count = number of entries in the data dir; assumes it contains only
# class sub-directories — TODO confirm (files would inflate the count).
classes = len(os.listdir(FLAGS.dir))
gpun = FLAGS.gpu
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpun)
mysql = "mysql+pymysql://cddba:foxconn2019..@"+ip+":3306/aimonitor"
# status=1: mark this run as "training started" in MySQL.
MysqlConnect(mysql, status=1)
# Filled by getTrainData(): {class_index: directory_name}.
label_txt = {}
# getGpuFromSdb returns 1 only when the GPU is fully idle; otherwise mark
# the run failed (status=3) and abort.
if not getGpuFromSdb(ip, str(int(gpun))):
    MysqlConnect(mysql, status=3)
    sys.exit(1)

def read_image(tuple_set):
    """Load one (file_path, label) sample and resize it to (ROWS, COLS).

    Returns the resized image array together with its label.
    """
    file_path, label = tuple_set
    # "none" falls back to RGB; any other CLI mode (e.g. "L") is used as-is.
    mode = "RGB" if FLAGS.mode == "none" else FLAGS.mode
    # NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
    # this needs an old SciPy (or a port to imageio/cv2) — confirm the env.
    img = misc.imread(file_path, mode=mode)
    resized = misc.imresize(img, (ROWS, COLS), interp='bilinear')
    return resized, label

class Dse(utils.Sequence):
    """Keras Sequence yielding (batch_images, one-hot labels) batches from a
    list of (file_path, label) tuples."""

    def __init__(self, images, bat):
        # images: list of (file_path, label) tuples; bat: batch size.
        self.images = images
        self.bat = bat

    def __len__(self):
        # Number of full batches; any trailing partial batch is dropped.
        return len(self.images) // self.bat

    def __getitem__(self, item):
        start = item * self.bat
        batch = self.images[start:start + self.bat]
        images, labels = [], []
        for sample in batch:
            image, label = read_image(sample)
            # Grayscale images need an explicit channel axis -> (H, W, 1).
            if FLAGS.mode == "L":
                image = np.expand_dims(image, axis=2)
            images.append(image)
            labels.append(label)
        return np.array(images, dtype=np.uint8), np_utils.to_categorical(labels, classes)


def getTrainData():
    """Scan FLAGS.dir for class sub-directories and build the data generators.

    Splits the shuffled file list 10% validation / 20% test / 70% train.
    Side effect: fills the global ``label_txt`` with {class_index: dir_name}.

    Returns:
        (train_gen, test_gen, val_gen, n_train, n_test, n_val)
    """
    basedir = FLAGS.dir
    # BUGFIX: the old code wrote label_txt[i] for *every* os.listdir entry
    # (files included) while only directories became classes, so the recorded
    # label indices could get out of sync with the class indices actually
    # used for the targets below. Index only real class directories.
    # (Also renamed the loop variable: `dir` shadowed the builtin.)
    dirs = []
    for entry in os.listdir(basedir):
        path = os.path.join(basedir, entry)
        if os.path.isdir(path):
            label_txt[len(dirs)] = entry
            dirs.append(path)
    # One list of image paths per class directory.
    dirData = []
    for class_dir in dirs:
        dirData.append([os.path.join(class_dir, img) for img in os.listdir(class_dir)])
    # Class index i repeated once per image of class i.
    labels = [np.repeat(i, len(files)) for i, files in enumerate(dirData)]
    train = np.array(list(itertools.chain.from_iterable(dirData)))
    target = np.concatenate(labels, axis=0)
    n_train = len(train)
    index = list(range(n_train))
    np.random.shuffle(index)
    nval = 0.1   # fraction held out for validation
    ntest = 0.2  # fraction held out for test
    vals = int(nval * n_train)
    tests = int(ntest * n_train)
    val_X, val_y = train[index[0:vals]], target[index[0:vals]]
    test_X, test_y = train[index[vals:vals + tests]], target[index[vals:vals + tests]]
    train_X, train_y = train[index[vals + tests:]], target[index[vals + tests:]]
    # Pair each path with its (plain-int) label for the Sequence generators.
    train_images = [(train_X[i], int(train_y[i])) for i in range(len(train_X))]
    test_images = [(test_X[i], int(test_y[i])) for i in range(len(test_X))]
    val_images = [(val_X[i], int(val_y[i])) for i in range(len(val_X))]
    train_gen = Dse(train_images, FLAGS.batch)
    test_gen = Dse(test_images, FLAGS.batch)
    val_gen = Dse(val_images, FLAGS.batch)
    return train_gen, test_gen, val_gen, len(train_images), len(test_images), len(val_images)


print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))
train_gen, test_gen,val_gen, n_train, n_test,n_val = getTrainData()

print('x_train length', n_train)
optimizer = RMSprop(lr=FLAGS.lr)
objective = 'categorical_crossentropy'

# Build MobileNetV2 from scratch (weights=None) with `classes` output units.
model = MobileNetV2(weights=None,
                    input_shape=(ROWS, COLS, CHANNELS),
                    include_top=True,
                    alpha=FLAGS.alpha,
                    # depth_multiplier=0.9,
                    classes=classes)
#model = load_model(model_path)
nb_epoch = FLAGS.epoch
batch_size = FLAGS.batch
# Shrink the LR by FLAGS.factor when val_loss plateaus for 25 epochs.
reduce_lr2 = ReduceLROnPlateau(monitor='val_loss',
                               factor=FLAGS.factor,
                               patience=25,
                               mode='auto')

class LossHistory(Callback):
    """Keras callback recording per-epoch loss/accuracy for later plotting."""

    def on_train_begin(self, logs=None):
        # BUGFIX: replaced the mutable default argument ``logs={}`` —
        # a shared dict default is the classic Python pitfall.
        self.losses = []
        self.val_losses = []
        self.acces = []
        self.val_acces = []

    def on_epoch_end(self, batch, logs=None):
        # `batch` is the epoch index as supplied by Keras (name kept for
        # backward compatibility).
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.acces.append(logs.get('acc'))
        self.val_acces.append(logs.get('val_acc'))


# Stop training when val_loss has not improved for FLAGS.patience epochs.
early_stopping = EarlyStopping(monitor='val_loss', patience=FLAGS.patience, verbose=0, mode='auto')
history = LossHistory()

model.compile(optimizer=optimizer,
              loss=objective,
              metrics=['accuracy'])
# print(model.summary())
# Steps = number of full batches in each split.
step_per_epoch = n_train // batch_size
step_test = n_test // batch_size
step_val = n_val // batch_size
model.fit_generator(train_gen, steps_per_epoch=step_per_epoch, epochs=nb_epoch, verbose=1, shuffle=True,
                    validation_data=val_gen,
                    validation_steps=step_val,
                    callbacks=[history, early_stopping, reduce_lr2])
# NOTE(review): `predictions` is never used afterwards.
predictions = model.predict_generator(test_gen, steps=step_test)

loss, accuracy = model.evaluate_generator(test_gen, steps=step_test)
print('test loss: ', loss)
print('test accuracy: ', accuracy)

# Per-epoch curves collected by the LossHistory callback.
# NOTE(review): this rebinding of `loss` shadows the scalar test loss above.
loss = history.losses
val_loss = history.val_losses
acc = history.acces
val_acc = history.val_acces

print('train loss:', loss)
print('val loss:', val_loss)
print('train acc:', acc)
print('val acc:', val_acc)

print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))
# Artifact base name: <model name>_<YYYYMMDD_HHMM>.
DateName = FLAGS.name + "_" + time.strftime('%Y%m%d_%H%M', time.localtime(time.time()))
# save modename
# Local staging directories for the produced artifacts.
model_save_dir = '/tmp/tmpModel/'
if not os.path.exists(model_save_dir):
    os.makedirs(model_save_dir)
if not os.path.exists(model_save_dir + "model_bak"):
    os.makedirs(model_save_dir + "model_bak")
if not os.path.exists(model_save_dir + "tflite_bak"):
    os.makedirs(model_save_dir + "tflite_bak")
if not os.path.exists(model_save_dir + "pb_bak"):
    os.makedirs(model_save_dir + "pb_bak")
if not os.path.exists(model_save_dir + "loss_bak"):
    os.makedirs(model_save_dir + 'loss_bak')
# Final destination for artifacts — presumably a shared/NFS mount; confirm.
target_dir = '/aidfs/003/'
h5Model = model_save_dir + "model_bak/" + DateName + ".h5"
h5Target = target_dir + "model_bak/"
tfliteName = ''
tfliteModel = model_save_dir + "tflite_bak/" + DateName + ".tflite"
tfliteTarget = target_dir + "tflite_bak/"
pbModel = model_save_dir + "pb_bak"
pbName = ""
model.save(h5Model)


def h5_to_tflite(h5, tflite):
    """Convert a saved Keras .h5 model to a .tflite file by invoking the
    ``tflite_convert`` CLI, pinned to the configured GPU, and wait for it.

    Args:
        h5: path of the input .h5 model file.
        tflite: path of the output .tflite file.
    """
    # BUGFIX: build the command as an argument list (shell=False) so that
    # paths containing spaces or shell metacharacters cannot break or inject
    # the command; pass CUDA_VISIBLE_DEVICES via the environment instead of
    # prefixing the shell string.
    env = dict(os.environ, CUDA_VISIBLE_DEVICES=str(gpun))
    cmd = ['tflite_convert',
           '--keras_model_file={}'.format(h5),
           '--output_file={}'.format(tflite),
           '--output_format=tflite']
    # subprocess.run waits for completion (replaces Popen + wait()).
    subprocess.run(cmd, env=env, stdout=subprocess.PIPE)
def h5_to_pb(h5_model, output_dir, model_name, out_prefix="output_"):
    """Freeze an in-memory Keras model into a TensorFlow .pb graph file.

    Args:
        h5_model: the Keras model to freeze.
        output_dir: directory the graph file is written into.
        model_name: file name of the resulting .pb graph.
        out_prefix: name prefix given to the renamed output nodes.
    """
    out_nodes = []
    for i in range(len(h5_model.outputs)):
        out_nodes.append(out_prefix + str(i + 1))
        # BUGFIX: use `outputs` (always a list). `output` is a single tensor
        # for single-output models, so `output[i]` sliced that tensor instead
        # of selecting the i-th output node.
        tf.identity(h5_model.outputs[i], out_prefix + str(i + 1))
    sess = K.get_session()
    from tensorflow.python.framework import graph_util, graph_io
    init_graph = sess.graph.as_graph_def()
    # Replace variables with constants so the graph is self-contained.
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)


if FLAGS.tflite:
    h5_to_tflite(h5Model, tfliteModel)
    # NOTE(review): fixed 200 s sleep, presumably to let the conversion
    # settle, although h5_to_tflite already waits on the subprocess — confirm.
    time.sleep(200)
    r1 = subprocess.Popen(f'mv {tfliteModel} {tfliteTarget}', shell=True, stdout=subprocess.PIPE,
                          )
    tfliteName = DateName + '.tflite'
if FLAGS.pb:
    h5_to_pb(model, pbModel, DateName + ".pb")
    r2 = subprocess.Popen(f'mv {pbModel + "/" + DateName + ".pb"} {target_dir + "pb_bak/"}', shell=True,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
    pbName = DateName + '.pb'
# Plot train/validation loss per epoch and save the figure for the dashboard.
x = [i + 1 for i in range(len(loss))]

plt.figure()
plt.plot(x, loss, label="train_loss", color="red", linewidth=2)
plt.plot(x, val_loss, label="val_loss", color="green", linewidth=2)
plt.legend()
plt.xlabel("step")
plt.ylabel("loss")
plt.title("train")
figname = model_save_dir + "loss_bak/" + DateName + ".jpg"
plt.savefig(figname)
# Write the class-index -> directory-name mapping next to the exported model.
with open(h5Target + DateName + ".txt", 'w') as f:
    for key, value in label_txt.items():
        f.write(f"{str(key)}\t{value}\n")
# status=2: training finished — record artifact names and test accuracy.
MysqlConnect(mysql, 2, tfliteName, pbName, DateName + ".jpg", float(accuracy),
             DateName + ".txt")
# NOTE(review): these `mv` subprocesses are never waited on, so the script
# can print 'done' and exit before the moves complete — confirm intended.
r3 = subprocess.Popen(f'mv {h5Model} {h5Target}', shell=True, stdout=subprocess.PIPE,
                      stderr=subprocess.STDOUT)
r4 = subprocess.Popen(f'mv {figname} {target_dir + "loss_bak/"}', shell=True, stdout=subprocess.PIPE,
                      stderr=subprocess.STDOUT)
print('done')
