import argparse
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from keras.models import Sequential, load_model
from keras.layers import Input, Dense, Dropout, Flatten, ReLU, BatchNormalization, MaxPooling2D
from keras.layers import Conv2D
import os.path as path
from tensorflow.python.keras import activations

def LoadData():
    """Load training/test AD samples and their PWM labels from .npy files.

    Returns:
        tuple: (x_train, x_test, y_train, y_test) as numpy arrays, read from
        fixed paths in the current working directory.
    """
    paths = ('./ad_train_data.npy', './ad_test_data.npy',
             './pwm_train_label.npy', './pwm_test_label.npy')
    x_train, x_test, y_train, y_test = (np.load(p) for p in paths)
    return x_train, x_test, y_train, y_test

def CreateModelDense(num_classes=1, dropout=0.25, isBN=True, ad_batch_size=1):
    """Build the dense regression model and derive its save-file name.

    Args:
        num_classes: width of the output layer (1 for single-PWM regression).
        dropout: dropout rate applied after the two wide hidden layers.
        isBN: when True use relu hidden layers with BatchNormalization;
            otherwise tanh hidden layers without BN.
        ad_batch_size: first dimension of the (ad_batch_size, 7, 1) input.

    Returns:
        tuple: (model_name, model) where model_name encodes dropout, AD size
        and (for the non-BN variant) a '_nobn' suffix.
    """
    input_size = (ad_batch_size, 7, 1)
    hidden_act = 'relu' if isBN else 'tanh'

    layers = [Input(input_size), Flatten()]
    # Two wide hidden layers; BN (when enabled) sits between Dense and Dropout.
    for width in (140, 100):
        layers.append(Dense(width, activation=hidden_act))
        if isBN:
            layers.append(BatchNormalization())
        layers.append(Dropout(dropout))
    # Narrow layer with a clipped ReLU (max 8), then the output layer.
    layers.append(Dense(40))
    layers.append(ReLU(max_value=8))
    if isBN:
        layers.append(BatchNormalization())
    layers.append(Dense(num_classes))
    if isBN:
        layers.append(BatchNormalization())

    model = Sequential(layers)
    model.summary()

    sModelName = 'smartcar_ad_dense_drop_0%d_adSize_%d' % (int(dropout * 100), ad_batch_size)
    if not isBN:
        sModelName += '_nobn'
    return sModelName, model

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-r',
                        '--restart',
                        help='restart training',
                        action='store_true')
    parser.add_argument('-bn',
                        '--batchnormalization',
                        help='batchnormalization, true or false',
                        action='store_true')
    parser.add_argument('-d',
                        '--drop',
                        type=float,
                        help='Set the dropout rate, 0~1',
                        default=0.25)
    parser.add_argument('-l',
                        '--learn_rate',
                        type=float,
                        help='Set the learning rate',
                        default=0.01)
    parser.add_argument('-m',
                        '--min_learn_rate',
                        type=float,
                        help='min learning rate, will not decay when learning rate is less than this',
                        default=0.0007)
    parser.add_argument('-c',
                        '--decay_ppm',
                        type=float,
                        default=1.5,
                        help='learning rate decay rate, in ppm: 1ppm = 1e-6')
    parser.add_argument('-b',
                        '--batch_size',
                        type=int,
                        default=25,
                        help="batch size")
    parser.add_argument('-ad',
                        '--ad_size',
                        type=int,
                        default=1,
                        # BUG FIX: help text was a copy-paste of "batch size".
                        help="number of AD samples grouped into one input")
    parser.add_argument('-a',
                        '--arch',
                        type=str,
                        default='dense',
                        help=" model architecture: can be '7x5', '5x3', 'dense'")
    args = parser.parse_args()

    # Hyper-parameters and training state.
    ad_size = args.ad_size
    lr = args.learn_rate
    M_lr = args.min_learn_rate
    batch_size = args.batch_size
    decay = args.decay_ppm / 1e6
    num_classes = 1
    # BUG FIX: Adamax's second positional argument is beta_1, not decay —
    # the original silently trained with beta_1 = decay_ppm/1e6. Pass both
    # values by keyword so the learning-rate decay actually applies.
    opt = keras.optimizers.Adamax(learning_rate=lr, decay=decay)
    minLoss = 1E5
    maxAccu = 0
    epochshPerTrain = 3
    histCnt = 20
    lossHist = [1E4] * histCnt  # recent test losses, newest at index 0
    burstOftCnt = 0             # consecutive rounds flagged as overfitting

    # Load data, reshape into (N, ad_size, 7, 1) samples and normalize.
    x_train, x_test, y_train, y_test = LoadData()

    print('x_train_shape is: ', x_train.shape)
    x_train = x_train.reshape(int(x_train.size / ad_size / 7), ad_size, 7, 1)
    x_test = x_test.reshape(int(x_test.size / ad_size / 7), ad_size, 7, 1)

    # Raw values are signed 8-bit; scale to [-1, 1) floats by dividing by 128.
    x_train = x_train.astype('int8')
    x_test = x_test.astype('int8')
    y_train = y_train.astype('int8')
    y_test = y_test.astype('int8')
    x_train = (x_train / 128).astype('float32')
    x_test = (x_test / 128).astype('float32')
    y_train = (y_train / 128).astype('float32')
    y_test = (y_test / 128).astype('float32')
    # BUG FIX: these messages said "x_test data shape" while actually printing
    # the min~max value range of the *training* data.
    print('x_train data range:%f~%f' % (min(x_train.flatten()), max(x_train.flatten())))
    print('y_train data range:%f~%f' % (min(y_train), max(y_train)))
    print('x_train_shape is: ', x_train.shape)
    print('y_train_shape is: ', y_train.shape)

    # NOTE(review): because of this negation, passing -bn DISABLES batch
    # normalization, the opposite of what the flag name suggests — behavior
    # kept for CLI compatibility; confirm the intended semantics.
    model_name, model = CreateModelDense(num_classes, args.drop,
                                         not args.batchnormalization, ad_size)

    print("Training Model is ", model_name)

    LogFile = model_name + '_log.txt'
    saveModel = '%s.h5' % (model_name)
    saveCtx = '%s_ctx.h5' % (model_name)
    i = 0  # round counter; stays 0 unless a usable log entry is found
    if not args.restart and path.exists(saveCtx):
        # Resume training: recover counters from the last complete log line.
        with open(LogFile, 'r') as lg:
            lst = lg.read().split('\n')[-2].split(',')
        for s in lst:
            # BUG FIX: later log lines start with 'Saved times=N', so the old
            # `s.find('times=') == 0` test never matched and `i` was left
            # undefined (NameError below). Search for the key anywhere.
            pos = s.find('times=')
            if pos != -1:
                i = int(s[pos + 6:])
            if s.startswith('lr='):
                lr = float(s[3:])
                # Apply one round of decay so we do not repeat the saved rate.
                lr *= (1 - decay) ** (50000 / batch_size)
            if s.startswith('accu='):
                maxAccu = float(s[5:])
            if s.startswith('loss='):
                minLoss = float(s[5:])
        print('resume training from ', lst)
        model = load_model(saveCtx)
        # BUG FIX: the original called s.close() on a plain string here
        # (AttributeError on every resume) and leaked an extra open handle.
    else:
        # Fresh run: seed the log with the initial state.
        with open(LogFile, 'w') as lg:
            lg.write('times=%d,loss=%.4f,accu=%.4f,lr=%f,decay=%f\n'
                     % (0, minLoss, maxAccu, lr, decay))
    model.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])

    while i < 120:
        print("Train %d times" % (i + 1))
        model.fit(x_train, y_train, batch_size, epochs=epochshPerTrain,
                  shuffle=True, callbacks=None)
        model.save(saveCtx)
        # Evaluate; truncate the loss to 4 decimals so log values and the
        # best-loss comparison are stable across rounds.
        loss, accuracy = model.evaluate(x_test, y_test)
        loss = int(loss * 10000) / 10000.0

        if loss < minLoss:
            minLoss = loss
            maxAccu = accuracy
            print("Saved a better model!")
            model.save(saveModel)
        # Both branches wrote the identical log line, so build it once.
        s = 'Saved times=%d,loss=%.4f,accu=%.4f,lr=%f,decay=%f' % (
            i + 1, minLoss, maxAccu, lr, decay)

        # Overfit heuristic: fraction of recent rounds with a lower loss
        # than the current one.
        oftCnt = sum(1 for past in lossHist if loss > past)
        oftRate = oftCnt / histCnt
        print('overfit rate = %d%%' % int(oftRate * 100))
        if oftRate >= 0.6:
            burstOftCnt += 1
            if burstOftCnt > 3:
                print('Overfit!')
        else:
            burstOftCnt = 0
        s = s + 'overfit rate = %d%%' % int(oftRate * 100)

        lossHistPrnCnt = min(6, histCnt)
        # BUG FIX: the original also opened a second, never-closed handle to
        # the log further down; a single `with` block replaces both.
        with open(LogFile, 'a') as fd:
            fd.write(s + '\n')
        print(s, lossHist[:lossHistPrnCnt])

        # Shift the loss history right; newest loss goes to index 0.
        lossHist = [loss] + lossHist[:-1]

        # Exponential learning-rate decay, clamped at the minimum rate.
        lr *= (1 - decay) ** (50000 / batch_size)
        if lr < M_lr:
            lr = M_lr
        model = load_model(saveCtx)
        # BUG FIX: same Adamax keyword fix as above (decay, not beta_1).
        opt = keras.optimizers.Adamax(learning_rate=lr, decay=decay)
        print('new lr_rate=%f' % (lr))
        model.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])
        i += 1
