'''
 * @ author     ：廖传港
 * @ date       ：Created in 2020/11/6 11:14
 * @ description：
 * @ modified By：
 * @ version    : 
 * @File        : train.py 
'''

from com.lcg.version11 import Loading_pictures as lp
import numpy as np
from com.lcg.version11 import model as md
# from com.lcg.version11 import dl9 as md
import joblib
from keras.utils import to_categorical

# Total number of samples (not referenced anywhere in this script — TODO confirm still needed)
count = 200
# Debug flag, 0 = off (not referenced anywhere in this script)
debug = 0


# 加载数据集
# Load the dataset.
def Loaddata(path="D:/python/data/data3/"):
    """Load the image dataset from *path* via the project loader.

    Parameters
    ----------
    path : str
        Directory containing the image data. Defaults to the original
        hard-coded location, so existing callers (``Loaddata()``) are
        unaffected.

    Returns
    -------
    tuple
        ``(X, Y)`` exactly as returned by ``lp.loaddata`` — presumably
        image arrays and labels; exact shapes depend on the loader
        (TODO confirm against ``Loading_pictures.loaddata``).
    """
    # Delegate to the project loader. One-hot encoding of Y
    # (keras.utils.to_categorical) is deliberately NOT applied here;
    # the caller decides the label format.
    return lp.loaddata(path)



if __name__ == '__main__':

    # Load the dataset (X: sample images, Y: labels)
    X,Y=Loaddata()


    print("Y:",Y.shape)
    print("X:",X.shape)

    # --------- Training run 1 --------------
    # Build the network layer by layer, train it, and persist it.
    dnn = md.DNN()
    # dnn.Add(CNN2D(3,3,2,10))
    # Convolution layer (kernel 3x3, stride 2, 10 filters — presumably; verify against md.CNN2D_MultiLayer)
    dnn.Add(md.CNN2D_MultiLayer(3, 3, 2, 10))
    # Max-pooling layer (2x2)
    dnn.Add(md.DMaxPooling2D(2, 2))
    # NOTE(review): result `yy` is unused — this intermediate forward pass on the
    # first sample presumably lets the framework infer layer input shapes
    # before more layers are added — TODO confirm against md.DNN.Forward.
    yy = dnn.Forward(X[0])
    dnn.Add(md.CNN2D_MultiLayer(3, 3, 1, 5))
    # NOTE(review): `yy2` also unused — same apparent shape-inference purpose as above.
    yy2 = dnn.Forward(X[0])
    dnn.Add(md.DFlatten())
    # yy=dnn.Forward(X[0])
    # Fully-connected layers: 10-unit relu hidden layer, single linear output.
    dnn.Add(md.DDense(10, 'relu', bFixRange=False))
    dnn.Add(md.DDense(1))
    # dnn.Add(DDense(1,'relu'))
    # yy=dnn.BatchPredict(X)
    dnn.Compile()
    # ratio=dnn.AdjustWeightByComputingRatio(X,Y)
    # print(ratio)
    # Train for 50 epochs (presumably — third positional arg of Fit; verify).
    dnn.Fit(X, Y, 50)
    # Persist the trained model to disk
    joblib.dump(dnn, "D:/python/data/model/model5.model")



    # # --------- Training run 2 (disabled) --------------
    # dnn=md.DNN()
    # dnn.Add(md.CNN2D_MultiLayer(4,4,stride=2,nFilter=10))
    # dnn.Add(md.DMaxPooling2D(2,2))
    # dnn.Add(md.CNN2D_MultiLayer(4,4,stride=2,nFilter=2))
    # dnn.Add(md.DMaxPooling2D(2,2))
    # # yy=dnn.Forward(X[0])
    # dnn.Add(md.DFlatten())
    # # yy=dnn.Forward(X[0])
    # dnn.Add(md.DDense(80,'sigmoid'))
    # # dnn.Add(DDense(100,50,'relu'))
    # dnn.Add(md.DDense(10,'relu'))
    # # dnn.AdjustWeightRatio(5)
    # # dnn.Add(DDense(100,10,'linear'))
    # # ratio=dnn.AdjustWeightsRatio(X,YY)
    # # dnn.Add(DDense(10,1,'linear'))
    # dnn.Compile(lossMethod='SoftmaxCrossEntropy')
    # # ratio=dnn.AdjustWeightsRatio(X,YY)
    # # yy=dnn.BatchPredict(X)
    # dnn.Fit(X[0:150,:], Y[0:150,:],500)
    # # Persist the trained model to disk
    # joblib.dump(dnn, "D:/python/data/model/model2.model")
    #
    # #----------------------- Training run 3 (disabled) ----------------
    # dnn = md.DNN()
    # dnn.Add(md.CNN2D(6, 6, stride=2, nFilter=10))
    # dnn.Add(md.DMaxPooling2D(3, 3))
    # dnn.Add(md.DFlatten())
    # dnn.Add(md.DDense(80, 'relu', bFixRange=True))
    # dnn.Add(md.DDense(10, 'sigmoid', bFixRange=True))
    # dnn.Compile(lossMethod='SoftmaxCrossEntropy')
    # dnn.Fit(X,Y,200)
    # joblib.dump(dnn, "D:/python/data/model/model2.model")


    # Disabled evaluation on a held-out slice (samples 150..199):
    # predictY = dnn.BatchPredict(X[150:200, ])
    # predictYY = np.array([np.argmax(one_hot) for one_hot in predictY])
    # realY = Y[150:200, ]
    # realYY = np.array([np.argmax(one_hot) for one_hot in realY])
    # from sklearn.metrics import accuracy_score
    # accuracy_score(predictYY, realYY)