'''
 * @ author     ：廖传港
 * @ date       ：Created in 2020/10/29 16:45
 * @ description：
 * @ modified By：
 * @ version    : 
 * @File        : model_test.py 
'''

# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 11:39:54 2020

@author: 77994
"""

from keras.models import Sequential
from keras.layers import Dense

from keras import losses

import numpy as np
# import mnist
import com.lcg.version3.trainModelByTeacher as tr

import os

import random
import sys

import struct

from keras.utils import to_categorical

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

from sklearn import metrics

from scipy import stats


# Dataset loader for the raw IDX-format MNIST files.
def load_mnist(path, kind='train'):
    """Load MNIST images and labels stored under *path*.

    Parameters
    ----------
    path : str
        Directory containing ``<kind>-labels.idx1-ubyte`` and
        ``<kind>-images.idx3-ubyte``.
    kind : str
        Split prefix, e.g. ``'train'`` or ``'t10k'``.

    Returns
    -------
    (images, labels) : tuple of np.ndarray
        ``images`` has shape (n, rows*cols), dtype uint8;
        ``labels`` has shape (n,), dtype uint8.
    """
    labels_path = os.path.join(path,
                               '%s-labels.idx1-ubyte'
                               % kind)
    images_path = os.path.join(path,
                               '%s-images.idx3-ubyte'
                               % kind)

    with open(labels_path, 'rb') as lbpath:
        # IDX label header: magic number + item count, big-endian uint32.
        magic, n = struct.unpack('>II',
                                 lbpath.read(8))
        labels = np.fromfile(lbpath,
                             dtype=np.uint8)

    with open(images_path, 'rb') as imgpath:
        # IDX image header: magic, count, rows, cols, big-endian uint32.
        magic, num, rows, cols = struct.unpack('>IIII',
                                               imgpath.read(16))
        # Use rows*cols from the header instead of hard-coding 784, so
        # any image geometry described by the file loads correctly.
        images = np.fromfile(imgpath,
                             dtype=np.uint8).reshape(len(labels), rows * cols)
    return images, labels


# --------------------------------------------------


# Build the training arrays from the raw MNIST files.
def LoadMNIST(path='D:/python/data/MNIST', count=200):
    """Load MNIST and return the first *count* samples as ``[X, Y]``.

    Parameters
    ----------
    path : str
        Directory holding the raw IDX files (see ``load_mnist``).
    count : int
        Number of leading samples to keep.

    Returns
    -------
    [X, Y] : list
        X -- float array of shape (count, 28, 28), pixels scaled to [0, 1].
        Y -- one-hot encoded labels, shape (count, n_classes).
    """
    train_images, train_labels = load_mnist(path)

    # Reshape the flat 784-pixel rows into 28x28 images; -1 infers the
    # sample count instead of hard-coding 60000, so smaller splits
    # (e.g. the 10000-sample test set) also work.
    XX = np.reshape(train_images, (-1, 28, 28))
    print("XX---------->", XX.shape)

    X = np.array(XX[0:count, :])
    X = X / 255.0  # scale uint8 pixels to [0, 1]

    Y = np.array(train_labels[0:count, ], int)
    Y = to_categorical(Y)

    return [X, Y]


# Load the leading samples: X is (200, 28, 28) floats, Y is one-hot labels.
X, Y = LoadMNIST()
print("X---------->", X.shape)
print("Y---------->", Y.shape)

# Collapse the one-hot labels into scalar targets in [0.0, 0.9]
# (class index divided by 10).  Each one-hot row has exactly one
# positive entry, so argmax is equivalent to the former per-row
# np.where loop, vectorized and without assigning a length-1 array
# into a scalar slot.
YY = np.argmax(Y, axis=1) / 10
print("YY---------->", YY.shape)

# print(YY)

# Training: build a small CNN with the project's teacher-training
# framework (tr) and fit it on the first 150 samples.
dnn=tr.DNN()
# First convolution layer: 4x4 kernels, stride 2, 10 filters.
dnn.Add(tr.CNN2D_MultiLayer(4,4,stride=2,nFilter=10))
# First max-pooling layer (2x2).
dnn.Add(tr.DMaxPooling2D(2,2))
# Second convolution layer: 4x4 kernels, stride 2, 2 filters.
dnn.Add(tr.CNN2D_MultiLayer(4,4,stride=2,nFilter=2))
# Second max-pooling layer (2x2).
dnn.Add(tr.DMaxPooling2D(2,2))


# yy=dnn.Forward(X[0])
# Flatten the feature maps into a 1-D vector for the dense layers.
dnn.Add(tr.DFlatten())

# yy=dnn.Forward(X[0])

# Hidden dense layer: 80 units, sigmoid activation.
dnn.Add(tr.DDense(80,'sigmoid'))

# dnn.Add(DDense(100,50,'relu'))

# Output layer: 10 units, one per digit class.
dnn.Add(tr.DDense(10,'relu'))


# dnn.AdjustWeightRatio(5)
# dnn.Add(DDense(100,10,'linear'))

# ratio=dnn.AdjustWeightsRatio(X,YY)


# dnn.Add(DDense(10,1,'linear'))


# Softmax + cross-entropy loss, matching the one-hot labels in Y.
dnn.Compile(lossMethod='SoftmaxCrossEntropy')

# ratio=dnn.AdjustWeightsRatio(X,YY)

# yy=dnn.BatchPredict(X)

# Fit on samples 0..149 for 500 iterations; samples 150..199 are left
# untouched here (presumably a held-out evaluation split -- confirm).
dnn.Fit(X[0:150,:], Y[0:150,:],500)

# NOTE(review): the triple-quoted string below is dead code -- an earlier
# experiment (tr.CNN2D variant plus held-out accuracy evaluation) that was
# disabled by wrapping it in a string literal.  Kept byte-for-byte; it is
# never executed.  Consider deleting it and relying on version control.
'''
dnn = tr.DNN()

dnn.Add(tr.CNN2D(6, 6, stride=2, nFilter=10))

dnn.Add(tr.DMaxPooling2D(2, 2))

# yy=dnn.Forward(X[0])

dnn.Add(tr.DFlatten())

# yy=dnn.Forward(X[0])

dnn.Add(tr.DDense(80, 'sigmoid', bFixRange=True))

# dnn.Add(DDense(100,50,'relu'))

dnn.Add(tr.DDense(10, 'relu', bFixRange=True))

# dnn.AdjustWeightRatio(5)
# dnn.Add(DDense(100,10,'linear'))

# ratio=dnn.AdjustWeightsRatio(X,YY)


# dnn.Add(DDense(10,1,'linear'))


dnn.Compile(lossMethod='SoftmaxCrossEntropy')

# ratio=dnn.AdjustWeightsRatio(X,YY)

# yy=dnn.BatchPredict(X)

dnn.Fit(X[0:150, :], Y[0:150, :], 200)

# predictY：预测Y BatchPredict批预测

predictY = dnn.BatchPredict(X[150:200, ])

predictYY = np.array([np.argmax(one_hot) for one_hot in predictY])

realY = Y[150:200, ]

realYY = np.array([np.argmax(one_hot) for one_hot in realY])

from sklearn.metrics import accuracy_score

accuracy_score(predictYY, realYY)
# realy=Y[180:200,]
#
# nx=yy[0]
#
# ny=realy[0]
# loss = np.sum(- ny * np.log(nx))

# crossE=CrossEntropy()
#
# loss=crossE.loss(yy[0],realy[0])

'''