# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 11:39:54 2020

@author: 77994
"""
import numpy as np
import os
import struct
from keras.utils import to_categorical

from com.lcg.version5 import dl9



def load_mnist(path, kind='train'):
    """Load MNIST images and labels from `path`.

    Parameters
    ----------
    path : str
        Directory containing the ``<kind>-labels.idx1-ubyte`` and
        ``<kind>-images.idx3-ubyte`` files (note: dots, not dashes,
        before the ``idx`` part — matches this dataset copy's naming).
    kind : str
        Dataset split prefix, e.g. ``'train'`` or ``'t10k'``.

    Returns
    -------
    (images, labels) : tuple of np.ndarray
        ``images`` is uint8 with shape (n, rows*cols) — one flattened
        image per row; ``labels`` is uint8 with shape (n,).
    """
    labels_path = os.path.join(path,
                               '%s-labels.idx1-ubyte'
                               % kind)
    images_path = os.path.join(path,
                               '%s-images.idx3-ubyte'
                               % kind)

    with open(labels_path, 'rb') as lbpath:
        # IDX1 header: magic number and item count, big-endian uint32s.
        magic, n = struct.unpack('>II',
                                 lbpath.read(8))
        labels = np.fromfile(lbpath,
                             dtype=np.uint8)

    with open(images_path, 'rb') as imgpath:
        # IDX3 header: magic, image count, rows, cols, big-endian uint32s.
        magic, num, rows, cols = struct.unpack('>IIII',
                                               imgpath.read(16))
        # Use the header dimensions instead of a hard-coded 784 so that
        # any IDX3 image size loads correctly (28*28 == 784 for MNIST).
        images = np.fromfile(imgpath,
                             dtype=np.uint8).reshape(len(labels), rows * cols)

    return images, labels






#--------------------------------------------------




def LoadMNIST(path='C:/Users/LCG/Desktop/MNIST', count=200):
    """Load the first `count` MNIST training samples, normalised.

    Parameters
    ----------
    path : str
        Directory holding the MNIST idx files (see `load_mnist`).
        Defaults to the original hard-coded location for backward
        compatibility.
    count : int
        Number of leading samples to keep (default 200, as before).

    Returns
    -------
    [X, Y] : list
        X : float array, shape (count, 28, 28), pixel values in [0, 1].
        Y : one-hot label array, shape (count, n_classes).
    """
    train_images, train_labels = load_mnist(path)

    # Restore the flat 784-pixel rows to 28x28 images; -1 lets any
    # sample count work (previously hard-coded to 60000).
    XX = np.reshape(train_images, (-1, 28, 28))

    # Keep only the first `count` samples and scale uint8 pixels to [0, 1].
    X = np.array(XX[0:count, :])
    X = X / 255.0

    Y = np.array(train_labels[0:count], int)
    Y = to_categorical(Y)

    return [X, Y]




X, Y = LoadMNIST()
print("X", X.shape)
print("Y", Y.shape)
print("X", X)
print("Y", Y)

# Collapse each one-hot row of Y into a single scalar target: the class
# index divided by 10 (digit d becomes d/10).
YY = np.zeros(Y.shape[0],)

for row in range(Y.shape[0]):
    YY[row] = np.argmax(Y[row, :]) / 10

print("YY", YY.shape)
print("YY", YY)



# Build the convolutional network using the project-local dl9 framework.
# NOTE(review): layer semantics below are inferred from the dl9 call
# signatures only — confirm against com.lcg.version5.dl9.
dnn=dl9.DNN()

# Presumably a 4x4 convolution with stride 2 and 10 filters — TODO confirm.
dnn.Add(dl9.CNN2D_MultiLayer(4,4,stride=2,nFilter=10))

# Presumably 2x2 max pooling — TODO confirm.
dnn.Add(dl9.DMaxPooling2D(2,2))


# Second conv stage: 4x4, stride 2, 2 filters.
dnn.Add(dl9.CNN2D_MultiLayer(4,4,stride=2,nFilter=2))

dnn.Add(dl9.DMaxPooling2D(2,2))


# yy=dnn.Forward(X[0])

# Flatten the feature maps into a vector for the dense layers.
dnn.Add(dl9.DFlatten())

# yy=dnn.Forward(X[0])

dnn.Add(dl9.DDense(80,'sigmoid'))

# dnn.Add(DDense(100,50,'relu'))

# 10-way output layer (one unit per digit class).
dnn.Add(dl9.DDense(10,'relu'))


# dnn.AdjustWeightRatio(5)
# dnn.Add(DDense(100,10,'linear'))

# ratio=dnn.AdjustWeightsRatio(X,YY)


# dnn.Add(DDense(10,1,'linear'))


dnn.Compile(lossMethod='SoftmaxCrossEntropy')

# ratio=dnn.AdjustWeightsRatio(X,YY)  # adjust the corrective weight ratio

# yy=dnn.BatchPredict(X)

# Train on the first 150 samples; the trailing 500 is presumably the
# epoch/iteration count — TODO confirm against dl9.DNN.Fit.
dnn.Fit(X[0:150,:], Y[0:150,:],500)





# NOTE(review): the triple-quoted string below is a disabled alternative
# architecture (single CNN2D layer, bFixRange dense layers) kept for
# reference; it is a bare string literal and is never executed.
'''

dnn=dl9.DNN()

dnn.Add(dl9.CNN2D(6,6,stride=2,nFilter=10))

dnn.Add(dl9.DMaxPooling2D(2,2))


# yy=dnn.Forward(X[0])

dnn.Add(dl9.DFlatten())

# yy=dnn.Forward(X[0])

dnn.Add(dl9.DDense(80,'sigmoid',bFixRange=True))

# dnn.Add(DDense(100,50,'relu'))

dnn.Add(dl9.DDense(10,'relu',bFixRange=True))


# dnn.AdjustWeightRatio(5)
# dnn.Add(DDense(100,10,'linear'))

# ratio=dnn.AdjustWeightsRatio(X,YY)


# dnn.Add(DDense(10,1,'linear'))


dnn.Compile(lossMethod='SoftmaxCrossEntropy')

# ratio=dnn.AdjustWeightsRatio(X,YY)

# yy=dnn.BatchPredict(X)

dnn.Fit(X[0:150,:], Y[0:150,:],200)


'''


# Evaluate on the held-out tail of the dataset (samples 150-199).
predictY = dnn.BatchPredict(X[150:200,])


def _to_class_ids(one_hot_rows):
    # Each row's argmax is its predicted / true class index.
    return np.array([np.argmax(row) for row in one_hot_rows])


predictYY = _to_class_ids(predictY)

realY = Y[150:200,]

realYY = _to_class_ids(realY)


from sklearn.metrics import accuracy_score


print(accuracy_score(predictYY, realYY))

# realy=Y[180:200,]
#
# nx=yy[0]
#
# ny=realy[0]
# loss = np.sum(- ny * np.log(nx))
#
# crossE=dnn.CrossEntropy()
#
# loss=crossE.loss(yy[0],realy[0])



