# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 11:39:54 2020

@author: 77994
"""


from keras.models import Sequential
from keras.layers import Dense

from keras import losses

import numpy as np

# import mnist
import dl9 as dl

import os 

import random
import sys


import struct


from keras.utils import to_categorical

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K


from sklearn import metrics

from scipy import stats

# Load the MNIST dataset from raw idx-format files
def load_mnist(path, kind='train'):
    """Load MNIST images and labels from idx files under `path`.

    Parameters
    ----------
    path : str
        Directory containing '<kind>-labels.idx1-ubyte' and
        '<kind>-images.idx3-ubyte'.
    kind : str
        Dataset split prefix, e.g. 'train' or 't10k'.

    Returns
    -------
    (images, labels)
        images: np.ndarray of shape (n_samples, rows*cols), dtype uint8.
        labels: np.ndarray of shape (n_samples,), dtype uint8.
    """
    labels_path = os.path.join(path, '%s-labels.idx1-ubyte' % kind)
    images_path = os.path.join(path, '%s-images.idx3-ubyte' % kind)

    with open(labels_path, 'rb') as lbpath:
        # Label-file header: magic number + item count (big-endian uint32).
        # Reading it also advances the file pointer past the header.
        magic, n = struct.unpack('>II', lbpath.read(8))
        labels = np.fromfile(lbpath, dtype=np.uint8)

    with open(images_path, 'rb') as imgpath:
        # Image-file header: magic, image count, rows, cols.
        magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))
        # Use rows*cols from the header instead of hard-coding 784, so
        # idx files with other image sizes also load correctly.
        images = np.fromfile(imgpath,
                             dtype=np.uint8).reshape(len(labels), rows * cols)

    return images, labels







#--------------------------------------------------


# Load and preprocess a subset of MNIST for training
def LoadMNIST(path='D:/python/data/MNIST', count=200):
    """Load the first `count` MNIST training samples, ready for the network.

    Parameters
    ----------
    path : str
        Directory containing the MNIST idx files (default keeps the
        original hard-coded location, so existing callers are unaffected).
    count : int
        Number of samples to keep from the start of the training set.

    Returns
    -------
    [X, Y]
        X: float array of shape (count, 28, 28), pixel values in [0, 1].
        Y: one-hot label array of shape (count, n_classes).
    """
    train_images, train_labels = load_mnist(path)
    print("train_images:", train_images.shape)

    # Reshape flat 784-pixel rows into 28x28 images. Use -1 instead of a
    # hard-coded 60000 so this also works for splits of any size.
    X = np.reshape(train_images, (-1, 28, 28))[0:count] / 255.0

    Y = np.array(train_labels[0:count], int)
    Y = to_categorical(Y)

    return [X, Y]



# Load the dataset: X holds the images, Y the one-hot encoded labels.
X, Y = LoadMNIST()
print("Y========================>", Y)

# Decode each one-hot row of Y back to its digit, scaled into [0, 1).
YY = np.zeros(Y.shape[0],)
for i, one_hot in enumerate(Y):
    idx = np.where(one_hot > 0)
    YY[i] = idx[0] / 10
print("idx[0]:", idx[0])
print("YY[i]:", YY[8])  # expected 0.2 for sample 8
    
    
# print(YY)




# Build the network: two conv + max-pool stages, flatten, then two dense
# layers, finishing with a softmax cross-entropy loss.
dnn=dl.DNN()
dnn.Add(dl.CNN2D_MultiLayer(4,4,stride=2,nFilter=10))  # 4x4 kernels, stride 2, 10 filters
dnn.Add(dl.DMaxPooling2D(2,2))
dnn.Add(dl.CNN2D_MultiLayer(4,4,stride=2,nFilter=2))   # second conv stage, 2 filters
dnn.Add(dl.DMaxPooling2D(2,2))
dnn.Add(dl.DFlatten())
dnn.Add(dl.DDense(80,'sigmoid'))
dnn.Add(dl.DDense(10,'relu'))  # 10 outputs, one per digit class
dnn.Compile(lossMethod='SoftmaxCrossEntropy')
# Train on the first 150 samples; the remaining 50 are held out for
# evaluation below. NOTE(review): 500 is presumably the epoch/iteration
# count — confirm against dl.DNN.Fit's signature.
dnn.Fit(X[0:150,:], Y[0:150,:],500)







#
#
# dnn=dl.DNN()
# dnn.Add(dl.CNN2D(6,6,stride=2,nFilter=10))
# dnn.Add(dl.DMaxPooling2D(2,2))
# # yy=dnn.Forward(X[0])
# dnn.Add(dl.DFlatten())
# # yy=dnn.Forward(X[0])
# dnn.Add(dl.DDense(80,'sigmoid',bFixRange=True))
# # dnn.Add(DDense(100,50,'relu'))
# dnn.Add(dl.DDense(10,'relu',bFixRange=True))
# # dnn.AdjustWeightRatio(5)
# # dnn.Add(DDense(100,10,'linear'))
# # ratio=dnn.AdjustWeightsRatio(X,YY)
# # dnn.Add(DDense(10,1,'linear'))
# dnn.Compile(lossMethod='SoftmaxCrossEntropy')
# # ratio=dnn.AdjustWeightsRatio(X,YY)
# # yy=dnn.BatchPredict(X)
# dnn.Fit(X[0:150,:], Y[0:150,:],200)


# Evaluate on the 50 held-out samples (indices 150:200).
predictY = dnn.BatchPredict(X[150:200,])
print("predictY==============>", predictY)
# Decode score vectors / one-hot rows to class indices.
predictYY = np.array([np.argmax(one_hot) for one_hot in predictY])
realY = Y[150:200,]
print("realY-------------------->", realY)
realYY = np.array([np.argmax(one_hot) for one_hot in realY])

from sklearn.metrics import accuracy_score
# sklearn convention is (y_true, y_pred); accuracy is symmetric, so the
# values are unchanged. The previously discarded bare accuracy_score()
# call was dead code and has been removed.
print("准确数=", accuracy_score(realYY, predictYY, normalize=False))  # count of correct predictions
print("准确率=", accuracy_score(realYY, predictYY))  # fraction correct
# realy=Y[180:200,]
#
# nx=yy[0]
#
# ny=realy[0]
# loss = np.sum(- ny * np.log(nx))
 
# crossE=CrossEntropy()
#
# loss=crossE.loss(yy[0],realy[0])
# predictY：预测Y BatchPredict批预测