# -*- coding: utf-8 -*-
"""
Created on Sat Jun  6 09:57:32 2020

@author: aaa
"""
import numpy as np
import struct
import  tensorflow as tf
import time
sess = tf.compat.v1.Session()  # NOTE(review): this session is never used — the `with` block at the bottom of the file opens its own; confirm whether this line can be removed

class simplenet:
    """LeNet-style CNN for 28x28 MNIST digits, built from raw TF ops.

    Layer weights live in ``self.params``:
    conv5x5 (1->32) -> maxpool -> conv5x5 (32->64) -> maxpool
    -> fc (7*7*64 -> 1024) -> fc (1024 -> 10) -> softmax.
    """

    def __init__(self):
        # Store weights (W*) and biases (b*) for every layer.
        self.params = {}
        self.params['W1'] = self.w_init([5, 5, 1, 32])
        self.params['b1'] = self.b_init([32])
        self.params['W2'] = self.w_init([5, 5, 32, 64])
        self.params['b2'] = self.b_init([64])
        self.params['W3'] = self.w_init([7 * 7 * 64, 1024])
        self.params['b3'] = self.b_init([1024])
        self.params['W4'] = self.w_init([1024, 10])
        self.params['b4'] = self.b_init([10])

    def one_hot(self, y_labels):
        """Convert a sequence of integer labels (0-9) to an (N, 10) one-hot array.

        BUG FIX: the original ``return`` sat inside the loop body, so only
        the first row was ever filled before returning.
        """
        y_hot = np.zeros((len(y_labels), 10))
        for i in range(len(y_labels)):
            y_hot[i][y_labels[i]] = 1
        return y_hot

    def w_init(self, shape):
        # Weight init: truncated normal, stddev 0.1.
        return tf.Variable(
            tf.random.truncated_normal(shape, stddev=0.1))

    def b_init(self, shape):
        # Bias init: small positive constant to keep ReLUs active early on.
        return tf.Variable(
            tf.constant(0.1, shape=shape))

    def conv(self, x, w):
        # Convolution layer: stride 1, SAME padding keeps spatial size.
        return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

    def relu(self, in_data):
        # Activation function.
        return tf.nn.relu(in_data)

    def max_pool(self, x):
        # Pooling layer: 2x2 window, stride 2 — halves each spatial dimension.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    def softmax_(self, x):
        # Normalize logits to class probabilities.
        return tf.nn.softmax(x)

    def predict_(self, x):
        """Forward pass: returns softmax class probabilities of shape (N, 10).

        Assumes ``x`` is an NHWC tensor of shape (N, 28, 28, 1) so that two
        2x2 poolings leave a 7x7x64 feature map — TODO confirm at call sites.
        """
        w1, w2, w3, w4 = self.params['W1'], self.params['W2'], self.params['W3'], self.params['W4']
        b1, b2, b3, b4 = self.params['b1'], self.params['b2'], self.params['b3'], self.params['b4']
        # conv block 1: 28x28x1 -> 14x14x32
        pool1 = self.max_pool(self.relu(self.conv(x, w1) + b1))
        # conv block 2: 14x14x32 -> 7x7x64
        pool2 = self.max_pool(self.relu(self.conv(pool1, w2) + b2))
        # fc1: flatten, project to 1024 units
        f_pool2 = tf.reshape(pool2, [-1, 7 * 7 * 64])
        relu_fc1 = self.relu(tf.matmul(f_pool2, w3) + b3)
        # fc2 + softmax: 1024 -> 10 class probabilities
        return self.softmax_(tf.matmul(relu_fc1, w4) + b4)

    def num_gradient(self, f, x):
        """Numerical gradient of scalar function ``f`` at ``x`` (central difference).

        Iterates every element of ``x`` (np.nditer with multi_index gives the
        element's index tuple), perturbs it by +/-h, and restores it afterwards.
        """
        h = 1e-4
        grad = np.zeros_like(x)
        it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            tmp_val = x[idx]
            x[idx] = float(tmp_val) + h
            fxh1 = f(x)
            x[idx] = tmp_val - h
            fxh2 = f(x)
            grad[idx] = (fxh1 - fxh2) / (2 * h)
            x[idx] = tmp_val  # restore the original value
            it.iternext()     # without this the iterator would never advance
        return grad

    def gradient_des(self, f, init_x, lr=0.01, step=1000):
        """Plain gradient descent: ``step`` updates of ``x -= lr * grad``.

        BUG FIX: the original ``return`` sat inside the loop, so exactly one
        update ran regardless of ``step``.
        Note: ``init_x`` is updated in place (``-=``).
        """
        x = init_x
        for i in range(step):
            x -= lr * self.num_gradient(f, x)  # lr is the learning rate
        return x

    def loss(self, x, y_real):
        """Mean cross-entropy between predictions for ``x`` and ``y_real``.

        BUG FIXES: the original referenced the module-level global
        ``y_labels`` instead of its ``y_real`` parameter, and it fed the
        softmax output of predict_ into softmax_cross_entropy_with_logits,
        applying softmax twice.  predict_ returns probabilities, so compute
        the cross-entropy directly (epsilon guards against log(0)).
        Assumes ``y_real`` is one-hot encoded (see one_hot) — TODO confirm.
        """
        p = self.predict_(x)
        return tf.reduce_mean(-tf.reduce_sum(y_real * tf.math.log(p + 1e-10), axis=1))

    def accuracy(self, x, y_labels):
        """Fraction of samples whose argmax prediction equals the integer label.

        BUG FIX: the original read ``self.p``, an attribute never assigned
        anywhere in the class.
        NOTE(review): predict_ returns a TF tensor; np.argmax on it only
        works when the tensor is concrete (eager mode) — confirm execution
        mode before relying on this.
        """
        p = self.predict_(x)
        idx_max = np.argmax(p, axis=1)
        return np.sum(idx_max == y_labels) / float(len(x))
    
net = simplenet()    # instantiate the network (allocates all tf.Variable weights)
#载入数据集
def load_data(data_dir='E:\\文件\\vision\\mnist_dataset'):
    """Load the raw MNIST training images and labels from IDX files.

    Parameters
    ----------
    data_dir : str
        Directory containing 'train-images.idx3-ubyte' and
        'train-labels.idx1-ubyte'.  Defaults to the original hard-coded
        path, so existing callers are unaffected.

    Returns
    -------
    (images, labels) : tuple of np.uint8 arrays.  ``images`` is the flat
        pixel stream (N*rows*cols,); the caller reshapes it.
    """
    import os  # local import: keeps the module-level import block untouched
    with open(os.path.join(data_dir, 'train-images.idx3-ubyte'), 'rb') as x:
        # IDX3 header: magic, item count, rows, cols (big-endian uint32).
        magic, num, rows, cols = struct.unpack('>IIII', x.read(16))
        images = np.fromfile(x, dtype=np.uint8)
    with open(os.path.join(data_dir, 'train-labels.idx1-ubyte'), 'rb') as y:
        # IDX1 header: magic, item count.
        magic, n = struct.unpack('>II', y.read(8))
        labels = np.fromfile(y, dtype=np.uint8)
    return images, labels
t_images,t_labels = load_data()

# Flatten raw pixel bytes to one row per image: (60000, 784) float32.
x = t_images.astype(np.float32).reshape(-1,28*28)   #(60000,784)

# Keep only the first 100 samples (small subset for a quick run).
x = tf.slice(x,[0,0],[100,-1])

# NHWC layout (batch, height, width, channels) expected by conv2d.
x = tf.reshape(x,[-1,28,28,1])
y_labels = t_labels.astype(np.int64)




# In[ ]:


#创建会话
# Create the session and run the graph.
# BUG FIXES: tf.Session and tf.global_variables_initializer do not exist in
# TF 2.x (the file already uses tf.compat.v1.Session at the top), and
# time.clock() was removed in Python 3.8 — use time.perf_counter().
# x.shape is a static TensorShape, not a runnable tensor, so evaluate
# tf.shape(x) instead.
# NOTE(review): graph-style code under TF 2.x also needs
# tf.compat.v1.disable_eager_execution() — confirm the intended TF version.
with tf.compat.v1.Session() as sess:
    start_time = time.perf_counter()
    sess.run(tf.compat.v1.global_variables_initializer())  # initialize variables

    print(sess.run(tf.shape(x)))

    end_time = time.perf_counter()
    print('Running time:%s Second'%(end_time-start_time))  # print elapsed wall time