# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 09:28:19 2018

@author: minus
"""

# -*- coding: utf-8 -*-
"""
Created on Sat Jan  6 16:40:50 2018

@author: Minus Ma
"""
import numpy as np
import time

clipmin=-1e6  # lower bound of the activation clamp applied after dense/conv layers
clipmax=1e6   # upper bound of that clamp
epsilon=1e-7  # probability floor/ceiling so log() never sees exactly 0 or 1

f32=np.float32  # shorthand: every buffer in this framework is float32

class global_variable:
    """Process-wide training configuration, read as class attributes by
    every layer (the class is used as a namespace, never instantiated)."""
    clip=(-0.01,0.01)           # hard weight-clipping range used by Base_layer.update_weights (WGAN-style)
    isTraining=False            # training-loop flag; batch normalization switches on it
    learning_rate_scale=0.002   # global learning rate read by update_weights
    decay_steps=20000           # decay schedule parameters (not yet applied; see updata)
    decay_rate=0.5

    @staticmethod
    def updata(train_step):
        """Placeholder hook for learning-rate decay.

        Bug fix: the original ``def updata(train_step)`` had no ``self``,
        so calling it on an instance bound the instance to ``train_step``
        (and raised TypeError when an argument was also passed).
        ``@staticmethod`` makes class- and instance-level calls behave
        identically.  The 'updata' (sic) name is kept for compatibility.
        """
        pass


class _Loss_Layer():
    def __init__(self,Predict,layer_name='Loss_layer'):
        self.pred_shape=Predict.output_shape
        self.loss=0
        self.accuracy=0
        
        self.head_nodes=[Predict]
        self.tail_nodes=[]  
        Predict.tail_nodes.append(self)
        
        self.layer_name=layer_name
    
    def initialization(self,batch_size):
        assert type(batch_size)==type(1),'batch_size必须是int类型'
        assert batch_size>0,'batch_size必须大于0'
        
        self.pred_shape=(batch_size,)+self.pred_shape[1:]
        
        Predict=self.head_nodes[0]
        
        self.pred=Predict.output
        self.label=np.zeros_like(self.pred)
        self.gradient=Predict.d_out   
 
        
    def set_label(self,label):
        self.label[...]=label.astype(f32)
    
    def clear_d_out(self): pass
    def forward_propagation(self): pass    
    def backward_propagation(self): pass

    def setTrainable(self,trainable):
        return self

    def printInfo(self,other_info=''):

        print('Layer: '+self.layer_name,',Input shape:'+str(self.pred_shape)\
              ,other_info)
    

class Input():
    """Source node of the graph: holds the data batch fed to the network."""

    def __init__(self,input_shape,layer_name='Input'):
        """``input_shape`` is the per-sample shape, e.g. ``(28,28,1)``."""
        self.tail_nodes=[]
        self.head_nodes=[]
        self.layer_name=layer_name
        # isinstance is the idiomatic check (and, unlike the previous
        # type(...)==type((10,)) comparison, accepts tuple subclasses)
        assert isinstance(input_shape,tuple),'input_shape必须是元祖类型，如(10,) (1,2,3)'
        self.input_shape=(None,)+tuple(input_shape)
        self.output_shape=self.input_shape

    def initialization(self,batch_size):
        """Fix the batch dimension and allocate the data buffers."""
        assert isinstance(batch_size,int),'batch_size必须是int类型'
        assert batch_size>0,'batch_size必须大于0'

        self.input_shape=(batch_size,)+self.input_shape[1:]
        self.output_shape=(batch_size,)+self.output_shape[1:]

        self.input=np.zeros(self.input_shape,f32)
        self.output=self.input   # output aliases the input buffer
        self.d_in=None           # nothing upstream of an input node
        self.d_out=np.zeros_like(self.output)

    def set_data(self,data):
        # copy into the shared buffer so downstream aliases see the batch
        self.output[...]=data.astype(f32)

    def clear_d_out(self):
        self.d_out[...]=0

    # input nodes do no computation; the hooks exist for graph traversal
    def forward_propagation(self): pass
    def backward_propagation(self): pass

    def setTrainable(self,trainable):
        return self   # inputs have no weights; fluent no-op

    def __str__(self):
       return 'Layer: %s'%self.layer_name+',Shape:'+str(self.output_shape)

    def printInfo(self,other_info=''):
       # NOTE(review): other_info is ignored here, unlike the other layers
       print(self)

class Base_layer():
    """Common base class for all layers with (optional) weights.

    Subclasses allocate W/b (shape ``(0,)`` means "no weights"), set
    input_shape/output_shape, then call this __init__ to wire the layer
    behind its input node ``X`` and set up gradient/momentum buffers.
    """
    def __init__(self,X,W,b,layer_name='base layer'):
        self.layer_name=layer_name
        
        self.W=W
        self.b=b
        
        # gradient buffers, same shapes as the parameters
        self.d_W=np.zeros_like(self.W)
        self.d_b=np.zeros_like(self.b)
        
        # momentum accumulators used by update_weights()
        self.W_increment=np.zeros_like(self.W)
        self.b_increment=np.zeros_like(self.b)
        
        self.trainable=True
        self.learning_rate=0.001  # NOTE(review): never read; update_weights uses global_variable.learning_rate_scale
        self.momentum=0.5
        self.regulary_rate=0.000001  # L2 weight-decay coefficient
        
        # graph wiring: X feeds this layer
        self.head_nodes=[X]
        self.tail_nodes=[]  
        X.tail_nodes.append(self)
        
        self.weights_initialization()
        
    def initialization(self,batch_size):
        """Fix the batch dimension and alias the input node's buffers."""
        assert type(batch_size)==type(1),'batch_size必须是int类型'
        assert batch_size>0,'batch_size必须大于0'
        
        self.input_shape=(batch_size,)+self.input_shape[1:]
        self.output_shape=(batch_size,)+self.output_shape[1:]
        
        X=self.head_nodes[0]
        
        # input/d_in are shared (aliased) with the upstream layer's buffers
        self.input=X.output
        self.output=np.zeros(self.output_shape,f32)
        
        self.d_in=X.d_out
        self.d_out=np.zeros_like(self.output)
   
    def weights_initialization(self):
        """He-style init: N(0, sqrt(2/fan_in)), clipped to [-10, 10]."""
        dim=1
        for d in self.input_shape[1:]:
            dim*=d
        if self.W.shape !=(0,):
            stddev=np.sqrt(2/dim)
            s=np.random.normal(0,stddev,self.W.size).astype(f32)
            self.W[...]=np.clip(s.reshape(self.W.shape),-10,10)
        
        if self.b.shape !=(0,):
            self.b[...]=np.zeros_like(self.b)
    
    def clear_d_out(self):
        # called once per step before back-prop accumulates into d_out
        self.d_out[...]=0
        
    def reset_cache(self):
        """Zero every cached activation/gradient buffer in place."""
        self.input[...]=0
        self.output[...]=0
        self.d_in[...]=0
        self.d_out[...]=0
        self.d_W[...]=0
        self.d_b[...]=0
        
    def setTrainable(self,trainable):
        self.trainable=trainable
        return self  # fluent: allows layer.setTrainable(False) chaining

    def update_weights(self):
        """SGD with momentum + L2 weight decay, then hard weight clipping.

        NOTE(review): weights are always clipped to global_variable.clip
        (default +/-0.01, WGAN-critic style) -- this constrains *every*
        layer in the net; confirm that is intended for non-critic layers.
        """
        lr=global_variable.learning_rate_scale
        
        if self.trainable:
            self.W_increment=self.momentum*self.W_increment-lr*(self.d_W+self.regulary_rate*self.W)
            self.b_increment=self.momentum*self.b_increment-lr*self.d_b
            
            self.W += self.W_increment
            self.b += self.b_increment
            
            cmin,cmax=global_variable.clip
            self.W[...]=np.clip(self.W,cmin,cmax)
    
    def printInfo(self,other_info=''):
        # weightless layers (W.shape == (0,)) omit the weight-shape column
        if self.W.shape !=(0,):
            print('Layer: '+self.layer_name,',Input shape:'+str(self.input_shape),\
                  ',Output shape:'+str(self.output_shape),\
                  ',Weight shape:'+str(self.W.shape),other_info)
        else:
            print('Layer: '+self.layer_name,',Input shape:'+str(self.input_shape),\
                  ',Output shape:'+str(self.output_shape),other_info)
            
class Merge_layer():
    """Combines the outputs of several parent layers, either element-wise
    ('add', the default) or by concatenation along ``merge_axis``
    ('concat', with 'c'/'C' accepted as aliases).
    """
    def __init__(self,X_group,merge_type='add',merge_axis=-1,layer_name='Merge'):
        self.layer_name=layer_name
        self.merge_type=merge_type
        self.merge_axis=merge_axis
        # Bug fix: __init__ treated 'c'/'C' as concat aliases, but
        # forward/backward only tested merge_type=='concat', so the aliases
        # allocated a concat-sized buffer and then tried to *add* into it
        # (shape mismatch).  Decide once here and use it everywhere.
        self._is_concat=merge_type in ('concat','c','C')

        self.head_nodes=X_group
        self.tail_nodes=[]
        for X in X_group:
            X.tail_nodes.append(self)

        # NOTE(review): reads X.input_shape rather than X.output_shape; the
        # values are overwritten from the real arrays in initialization(),
        # so this only affects shape reporting before the net is built.
        self.input_shape=[s.input_shape for s in X_group]
        self.output_shape=(None,)

        if self._is_concat:
            # concat output: same shape except the merge axis, which is the
            # sum of the parents' sizes on that axis
            new_dim=sum(x[self.merge_axis] for x in self.input_shape)
            new_shape_list=list(self.input_shape[0])
            new_shape_list[self.merge_axis]=new_dim
            self.output_shape=tuple(new_shape_list)
        else:
            self.output_shape=self.input_shape[0]

    def initialization(self,batch_size):
        """Alias every parent's output/d_out buffers and allocate output."""
        assert type(batch_size)==type(1),'batch_size必须是int类型'
        assert batch_size>0,'batch_size必须大于0'

        self.input=[]
        self.d_in=[]
        for id,X in enumerate(self.head_nodes):
            try:
                self.input.append(X.output)
                self.d_in.append(X.d_out)

                self.input_shape[id]=self.input[id].shape
            except Exception as e:
                print('警告，此输入层',X.layer_name,'未连接到Merge层',self.layer_name)

        if self._is_concat:
            self.output=np.concatenate(self.input,axis=self.merge_axis)
            self.d_out=np.zeros_like(self.output)
        else:
            self.output=np.zeros_like(self.input[0])
            self.d_out=np.zeros_like(self.output)

        self.output_shape=self.output.shape

    def forward_propagation(self):
        if self._is_concat:
            self.output[...]=np.concatenate(self.input,axis=self.merge_axis)
        else:
            # element-wise sum of all parents
            self.output[...]=0
            for array in self.input:
                self.output+=array

    def backward_propagation(self):
        if self._is_concat:
            # split d_out back into per-parent slices along the merge axis
            idx_start=idx_end=0
            swap_dout=np.swapaxes(self.d_out,0,self.merge_axis)

            for i in range(len(self.d_in)):
                idx_end=idx_start+self.d_in[i].shape[self.merge_axis]
                sub_swap_dout=swap_dout[idx_start:idx_end]
                self.d_in[i]+=np.swapaxes(sub_swap_dout,0,self.merge_axis)
                idx_start=idx_end
        else:
            # addition routes the full gradient to every parent unchanged
            for i in range(len(self.d_in)):
                self.d_in[i]+=self.d_out

    def printInfo(self,other_info=''):
        print('Layer: '+self.layer_name,',Input shape:'+str(self.input_shape[0]),\
                  ',Output shape:'+str(self.output_shape),other_info)

    def clear_d_out(self):
        self.d_out[...]=0

    def setTrainable(self,trainable):
        return self   # nothing trainable; fluent no-op

    def reset_cache(self):
        for i in range(len(self.input)):
            self.input[i][...]=0
            self.d_in[i][...]=0

        self.output[...]=0
        self.d_out[...]=0


class Dense_layer_py(Base_layer):
    """Fully-connected layer, reference (per-sample loop) implementation.

    W has shape (n_hidden, in_dim); each sample is transformed as
    ``W @ x + b``.  See Dense_layer_py2 for the vectorized version.
    """
    def __init__(self,X,n_hidden):
        self.input_shape=X.output_shape
        self.output_shape=(self.input_shape[0],n_hidden)
        
        W=np.zeros((n_hidden,self.input_shape[1]),f32)
        b=np.zeros((n_hidden),f32)
        super(Dense_layer_py,self).__init__(X,W,b,'Dense')
        
        
    def forward_propagation(self):
        
        # y_i = W x_i + b, one sample at a time
        for i in range(self.output.shape[0]):
            self.output[i][...]=np.dot(self.W,self.input[i])+self.b

        # clamp to keep activations finite downstream
        self.output[...]=np.clip(self.output,clipmin,clipmax)
        
    def backward_propagation(self):
        # d_W/d_b are recomputed from scratch each step
        self.d_W[...]=0
        self.d_b[...]=0
        for i in range(self.output.shape[0]):
            self.d_in[i]+=np.dot(self.W.T,self.d_out[i])   # gradient w.r.t. input
            self.d_W += np.outer(self.d_out[i],self.input[i])
            self.d_b += self.d_out[i]
        
        self.update_weights()
            
     
class Dense_layer_py2(Base_layer):
    """Vectorized fully-connected layer: ``output = input . W^T + b``."""

    def __init__(self,X,n_hidden):
        assert len(X.output_shape)==2,'全连接层的input_shape必须是两维，如(batch_szie,10)'
        self.input_shape=X.output_shape
        self.output_shape=(self.input_shape[0],n_hidden)

        weight=np.zeros((n_hidden,self.input_shape[1]),f32)
        bias=np.zeros((n_hidden),f32)
        super(Dense_layer_py2,self).__init__(X,weight,bias,'Dense2')

    def forward_propagation(self):
        # single matmul over the whole batch, then clamp to keep values finite
        affine=self.input.dot(self.W.T)+self.b
        self.output[...]=np.clip(affine,clipmin,clipmax)

    def backward_propagation(self):
        self.d_in+=self.d_out.dot(self.W)           # gradient w.r.t. input
        self.d_W[...]=self.d_out.T.dot(self.input)  # gradient w.r.t. weights
        self.d_b[...]=self.d_out.sum(0)             # gradient w.r.t. bias

        self.update_weights()
            

            

class MaxPool_layer_py(Base_layer):
    """Max pooling, reference quadruple-loop implementation (NHWC layout).

    Records the in-window argmax position of every output element so the
    backward pass can route the gradient to the winning input only.
    """
    def __init__(self,X,k_size,stride):
        self.input_shape=X.output_shape
        # 'valid'-style output size
        out_height=(self.input_shape[1]-k_size[0])//stride[0]+1
        out_width=(self.input_shape[2]-k_size[1])//stride[1]+1            
        self.output_shape=(self.input_shape[0],out_height,out_width,self.input_shape[3])
        
        W=b=np.zeros(0)   # pooling has no trainable parameters
        super(MaxPool_layer_py,self).__init__(X,W,b,'MaxPool')
        
        self.k_size=k_size
        self.stride=stride
     
    def initialization(self,batch_size):
        super(MaxPool_layer_py,self).initialization(batch_size)
        # (row, col) of each window's argmax; uint8 assumes kernel dims < 256
        self.max_index=np.zeros(self.output_shape+(2,),np.uint8)

        
    def forward_propagation(self):
        hs,ws=self.stride
        hk,wk=self.k_size

        for i in range(self.output.shape[0]):  
            for h in range(self.output.shape[1]):
                for w in range(self.output.shape[2]):
                     for c in range(self.output.shape[3]):
                         # flat argmax inside the hk x wk window, then split
                         # into (row, col) offsets for the backward pass
                         idx=self.input[i,h*hs:h*hs+hk,w*ws:w*ws+wk,c].argmax()
                         idx_h=self.max_index[i,h,w,c,0]=idx//wk
                         idx_w=self.max_index[i,h,w,c,1]=idx%wk
                         self.output[i,h,w,c]=self.input[i,h*hs+idx_h,w*ws+idx_w,c]
        
    def backward_propagation(self):
        hs,ws=self.stride

        # each output gradient flows back only to the input element that
        # won the corresponding max in the forward pass
        for i in range(self.output.shape[0]):
            for h in range(self.output.shape[1]):
                for w in range(self.output.shape[2]):
                    for c in range(self.output.shape[3]):
                        idx_h=self.max_index[i,h,w,c,0]
                        idx_w=self.max_index[i,h,w,c,1]
                        self.d_in[i,h*hs+idx_h,w*ws+idx_w,c]+=self.d_out[i,h,w,c]


class MaxPool_layer_py2(Base_layer):
    """Max pooling vectorized over the batch and channel axes (NHWC)."""
    def __init__(self,X,k_size,stride):
        self.input_shape=X.output_shape
        # 'valid'-style output size
        out_height=(self.input_shape[1]-k_size[0])//stride[0]+1
        out_width=(self.input_shape[2]-k_size[1])//stride[1]+1            
        self.output_shape=(self.input_shape[0],out_height,out_width,self.input_shape[3])
        
        W=b=np.zeros(0)   # pooling has no trainable parameters
        super(MaxPool_layer_py2,self).__init__(X,W,b,'MaxPool222')
        
        self.k_size=k_size
        self.stride=stride
        
    def initialization(self,batch_size):
        super(MaxPool_layer_py2,self).initialization(batch_size)        
        # NOTE(review): allocated but never read -- forward/backward below
        # recompute the window argmax instead of caching it here; confirm
        # it can be removed.
        self.max_index=np.zeros((self.output_shape[0],self.output_shape[3],self.output_shape[1]*self.output_shape[2]),np.int32)

        
    def forward_propagation(self):
        hs,ws=self.stride
        hk,wk=self.k_size

        N,Hi,Wi,C=self.input.shape
        for h in range(self.output.shape[1]):
            for w in range(self.output.shape[2]):
             
                 # flatten each hk x wk window and take its max per (n, c)
                 falt=self.input[:,h*hs:h*hs+hk,w*ws:w*ws+wk,:].reshape((N,-1,C))
                 falt=falt.max(1)
                 self.output[:,h,w,:]=falt
        
    def backward_propagation(self):
        hs,ws=self.stride
        hk,wk=self.k_size
        
        N,Hi,Wi,C=self.input.shape
        
        tem=np.zeros((N,hk*wk,C),f32)
        
        # recomputes the argmax from self.input, so the input buffer must
        # be unchanged since forward_propagation
        for h in range(self.output.shape[1]):
            for w in range(self.output.shape[2]):
             
                 falt=self.input[:,h*hs:h*hs+hk,w*ws:w*ws+wk,:].reshape((N,-1,C))
                 falt_arg=falt.argmax(1)
                 
                 # route the gradient only to the winning slot of each window
                 for arg in range(hk*wk):
                     tem[:,arg,:]=self.d_out[:,h,w,:]*(falt_arg==arg)
                     
                 self.d_in[:,h*hs:h*hs+hk,w*ws:w*ws+wk,:]+=tem.reshape(N,hk,wk,C)
                 
                 
class UpSampling_layer_py(Base_layer):
    """Nearest-neighbour upsampling: repeats every pixel ``stride[0]`` x
    ``stride[1]`` times (NHWC layout)."""
    def __init__(self,X,stride):
        self.input_shape=X.output_shape
        out_height=self.input_shape[1]*stride[0]
        out_width=self.input_shape[2]*stride[1]           
        self.output_shape=(self.input_shape[0],out_height,out_width,self.input_shape[3])
        
        W=b=np.zeros(0)   # no trainable parameters
        super(UpSampling_layer_py,self).__init__(X,W,b,'UpSampling')
 
        self.stride=stride
        
    def initialization(self,batch_size):
        super(UpSampling_layer_py,self).initialization(batch_size)        

    def forward_propagation(self):
        hs,ws=self.stride
        N,Hi,Wi,C=self.input.shape
        # scratch: one copy of the pixel per output cell in the block
        tem=np.zeros((hs*ws,N,C),f32)

        for h in range(self.input.shape[1]):
            for w in range(self.input.shape[2]):
                tem[...]=0
                tem+=self.input[:,h,w,:]   # broadcast pixel into all hs*ws slots
                self.output[:,h*hs:(h+1)*hs,w*ws:(w+1)*ws,:]=\
                        tem.reshape((hs,ws,N,C)).transpose((2,0,1,3))
        
    def backward_propagation(self):
        hs,ws=self.stride

        # each input pixel receives the summed gradient of the hs*ws
        # output cells it was copied to
        for h in range(self.d_in.shape[1]):
            for w in range(self.d_in.shape[2]):
                self.d_in[:,h,w,:]+=self.d_out[:,h*hs:(h+1)*hs,w*ws:(w+1)*ws,:].sum(axis=(1,2))

class AvrPooling_layer_py(Base_layer):
    """Average pooling whose window equals the stride (non-overlapping).

    Output dims use ceil division, so a trailing partial window is
    allowed.  NOTE(review): a partial edge window is still divided by the
    full hs*ws, slightly under-weighting edge pixels -- confirm intended.
    """
    def __init__(self,X,stride):
        self.input_shape=X.output_shape
        out_height=(self.input_shape[1]-1)//stride[0]+1
        out_width=(self.input_shape[2]-1)//stride[1]+1       
        self.output_shape=(self.input_shape[0],out_height,out_width,self.input_shape[3])
        
        W=b=np.zeros(0)   # no trainable parameters
        super(AvrPooling_layer_py,self).__init__(X,W,b,'AvrPooling')
 
        self.stride=stride
        
    def initialization(self,batch_size):
        super(AvrPooling_layer_py,self).initialization(batch_size)        

    def forward_propagation(self):
        hs,ws=self.stride
        for h in range(self.output.shape[1]):
            for w in range(self.output.shape[2]):
                # mean over the hs x ws window (batch & channels vectorized)
                self.output[:,h,w,:]=(1.0/hs/ws)*self.input[:,h*hs:(h+1)*hs,w*ws:(w+1)*ws,:].sum(axis=(1,2))
        
    def backward_propagation(self):
        
        hs,ws=self.stride
        N,_,_,C=self.d_out.shape
        tem=np.zeros((hs*ws,N,C),f32)

        # spread each output gradient uniformly over its source window
        for h in range(self.d_out.shape[1]):
            for w in range(self.d_out.shape[2]):
                tem[...]=0
                tem+=self.d_out[:,h,w,:]
                self.d_in[:,h*hs:(h+1)*hs,w*ws:(w+1)*ws,:]+=\
                        (1.0/hs/ws)*tem.reshape((hs,ws,N,C)).transpose((2,0,1,3))
         
        
class Conv_layer_py(Base_layer):
    """2-D convolution (NHWC), reference quadruple-loop implementation."""
    def __init__(self,X,n_filters,k_size,stride=(1,1),padding='valid'):  
        # 'same' padding keeps the spatial size for stride 1
        if padding in ['same','SAME','Same']:
            pad=(k_size[0]//2,k_size[1]//2)
        else:
            pad=(0,0)
            
        self.input_shape=X.output_shape     
        out_height=(self.input_shape[1]-k_size[0]+2*pad[0])//stride[0]+1
        out_width=(self.input_shape[2]-k_size[1]+2*pad[1])//stride[1]+1        
        self.output_shape=(self.input_shape[0],out_height,out_width,n_filters)
        
        # one kernel per filter: (F, kh, kw, C_in)
        W=np.zeros((n_filters,k_size[0],k_size[1],self.input_shape[3]),f32)
        b=np.zeros((n_filters),f32)
        super(Conv_layer_py,self).__init__(X,W,b,'Conv')
        
        self.n_filters=n_filters
        self.k_size=k_size
        self.stride=stride
        self.pad=pad
        
    def initialization(self,batch_size):
        super(Conv_layer_py,self).initialization(batch_size)  
        
        # zero-padded copies of the input and of its gradient
        N,H,W1,C=self.input_shape
        self.input_pad=np.zeros((N,H+2*self.pad[0],W1+2*self.pad[1],C),f32)
        self.d_in_pad=np.zeros_like(self.input_pad)
        
        _,HH,WW,F=self.output_shape
        
        # NOTE(review): the big* scratch buffers below are never used by
        # this class (the loops compute everything directly); they mirror
        # Conv_layer_py2's im2col buffers -- confirm they can be removed.
        self.big1=np.zeros((N,HH*WW,self.k_size[0]*self.k_size[1]*C),f32)
        self.big2=np.zeros((self.k_size[0]*self.k_size[1]*C,F),f32)
        
        self.big_din=np.zeros((N,H*W1,self.k_size[0]*self.k_size[1]*F),f32)
        self.big_C=np.zeros((self.k_size[0]*self.k_size[1]*F,C),f32)
        
        self.big_22=np.zeros((self.k_size[0]*self.k_size[1]*C,HH*WW),f32)
        self.big_33=np.zeros((HH*WW,F),f32)
        
        
    def forward_propagation(self):
        pad=self.pad
        
        hs,ws=self.stride
        hk,wk=self.k_size

        # copy the batch into the zero-padded buffer
        N,Hi,Wi,C=self.d_in.shape
        self.input_pad[:,pad[0]:Hi+pad[0],pad[1]:Wi+pad[1],:]=self.input
        for i in range(self.output.shape[0]):
            for h in range(self.output.shape[1]):
                for w in range(self.output.shape[2]):   
                    for c in range(self.output.shape[3]):
                        # direct correlation: window . kernel + bias
                        self.output[i,h,w,c]=(self.input_pad[i,h*hs:h*hs+hk,w*ws:w*ws+wk,:]*self.W[c]).sum()+self.b[c]

        # clamp to keep activations finite downstream
        self.output[...]=np.clip(self.output,clipmin,clipmax)
        
    
    def backward_propagation(self):
        hs,ws=self.stride
        hk,wk=self.k_size
        self.d_in_pad[...]=0
        self.d_W[...]=0
        self.d_b[...]=0
        for i in range(self.output.shape[0]):
            for h in range(self.output.shape[1]):
                for w in range(self.output.shape[2]):   
                    for c in range(self.output.shape[3]):

                        # scatter the output gradient back through the kernel,
                        # and accumulate the weight/bias gradients
                        self.d_in_pad[i,h*hs:h*hs+hk,w*ws:w*ws+wk,:]+=self.d_out[i,h,w,c]*self.W[c]
                        self.d_W[c]+=self.d_out[i,h,w,c]*self.input_pad[i,h*hs:h*hs+hk,w*ws:w*ws+wk,:]
                        self.d_b[c]+=self.d_out[i,h,w,c]

        # crop the padding off before accumulating into d_in
        N,H,W1,C=self.input.shape
        self.d_in+=self.d_in_pad[:,self.pad[0]:H+self.pad[0],self.pad[1]:W1+self.pad[1],:]
#        self.d_in[...]=np.clip(self.d_in,clipmin,clipmax)
        
        self.update_weights()
            
            
            
class Conv_layer_py2(Base_layer):
    """2-D convolution (NHWC) using im2col + a single matmul.

    forward(): every receptive field is flattened into a row of ``big1``
    and multiplied against the kernel reshaped into ``big2``.
    backward(): d_in is a 'full' convolution of the (dilated, re-padded)
    output gradient with the spatially flipped kernel; d_W is an im2col
    matmul of the padded input with the output gradient.
    """
    def __init__(self,X,n_filters,k_size,stride=(1,1),padding='valid'):
        # 'same' padding keeps the spatial size for stride 1
        if padding in ['same','SAME','Same']:
            pad=(k_size[0]//2,k_size[1]//2)
        else:
            pad=(0,0)

        self.input_shape=X.output_shape
        out_height=(self.input_shape[1]-k_size[0]+2*pad[0])//stride[0]+1
        out_width=(self.input_shape[2]-k_size[1]+2*pad[1])//stride[1]+1
        self.output_shape=(self.input_shape[0],out_height,out_width,n_filters)

        # one kernel per filter: (F, kh, kw, C_in)
        W=np.zeros((n_filters,k_size[0],k_size[1],self.input_shape[3]),f32)
        b=np.zeros((n_filters),f32)
        super(Conv_layer_py2,self).__init__(X,W,b,'Conv22')

        self.n_filters=n_filters
        self.k_size=k_size
        self.stride=stride
        self.pad=pad

    def initialization(self,batch_size):
        super(Conv_layer_py2,self).initialization(batch_size)

        # zero-padded copies of the input and of its gradient
        N,H,W1,C=self.input_shape
        self.input_pad=np.zeros((N,H+2*self.pad[0],W1+2*self.pad[1],C),f32)
        self.d_in_pad=np.zeros_like(self.input_pad)

        _,HH,WW,F=self.output_shape
        _,Hip,Wip,_=self.input_pad.shape

        # im2col scratch: big1 rows are flattened receptive fields,
        # big2 is the kernel reshaped to (kh*kw*C, F)
        self.big1=np.zeros((N*HH*WW,self.k_size[0]*self.k_size[1]*C),f32)
        self.big2=np.zeros((self.k_size[0]*self.k_size[1]*C,F),f32)

        # scratch for the d_in full-convolution
        self.big_din=np.zeros((N*Hip*Wip,self.k_size[0]*self.k_size[1]*F),f32)
        self.big_C=np.zeros((self.k_size[0]*self.k_size[1]*F,C),f32)

        # scratch for the d_W matmul
        self.big_W1=np.zeros((self.k_size[0]*self.k_size[1]*C,N*HH*WW),f32)
        self.big_W2=np.zeros((N*HH*WW,F),f32)


    def forward_propagation(self):
        pad=self.pad
        hs,ws=self.stride
        hk,wk=self.k_size
        # kernel as (kh, kw, C, F) flattened to (kh*kw*C, F)
        WT=np.transpose(self.W,(1,2,3,0))
        self.big2=WT.reshape(self.big2.shape)

        N,Hi,Wi,C=self.d_in.shape
        self.input_pad[:,pad[0]:Hi+pad[0],pad[1]:Wi+pad[1],:]=self.input

        # im2col: one row per (output position, sample)
        N,Ho,Wo,F=self.output.shape
        for h in range(self.output.shape[1]):
            for w in range(self.output.shape[2]):
                self.big1[(w+h*Wo)*N:(w+h*Wo+1)*N]=self.input_pad[:,h*hs:h*hs+hk,w*ws:w*ws+wk,:].reshape((N,-1))


        outb=np.dot(self.big1,self.big2)
        self.output[...]=outb.reshape((Ho,Wo,N,F)).transpose((2,0,1,3))
        self.output+=self.b

        # clamp to keep activations finite downstream
        self.output[...]=np.clip(self.output,clipmin,clipmax)

    def backward_propagation(self):
        pad=self.pad
        hs,ws=self.stride
        hk,wk=self.k_size
        self.d_in_pad[...]=0
        self.d_W[...]=0
        self.d_b[...]=0
        N,Hi,Wi,C=self.d_in.shape
        N,Ho,Wo,F=self.d_out.shape
        # pad d_out by (k-1) on each side so the flipped-kernel correlation
        # below realizes a 'full' convolution
        d_out_pad=np.zeros((N,Ho+2*hk-2,Wo+2*wk-2,F),f32)
        d_out_pad[:,hk-1:Ho+hk-1,wk-1:Wo+wk-1,:]=self.d_out


        if hs>1:
            # dilate rows: insert hs-1 zero rows between adjacent gradient rows
            for i in range(Ho):
                for j in range(1,hs):
                    d_out_pad=np.insert(d_out_pad, Ho+hk-1-i, values=0, axis=1)

        if ws>1:
            # dilate columns likewise.  Bug fix: iterate over the Wo output
            # columns -- the original looped range(Ho) here, mis-dilating the
            # gradient whenever the feature map is not square (Ho != Wo).
            for i in range(Wo):
                for j in range(1,ws):
                    d_out_pad=np.insert(d_out_pad, Wo+wk-1-i, values=0, axis=2)


        # spatially flipped kernel, reshaped for the d_in matmul
        WT=self.W[:,::-1,::-1,:]
        WT=np.transpose(WT,(1,2,0,3))
        big22=WT.reshape((-1,C))

        # im2col over the dilated gradient: one row per padded-input position
        N,Hip,Wip,C=self.input_pad.shape
        for h in range(self.input_pad.shape[1]):
            for w in range(self.input_pad.shape[2]):
                self.big_din[(w+h*Wip)*N:(w+h*Wip+1)*N]=d_out_pad[:,h:h+hk,w:w+wk,:].reshape((N,-1))

        # crop the padding off before accumulating into d_in
        d_ip=np.dot(self.big_din,big22)
        self.d_in+=d_ip.reshape((Hip,Wip,N,C))\
            .transpose((2,0,1,3))[:,pad[0]:Hi+pad[0],pad[1]:Wi+pad[1],:]

        # weight gradient: correlate the padded input with the output gradient
        # NOTE(review): this d_W computation indexes with step 1, i.e. it
        # assumes stride (1,1) -- confirm for strided convolutions.
        for h in range(hk):
            for w in range(wk):
                self.big_W1[(w+h*wk)*C:(w+h*wk+1)*C]=\
                    self.input_pad[:,h:h+Ho,w:w+Wo,:].transpose((3,0,1,2)).reshape((C,-1))


        self.big_W2=self.d_out.reshape((-1,F))
        d_W_i=np.dot(self.big_W1,self.big_W2)
        self.d_W[...]=d_W_i.reshape((hk,wk,C,F)).transpose((3,0,1,2))

        self.d_b=self.d_out.sum((0,1,2))

        self.update_weights()
 
        
class Flatten_layer_py(Base_layer):
    """Collapses all non-batch axes into one: (N,H,W,C) -> (N, H*W*C)."""

    def __init__(self,X):
        self.input_shape=X.output_shape
        flat=1
        for size in self.input_shape[1:]:
            flat*=size
        self.output_shape=(self.input_shape[0],flat)

        W=b=np.zeros(0)  # no trainable parameters
        super(Flatten_layer_py,self).__init__(X,W,b,'Flatten')

    def forward_propagation(self):
        self.output[...]=self.input.reshape(self.output.shape)

    def backward_propagation(self):
        # un-flatten the gradient back to the input's shape
        self.d_in+=self.d_out.reshape(self.d_in.shape)
        
        
class Reshape_layer_py(Base_layer):
    """Reshapes the non-batch part of the input to ``output_shape``."""

    def __init__(self,X,output_shape):
        self.input_shape=X.output_shape
        self.output_shape=(self.input_shape[0],)+output_shape

        W=b=np.zeros(0)  # no trainable parameters
        super(Reshape_layer_py,self).__init__(X,W,b,'Reshape')

    def forward_propagation(self):
        self.output[...]=self.input.reshape(self.output.shape)

    def backward_propagation(self):
        # reshape the gradient back to the input's layout
        self.d_in+=self.d_out.reshape(self.d_in.shape)
        
        
class ReLU_layer_py(Base_layer):
    """Rectified linear unit: passes positives through, zeroes the rest."""

    def __init__(self,X):
        self.input_shape=X.output_shape
        self.output_shape=self.input_shape

        W=b=np.zeros(0)  # no trainable parameters
        super(ReLU_layer_py,self).__init__(X,W,b,'ReLU')

    def forward_propagation(self):
        nonpos=self.input<=0
        self.output[...]=self.input
        self.output[nonpos]=0

    def backward_propagation(self):
        # gradient flows only where the input was positive
        blocked=self.d_out.copy()
        blocked[self.input<=0]=0
        self.d_in+=blocked

class LeakyReLU_layer_py(Base_layer):
    """Leaky ReLU: identity for positive inputs, slope ``leak`` otherwise."""

    def __init__(self,X,leak):
        self.input_shape=X.output_shape
        self.output_shape=self.input_shape
        self.leak=leak
        W=b=np.zeros(0)  # no trainable parameters
        super(LeakyReLU_layer_py,self).__init__(X,W,b,'LeakyReLU')

    def forward_propagation(self):
        nonpos=self.input<=0
        self.output[...]=self.input
        self.output[nonpos]*=self.leak

    def backward_propagation(self):
        # non-positive inputs pass a gradient scaled by the leak factor
        scaled=self.d_out.copy()
        scaled[self.input<=0]*=self.leak
        self.d_in+=scaled

class Dropout_layer_py(Base_layer):
    """Dropout: zeroes a random ``drop_rate`` fraction of activations.

    NOTE(review): the mask is applied on *every* forward pass -- there is
    no global_variable.isTraining check and no 1/(1-p) rescaling; confirm
    the surrounding training code expects that.
    """

    def __init__(self,X,drop_rate=0.5):
        self.input_shape=X.output_shape
        self.output_shape=self.input_shape

        W=b=np.zeros(0)  # no trainable parameters
        super(Dropout_layer_py,self).__init__(X,W,b,'Dropout')
        self.drop_rate=drop_rate
        self.drop_mat=None  # boolean mask of dropped positions

    def forward_propagation(self):
        # fresh Bernoulli mask each pass; True entries get zeroed
        self.drop_mat=np.random.random(size=self.input.shape)<self.drop_rate
        self.output[...]=self.input
        self.output[self.drop_mat]=0

    def backward_propagation(self):
        # dropped units pass no gradient
        blocked=self.d_out.copy()
        blocked[self.drop_mat]=0
        self.d_in+=blocked
 
class BN_layer_py(Base_layer):
    """Batch normalization; W acts as gamma (scale), b as beta (shift).

    For 4-D NHWC input the statistics are taken per channel over N*H*W;
    for 2-D input, per feature over the batch.
    """
    def __init__(self,X):
        self.input_shape=X.output_shape
        self.output_shape=self.input_shape
        
        # gamma initialized to 1, beta to 0 (one per channel/feature)
        W=np.ones((self.input_shape[-1]),f32)
        b=np.zeros((self.input_shape[-1]),f32)
        
        super(BN_layer_py,self).__init__(X,W,b,'Batch_Normalization')
        eps=1e-7

        # re-assert gamma/beta because weights_initialization() randomized W
        self.W[...]=1
        self.b[...]=0
      
        # inference statistics stored as [numerator, denominator] pairs.
        # NOTE(review): training overwrites them with the *latest* batch's
        # stats (denominator forced to 1), so inference uses the last batch
        # rather than a true running average -- confirm intended.
        self.E_mu=[0,eps]
        self.E_var=[eps,eps]
        
    def forward_propagation(self):
        # collapse NHWC to (N*H*W, C) so stats are per channel
        if len(self.input_shape) == 4:
            H=self.input_shape[-1]
            h=self.input.reshape((-1,H))
        else:
            h=self.input
           
        eps=1e-7
        N=h.shape[0]
        gamma=self.W
        beta=self.b
        
        if global_variable.isTraining and self.trainable:
            # batch statistics, also stored for later inference use
            mu = 1/N*np.sum(h,axis =0) # Size (H,) 
            var = 1/N*np.sum((h-mu)**2,axis=0)# Size (H,) 
            self.E_mu[0]=mu
            self.E_mu[1]=1
            
            self.E_var[0]=var
            self.E_var[1]=1
            
        else:
            # inference: use the stored statistics
            mu = self.E_mu[0]/self.E_mu[1]
            var = self.E_var[0]/self.E_var[1]
            
        # normalize, then scale and shift
        hath = (h-mu)*(var+eps)**(-1./2.)
        y = gamma*hath+beta 
        
        if len(self.input_shape) == 4:
            self.output[...]=y.reshape(self.output.shape)
        else:
            self.output[...]=y

        
                
    def backward_propagation(self):
        # same flattening as the forward pass
        if len(self.input_shape) == 4:
            H=self.input_shape[-1]
            h=self.input.reshape((-1,H))
            dy=self.d_out.reshape((-1,H))

        else:
            h=self.input
            dy=self.d_out

            
        eps=1e-7
        N=h.shape[0]
        gamma=self.W
        beta=self.b
          
        if self.trainable:
            # recompute the batch statistics (assumes input unchanged since
            # forward) and apply the standard BN gradient formula
            mu = 1/N*np.sum(h,axis =0) # Size (H,) 
            var = 1/N*np.sum((h-mu)**2,axis=0)# Size (H,)      
            
            dbeta = np.sum(dy, axis=0)
            dgamma = np.sum((h - mu) * (var + eps)**(-1. / 2.) * dy, axis=0)
            dh = (1. / N) * gamma * (var + eps)**(-1. / 2.) * (N * dy - np.sum(dy, axis=0)
                - (h - mu) * (var + eps)**(-1.0) * np.sum(dy * (h - mu), axis=0)) 
            
        else:
            # frozen layer: treat mu/var as constants
            mu = self.E_mu[0]/self.E_mu[1]
            var = self.E_var[0]/self.E_var[1]
            
            dbeta = np.sum(dy, axis=0)
            dgamma = np.sum((h - mu) * (var + eps)**(-1. / 2.) * dy, axis=0)
            
            dh=gamma*(var + eps)**(-1. / 2.) * dy
            
        # NOTE(review): rebinds d_W/d_b rather than writing in place, and
        # update_weights() is never called here -- gamma/beta are only
        # updated if some external code consumes d_W/d_b; confirm.
        self.d_W=dgamma
        self.d_b=dbeta
        
        if len(self.input_shape) == 4:
            self.d_in+=dh.reshape(self.d_in.shape)
        else:
            self.d_in+=dh


    
    
class Sigmoid_layer_py(Base_layer):
    """Element-wise logistic activation with clamped pre-activations."""

    def __init__(self,X):
        self.input_shape=X.output_shape
        self.output_shape=self.input_shape

        W=b=np.zeros(0)  # no trainable parameters
        super(Sigmoid_layer_py,self).__init__(X,W,b,'Sigmoid')

    def forward_propagation(self):
        # clamp the negated pre-activation so exp() cannot overflow
        neg=np.clip(-self.input,-30,30)
        sig=1/(1+np.exp(neg))
        # keep outputs strictly inside (0,1) so downstream log() is finite
        self.output[...]=np.clip(sig,epsilon,1-epsilon)

    def backward_propagation(self):
        # sigmoid'(x) = s*(1-s), expressed with the cached output
        self.d_in+=self.output*(1-self.output)*self.d_out
        
        
class Softmax_layer_py(Base_layer):
    """Row-wise softmax over axis 1 (expects (batch, classes) input)."""
    def __init__(self,X):
        self.input_shape=X.output_shape
        self.output_shape=self.input_shape
        
        W=b=np.zeros(0)   # no trainable parameters
        super(Softmax_layer_py,self).__init__(X,W,b,'Softmax')
        
    def forward_propagation(self):

        # subtract the row max for numerical stability before exp()
        max_val=self.input.max(1)
        
        for i in range(self.output.shape[0]):
            soft_val=self.input[i]-max_val[i]
            soft_val=np.clip(np.exp(soft_val),1e-20,1)
            self.output[i]=soft_val/soft_val.sum()
            
        # keep probabilities strictly inside (0,1) so log() stays finite
        self.output[...]=np.clip(self.output,epsilon,1-epsilon)

    def backward_propagation(self):
        # full softmax Jacobian per sample: diag(s) - s s^T
        for i in range(self.output.shape[0]):
            derivative=np.diag(self.output[i])-np.outer(self.output[i],self.output[i])
            self.d_in[i]+=np.dot(derivative,self.d_out[i])
# TODO:
# In[9]:
class Cross_entropy_loss(_Loss_Layer):
    """Categorical cross-entropy over softmax probabilities, plus per-sample
    top-1 accuracy."""

    def __init__(self,Predict):
        self.alpha=1.0   # global scale applied to loss and gradient
        super(Cross_entropy_loss,self).__init__(Predict,'Cross_entropy_layer')

    def forward_propagation(self):
        # per-element loss: -alpha * y * log(p)
        per_elem=-(self.label*np.log(self.pred))
        per_elem*=self.alpha

        reduce_axes=tuple(range(1,len(per_elem.shape)))
        self.loss=per_elem.sum(axis=reduce_axes)

        # top-1 accuracy: does the argmax of pred match the one-hot label?
        self.accuracy=(self.pred.argmax(1)==self.label.argmax(1)).astype(f32)

    def backward_propagation(self):
        batch=self.pred.shape[0]
        for sample in range(batch):
            # d/dp of -alpha*y*log(p), averaged over the batch
            self.gradient[sample]+=-self.alpha*self.label[sample]*(1/self.pred[sample])/batch

    def setAlpha(self,alpha):
        self.alpha=alpha
        return self

class Log_loss(_Loss_Layer):
    """Element-wise binary cross-entropy (log) loss.

    Expects predictions strictly inside (0,1) — the sigmoid/softmax layers
    clamp to (epsilon, 1-epsilon) for exactly this reason.
    """
    def __init__(self,Predict):
        self.alpha=1.0
        super(Log_loss,self).__init__(Predict,'Log_loss') 

    def forward_propagation(self):
        """Compute per-sample loss and per-sample fraction of elements
        classified on the correct side of 0.5."""
        all_loss = -(self.label*np.log(self.pred)+(1-self.label)*np.log(1-self.pred))
        all_loss *= self.alpha 
        sum_axis = tuple(range(1, all_loss.ndim))
        self.loss = all_loss.sum(axis=sum_axis)
        
        pred_class=self.pred>0.5
        true_class=self.label>0.5
        # Mean element-wise hit rate per sample.
        self.accuracy=(pred_class==true_class).astype(f32)\
            .sum(axis=sum_axis)/(true_class.size/true_class.shape[0])
        
    def backward_propagation(self):
        """Accumulate dL/dpred, averaged over the batch.

        BUG FIX: d/dp [-(y*log(p)+(1-y)*log(1-p))] = -(y/p - (1-y)/(1-p)).
        The original used np.log(1-p) instead of 1/(1-p) in the second
        term, producing a wrong gradient for the label==0 half.
        """
        self.gradient += -self.alpha*(self.label*(1/self.pred)-(1-self.label)*(1/(1-self.pred)))/self.pred.shape[0]
        
    def setAlpha(self,alpha):
        """Set the loss weight; returns self so calls can be chained."""
        self.alpha=alpha
        return self
    
    
class WGAN_loss(_Loss_Layer):
    """Wasserstein-style critic loss: -(y*p + (1-y)*(1-p)) per element.

    The gradient is constant in the prediction, so the critic score is
    pushed up for real (y=1) samples and down for fakes (y=0).
    """
    def __init__(self,Predict):
        self.alpha=1.0
        super(WGAN_loss,self).__init__(Predict,'WGAN_loss') 

    def forward_propagation(self):
        """Compute per-sample loss and 0.5-thresholded accuracy."""
        per_elem = -(self.label*self.pred+(1-self.label)*(1-self.pred))
        per_elem *= self.alpha
        reduce_axes = tuple(range(1, len(per_elem.shape)))
        self.loss = per_elem.sum(axis=reduce_axes)

        # Fraction of elements per sample on the correct side of 0.5.
        true_cls = self.label>0.5
        hits = ((self.pred>0.5)==true_cls).astype(f32)
        self.accuracy = hits.sum(axis=reduce_axes)/(true_cls.size/true_cls.shape[0])
        
    def backward_propagation(self):
        """Accumulate the (prediction-independent) gradient, batch-averaged."""
        sign = self.label+(1-self.label)*(-1)
        self.gradient += -self.alpha*sign/self.pred.shape[0]
        
    def setAlpha(self,alpha):
        """Set the loss weight; returns self so calls can be chained."""
        self.alpha=alpha
        return self

class Node():
    """Graph wrapper around a layer, carrying visit counters used by
    Model.initGraph() for its topological sort."""
    def __init__(self,layer): 
        self.layer=layer
        # Predecessor / successor Node lists plus how many of each have
        # already been scheduled.
        self.head_nodes=[]
        self.tail_nodes=[]
        self.head_visited_num=0
        self.tail_visited_num=0

    def all_heads_visited(self):
        """True once every predecessor has been scheduled."""
        return self.head_visited_num==len(self.head_nodes)

    def all_tails_visited(self):
        """True once every successor has been scheduled."""
        return self.tail_visited_num==len(self.tail_nodes)

    def reset_visit_state(self):
        """Zero both counters so the graph can be traversed again."""
        self.head_visited_num=0
        self.tail_visited_num=0
        
        

class Model():
    """Static computation graph over layer objects.

    `inputs`/`outputs`/`loss` are lists of layer instances that are
    already wired together via their head_nodes/tail_nodes links.
    initGraph() prunes layers not lying on any input->loss path and
    topologically sorts the rest into self.critical_path, which
    train()/predict() then execute forwards (and backwards for training).
    """
    def __init__(self,inputs,outputs,loss):
        
        assert type(inputs)==type([]),'输入类型必须是列表，列表元素是layer类'
        assert type(outputs)==type([]),'输出类型必须是列表，列表元素是layer类'
        assert type(loss)==type([]),'损失函数类型必须是列表，列表元素是layer类'
        
        self.input=inputs
        self.output=outputs
        self.loss=loss

        # Filled by initGraph(): topologically ordered list of Node wrappers.
        self.critical_path=None
        self.train_steps=0
        # NOTE(review): learning_rate is never read in the visible code;
        # global_variable.learning_rate_scale appears to drive the step
        # size instead — confirm before relying on this attribute.
        self.learning_rate=0.001
        self.initGraph()
    
    def initGraph(self): 
        """Prune unreachable layers and build self.critical_path
        (a Kahn-style topological order over the surviving layers)."""
        def dfsSearchUsefulNode(layer,end,current_path,useful_layers):
            # DFS from an input layer; every layer on a path that reaches
            # one of the loss layers in `end` is marked useful.
            current_path.append(layer)
            if layer in end:
                for nd in current_path:
                    useful_layers.add(nd)
                current_path.pop()
                return
            for v in layer.tail_nodes:
                dfsSearchUsefulNode(v,end,current_path,useful_layers)
            current_path.pop()
#           dfs end
        
        cur_path=[]
        useful_layers=set()
        for x in self.input:
            dfsSearchUsefulNode(x,self.loss,cur_path,useful_layers) 

        # Wrap every surviving layer in a Node that carries visit counters.
        layer_node_map={}
        for layer in useful_layers:
            layer_node_map[layer]=Node(layer)
        
        # Mirror the layer-level links onto the Node wrappers, keeping only
        # edges whose both endpoints survived the pruning.
        net_graph=[]
        for layer in useful_layers:
            node=layer_node_map[layer]
            for head in layer.head_nodes:                 
                if head in useful_layers:
                    node.head_nodes.append(layer_node_map[head])
            for tail in layer.tail_nodes:                 
                if tail in useful_layers:
                    node.tail_nodes.append(layer_node_map[tail])
            
            net_graph.append(node)
       

        # Topological sort: repeatedly append nodes whose predecessors have
        # all been scheduled.  O(V^2), acceptable for these small graphs.
        self.critical_path=[]
        for i in range(len(net_graph)):
            for node in net_graph:
                if node not in self.critical_path:
                    if node.all_heads_visited():
                        self.critical_path.append(node)
                        for tail in node.tail_nodes:
                            tail.head_visited_num+=1
         
#        assert len(self.critical_path)==len(net_graph),'没有形成有向图，生成关键路径发生错误'
        
        for node in self.critical_path:
            node.layer.printInfo()
     
        
    def batchIndex(self,total_size,batch_size,shuffle=False):
        """Return a list of index lists, one per mini-batch.

        Indices wrap around modulo total_size, so the final batch is
        padded with samples from the start instead of being short.
        """
        shf_idx=[i for i in range(total_size)]
        if shuffle:
            shf_idx=np.random.permutation(total_size)
            
        batch_index=[[shf_idx[(i+j)%total_size] for i in range(batch_size)] \
                        for j in range(0,total_size,batch_size)]
        return batch_index              
        
    def train(self,train_data,train_label,batch_size=32,epoch=10,val_dataset=None,shuffle=True):    
        """Run `epoch` passes of mini-batch SGD over the graph.

        train_data/train_label: lists of arrays (one per input / loss
        layer) sharing the same first (sample) dimension.  val_dataset,
        if given, is an (inputs_list, labels_list) pair evaluated after
        each epoch via predict().
        """
        assert type(train_data)==type([]),'train_data必须是list类型包装的，如对单个输入[train],多个输入[train_1,train_2,train_3]'
        assert type(train_label)==type([]),'train_label必须是list类型包装的，如对单个输出[label],多个输出[label_1,label_2,label_3]'

        total_size=train_data[0].shape[0]
        for t in train_data:
            assert t.shape[0]==total_size,'训练数据第一维度必须相等'
        for t in train_label:
            assert t.shape[0]==total_size,'训练标签第一维度必须相等'
            
        print()
        print('total_size:',total_size,'batch_size',batch_size,'epoch_steps',(total_size-1)//batch_size+1)
        
        global_variable.isTraining=True

        # (Re)allocate every layer's buffers for this batch size.
        for node in self.critical_path:
            node.layer.initialization(batch_size) 

        for e in range(epoch):
            t1=time.time()
                        
            batch_index=self.batchIndex(total_size,batch_size,shuffle)
            data_idx=range(0,len(batch_index))

            total_loss=[0]*len(self.loss)
            train_size=len(batch_index)*batch_size
            for i in data_idx:
                
                # Feed one mini-batch into every input and loss layer.
                for idx,in_layer in enumerate(self.input):
                    in_layer.set_data(train_data[idx][batch_index[i]])

                for idx,loss_layer in enumerate(self.loss):
                    loss_layer.set_label(train_label[idx][batch_index[i]])

                # Forward pass in topological order; clear_d_out resets
                # gradient accumulators before the backward pass.
                for node in self.critical_path:
                    node.layer.forward_propagation()
                    node.layer.clear_d_out()
      
                for idx,loss_layer in enumerate(self.loss):
                    total_loss[idx]+=loss_layer.loss.sum()/train_size

                # Backward pass in reverse topological order.
                for node in self.critical_path[::-1]:
                    node.layer.backward_propagation()
 
                self.train_steps+=1
                global_variable.updata(self.train_steps)  
     
                
            if val_dataset is not None:
                _,test_accuracy,test_loss=self.predict(val_dataset[0],batch_size,val_dataset[1])
                
                if len(total_loss)==1:
                    log_str='train_loss %.4f,test_loss %.4f,test_accuracy %.2f%s\n'% \
                        (total_loss[0],test_loss[0],test_accuracy[0]*100,'%')
                else:
                    log_str=''
                    for idx,_ in enumerate(total_loss):
                        log_str+='train_loss_%d %.4f,test_loss_%d %.4f,test_accuracy_%d %.2f%s \n'% \
                        (idx+1,total_loss[idx],idx+1,test_loss[idx],idx+1,test_accuracy[idx]*100,'%')
                
            else:
                if len(total_loss)==1:
                    log_str='train_loss %.4f \n'%total_loss[0]
                else:
                    log_str=''
                    for idx,_loss in enumerate(total_loss):
                        log_str+='train_loss_%d %.4f \n'%(idx+1,_loss)
                
                
            print('epoch:',e+1,'time: %.1f s'%(time.time()-t1))
            print(log_str)

                
    def predict(self,test_inputs,batch_size=1,evaluate_label=None):
        """Forward-only pass over test_inputs (a list of arrays).

        Returns the list of output arrays; when evaluate_label is given,
        returns (outputs, per-loss accuracy list, per-loss mean-loss list)
        instead.  Sets global_variable.isTraining=False and re-initialises
        every layer for the new batch_size (so interleaving with train()
        re-allocates buffers each time).
        """
        assert type(test_inputs)==type([]),'test_inputs必须是list类型包装的，如单个输入[test_data],多个输入[test_1,test_2,test_3]'
        total_size=test_inputs[0].shape[0]
        for t in test_inputs:
            assert t.shape[0]==total_size,'测试数据第一维度必须相等'
            
        if evaluate_label is not None:
            for t in evaluate_label:
                assert t.shape[0]==total_size,'测试标签第一维度必须相等'
                
                
        global_variable.isTraining=False
        
        for node in self.critical_path:
            node.layer.initialization(batch_size) 

        batch_index=self.batchIndex(total_size,batch_size)
        
        # Pre-allocate one result/loss/accuracy buffer per output layer.
        # NOTE(review): the loss/accuracy buffers are sized per *output*
        # layer but indexed per *loss* layer below — confirm the two lists
        # always have the same length.
        test_loss,test_accuracy=0,0
        test_outputs,test_losses,test_right_pred=[],[],[]
        for idx,out_layer in enumerate(self.output):
            test_outputs.append(np.empty((total_size,)+out_layer.output_shape[1:],f32))
            test_losses.append(np.empty((total_size,),f32))
            test_right_pred.append(np.empty((total_size,),f32))

        data_idx=range(0,len(batch_index))

        for i in data_idx:
                
            for idx,in_layer in enumerate(self.input):
                in_layer.set_data(test_inputs[idx][batch_index[i]])
            
            if evaluate_label is not None:
                for idx,loss_layer in enumerate(self.loss):
                    loss_layer.set_label(evaluate_label[idx][batch_index[i]])
                    
            for node in self.critical_path:
                node.layer.forward_propagation()
            if evaluate_label is not None:
                for idx,loss_layer in enumerate(self.loss):
                    # loss/accuracy are per-sample vectors; wrapped indices
                    # from batchIndex simply overwrite, so each sample is
                    # stored exactly once.
                    test_losses[idx][batch_index[i]]=loss_layer.loss
                    test_right_pred[idx][batch_index[i]]=loss_layer.accuracy
                
            for idx,out_layer in enumerate(self.output):
                test_outputs[idx][batch_index[i]]=out_layer.output
       
        # Mean loss / accuracy over all samples, one entry per loss layer.
        test_loss=[_loss.sum()/_loss.size for _loss in test_losses]
        test_accuracy=[_acc.sum()/_acc.size for _acc in test_right_pred]
        
        if evaluate_label is not None:
            return test_outputs,test_accuracy,test_loss
        else:
            return test_outputs
            
  
            
# In[1]:
        
if __name__=='__main__':
    
    # NOTE(review): the previous imports of `sklearn.datasets` and
    # `sklearn.cross_validation.train_test_split` were never used, and
    # `sklearn.cross_validation` was removed in scikit-learn 0.20, so the
    # import itself crashed on modern installs — both have been dropped.
    import cv2,os
    from matplotlib import pyplot as plt
    
    
    # Directory holding the pre-exported MNIST .npy arrays.
    ROOT='./mnist_png/'

    
    X_train, X_test=np.load(ROOT+'train_img.npy'),np.load(ROOT+'test_img.npy')
    y_train, y_test=np.load(ROOT+'train_label.npy'),np.load(ROOT+'test_label.npy')


    # Quick visual sanity check of two test digits and their labels.
    for k in [1,2]:
        plt.imshow(X_test[k],'gray')
        plt.show()
        print(y_test[k])

    
    # Rescale pixels from [0,255] into [0.01,0.99] so sigmoid targets are
    # never exactly 0 or 1 (matches the epsilon clamping in the losses).
    X_train=0.98*X_train.reshape(-1,28,28,1).astype(f32)/255+0.01
    X_test=0.98*X_test.reshape(-1,28,28,1).astype(f32)/255+0.01

def D(net):
    """Discriminator real/fake head: conv stack down to a single logit.

    net: 28x28x1 image layer.  Returns the final Dense(1) layer.
    """
    h = Conv_layer_py2(net, 32, (5,5), padding='same')   # 28x28
    h = BN_layer_py(h)
    h = LeakyReLU_layer_py(h, 0.2)

    h = AvrPooling_layer_py(h, (2,2))                    # 14x14
    h = Conv_layer_py2(h, 64, (5,5), padding='same')
    h = BN_layer_py(h)
    h = LeakyReLU_layer_py(h, 0.2)

    h = AvrPooling_layer_py(h, (2,2))                    # 7x7
    h = Flatten_layer_py(h)
    h = Dense_layer_py2(h, 512)
    h = BN_layer_py(h)
    h = LeakyReLU_layer_py(h, 0.2)

    # Single raw score (no activation) — consumed by WGAN_loss.
    return Dense_layer_py2(h, 1)

def DC(net):
    """Discriminator class head: conv stack to a 10-way softmax.

    net: 28x28x1 image layer.  Returns the Softmax layer.
    """
    h = Conv_layer_py2(net, 16, (5,5), padding='same')   # 28x28
    h = BN_layer_py(h)
    h = LeakyReLU_layer_py(h, 0.2)

    h = AvrPooling_layer_py(h, (2,2))                    # 14x14
    h = Conv_layer_py2(h, 32, (5,5), padding='same')
    h = BN_layer_py(h)
    h = LeakyReLU_layer_py(h, 0.2)

    h = AvrPooling_layer_py(h, (2,2))                    # 7x7
    h = Flatten_layer_py(h)
    h = Dense_layer_py2(h, 512)
    h = BN_layer_py(h)
    h = LeakyReLU_layer_py(h, 0.2)

    # 10 class probabilities — consumed by Cross_entropy_loss.
    h = Dense_layer_py2(h, 10)
    return Softmax_layer_py(h)

def G(net):
    """Generator: (100 noise + 10 class) vector -> 28x28x1 image in (0,1).

    net: Input layer of shape (110,).  Returns the final Sigmoid layer.
    """
    x=Dense_layer_py2(net,512)
    x=BN_layer_py(x)

    x=ReLU_layer_py(x)

    # BUG FIX: this layer originally read `net` instead of `x`, which
    # disconnected the Dense(512)/BN/ReLU stack above from the graph
    # (initGraph would prune it as unreachable dead code).
    x=Dense_layer_py2(x,7*7*64)
    x=BN_layer_py(x)

    x=ReLU_layer_py(x)

    x=Reshape_layer_py(x,(7,7,64))  #7*7
    x=UpSampling_layer_py(x,(2,2))  #14*14
    x=Conv_layer_py2(x,32,(5,5),padding='same')
    x=BN_layer_py(x)

    x=ReLU_layer_py(x)

    x=UpSampling_layer_py(x,(2,2))  #28*28
    x=Conv_layer_py2(x,1,(5,5),padding='same')
    x=Sigmoid_layer_py(x)              #28*28*1

    return x

# Build the conditional-GAN graph: 100-dim noise + 10-dim one-hot class
# code drives the generator; the discriminator has two heads sharing one
# merged input.
rd_input=Input((100+10,))
G_out=G(rd_input)

img_input=Input((28,28,1))

# NOTE(review): merge_type='add' sums the generator output with the real
# image input — presumably only one side is fed meaningful data at a time;
# confirm Merge_layer semantics.
D_input=Merge_layer([G_out,img_input],merge_type='add')


D_out=D(D_input)
D_class=DC(D_input)
loss=WGAN_loss(D_out)
loss_class=Cross_entropy_loss(D_class).setAlpha(1)

print("Generative")
G_model=Model([rd_input],[G_out],[loss,loss_class])
print("Discriminate")  # fixed typo: was "Dicriminate"
D_model=Model([img_input],[D_out],[loss])
DC_model=Model([img_input],[D_class],[loss_class])

 
def ChangeTrainable(model,trainable=True):
    """Set the trainable flag on every layer along the model's critical path."""
    for graph_node in model.critical_path:
        graph_node.layer.setTrainable(trainable)
   
# --- GAN training loop ---------------------------------------------------
# NOTE(review): size_tr appears unused in the visible code — confirm
# before deleting.
size_tr=10000
batch=128

# Fixed noise+class batch used only for the periodic visualisation, so the
# rendered sample grid is comparable across iterations.
rd_sample=np.random.random((batch,100+10))
rd_sample[:,100:]=y_train[0:0+batch]


import cv2
global_variable.learning_rate_scale=0.0002
for iii in range(30):
    for j in range(0,60000,batch):
        # Conditional input: 100-dim noise with the one-hot class code in
        # the last 10 slots.
        rd_train=np.random.random((batch,100+10))
        D_cls_real=y_train[j:j+batch]
        rd_train[:,100:]=D_cls_real

        D_train_fake=G_model.predict([rd_train],batch_size=2*batch)[0]    
        D_train_real=X_train[j:j+batch]
        
        # Critic batch: fakes (label 0) stacked on top of reals (label 1).
        D_train=np.vstack((D_train_fake,D_train_real))
        D_label=np.ones((D_train.shape[0],1),f32)
        D_label[:D_train_fake.shape[0]]=0   
        # Class targets duplicated so D_cls[batch:] below recovers the
        # real-image half.
        D_cls=np.vstack([D_cls_real,D_cls_real])
        
#        D_val=([D_train_fake[:200]],[np.zeros((200,1),f32)])
        # Every 20 mini-batches: render a 6x6 mosaic of generated digits.
        if j%(batch*20)==0:
            D_fake_sample=G_model.predict([rd_sample],batch_size=2*batch)[0]    

            # Clamp to [0,1], scale to bytes, tile 36 images into a
            # (6*28)x(6*28) grid.
            vis=D_fake_sample[0:36,:,:,0]
            vis[vis>1]=1
            vis[vis<0]=0
            vis=vis*255
            vis=vis.reshape((6,6,28,28)).transpose((0,2,1,3))
            vis=vis.reshape((6*28,6*28))
            vis=vis.astype(np.uint8)
            
            # NOTE(review): hard-coded absolute output path — fails unless
            # /home/user/MZH/mnist exists; consider making it configurable.
            cv2.imwrite('/home/user/MZH/mnist/iii_%d.png'%(j+60000*iii),vis)
            plt.imshow(vis,'gray')
            plt.show()
            print(j)
            time.sleep(3)
        
        # ---- critic update: train the real-vs-fake head only ----
        D_input.reset_cache()
        ChangeTrainable(D_model,True)
        ChangeTrainable(DC_model,True)

        print('DDD.........real or fake..................',j)
        
        loss.setAlpha(1) 
        loss_class.setAlpha(0)   
        
        # WGAN-style weight clipping only while the critic trains.
        global_variable.clip=(-0.01,0.01)
        D_model.train([D_train],[D_label],batch_size=2*batch,epoch=1)
        global_variable.clip=(clipmin,clipmax)

    
        # ---- classifier update: class head on real images only ----
        print('DDD.........class..................',j)

        loss_class.setAlpha(1)
        loss.setAlpha(0) 
        DC_model.train([D_train_real],[D_cls[batch:]],batch_size=2*batch,epoch=1)
        
        # ---- generator update: freeze D, push fakes toward "real" ----
        G_train=rd_train
        G_label=np.ones((G_train.shape[0],1),f32)
    
        D_input.reset_cache()
        ChangeTrainable(D_model,False)
        ChangeTrainable(DC_model,False)
        # Generator trains against both heads; class loss down-weighted.
        loss_class.setAlpha(0.2)
        loss.setAlpha(1) 


        print('GGG...........................',j)
        G_model.train([G_train],[G_label,D_cls_real],batch_size=2*batch,epoch=3)
 