import matplotlib.pyplot as plt
import numpy as np
import os
import random
import joblib
import math

# print("hello gitee")
# s = ""
# for a in range(0,20):
#     s = str(a).zfill(2)
#     print(s)


# 神经网络结构？
# https://zh.m.wikipedia.org/zh-hans/%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C
# 卷积层--线性整流层--池化--完全连接
# 特征 --   激励 --

# 读取图片  https://zhuanlan.zhihu.com/p/353938168
# dirpath = "F:\\abcd\\课\\智能设计\\作业要求.jpg"
# img_plt = plt.imread(dirpath)
# print("img_plt:",img_plt.shape) # img_plt: (540, 960, 3)
# print("type of img_plt:",type( img_plt )) # type of img_plt: <class 'numpy.ndarray'>
# # plt.imshow( img_plt , cmap=plt.cm.binary)
# # plt.imshow( img_plt[::2] , cmap=plt.cm.binary) # 三个通道rgb
# # plt.show()
# print( img_plt.shape[0])

# 数据读取与处理--------------------------------------------------------------------
# 读取目标文件夹下所有图片
def readImg( imgPathDir ):
    """Load every image file under *imgPathDir*.

    Returns (imgList, count): imgList holds one ndarray per file as
    produced by plt.imread, count is len(imgList). File order follows
    os.listdir (platform-dependent).
    """
    imgList = []
    for filename in os.listdir( imgPathDir ):
        # Portability fix: the original concatenated the hard-coded
        # Windows separator "\\"; os.path.join works on any platform.
        imgdata = plt.imread( os.path.join( imgPathDir , filename ) )
        imgList.append( imgdata )
    return imgList , len( imgList )

# Data preparation: normalize and attach one-hot labels
def dataDeal( dataList , index ):
    """Scale each image into [0, 1] and pair it with a one-hot label.

    index: 1, 2 or 3, mapping to labels [1,0,0], [0,1,0], [0,0,1].
    Returns a list of [normalized_image, one_hot_label] pairs; each
    pair gets its own fresh label list.
    """
    labelled = []
    for img in dataList:
        one_hot = [0, 0, 0]
        one_hot[index - 1] = 1
        labelled.append([img / 255, one_hot])
    return labelled

# Load the three classes, label, shuffle and split
def readData( dir1 , dir2 , dir3 ):
    """Read three image directories (1 fully / 2 partially / 3 not
    covered), label and shuffle them, then split 70/10/20 into
    (train_list, verify_list, test_list)."""
    samples = []          # [image_matrix, one_hot_label] pairs
    counts = [0, 0, 0]    # per-class image counts
    sources = [(dir1, 1), (dir2, 2), (dir3, 3)]
    for slot, (directory, label_idx) in enumerate(sources):
        raw, counts[slot] = readImg(directory)
        samples.extend(dataDeal(raw, label_idx))
    random.shuffle(samples)

    total = len(samples)
    cut70 = int(total * 0.7)
    cut80 = int(total * 0.8)
    # 70% train, 10% verify, 20% test
    return samples[:cut70], samples[cut70:cut80], samples[cut80:]

# Split the training set into mini-batches
def batches_train_set( train_set , batches ):
    """Cut train_set into `batches` equally sized chunks.

    Chunk size is len(train_set) // batches; any remainder samples at
    the tail are dropped (as in the original behavior).
    """
    chunk = len(train_set) // batches
    return [train_set[i * chunk:(i + 1) * chunk] for i in range(batches)]

#1卷积层-----------------------------

# Seeded random fill with the same shape as the given matrix
def random_assign( martix , seed ):
    """Return a uniform [0,1) random matrix shaped like `martix`.

    The global numpy RNG is seeded first, so equal seeds reproduce
    equal matrices. The input values themselves are ignored.
    """
    np.random.seed(seed)
    rows, cols = martix.shape[0], martix.shape[1]
    return np.random.rand(rows, cols)

# Build a list of randomly initialized square kernels
def initialize_kernel( kernel_num , kernel_size , seed ):
    """Create kernel_num random kernels of shape
    (kernel_size, kernel_size).

    The incoming seed is scaled by 50 and incremented per kernel so
    successive kernels draw from distinct RNG states.
    """
    seed = seed * 50
    kernels = []
    for _ in range(kernel_num):
        # seeded uniform fill (inlined from random_assign)
        np.random.seed(seed)
        kernels.append(np.random.rand(kernel_size, kernel_size))
        seed += 1
    return kernels

# Conv layer 2: weights NOT shared; every node owns t kernels (one per channel)
def c2( inputdata , node_kernel_list ):
    """Collapse each entry's channel matrices into one column vector.

    inputdata: list of entries [ pic1 , pic2 , ... , label ], one 2-D
        array per channel plus a trailing one-hot label list.
    node_kernel_list: [ kernels , bias ] where kernels[i] is node i's
        per-channel kernel list and bias has shape (nodes*channels, 1).

    Returns [ output , label ]: output holds one (nodes*channels, 1)
    column per entry, label the extracted label lists.
    NOTE: mutates inputdata in place (the label is deleted from each entry).
    """
    # node_kernel_list [ node1_kernel , node2_kernel , ... ]
    # node1_kernel [ k1 , k2 , ... ]
    output = [] # result [ batch_list , label_list ]   each item: martix[m,1]
    label = [] # collected labels
    for batch in inputdata:
        label.append( batch[-1] )  # pull the label off the entry
        del batch[-1] # drop the trailing label, keeping only the channel matrices
        value_output = np.zeros(( len( node_kernel_list[0] ) * len( batch ) , 1)) # length = nodes * channels
        for index in range( 0 , len( node_kernel_list[0] ) ):
            # every node processes all channels
            for index_channel in range( 0 , len(batch) ):
                # NOTE(review): np.dot + sum is a matrix product, not a
                # sliding convolution; normalization by rows**2 assumes
                # square maps -- confirm against callers.
                value = np.sum( np.dot( batch[index_channel] , node_kernel_list[0][index][index_channel] ) ) / batch[index_channel].shape[0] ** 2
                value_output[ index * len(batch) + index_channel ] = value + node_kernel_list[1][index * len(batch) + index_channel,0]
        output.append( value_output )
    return [ output , label ]
# Conv layer 1: convolution (pooling is done by a separate function)
def c1( input_data , parameter_kernel_i , padding ):
    """Run one convolution layer over a batch of multi-channel images.

    input_data: list of entries [ data1 , data2 , ... , label ].
    parameter_kernel_i: list of square kernels for this layer.
    padding: border padding forwarded to convolution_oneimg.

    For every kernel, the per-channel convolutions are averaged and
    ReLU-activated (activate_fun); each output entry keeps the label
    as its last item. Prints a progress line per image.
    """
    num = 1 # progress counter: which image is being processed
    output = []

    for i_input_data in input_data: # iterate the batch --i_input_data: [ data1 , data2 , ... , lable ]
        # feature maps produced for this entry
        img_data_list = []
        for index in range( 0 , len( parameter_kernel_i ) ): # iterate all kernels
            # accumulate this kernel's convolution over all channels
            img_data_sum = np.zeros( ( i_input_data[0].shape[0] - parameter_kernel_i[index].shape[0] + 1 + padding * 2 , i_input_data[0].shape[1] - parameter_kernel_i[index].shape[0] + 1 + padding * 2 ) )
            for index_ais in range( 0 , len( i_input_data ) - 1 ):
                img_data_tmp = convolution_oneimg( i_input_data[index_ais] , parameter_kernel_i[index].shape[0] , parameter_kernel_i[index] , padding )
                img_data_sum += img_data_tmp
            img_data_sum = img_data_sum / ( len( i_input_data ) - 1 ) # average over channels
            img_data_act = activate_fun( img_data_sum ) # ReLU activation
            img_data_list.append( img_data_act )
        img_data_list.append( i_input_data[-1] ) # keep the label last ----img_data[ data1,data2,label ]
        output.append( img_data_list )
        print( "正在处理图片:" , num )
        num += 1
    return output

# Pooling
def pooling( imgdata , size , model ): # model 1 average 2 max
    """Pool every channel of every entry with a size x size window.

    imgdata<list> [ A , B , C , ... ]  one batch
        A: [ a , b , c , ... , label ]  channel matrices plus a trailing
           one-hot label list such as [0,0,1]
            a: 2-D matrix
    model: 1 -> average pooling, 2 -> max pooling (anything else only
    prints an error and falls through to max behavior).

    Returns (pooled, locations): pooled keeps the input layout
    (channels + label); locations records the max offset per window.
    """
    if model != 1 and model != 2:
        print(" ERROR --pooling_oneimg --model 1,2")
    size_pool = [ int( imgdata[0][0].shape[0] / size ) , int( imgdata[0][0].shape[1] / size ) ]

    out_put = []
    out_put_location = []
    for index_batch in imgdata:
        out_put_asi = []          # pooled channels for this entry
        out_put_location_asi = [] # max-location records for this entry
        for index_asi in range( 0 , len( index_batch ) - 1 ): # last item is the label
            poolimg = np.zeros((size_pool[0], size_pool[1]))
            location_value = [] # per-row lists of max positions
            for i_index in range( 0 , poolimg.shape[0] ):
                location_value_j = []
                for j_index in range( 0 , poolimg.shape[1] ):
                    sum_pool = 0  # running sum of the window
                    max_value = 0 # running max of the window
                    # BUGFIX: loc_list was unbound (NameError) when no
                    # window value exceeded 0 (possible after ReLU);
                    # default to the window's first cell.
                    loc_list = [ i_index * size - i_index , j_index * size - j_index ]
                    for i_pool in range( i_index * size , i_index * size + size ):
                        for j_pool in range( j_index * size , j_index * size + size ):
                            sum_pool += index_batch[index_asi][ i_pool , j_pool ]
                            if max_value < index_batch[index_asi][ i_pool , j_pool ]:
                                max_value = index_batch[index_asi][ i_pool , j_pool ]
                                # NOTE(review): offset is taken relative to
                                # (i_index, j_index), not the window origin
                                # (i_index*size) -- looks suspicious, kept as-is.
                                loc_list = [ i_pool - i_index , j_pool - j_index ]
                    sum_pool = sum_pool / ( size ** 2)
                    poolimg[i_index, j_index] = sum_pool if model == 1 else max_value # average or max per model
                    location_value_j.append( loc_list )
                location_value.append( location_value_j )
            out_put_asi.append( poolimg ) # pooled map for this channel
            out_put_location_asi.append( location_value )
        out_put_asi.append( index_batch[-1]) # re-attach the label
        out_put.append( out_put_asi )
        # BUGFIX: the original appended out_put_asi here (copy-paste),
        # discarding the recorded max locations entirely.
        out_put_location.append( out_put_location_asi )
    return out_put , out_put_location


# 将图片转化为矩阵
# def img2marix( imgdata , kernel_size ):
#     output = np.zeros( ( kernel_size ** 2 , ( imgdata.shape[0] - kernel_size + 1 ) ** 2 ) )
#     row = 0
#     for index_i in range( 0 , imgdata.shape[0] - kernel_size + 1 ):
#         for index_j in range( 0 , imgdata.shape[0] - kernel_size + 1 ):
#             # 取值
#             tmp = 0
#             for index_mar_i in range( index_i , index_i + kernel_size ):
#                 for index_mar_j in range( index_j , index_j + kernel_size ):
#                     output[ tmp , row ] = imgdata[ index_mar_i , index_mar_j ]
#                     tmp += 1
#             row += 1
#     return output

#  2-D convolution over one image ----
#  imgdata: 2-D image matrix
def convolution_oneimg( imgdata , kernel_size , kernel , padding ): # one pass, e.g. 4 x 4 ---> 2 x 2
    """Convolve a single image with `kernel`.

    Output side length is input - kernel_size + 1 + 2*padding; each
    output cell is produced by convOnePoint at the matching source
    coordinate.
    """
    out_h = imgdata.shape[0] - kernel_size + 1 + padding * 2
    out_w = imgdata.shape[1] - kernel_size + 1 + padding * 2
    convImg = np.zeros((out_h, out_w))
    half = int(kernel_size / 2)
    row_src = range(half - padding, imgdata.shape[0] - half + padding)
    col_src = range(half - padding, imgdata.shape[1] - half + padding)
    for out_i, src_i in enumerate(row_src):
        for out_j, src_j in enumerate(col_src):
            convImg[out_i, out_j] = convOnePoint(kernel_size, src_i, src_j, imgdata, kernel)
    return convImg

# Single-point convolution
def convOnePoint( kernel_size , x , y , imgdata , kernel ):
    """Weighted sum of the kernel-sized neighborhood centered at (x, y),
    divided by the number of taps.

    Taps that fall outside the image borrow the center pixel's value
    instead of zero-padding.
    """
    half = int((kernel_size - 1) / 2)
    span = 2 * half + 1
    total = 0
    for di in range(span):
        for dj in range(span):
            src_i = x - half + di
            src_j = y - half + dj
            inside = 0 <= src_i < imgdata.shape[0] and 0 <= src_j < imgdata.shape[1]
            # out-of-bounds taps reuse the center pixel
            pixel = imgdata[src_i][src_j] if inside else imgdata[x][y]
            total += pixel * kernel[di][dj]
    return total * (1 / span ** 2)  # normalize by tap count

# 确定最后一层卷积层的参数,添加至列表中

# Flatten m*m*t feature maps through a per-node full-size convolution:
# node_num filter groups, each with t kernels of size m*m, so the
# output is 1 x 1 x node_num (column form) feeding the dense layers.
def flatten_data_conv( inputdata , node_num ):
    """Build node_num random kernel groups (one kernel per channel,
    kernel side equal to the map side) plus a bias column, then run c2.

    Returns (output, node_kernel_list) where output = [data, labels]
    from c2 and node_kernel_list = [kernels, bias].
    """
    channel = len(inputdata[0]) - 1       # last item of each entry is the label
    size_pic = inputdata[0][0].shape[0]   # kernels cover the whole map
    kernels = []
    seed = 20
    bias = np.random.random(size=(channel * node_num, 1))
    for _ in range(node_num):
        kernels.append(initialize_kernel(channel, size_pic, seed))
        seed += 10  # spread seeds between nodes
    node_kernel_list = [kernels, bias]
    return c2(inputdata, node_kernel_list), node_kernel_list

# Flatten operation (marked as wrong/unused by the original author)
def flatten_data( inputdata ):
    """Flatten every channel of every entry into one long value list.

    inputdata: entries of [ channel_matrix , ... , label ].
    Returns entries of [ flat_values , label ].
    """
    flattened = []
    for entry in inputdata:
        flat_values = []
        for channel in entry[:-1]:  # skip the trailing label
            flat_values.extend(channel.flatten('A').tolist())
        flattened.append([flat_values, entry[-1]])
    return flattened

def sum_martix( martix ):
    """Return the sum of all entries of a 2-D matrix.

    Idiom/performance fix: delegates to the vectorized ndarray sum
    instead of the original hand-rolled O(rows*cols) Python double loop.
    """
    return np.sum( martix )
# Fully-connected stage ---------- one dense layer
def fullyConnected( inputdata , parameter ): # parameter<list> [0]--w [1]--b
    """Apply one dense layer to every column vector in the batch.

    inputdata: [ data_batch , label ] where data_batch is a list of
        (m, 1) column vectors and label the matching label list.
    parameter: [ w , b ] with w of shape (m, n) and b of shape (n, 1).

    Each column d maps to ((d.T @ w).T / m) + b (a scaled affine
    transform). Returns [ outputs , label ]; prints a progress line
    per column.
    """
    outputs = []
    w, b = parameter[0], parameter[1]
    for test_batch_num, column in enumerate(inputdata[0]):
        print("全连接层：正在遍历批次: ", test_batch_num, "...")
        outputs.append(np.dot(column.T, w).T / w.shape[0] + b)
    return [ outputs , inputdata[1] ]
# Random initialization for one dense layer's parameters
def initialize_fullyCon( pre_size , size ):
    """Return (w, b) for a dense layer: w is uniform-random of shape
    (pre_size, size); b is uniform-random returned as a (size, 1)
    column."""
    w = np.random.rand(pre_size, size)
    b = np.random.rand(size)
    return w, b.reshape(size, 1)

# 初始化参数
"""
num_conv_list 传进一个列表，每个表项存储该卷积层的参数[ A , B , ... ]
    A -- [ kernel_num , kernel_size ]
num_fulluCon_list 存储全连接层参数 [ A , B , ... ]
    A -- [ size ] 
"""
def initialize_parameter_conv( num_conv_list ):
    """Initialize kernels for every conv layer.

    num_conv_list: [[kernel_num, kernel_size], ...], one entry per
    layer. The layer index (starting at 1) is used as the seed so runs
    are reproducible. Returns one kernel list per layer.
    """
    return [
        initialize_kernel(spec[0], spec[1], layer_seed)
        for layer_seed, spec in enumerate(num_conv_list, start=1)
    ]

# Initialize the fully-connected layers; needs the flattened conv size
# num_fullyCon_list = [ 500 , 200 , ... , 3 ]
# input_full_num: node count of the flattened conv output (e.g. 845)
def initialize_parameter_full( num_fullyCon_list , input_full_num ):
    """Initialize every dense layer in order.

    The first layer maps input_full_num -> num_fullyCon_list[0]; each
    subsequent layer maps the previous width to the next.
    Returns [ [w, b] , ... ], one pair per layer.
    """
    sizes = [input_full_num] + list(num_fullyCon_list)
    return [
        initialize_fullyCon(sizes[i], sizes[i + 1])
        for i in range(len(num_fullyCon_list))
    ]

# Activation function: element-wise ReLU on a matrix (in place)
def activate_fun( martix ):
    """Clamp negative entries of `martix` to 0 (ReLU), in place.

    Idiom/performance fix: vectorized boolean masking replaces the
    original O(rows*cols) Python double loop; the in-place mutation
    and the returned reference are unchanged.
    """
    martix[martix < 0] = 0
    return martix

def relu( x ):
    """Scalar ReLU: non-positive inputs map to 0, others pass through."""
    return 0 if x <= 0 else x
# Activation for the fully-connected stage (in place)
def activate_fullcon( input_data ):
    """Apply scalar relu to every value of batch[0] for each batch,
    mutating the lists in place. Returns None."""
    for batch in input_data:
        values = batch[0]
        for i, v in enumerate(values):
            values[i] = relu(v)

# softmax over each column, in place
def softmax( input_data ): # input_data [ columns , labels ]
    """Turn every (k, 1) column of input_data[0] into a probability
    distribution, in place. Returns None.

    input_data: [ column_list , label_list ]; labels are untouched.

    Robustness fix: logits are shifted by their maximum before
    exponentiation. exp(z - max) yields the same softmax values but
    avoids the OverflowError math.exp raised on large logits.
    """
    for column in input_data[0]:
        shift = np.max(column)  # max-shift for numerical stability
        for index in range( 0 , column.shape[0] ):
            column[ index , 0 ] = math.exp( column[ index , 0 ] - shift )
        column /= np.sum( column )

# Loss function (cross-entropy over the batch, after softmax)
def loss(input_data ):
    """Average cross-entropy magnitude over the batch.

    input_data: [ columns , labels ] where each column is a softmax
    output and each label a one-hot list like [1,0,0].

    Loss = -sum_i( y_i * log(p_i) ); np.abs replaces the leading minus
    sign, which is equivalent because each y*log(p) term is
    non-positive when p is in (0, 1]. The accumulator is hard-coded to
    3 classes. Returns a (3, 1) matrix of per-class averages.
    NOTE: mutates input_data[0] in place (log applied to the columns).
    """
    tmp_martix = np.zeros((1,3))
    log_p = input_data[0]
    for index_batch in range( 0 , len( input_data[0] ) ):
        # take log of every probability, in place
        for index_i in range( 0 , input_data[0][index_batch].shape[0] ):
            for index_j in range(0, input_data[0][index_batch].shape[1]):
                log_p[index_batch][index_i,index_j] = math.log( log_p[index_batch][index_i,index_j] )
        x = np.mat(input_data[1][index_batch])
        y = log_p[index_batch].T
        z = np.multiply( x , y ) # elementwise y * log(p)
        z = np.abs( z )
        tmp_martix += z
    tmp_martix /= len( input_data[0] )
    return tmp_martix.T
    # # 对某批次计算
    # loss_N = 0
    # for batch in input_data: # 遍历所有样本
    #     L_i = 0
    #     for index_sort in range( 0 , len( batch[1] ) ): # 遍历多分类
    #         y_log_p = - batch[1][index_sort] * math.log( batch[0][index_sort] ) # y * log_p
    #         L_i += y_log_p
    #     loss_N += L_i
    # loss_N /= len( input_data )
    # return loss_N

# Convert the (w, b) matrix pairs back into the nested-list form
def martix2list_par_full( par_full_W_list , par_full_B_list ):
    """Turn per-layer weight matrices (m, n) and bias columns (n, 1)
    into [[w_lists, b_list], ...], where w_lists holds one list per
    node (a weight-matrix column)."""
    par_full = []
    for layer, w_mat in enumerate(par_full_W_list):
        node_weights = [w_mat[:, col].tolist() for col in range(w_mat.shape[1])]
        par_full.append([node_weights, par_full_B_list[layer][:, 0].tolist()])
    return par_full
# Convert nested-list dense-layer parameters into matrix form
def list2martix_par_full( par_full ):
    """par_full = [[w, b], ...] where w = [[w11, w12, ...], ...] (one
    list per node) and b = [b1, b2, ...].

    Returns (W_list, B_list): per-layer weight matrices with one
    COLUMN per node, and per-layer bias columns (np.mat, shape (n, 1)).
    """
    par_full_W_list = []
    par_full_B_list = []
    for layer in par_full:
        par_full_B_list.append(np.mat(layer[1]).T)
        node_count = len(layer[0])
        w_mat = np.zeros((len(layer[0][0]), node_count))
        for node in range(node_count):
            w_mat[:, node] = np.mat(layer[0][node])
        par_full_W_list.append(w_mat)
    return par_full_W_list , par_full_B_list


# """
# -- xxx
# 转换data_full
# 存储形式
# [ layer1 , layer2 ... ] 层数
#     layer: [ z , a ]
#         z: [ z1,z2,...,zn] n批数据
#             z1:[ x , y ]
#                 x: [ x1,x2,...,xm ] m个输入，m个结点
#                 y: [ 0,0,1 ] 标签
# 转化为
#     layer [ z , a , y ] n批数据
#         z: [ z1,z2,...,zn] m*n矩阵
#         a: m*n 矩阵
#         y: 3*n 矩阵
#
# """
def data_full2martix( data_full ):
    """Convert per-layer [z, a] records into matrix form.

    data_full: [ layer1 , layer2 , ... ] where layer_i = [ z , a ] and
    z/a each hold [column_list, label_list] (see verify_network).

    Returns [ layer_list , y ]: layer_list holds [z_matrix.T,
    a_matrix.T] per layer (columns are samples); y is the first
    layer's label list transposed into a matrix with one column per
    sample.
    NOTE(review): only index [0] of z and a is read per layer --
    presumably the column list -- confirm against the producer.
    """
    y_batch_mar= ( np.mat( data_full[0][0][1] ) ).T
    layer_list = []
    for index_layer in range( 0 , len( data_full ) ):
        # flatten each stored column into a row, then stack into a matrix
        z_batch_list = np.mat( [ value.reshape(-1) for value in data_full[index_layer][0][0] ] )
        a_batch_list = np.mat( [ value.reshape(-1) for value in data_full[index_layer][1][0] ] )
        layer_list.append( [ z_batch_list.T , a_batch_list.T ] )
    return [ layer_list , y_batch_mar ]
    # data_full_mar = data_full
    # # 遍历全连接层
    # for index_layer in range( 0 , len( data_full ) ):
    #     # 处理z,a,y
    #     z_list_tmp = []
    #     a_list_tmp = []
    #     y_list_tmp = []
    #     for index_batch in range( 0 , len( data_full[index_layer][0] ) ):
    #         z_list_tmp.append( data_full[index_layer][0][index_batch][0] )
    #         a_list_tmp.append( data_full[index_layer][1][index_batch][0] )
    #         y_list_tmp.append(data_full[index_layer][0][index_batch][1])
    #     z_martix_tmp = np.mat( z_list_tmp ).T
    #     a_martix_tmp = np.mat( a_list_tmp ).T
    #     y_martix_tmp =np.mat( y_list_tmp ).T
    #     data_full_mar[index_layer] = [ z_martix_tmp , a_martix_tmp , y_martix_tmp ]
    # return data_full_mar


# 偏L偏w  针对交叉熵函数
"""
反向传播，
par_full_martix 参数矩阵 w , b 
第l层
    原w
        [ w1 , w2 , w3 , ... , wn]
            wi 为该层第i个结点的w<一维向量>
    反向传播w
        即原w的转置,w.T
        
例子:
    300 200 50 3 
    s_dLdw 作为输入，三个参数分别代表了 l-1 , l , l+1 层 
    
data_x m(l-1) * n  m结点数,n批次数
data_p m(l) * n
s_dLdw m(l) * n 
par_full_martix_w w(l) * m(l) w该节点w数,w个数=m(l-1)个数
par_full_martix_b b(l)=m(l) * 1

s_dLdw_tran = np.dot ( par_full_martix_w [ w(l) * m(l) ]  , s_dLdw  [ m(l)*n ] )  [ w(l) * n ] 传递时不削减批次
计算dw,db时 s_dLdw = sum(s_dLdw)/len [ m(l) * n ]
"""
def dL_dw( data_x , data_p , s_dLdw , par_full_martix_w , par_full_martix_b ): # data_p --p , data_x --x
    """Backprop step for one dense layer (cross-entropy setup).

    data_x: (m_{l-1}, n) activations of the previous layer.
    data_p: (m_l, n) predictions of this layer.
    s_dLdw: (m_l, n) incoming sensitivity for this layer.
    par_full_martix_w / par_full_martix_b: this layer's weights/bias.

    Returns (dw, db, s_dLdw_tran): weight gradient, bias gradient
    (batch-averaged), and the sensitivity handed to layer l-1 (the
    batch dimension is kept while propagating).
    """
    w = par_full_martix_w
    # sensitivity passed down to the previous layer
    s_dLdw_tran = np.dot(w, s_dLdw) / w.shape[1] + par_full_martix_b

    residual = data_p - s_dLdw
    dw = np.dot(data_x, residual.T) * (-1.0 / data_x.shape[1])
    db = np.sum(residual, axis=1) / residual.shape[1]
    return dw , db , s_dLdw_tran

    # data_mar_X = []
    # data_mar_P = []
    # data_mar_Y = []
    # for index in range( 0 , len( data_x ) ) :
    #     data_mar_X.append( data_x[index][0] )
    #     data_mar_P.append( data_p[index][0] )
    #     data_mar_Y.append( data_x[index][1] )
    # data_mar_X = np.mat( data_mar_X )
    # data_mar_P = np.mat( data_mar_P )
    # data_mar_Y = np.mat( data_mar_Y )
    #
    # dw = -( 1.0 / len(data_x) )
    # dw *= np.dot( data_mar_X.T , (data_mar_Y - data_mar_P) )
    # # dw += lam * weight
    # db = (data_mar_Y - data_mar_P)# pCpa = a - y , papz =
    # db = np.sum( db , axis=0 ) / db.shape[0]
    # return dw , db.T , s_dLdw # 每列为dw1 dw2 dw3

# 卷积的反向传播
# Inverse of average pooling for a single feature map
def unpool_oneimg( martix , size ):
    """Upsample a pooled map: every entry expands into a size x size
    tile holding value / size**2 (spreads the average back evenly).

    Idiom fix: np.kron performs the block expansion in one vectorized
    call, replacing the original row/column stitching with np.append;
    the output values and shape are identical.
    """
    return np.kron(martix, np.ones((size, size))) / size ** 2
# Inverse pooling over a whole batch
def Unpooling( input_data , mode , size , location_value=0 ):
    """Unpool every node map of every entry in place and return
    input_data. Only mode 1 (average unpooling) does anything; other
    modes are a no-op. location_value is accepted but unused here.

    NOTE: the node count is taken from the FIRST entry for all
    entries (original behavior, preserved).
    """
    if mode == 1:
        node_count = len(input_data[0])
        for entry in input_data:
            for node in range(node_count):
                entry[node] = unpool_oneimg(entry[node], size)
    return input_data

#  Backward convolution: compute dw for one conv layer
def backpropagation_dw( inputdata_pre , input_dt , par_conv ):
    """Kernel gradients for conv layer l.

    inputdata_pre: layer l-1 activations -- batch x node x matrix
        (the last item of each entry is the label).
    input_dt: layer l deltas -- batch x node x matrix.
    par_conv: layer l kernels -- node x matrix (only shapes/length used).

    Returns one dw matrix per layer-l node: batch-averaged delta
    convolved with previous-layer activations, averaged over nodes.
    """
    dw = []
    for index_node_l in range( 0 , len( par_conv ) ):
        node_value = np.zeros( ( par_conv[0].shape[0] , par_conv[0].shape[0] ) ) # accumulator for this node
        for index_node_pre in range( 0 , len( inputdata_pre[0] ) - 1 ): # last item holds the label
            dt_sum_tmp = np.zeros( ( input_dt[0][0].shape[0] , input_dt[0][0].shape[0] ) )
            for index_batch in range( 0 , len( inputdata_pre ) ):
                dt_sum_tmp += input_dt[index_batch][index_node_l]
            dt_sum_tmp /= len( inputdata_pre ) # batch-average the delta
            # NOTE(review): index_batch here is the leftover value from the
            # loop above (always the LAST batch entry) -- looks unintended,
            # confirm whether this should average over all batches.
            value = convolution_oneimg( inputdata_pre[index_batch][index_node_pre] , dt_sum_tmp.shape[0] , dt_sum_tmp , 0 )
            node_value += value
        node_value /= len( par_conv )
        dw.append( node_value )
    return dw

# 计算卷积层最后一层的dt
# def backpropagation_full2conv( )

#   xxxx dt and dw computation should be split into separate functions
#   Backward convolution: propagate deltas down one conv layer
def backpropagation( input_data , input_dt , par_conv , par_conv_later ):
    """Propagate deltas from layer l+1 back to layer l.

    input_data: layer l-1 activations -- batch x node x matrix.
    input_dt: layer l+1 deltas -- batch x node x matrix.
    par_conv: layer l kernels (indexed [node][channel] judging by the
        [0][0] access below -- confirm), or the sentinel -1 when layer
        l feeds the fully-connected stage (shapes then come from
        input_data).
    par_conv_later: layer l+1 kernels -- node x matrix.

    Returns output_dt (batch x node x matrix): per layer-l node, the
    average over l+1 nodes of the transposed convolution of that
    node's delta with its kernel.
    """
    output_dt = []

    if par_conv == -1: # conv layer that feeds the fully-connected stage
        l_node = len ( input_data[0] )
        shape_dt = input_data[0][0].shape[0]
    else :
        l_node = len( par_conv )
        shape_dt = input_data[0][0].shape[0] - par_conv[0][0].shape[0] + 1

    # iterate batches
    for index_batch in range( 0 , len( input_data ) ):
        dt_l = []
        # iterate layer-l nodes
        for index_node_l in range( 0 , l_node ):
            dt_sum_tmp = np.zeros( ( shape_dt , shape_dt ) )
            # iterate layer-(l+1) nodes
            for index_node_later in range( 0 , len( par_conv_later ) ):
                tmp_value = deconvolution_one( input_dt[index_batch][index_node_later] ,
                                               par_conv_later[index_node_later] ,
                                               shape_dt )
                dt_sum_tmp += tmp_value
            dt_sum_tmp /= len( par_conv_later ) # average over l+1 nodes
            dt_l.append( dt_sum_tmp )
        output_dt.append( dt_l )
    return output_dt
    # # dw
    # for index_node_l in range( 0 , len( par_conv ) ): # 遍历l层
    #     sum_tmp_dw = np.zeros(( par_conv[0].shape[0], par_conv[0].shape[0])) # 结果累加
    #     for index_batch in range( 0 , len( input_data ) ): # 遍历批次
    #         for index_node_pre in range( 0 , len( input_data[0] ) - 1 ): # 遍历l-1层结点
    #             # al-1 与 dt卷积
    #             conv_tmp = convolution_oneimg(input_data[index_batch][index_node_pre],output_dt[index_batch][index_node_l].shape[0],output_dt[index_batch][index_node_l],0)  # output_dt[index_batch][index_node] * input_data[index_batch]
    #             sum_tmp_dw += conv_tmp
    #     sum_tmp_dw /= len( input_data ) * len( input_data[0] )
    #     dw_kernel.append( sum_tmp_dw )
    # return output_dt , dw_kernel

# Transposed convolution of one delta map with one kernel
def deconvolution_one( input_dt_oneMartix , kernel , size ):
    """Upsample a delta map by full (transposed) convolution.

    input_dt_oneMartix: m x m delta matrix (m assumed odd).
    kernel: k x k kernel.
    size: desired side length of the result.

    The delta is pasted into the center of a zero (size + k - 1)
    square and convolved with the 180-degree-rotated kernel, yielding
    a size x size output.
    """
    output = np.zeros( ( size + kernel.shape[0] - 1 , size + kernel.shape[0] - 1 ) )
    center = ( output.shape[0] - 1 )/2 # center of the padded square
    index_value = int ( center - ( input_dt_oneMartix.shape[0] - 1 ) / 2 ) # top-left corner of the paste region
    for index_i in range( index_value , index_value + input_dt_oneMartix.shape[0] ):
        for index_j in range(index_value, index_value + input_dt_oneMartix.shape[0]):
            index_dt_value_i = index_i - index_value
            index_dt_value_j = index_j - index_value
            output[  index_i , index_j ] = input_dt_oneMartix[ index_dt_value_i , index_dt_value_j ]
    # flip the kernel along both axes (180-degree rotation)
    kernel_flip = np.flip( kernel , axis=0 )
    kernel_flip = np.flip( kernel_flip , axis=1 )
    # convolve the padded delta with the flipped kernel
    output = convolution_oneimg( output , kernel_flip.shape[0] , kernel_flip , 0)
    return output
#

#  xxxxx (author's note) the dw part is suspect: dw should belong to the
#  full_in layer while the input dt already IS full_in's dt; the returned
#  dt is for layer p6
#  Propagate delta back through the flatten stage
def flatten2conv_backpropagation( input_data , input_dt , par_conv , pre_par_conv ):
    """Backprop from the flattened dense input to the last conv/pool layer.

    input_data: batch x channel matrices (e.g. 13x13 maps).
    input_dt: (nodes*channels) x batch delta matrix.
    par_conv: flatten-layer kernels indexed [node][channel].
    pre_par_conv: previous conv layer kernels, one per node.

    Returns (output_dt, dw_list_later): per-batch per-node delta maps,
    and per-(later node, channel) dw accumulations.
    """
    output_dt = [] # batch_list of per-node delta maps
    dw = [] # unused; dw is built in dw_list_later below
    shape_dt = input_data[0][0].shape[0] # final delta map side length

    for index_batch in range( 0 , input_dt.shape[1] ):
        print("flatten2conv_backpropagation 正在遍历批次: " , index_batch )
        # deltas for each layer-l node
        dt_l = []
        for index_node in range( 0 , len( pre_par_conv ) ):
            print("flatten2conv_backpropagation 正在遍历l层结点: ", index_node)
            dt_tmp_sum = np.zeros( ( shape_dt , shape_dt ) )
            for index_node_later in range( 0 , len( par_conv ) ):
                # scalar delta for this (later node, this node, batch)
                dt_tmp = input_dt[ index_node_later * len(pre_par_conv[0]) + index_node , index_batch ]
                # lift the scalar to a 1x1 matrix
                dt_tmp = np.array([[dt_tmp]])
                # transposed convolution with the matching kernel;
                # shape_dt is the final desired delta shape
                dt_tmp = deconvolution_one( dt_tmp , par_conv[index_node_later][index_node], shape_dt )
                # accumulate over the later layer's nodes (kernels)
                dt_tmp_sum += dt_tmp
            dt_tmp_sum /= len( par_conv ) # average
            dt_l.append( dt_tmp_sum )
        output_dt.append( dt_l )
    # dw: scale each input activation map by its matching scalar delta
    dw_list_later = []
    for index_node_later in range( 0 , len( par_conv ) ):
        tmp_list = []
        for index_node_l in range( 0 , len( input_data[0] ) ):
            sum_tmp = np.zeros( ( input_data[0][0].shape[0] , input_data[0][0].shape[1] ) )
            for index_batch in range(0, len(input_data)):
                dw_tmp = np.multiply( input_data[index_batch][index_node_l] , input_dt[ index_node_later * len( input_data[0] ) + index_node_l , index_batch] )
                sum_tmp += dw_tmp
            sum_tmp = np.multiply( sum_tmp , 1 / len(input_data) ) # batch average
            tmp_list.append( sum_tmp )
        dw_list_later.append( tmp_list )
    return output_dt , dw_list_later
    # xxxx 错误    计算 dw
    # for index_node in range( 0 , len( output_dt[0] ) ): # 遍历结点 l层
    #     sum_tmp_dw = np.zeros(( pre_par_conv[0].shape[0] , pre_par_conv[0].shape[0] ))
    #     for index_batch in range( 0 , len( output_dt ) ): # 遍历批次
    #         # sum_tmp_dw = np.zeros(( pre_par_conv[0].shape[0] , pre_par_conv[0].shape[0] ))
    #         for index_node_pre in range( 0 , len( input_data[0] ) ): # 遍历l-1层结点，用于 卷积结果累加
    #             # al-1 与 dt卷积
    #             conv_tmp = convolution_oneimg( input_data[index_batch][index_node_pre] ,output_dt[index_batch][index_node].shape[0] ,output_dt[index_batch][index_node] , 0 )# output_dt[index_batch][index_node] * input_data[index_batch]
    #             sum_tmp_dw += conv_tmp
    #     sum_tmp_dw /= len( output_dt ) * len( input_data[0] )
    #     dw.append( sum_tmp_dw )
    # return output_dt , dw # xxx output batch_list node_list dt_martix m*m
    # # dw = al-1 *(卷积) dt  shape_as wl(kernel)    al-1 - kernel +1 = dt
    # # dt = dtl+1 * w(反)  dtl+1 = dt + w - 1


# 测试网络

# 验证网络
def verify_network( verify_set , mode=0 , model_dir='F:\\ccddPY\\python\\ML' ):
    """Run a forward pass of the saved network over a data set and print its loss.

    Parameters:
        verify_set: list of samples in the layout produced by the training
            code (consumed directly by c1/pooling/flatten_data_conv).
        mode: 0 -> report as validation set, 1 -> report as test set,
            anything else -> generic report (previously printed nothing).
        model_dir: directory holding the 'par_conv' and 'par_fullyCon'
            parameter dumps written by training. New keyword; its default
            reproduces the old hard-coded paths exactly.

    Returns:
        The absolute loss array (previously computed and then discarded;
        returning it is backward-compatible).
    """
    # Load the trained parameters persisted by the training loop.
    par_conv = joblib.load( os.path.join( model_dir , 'par_conv' ) )
    par_fullyCon = joblib.load( os.path.join( model_dir , 'par_fullyCon' ) )

    # Forward pass, mirroring the layer stack in main_fun:
    # conv c1 -> conv c2 -> pool p3 -> conv c4 -> conv c5 -> pool p6.
    output_c1 = c1( verify_set , par_conv[0] , 0 )
    output_c2 = c1( output_c1 , par_conv[1] , 0 )
    output_p3 , _ = pooling( output_c2 , 2 , 1 )
    output_c4 = c1( output_p3 , par_conv[2] , 0 )
    output_c5 = c1( output_c4 , par_conv[3] , 0 )
    output_p6 , _ = pooling( output_c5 , 2 , 1 )

    # Flatten the conv output into fully-connected input and activate it.
    input_fullycon , _ = flatten_data_conv( output_p6 , 300 )
    softmax( input_fullycon )

    # Fully-connected layers. Index 0 of par_fullyCon holds the flatten
    # kernels, so the real FC layers start at index 1.
    for par_index_full in range( 1 , len( par_fullyCon ) ):
        print("全连接层: ", par_index_full - 1, "...")
        output_fc = fullyConnected( input_fullycon , par_fullyCon[par_index_full] )  # par [ w , b ]
        softmax( output_fc )
        input_fullycon = output_fc

    # Report the loss under the requested label.
    lossN = np.abs( loss( input_fullycon ) )
    if mode == 0:
        print(" 验证集  loss = ", lossN)
    elif mode == 1:
        print(" 测试集  loss = ", lossN)
    else:
        # Fix: unknown modes used to produce no output at all.
        print(" loss = ", lossN)
    return lossN

# Train the network (main training entry point).
def main_fun( train_set_batches_list ):
    """Train the CNN + fully-connected network with plain gradient descent.

    Parameters:
        train_set_batches_list: list of mini-batches; each batch is a list
            of [image_channels, label] samples as produced by
            batches_train_set (see the data-loading code at the top of
            this file).

    Side effects:
        Dumps 'par_conv' / 'par_fullyCon' via joblib when the summed loss
        drops below 1e-5, then evaluates the verification and test sets.
        NOTE(review): `verify_set` and `test_set` used at the end are
        module-level globals assigned in the __main__ block, not
        parameters — calling main_fun before they exist raises NameError.
    """
    # Initialize parameters.
    # Conv layers: [number_of_kernels, kernel_size] per layer.
    num_conv_list = [ [ 6 , 3 ] , [ 16 , 3 ] , [ 16 , 3 ] , [ 6 , 3 ]]
    # Fully-connected layers: hidden-layer node counts plus the 3-class output.
    num_fullyCon_list = [500, 200 , 50, 3]
    par_conv = initialize_parameter_conv( num_conv_list )
    par_fullyCon = []
    # Index of the current training mini-batch.
    batch = 0
    # Learning rate.
    lam = 0.01
    print("每批数据量: ", len(train_set_batches_list[0]))
    for index_iterations in range(0 , 300): # at most 300 iterations

        print("正在迭代第",index_iterations + 1 , "次")

        # Stores the output of every conv / pooling stage for backprop.
        data_conv_full = []
        # Forward pass ---------------------------------------------------
        data_conv_full.append([train_set_batches_list[batch], -1])
        # c1: convolution + activation
        output_c1 = c1( train_set_batches_list[batch] , par_conv[0] , 0 )
        data_conv_full.append( [ output_c1 , 0 ] ) # tag 0: conv layer
        # c2: convolution + activation
        output_c2 = c1( output_c1 , par_conv[1] , 0 )
        data_conv_full.append([output_c2, 0])
        # p3: pooling
        output_p3 , output_p3_location = pooling( output_c2 , 2 , 1 )
        data_conv_full.append([output_p3, 1]) # tag 1: pooling layer
        # c4: convolution + activation
        output_c4 = c1( output_p3 , par_conv[2] , 0 )
        data_conv_full.append([output_c4, 0])
        # c5: convolution + activation
        output_c5 = c1( output_c4 , par_conv[3] , 0 )
        data_conv_full.append([output_c5, 0])
        # p6: pooling
        output_p6, output_p6_location = pooling(output_c5, 2, 1)
        data_conv_full.append([output_p6, 1])

        # Stores the (z, a) pair of every fully-connected layer.
        data_full = []
        # Flatten the conv output into fully-connected input vectors.
        input_fullycon , node_kernel_list = flatten_data_conv( output_p6 , 300 )
        xx = 1 # leftover debugging placeholder; has no effect
        # Initialize the fully-connected parameters on the first pass only,
        # because the flattened input size is first known here.
        if index_iterations == 0 :
            par_fullyCon = initialize_parameter_full( num_fullyCon_list , input_fullycon[0][0].shape[0] )
            par_fullyCon.insert( 0 , node_kernel_list ) # keep the flatten kernels at index 0 so they update with the rest
        tmp = input_fullycon
        # NOTE(review): tmp aliases input_fullycon; if softmax mutates its
        # argument in place, the stored z and a are the same object — confirm.
        softmax( input_fullycon )
        # data_full does not include the raw (pre-flatten) input layer.
        data_full.append( [ tmp , input_fullycon ] )  # store pre-activation and activation
        # Fully-connected layers (par_fullyCon index 0 is the flatten stage).
        test_full_num = 0
        for par_index_full in range( 1 , len( par_fullyCon ) ): # iterate FC layers
            print("全连接层: " , test_full_num , "...")
            output_fc = fullyConnected( input_fullycon , par_fullyCon[par_index_full] ) # par [ w , b ]
            tmp = output_fc
            softmax(output_fc)
            data_full.append( [ tmp , output_fc ] )  # store each layer's z and a
            input_fullycon = output_fc
            test_full_num += 1

        # Loss of the current batch.
        lossN = loss( input_fullycon )
        lossN = np.abs( lossN )
        print( "loss = " , lossN )

        # Convert data_full from per-sample lists into matrix form.
        data_full = data_full2martix( data_full )

        """
        data_full 存储形式
        [ layeri ...]
            [z,a]
                z [z,y]
                    z [ batchi ...]
                        batchi 1800 * 1
                    y [ batchi ...]
                        batchi <list> [1,0,0]
        应该为
        [ layer_list , label_martix ]
            layer_list [ layeri ... ]
                layeri [z,a]
                    z martix 1800 * n
                    a martix 1800 * n
            label_martix 3 * n 
        """
        # Backward pass through the fully-connected layers ----------------
        # data_full holds input, hidden and output layers; par_fullyCon's
        # weight index is offset by one relative to data_full's layer index.
        # Each iteration yields the upstream dt and this layer's dw.
        for index in range( len(data_full[0]) - 1 , 0 , -1 ):
            if index == len(data_full[0]) - 1 : # at the output layer the upstream signal is the label matrix (3*1 per sample)
                s_dLdw = data_full[1]
            index_w = index
            index_b = index - 1
            dw, db, s_dLdw = dL_dw(data_full[0][index-1][0], data_full[0][index][1], s_dLdw, par_fullyCon[index_w][0],par_fullyCon[index_b][1])
            # Gradient-descent update of b and w, element by element.
            for index_rnew in range( 0 , par_fullyCon[index_b+1][1].shape[0] ):
                par_fullyCon[index_b+1][1][index_rnew,0] -= lam * db[index_rnew,0]
                for index_rnew_w in range( 0 , par_fullyCon[index_w][0].shape[0] ):
                    par_fullyCon[index_w][0][index_rnew_w,index_rnew] -= lam * dw[index_rnew_w,index_rnew]

        # Backward pass through the conv / pooling stack ------------------
        # From the fully-connected input back to the last conv layer.
        layer = -1
        back_dt , dw_kernel = flatten2conv_backpropagation( data_conv_full[layer][0] , s_dLdw , par_fullyCon[0][0] , par_conv[layer] )
        # data_conv_full layout: 0 input, 1-2 conv(c1,c2), 3 pool(p3), 4-5 conv(c4,c5), 6 pool(p6)

        # Update the flatten-stage kernels stored at par_fullyCon[0].
        for index_node in range( 0 , len( par_fullyCon[0][0] ) ):
            for index_channel in range( 0 , len( par_fullyCon[0][0][0] )):
                par_fullyCon[0][0][index_node][index_channel] -= np.multiply( dw_kernel[index_node][index_channel] , lam )

        # Unpool: propagate dt back to c5.
        back_dt = Unpooling( back_dt , 1 , 2 , 0 )
        # dw of c5.
        dw_kernel = backpropagation_dw(data_conv_full[4][0], back_dt, par_conv[3])
        # Update c5 kernels.
        for index_node in range( 0 , len( par_conv[3] ) ):
            par_conv[3][index_node] -= lam * dw_kernel[index_node]
        # Deconvolution: dt of c4.
        back_dt = backpropagation( data_conv_full[3][0] , back_dt , par_conv[2] , par_conv[3] )
        # dw of c4.
        dw_kernel = backpropagation_dw( data_conv_full[3][0] , back_dt , par_conv[2] )
        # Update c4 kernels.
        for index_node in range( 0 , len( par_conv[2] ) ):
            par_conv[2][index_node] -= lam * dw_kernel[index_node]
        # dt of p3 (the -1 marks the no-parameter pooling layer).
        back_dt = backpropagation(data_conv_full[3][0], back_dt, -1 , par_conv[2] )
        # Unpool: dt of c2.
        back_dt = Unpooling(back_dt, 1, 2, 0)
        # dw of c2.
        dw_kernel = backpropagation_dw(data_conv_full[1][0], back_dt, par_conv[1])
        # Update c2 kernels.
        for index_node in range( 0 , len( par_conv[1] ) ):
            par_conv[1][index_node] -= lam * dw_kernel[index_node]
        # Deconvolution: dt of c1.
        back_dt = backpropagation(data_conv_full[0][0], back_dt, par_conv[0], par_conv[1])
        # dw of c1.
        dw_kernel = backpropagation_dw(data_conv_full[0][0], back_dt, par_conv[0])
        # Update c1 kernels.
        for index_node in range( 0 , len( par_conv[0] ) ):
            par_conv[0][index_node] -= lam * dw_kernel[index_node]


        # Early stop: persist the parameters once the loss is small enough.
        if sum( lossN ) < 0.00001 :
            joblib.dump(par_conv, 'F:\\ccddPY\\python\\ML\\par_conv')
            joblib.dump(par_fullyCon, 'F:\\ccddPY\\python\\ML\\par_fullyCon')
            break

        # Advance to the next mini-batch, wrapping around.
        if batch == len(train_set_batches_list) - 1:
            batch = 0
        else :batch += 1

    # NOTE(review): verify_set / test_set are globals from the __main__ block.
    verify_network( verify_set , 0 )  # validation set
    verify_network( test_set, 1 ) # test set


if __name__ == '__main__':
    # Input data: one folder per mask-wearing class.
    imgDir_fully_covered = "F:\\abcd\\课\\智能设计\\3class\\fully_covered"  # 240 images
    imgDir_not_covered = "F:\\abcd\\课\\智能设计\\3class\\not_covered"
    imgDir_partially_covered = "F:\\abcd\\课\\智能设计\\3class\\partially_covered"
    # Load, label and shuffle the images, then split into train/verify/test.
    train_set, verify_set, test_set = readData(imgDir_fully_covered, imgDir_partially_covered, imgDir_not_covered)
    # Split the training set into mini-batches of 20 samples each.
    train_set_batches_list = batches_train_set(train_set, 20)

    # Train the network. main_fun already evaluates verify_set and test_set
    # when it finishes, so the former duplicate verify_network calls here
    # (which ran the whole evaluation a second time) have been removed.
    main_fun( train_set_batches_list )


# ------

# xx mv 某点某通道取均值/最大值
# def comv1poin1way(length, x, y, imgdata, model , rgb):  # l = 0:一个点 , l = 1:9个点
#     sumComRGBx = 0
#     maxData = 0.0
#     for icomv in range(0, 2 * length + 1):  # 遍历周围长度为length的正方形区域
#         for jcomv in range(0, 2 * length + 1):
#             if x - length + icomv < 0 or y - length + jcomv < 0 or x - length + icomv >= imgdata.shape[0] or y - length + jcomv >= imgdata.shape[1]:  # 如果到了边界之外，假定其值为原点的值
#                 sumComRGBx += imgdata[x][y][rgb] # 若在边界外，取该点值
#                 if maxData < imgdata[x][y][rgb]: # 取最大值
#                     maxData = imgdata[x][y][rgb]
#             else:
#                 # 错误判断
#                 if x - length + icomv >= imgdata.shape[0] or y - length + jcomv >= imgdata.shape[1]:
#                     ERROR_1 = x - length + icomv - imgdata.shape[0]
#                     ERROR_2 = y - length + jcomv - imgdata.shape[1]
#                     print("ERROR cc001 \n")
#                 sumComRGBx += imgdata[x - length + icomv][y - length + jcomv][rgb]
#                 if maxData < imgdata[x - length + icomv][y - length + jcomv][rgb]:
#                     maxData = imgdata[x - length + icomv][y - length + jcomv][rgb]
#                     # print( "maxData: " , maxData , "  imgdata[ x - length + icomv ][ y - length + jcomv]: ", imgdata[ x - length + icomv ][ y - length + jcomv] )
#     if model == 1:
#         return sumComRGBx * (1 / (2 * length + 1) ** 2)  # 除以点数
#     elif model == 2:
#         return maxData
#     else:
#         print("ERROR!!!!  cc002\n")
#         return -1


# 卷积核对某一图像上某一点进行卷积运算