# -*- coding: utf-8 -*-
# @Time    : 2020/6/17 下午9:59
# @Author  : caotian
# @FileName: optimizationmodel.py
# @Software: PyCharm
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D,Pool2D,Linear
import json
import gzip
import numpy as np
import random
import os
import sys

class MNIST(fluid.dygraph.Layer):
    """LeNet-style CNN for MNIST digit classification (PaddlePaddle 1.x dygraph).

    Architecture: conv(5x5, 20ch) -> max-pool(2x2) -> conv(5x5, 20ch)
    -> max-pool(2x2) -> fully-connected softmax over 10 classes.
    """

    def __init__(self):
        """Build the layers.

        Input is assumed to be a single-channel 28x28 image, so after two
        2x2 poolings the feature map is 20 x 7 x 7 = 980, which fixes the
        fully-connected layer's input_dim.
        """
        super(MNIST, self).__init__()
        # First conv block: 1 -> 20 channels, 5x5 kernel, padding 2 keeps H/W.
        self.conv1 = Conv2D(num_channels=1, num_filters=20, filter_size=5,
                            stride=1, padding=2, act='relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # Second conv block: 20 -> 20 channels, same spatial geometry.
        self.conv2 = Conv2D(num_channels=20, num_filters=20, filter_size=5,
                            stride=1, padding=2, act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # Classifier head: 980 flattened features -> 10 class probabilities.
        self.fc = Linear(input_dim=980, output_dim=10, act='softmax')

    def forward(self, inputs, label=None, check_shape=False, check_content=False):
        """Run a forward pass, optionally printing debug information.

        Args:
            inputs: image batch tensor, shape [N, 1, 28, 28].
            label: optional ground-truth labels; when given, accuracy is
                also computed and returned.
            check_shape: if True, print layer hyperparameters and the
                output shape of every layer (structure sanity check).
            check_content: if True, print sampled kernel weights and
                feature-map values for debugging during training.

        Returns:
            softmax predictions, or (predictions, accuracy) when a label
            is supplied.
        """
        # Name each intermediate output distinctly to ease debugging.
        outputs1 = self.conv1(inputs)
        outputs2 = self.pool1(outputs1)
        outputs3 = self.conv2(outputs2)
        outputs4 = self.pool2(outputs3)
        # Flatten [N, C, H, W] -> [N, C*H*W] for the fully-connected layer.
        _outputs4 = fluid.layers.reshape(outputs4, [outputs4.shape[0], -1])
        outputs5 = self.fc(_outputs4)

        # Optionally print each layer's hyperparameters and output shapes
        # to verify the network structure is configured correctly.
        if check_shape:
            # Per-layer hyperparameters: kernel size, stride, padding, pool size.
            print("\n########## print network layer's superparams ##############")
            print("conv1-- kernel_size:{}, padding:{}, stride:{}".format(self.conv1.weight.shape, self.conv1._padding,
                                                                         self.conv1._stride))
            print("conv2-- kernel_size:{}, padding:{}, stride:{}".format(self.conv2.weight.shape, self.conv2._padding,
                                                                         self.conv2._stride))
            print("pool1-- pool_type:{}, pool_size:{}, pool_stride:{}".format(self.pool1._pool_type,
                                                                              self.pool1._pool_size,
                                                                              self.pool1._pool_stride))
            # Fixed label typo: was "poo2_size" in the original.
            print("pool2-- pool_type:{}, pool_size:{}, pool_stride:{}".format(self.pool2._pool_type,
                                                                              self.pool2._pool_size,
                                                                              self.pool2._pool_stride))
            print("fc-- weight_size:{}, bias_size_{}, activation:{}".format(self.fc.weight.shape, self.fc.bias.shape,
                                                                            self.fc._act))

            # Output shape of every layer.
            print("\n########## print shape of features of every layer ###############")
            print("inputs_shape: {}".format(inputs.shape))
            print("outputs1_shape: {}".format(outputs1.shape))
            print("outputs2_shape: {}".format(outputs2.shape))
            print("outputs3_shape: {}".format(outputs3.shape))
            print("outputs4_shape: {}".format(outputs4.shape))
            print("outputs5_shape: {}".format(outputs5.shape))

        # Optionally print parameter/activation contents for mid-training debugging.
        if check_content:
            # Kernel weights are numerous; print only the first kernel's
            # first channel from each conv layer.
            print("\n########## print convolution layer's kernel ###############")
            print("conv1 params -- kernel weights:", self.conv1.weight[0][0])
            print("conv2 params -- kernel weights:", self.conv2.weight[0][0])

            # Pick a random channel from each conv output to spot-check.
            idx1 = np.random.randint(0, outputs1.shape[1])
            idx2 = np.random.randint(0, outputs3.shape[1])
            # Print conv(+pool) features for the first image in the batch only.
            print("\nThe {}th channel of conv1 layer: ".format(idx1), outputs1[0][idx1])
            print("The {}th channel of conv2 layer: ".format(idx2), outputs3[0][idx2])
            print("The output of last layer:", outputs5[0], '\n')

        # When a label is provided, also compute and return classification accuracy.
        if label is not None:
            acc = fluid.layers.accuracy(input=outputs5, label=label)
            return outputs5, acc
        else:
            return outputs5

