import tensorflow as tf
from tensorflow import keras as ks
from tensorflow.keras import layers,datasets
import numpy as np
import matplotlib.pyplot as plt


def data_type_struct():
    """Demonstrate NumPy/tensor basics: ndim, shape, dtype, reshape, slicing.

    Loads the MNIST dataset, prints its tensor attributes, displays one
    sample digit, then flattens the training images into 784-vectors.
    Side effects: network download (first call), console output, plot window.
    Returns: None.
    """
    # A rank-2 tensor (matrix): ndim == 2.
    x = np.array([[5, 78, 2, 34, 0],
                  [6, 79, 3, 35, 1],
                  [7, 80, 4, 36, 2]])
    x.ndim  # 2 — number of axes

    (train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
    x = train_images
    print('轴 ndim', x.ndim)
    print('形 shape', x.shape)
    print('类 dtype', x.dtype)

    # Plot a sample digit BEFORE flattening: plt.imshow needs a 2-D (28, 28)
    # array.  (The original code plotted after the reshape below, where
    # train_images[4] is a flat (784,) vector — imshow raises
    # "TypeError: Invalid shape" on that.)
    digit = train_images[4]
    plt.imshow(digit, cmap=plt.cm.binary)
    plt.show()

    # Reshape: flatten each 28x28 image into a 784-element vector.
    train_images = train_images.reshape((60000, 28 * 28))

    # Slicing: [:, :, :, :] — one slice expression per axis;
    # a tensor with ndim axes takes up to ndim-1 commas.

def model_balabala():
    """Notes on the two ways to define a Keras model, and on compilation.

    Dense parameters:
      activation: e.g. 'relu'

    Loss-function choice:
      - multi-class classification: categorical crossentropy
      - regression: mean-squared error
      - sequence learning: CTC (connectionist temporal classification)

    Model definition styles:
      - Sequential: a linear stack of layers (the most common architecture)
      - functional API: directed acyclic graphs of layers, for arbitrary
        architectures

    Returns: None (builds and compiles models for illustration only).
    """
    # Each Dense layer computes: relu(dot(W, input) + b)
    # Gradient-descent optimizers: SGD with momentum, Adagrad, RMSProp, ...
    # The momentum concept addresses convergence speed and local minima.

    # --- Two ways to define the same model ---
    # Use tf.keras (this file already imports tensorflow at the top) rather
    # than the standalone `keras` package, which may be missing or
    # version-incompatible alongside TF2.
    from tensorflow.keras import models

    model = models.Sequential()
    model.add(layers.Dense(32, activation='relu', input_shape=(784,)))
    model.add(layers.Dense(10, activation='softmax'))

    # The same model, defined with the functional API.
    input_tensor = layers.Input(shape=(784,))
    x = layers.Dense(32, activation='relu')(input_tensor)
    output_tensor = layers.Dense(10, activation='softmax')(x)
    model = models.Model(inputs=input_tensor, outputs=output_tensor)

    # --- Compilation ---
    from tensorflow.keras import optimizers
    # A 10-way softmax output is a classifier, so use categorical
    # crossentropy (exactly what the docstring above recommends) rather
    # than 'mse'.  `learning_rate` is the modern name for the deprecated
    # `lr` argument.
    model.compile(optimizer=optimizers.RMSprop(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    
def t1():
    """Build, compile, and fit a small Sequential model on random data.

    Demonstrates: layer configuration options (activation, initializer,
    regularizer), compiling with explicit loss/metric objects, and fitting
    from both NumPy arrays and a tf.data pipeline.
    Returns: None.
    """
    # A three-layer MLP; the final layer keeps the original relu activation.
    model = tf.keras.Sequential()
    for width, act in ((32, 'relu'), (32, 'relu'), (10, 'relu')):
        model.add(layers.Dense(width, activation=act))

    # Illustrative layer configurations (constructed but not added to the
    # model — they exist only to show the available keyword arguments).
    layers.Dense(32, activation='sigmoid')
    layers.Dense(32, activation=tf.sigmoid)
    layers.Dense(32, kernel_initializer='orthogonal')  # weight-init scheme
    layers.Dense(32, kernel_initializer=tf.keras.initializers.glorot_normal)
    layers.Dense(32, kernel_regularizer=tf.keras.regularizers.l2(0.01))  # regularization
    layers.Dense(32, kernel_regularizer=tf.keras.regularizers.l1(0.01))

    # Optimizer strategy, loss, and metric given as explicit objects.
    model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                  loss=tf.keras.losses.categorical_crossentropy,
                  metrics=[tf.keras.metrics.categorical_accuracy])

    # Synthetic training / validation data.
    x_tr = np.random.random((1000, 72))
    y_tr = np.random.random((1000, 10))
    x_val = np.random.random((200, 72))
    y_val = np.random.random((200, 10))

    # Fit directly from NumPy arrays.
    model.fit(x_tr, y_tr, epochs=10, batch_size=100,
              validation_data=(x_val, y_val))

    # Fit from a tf.data pipeline instead (batch, then repeat indefinitely).
    ds_train = tf.data.Dataset.from_tensor_slices((x_tr, y_tr)).batch(32).repeat()
    ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(32).repeat()
    model.fit(ds_train, epochs=10, steps_per_epoch=30,
              validation_data=ds_val, validation_steps=3)



if __name__ == '__main__':
    # Intentionally empty entry point: call data_type_struct(),
    # model_balabala(), or t1() manually to run one of the demos.
    pass