# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 20:36:01 2021

@author: Administrator
"""

import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics



# VGG13 feature extractor: five conv units, each made of two 3x3 "same"
# convolutions (ReLU) followed by a stride-2 max pool. Built with a loop
# instead of spelling out all fifteen layers by hand.
conv_layers = []
for n_filters in (64, 128, 256, 512, 512):
    conv_layers.extend([
        layers.Conv2D(n_filters, kernel_size=[3, 3], padding="same",
                      activation=tf.nn.relu),
        layers.Conv2D(n_filters, kernel_size=[3, 3], padding="same",
                      activation=tf.nn.relu),
        layers.MaxPool2D(pool_size=[2, 2], strides=2, padding="same"),
    ])


# Dtype conversion: scale images to float32 in [0, 1], cast labels to int32.
def preprocess(x, y):
    """Normalize an image batch to float32 in [0, 1] and cast labels to int32."""
    images = tf.cast(x, dtype=tf.float32) / 255.0
    labels = tf.cast(y, dtype=tf.int32)
    return images, labels



# Load CIFAR-100 (downloads on first use) and inspect raw shapes/ranges.
(x, y), (x_val, y_val) = datasets.cifar100.load_data()
print('datasets:', x.shape, y.shape, x.min(), x.max())
print('datasets:', x_val.shape, y_val.shape, x.min(), x.max())

# Labels arrive as shape (N, 1); drop the trailing axis so they are (N,).
y = tf.squeeze(y, axis=1)
y_val = tf.squeeze(y_val, axis=1)
print('datasets:', x.shape, y.shape, x.min(), x.max())
print('datasets:', x_val.shape, y_val.shape, x.min(), x.max())

# Build the training pipeline: normalize, shuffle, then batch.
batchsize = 64
train_db = (tf.data.Dataset.from_tensor_slices((x, y))
            .map(preprocess)
            .shuffle(10000)
            .batch(batchsize))

# Evaluation pipeline: normalize and batch only — no shuffling needed.
test_db = (tf.data.Dataset.from_tensor_slices((x_val, y_val))
           .map(preprocess)
           .batch(batchsize))

# Peek at one batch to confirm the pipeline output shapes.
print("data base:", type(train_db))
sample = next(iter(train_db))
print("sample:", type(sample), sample[0].shape, sample[1].shape)



# Assemble the network: convolutional feature extractor + dense classifier head.
conv_net = Sequential(conv_layers)
fc_net = Sequential([
    layers.Dense(256, activation=tf.nn.relu),
    layers.Dense(256, activation=tf.nn.relu),
    layers.Dense(100, activation=None),  # raw logits for 100 CIFAR-100 classes
])

# 32x32x3 input -> after 5 stride-2 pools the spatial dims collapse to 1x1x512.
conv_net.build(input_shape=[None, 32, 32, 3])
fc_net.build(input_shape=[None, 512])
# FIX: `lr` is a deprecated alias removed from modern Keras optimizers;
# `learning_rate` is the supported keyword (available since TF 2.0).
optimizer = optimizers.Adam(learning_rate=1e-4)

# Train both sub-networks jointly with a single optimizer.
variables = conv_net.trainable_variables + fc_net.trainable_variables

for epoch in range(50):
    # ---------------- training ----------------
    for step, (x, y) in enumerate(train_db):

        with tf.GradientTape() as tape:
            # [b, 32, 32, 3] -> [b, 1, 1, 512]
            out = conv_net(x)
            # Flatten to [b, 512] for the dense head.
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)

            # One-hot targets for the 100-class softmax cross-entropy.
            y_onehot = tf.one_hot(y, depth=100)
            loss = tf.losses.categorical_crossentropy(
                y_onehot, logits, from_logits=True)
            loss = tf.reduce_mean(loss)

        grads = tape.gradient(loss, variables)
        optimizer.apply_gradients(zip(grads, variables))

        if 0 == step % 100:
            print(epoch, step, "loss:", float(loss))

    # ---------------- evaluation ----------------
    total_num = 0
    total_correct = 0
    for x, y in test_db:
        out = conv_net(x)
        # FIX: original called tf.reshape([-1, 512]) without passing the
        # tensor, which raises at the first evaluation pass.
        out = tf.reshape(out, [-1, 512])
        logits = fc_net(out)
        prob = tf.nn.softmax(logits, axis=1)
        pred = tf.argmax(prob, axis=1)
        pred = tf.cast(pred, dtype=tf.int32)

        # FIX: compare predictions with labels; the original used
        # tf.equal(correct, y) where `correct` was not yet defined.
        correct = tf.equal(pred, y)
        correct = tf.cast(correct, dtype=tf.int32)
        correct = tf.reduce_sum(correct)

        # FIX: accumulate the batch size (x.shape[0]); the original added
        # the whole TensorShape object, corrupting the accuracy denominator.
        total_num += x.shape[0]
        total_correct += int(correct)

    acc = total_correct / total_num
    print(epoch, 'acc:', acc)















