# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.

Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
    Deep Residual Learning for Image Recognition. arXiv:1512.03385

The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
    Identity Mappings in Deep Residual Networks. arXiv: 1603.05027

The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import Layers
#from Quantize import fw,fa,fBits
from BatchNorm import BatchNorm, BatchNorm3d

# Batch-normalization hyperparameters.
_BATCH_NORM_DECAY = 0.997    # Moving-average decay for batch-norm statistics.
_BATCH_NORM_EPSILON = 1e-5   # Variance epsilon added for numerical stability.

# Input volume geometry: single-channel 32x32x32 voxel grids.
_HEIGHT = 32         # Spatial height of each input volume.
_WIDTH = 32          # Spatial width of each input volume.
_FRAME = 32          # Depth (number of frames/slices) of each input volume.
_NUM_CHANNELS = 1    # Single input channel per voxel.
_LABEL_CLASSES = 40  # Number of output classes (presumably ModelNet40 — TODO confirm).


def batch_norm_relu3d(inputs, is_training, data_format, name_scope):
	"""Applies 3-D batch normalization followed by a ReLU.

	Args:
		inputs: 5-D input tensor (assumed NDHWC layout — TODO confirm
			against BatchNorm3d, which is always called with 'NDHWC').
		is_training: Python bool or bool tensor; forwarded to BatchNorm3d so
			it uses batch statistics in training and moving averages otherwise.
		data_format: Currently unused — BatchNorm3d is hard-wired to 'NDHWC'.
			Kept for signature compatibility with callers.
		name_scope: Name of the variable scope wrapping both ops.

	Returns:
		The batch-normalized, rectified tensor.
	"""
	with tf.variable_scope(name_scope):
		# Use the module-level decay constant instead of a duplicated literal
		# so every batch-norm layer in this file stays in sync.
		inputs = BatchNorm3d(inputs, center=True, scale=True,
							is_training=is_training, decay=_BATCH_NORM_DECAY,
							Random=None, data_format='NDHWC')
		inputs = tf.nn.relu(inputs)
	return inputs


def batch_norm_relu(inputs, is_training, name_scope):
	"""Applies batch normalization followed by a ReLU to a 2-D activation.

	Args:
		inputs: 2-D input tensor (batch, features), e.g. a linear-layer output.
		is_training: Python bool or bool tensor; forwarded to BatchNorm so it
			uses batch statistics in training and moving averages otherwise.
		name_scope: Name of the variable scope wrapping both ops.

	Returns:
		The batch-normalized, rectified tensor.
	"""
	with tf.variable_scope(name_scope):
		# Use the module-level decay constant instead of a duplicated literal
		# so every batch-norm layer in this file stays in sync.
		inputs = BatchNorm(inputs, center=True, scale=True,
							is_training=is_training, decay=_BATCH_NORM_DECAY,
							Random=None)
		inputs = tf.nn.relu(inputs)
	return inputs


def cnn_model(num_classes, data_format=None):
	"""Builds a plain 3-D CNN classifier for volumetric input.

	Architecture: three conv/batch-norm/ReLU stages (64, 128, 256 filters)
	each followed by 2x average pooling, then two fully connected layers
	with dropout and a final linear classification layer.

	Args:
		num_classes: Number of output logits.
		data_format: 'channels_last' (default when None) or 'channels_first';
			only consumed by the pooling layers here.

	Returns:
		A callable `model(inputs, is_training)` that builds the graph and
		returns the (batch, num_classes) logits tensor.
	"""
	if data_format is None:
		data_format = 'channels_last'

	def model(inputs, is_training):
		print('input shape?:, ', inputs.shape)
		# Force whatever incoming shape into NDHWC volumes.
		inputs = tf.reshape(inputs, (-1, _FRAME, _HEIGHT, _WIDTH, _NUM_CHANNELS))
		print('Input Shape:, ', inputs.shape)

		# Stage 1: 64 filters.
		inputs = Layers.conv_3d(inputs, 64, [5, 5, 5], name_scope='conv_1')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_1')
		inputs = Layers.conv_3d(inputs, 64, [3, 3, 3], name_scope='conv_1_2')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_1_2')
		inputs = tf.layers.average_pooling3d(inputs=inputs, pool_size=2, strides=2, padding='SAME', data_format=data_format)
		inputs = tf.identity(inputs, 'avg_pool_1')

		# Stage 2: 128 filters, asymmetric kernels (one 5-extent axis each).
		inputs = Layers.conv_3d(inputs, 128, [5, 3, 3], name_scope='conv_2')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_2')
		inputs = Layers.conv_3d(inputs, 128, [3, 5, 3], name_scope='conv_2_2')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_2_2')
		inputs = Layers.conv_3d(inputs, 128, [3, 3, 5], name_scope='conv_2_3')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_2_3')
		inputs = tf.layers.average_pooling3d(inputs=inputs, pool_size=2, strides=2, padding='SAME', data_format=data_format)
		inputs = tf.identity(inputs, 'avg_pool_2')

		# Stage 3: 256 filters.
		inputs = Layers.conv_3d(inputs, 256, [3, 3, 3], name_scope='conv_3')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_3')
		inputs = Layers.conv_3d(inputs, 256, [3, 3, 3], name_scope='conv_3_2')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_3_2')
		inputs = Layers.conv_3d(inputs, 256, [3, 3, 3], name_scope='conv_3_3')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_3_3')
		inputs = tf.layers.average_pooling3d(inputs=inputs, pool_size=2, strides=2, padding='SAME', data_format=data_format)
		inputs = tf.identity(inputs, 'avg_pool_3')

		# Classifier head: flatten, two FC+BN+ReLU+dropout blocks, linear out.
		sz = np.prod(inputs.get_shape().as_list()[1:])
		inputs = tf.reshape(inputs, [-1, sz])
		inputs = Layers.linear(inputs, 4096, name_scope='linear_1')
		inputs = batch_norm_relu(inputs, is_training, name_scope='relu_linear_1')
		# BUG FIX: tf.nn.dropout(inputs, 0.5) was applied unconditionally, so
		# dropout stayed active at eval/inference time. tf.layers.dropout
		# disables itself when training=False; keep_prob=0.5 -> rate=0.5.
		inputs = tf.layers.dropout(inputs, rate=0.5, training=is_training, name='dropout_1')

		inputs = Layers.linear(inputs, 2048, name_scope='linear_2')
		inputs = batch_norm_relu(inputs, is_training, name_scope='relu_linear_2')
		inputs = tf.layers.dropout(inputs, rate=0.5, training=is_training, name='dropout_2')

		inputs = Layers.linear(inputs, num_classes, name_scope='linear_3')

		return inputs

	return model


def t3f_tt_cnn_model(num_classes, data_format=None):
	"""Builds the 3-D CNN classifier with Tensor-Train factorized layers.

	Same architecture as `cnn_model`, but every layer past the first conv is
	replaced by its Tensor-Train (TT) factorized counterpart
	(`Layers.conv_3d_tt` / `Layers.linear_tt`) with explicit input/output
	factorizations and TT-ranks.

	Args:
		num_classes: Number of output logits.
		data_format: 'channels_last' (default when None) or 'channels_first';
			only consumed by the pooling layers here.

	Returns:
		A callable `model(inputs, is_training)` that builds the graph and
		returns the (batch, num_classes) logits tensor.
	"""
	if data_format is None:
		data_format = 'channels_last'

	def model(inputs, is_training):
		print('input shape?:, ', inputs.shape)
		# Force whatever incoming shape into NDHWC volumes.
		inputs = tf.reshape(inputs, (-1, _FRAME, _HEIGHT, _WIDTH, _NUM_CHANNELS))
		print('Input Shape:, ', inputs.shape)

		# Stage 1: plain first conv, then a TT-factorized conv.
		inputs = Layers.conv_3d(inputs, 64, [5, 5, 5], name_scope='conv_1')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_1')
		inputs = Layers.conv_3d_tt(inputs, 64, [3, 3, 3], [4, 4, 4], [4, 4, 4], [16, 16, 16], name_scope='conv_1_2')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_1_2')
		inputs = tf.layers.average_pooling3d(inputs=inputs, pool_size=2, strides=2, padding='SAME', data_format=data_format)
		inputs = tf.identity(inputs, 'avg_pool_1')

		# Stage 2: 128 filters, TT-factorized, asymmetric kernels.
		inputs = Layers.conv_3d_tt(inputs, 128, [5, 3, 3], [4, 4, 4], [8, 4, 4], [16, 16, 16], name_scope='conv_2')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_2')
		inputs = Layers.conv_3d_tt(inputs, 128, [3, 5, 3], [8, 4, 4], [4, 8, 4], [32, 32, 16], name_scope='conv_2_2')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_2_2')
		inputs = Layers.conv_3d_tt(inputs, 128, [3, 3, 5], [8, 4, 4], [4, 8, 4], [32, 32, 16], name_scope='conv_2_3')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_2_3')
		inputs = tf.layers.average_pooling3d(inputs=inputs, pool_size=2, strides=2, padding='SAME', data_format=data_format)
		inputs = tf.identity(inputs, 'avg_pool_2')

		# Stage 3: 256 filters, TT-factorized.
		inputs = Layers.conv_3d_tt(inputs, 256, [3, 3, 3], [8, 4, 4], [4, 8, 8], [32, 32, 32], name_scope='conv_3')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_3')
		inputs = Layers.conv_3d_tt(inputs, 256, [3, 3, 3], [4, 4, 4, 4], [4, 4, 4, 4], [16, 16, 16, 16], name_scope='conv_3_2')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_3_2')
		inputs = Layers.conv_3d_tt(inputs, 256, [3, 3, 3], [4, 4, 4, 4], [4, 4, 4, 4], [16, 16, 16, 16], name_scope='conv_3_3')
		inputs = batch_norm_relu3d(inputs, is_training, data_format, name_scope='act_3_3')
		inputs = tf.layers.average_pooling3d(inputs=inputs, pool_size=2, strides=2, padding='SAME', data_format=data_format)
		inputs = tf.identity(inputs, 'avg_pool_3')

		# Classifier head: flatten, two TT-factorized FC blocks, linear out.
		sz = np.prod(inputs.get_shape().as_list()[1:])
		inputs = tf.reshape(inputs, [-1, sz])
		inputs = Layers.linear_tt(inputs, 4096, [8, 8, 8, 8, 4], [4, 4, 4, 8, 8], [32, 32, 32, 32], name_scope='linear_1')
		inputs = batch_norm_relu(inputs, is_training, name_scope='relu_linear_1')
		# BUG FIX: tf.nn.dropout(inputs, 0.9) was applied unconditionally, so
		# dropout stayed active at eval/inference time. tf.layers.dropout
		# disables itself when training=False; keep_prob=0.9 -> rate=0.1.
		inputs = tf.layers.dropout(inputs, rate=0.1, training=is_training, name='dropout_1')

		inputs = Layers.linear_tt(inputs, 2048, [8, 8, 4, 4, 4], [4, 4, 4, 4, 8], [32, 32, 32, 32], name_scope='linear_2')
		inputs = batch_norm_relu(inputs, is_training, name_scope='relu_linear_2')
		inputs = tf.layers.dropout(inputs, rate=0.1, training=is_training, name='dropout_2')

		inputs = Layers.linear(inputs, num_classes, name_scope='linear_3')

		return inputs

	return model
