import tensorflow as tf
import math
import numpy as np
import sys


sys.path.append('../../../')
import tensornet


# Training hyper-parameters for this model.
opts = {
    'use_dropout': True,                # enable dropout during training
    'initial_learning_rate': 0.03,      # starting learning rate
    'num_epochs_per_decay': 30.0,       # epochs between LR decays
    'learning_rate_decay_factor': 0.1,  # multiplicative LR decay
}

# TT-rank used by every tensor-train (TT) layer below.
tt_rank = 16


# Augment a training image (pad + random crop + flip + contrast jitter)
# to enlarge the effective sample size and reduce overfitting.
def aug_train(image, aux, app_target=False):
    """Randomly augment a training image, then standardize it.

    Pads each spatial side by 4 pixels (32x32 -> 40x40), takes a random
    32x32 crop, randomly flips left/right, optionally jitters contrast,
    and finally converts pixels to z-scores using the dataset statistics
    carried in `aux`.

    Args:
        image: a single HWC image tensor.
        aux: dict holding 'mean' and 'std' used for standardization.
        app_target: if True, treat the image as single-band and skip the
            contrast jitter.

    Returns:
        The augmented, standardized image tensor.
    """
    # Zero-pad height and width by 4 pixels on every side.
    padded = tf.pad(image, [[4, 4], [4, 4], [0, 0]])

    # Crop back to 32x32; target-app images carry a single band.
    channels = 1 if app_target else 3
    augmented = tf.random_crop(padded, [32, 32, channels])

    # Random horizontal flip.
    augmented = tf.image.random_flip_left_right(augmented)

    # Contrast jitter only for the non-target (3-band) pipeline.
    if not app_target:
        augmented = tf.image.random_contrast(augmented, 0.75, 1.25)

    # Standardize: subtract the dataset mean, divide by its std.
    return (augmented - aux['mean']) / aux['std']


# Unlike the training set, evaluation images need no augmentation —
# only standardization to z-scores.
def aug_eval(image, aux):
    """Standardize an evaluation image to z-scores (no augmentation).

    Subtracts the dataset mean and divides by the dataset standard
    deviation, both taken from `aux`.
    """
    return (image - aux['mean']) / aux['std']


def inference(images, train_phase, cpu_variables=False, app_target=False):
	"""Build the conv-fc network with TT-format layers and return logits.

	The structure is based on a conv-fc design, but the FC stack is
	reduced from 3 layers to 2 while keeping the connection count of the
	2 FC layers comparable to the original 3; the conv and fc parts are
	both written in TT (tensor-train) format.

	Args:
		images: batch of input images (NHWC; spatial size presumably
			32x32 to match the augmentation pipeline — TODO confirm).
		train_phase: train/eval switch consumed by batch normalization.
		cpu_variables: if True, forwarded to every tensornet layer
			(presumably places variables on CPU — verify in tensornet).
		app_target: if True build a 5-class head, otherwise 10 classes.

	Returns:
		The final linear layer's output tensor (logits, no softmax).
	"""
	# tn_init = lambda dev: lambda shape: tf.truncated_normal(shape, stddev=dev)
	# tu_init = lambda bound: lambda shape: tf.random_uniform(shape, minval = -bound, maxval = bound)
	
	# dropout_rate = lambda p: (opts['use_dropout'] * (p - 1.0)) * tf.to_float(train_phase) + 1.0

	# Number of output classes depends on the target application.
	if (app_target):
		NUM_CLASSES = 5
	else:
		NUM_CLASSES = 10
	
	# Every layer output is appended here; layers[-1] is always the
	# current top of the network.
	layers = []
	layers.append(images)

	# conv 1.1: plain (dense) 3x3 conv to 64 channels.
	layers.append(tensornet.layers.conv(layers[-1],
                                        64,
                                        [3, 3],
                                        cpu_variables=cpu_variables,
                                        biases_initializer=tf.zeros_initializer(),
                                        scope='conv1.1'))
	
	layers.append(tensornet.layers.batch_normalization(layers[-1],
                                                       train_phase,
                                                       cpu_variables=cpu_variables,
                                                       scope='bn1.1'))
	
	layers.append(tf.nn.relu(layers[-1],
                             name='relu1.1'))

	# conv 1.2 with pooling: TT conv, 3x3 kernel; in/out channels
	# factored as 4*4*4 = 64, TT-ranks (tt_rank, tt_rank, tt_rank, 1).
	layers.append(tensornet.layers.tt_conv_t3f(layers[-1],
                                                [3, 3],
                                                np.array([4,4,4],dtype=np.int32),
                                                np.array([4,4,4],dtype=np.int32),
                                                np.array([tt_rank,tt_rank,tt_rank,1],dtype=np.int32),
                                                [1, 1],
                                                cpu_variables=cpu_variables,
                                                biases_initializer=tf.zeros_initializer(),
                                                scope='tt_conv1.2'))

	layers.append(tensornet.layers.batch_normalization(layers[-1],
                                                       train_phase,
                                                       cpu_variables=cpu_variables,
                                                       scope='bn1.2'))

	layers.append(tf.nn.relu(layers[-1],
                             name='relu1.2'))

	# 3x3 max pool, stride 2 — halves the spatial resolution.
	layers.append(tf.nn.max_pool(layers[-1],
                                 [1, 3, 3, 1],
                                 [1, 2, 2, 1],
                                 'SAME',
                                 name='max_pool1'))

	# conv 2.1: dense 1x1 conv expanding to 256 channels.
	layers.append(tensornet.layers.conv(layers[-1],
                                        256,
                                        [1, 1],
                                        cpu_variables=cpu_variables,
                                        biases_initializer=tf.zeros_initializer(),
                                        scope='conv2.1'))

	layers.append(tensornet.layers.batch_normalization(layers[-1],
                                                       train_phase,
                                                       cpu_variables=cpu_variables,
                                                       scope='bn2.1'))

	layers.append(tf.nn.relu(layers[-1],
                             name='relu2.1'))

	# conv 2.2 with pooling: TT conv, 3x3 kernel; in/out channels
	# factored as 4*4*4*4 = 256, TT-ranks (tt_rank x4, 1).
	layers.append(tensornet.layers.tt_conv_t3f(layers[-1],
                                                [3, 3],
                                                np.array([4,4,4,4],dtype=np.int32),
                                                np.array([4,4,4,4],dtype=np.int32),
                                                np.array([tt_rank,tt_rank,tt_rank,tt_rank,1],dtype=np.int32),
                                                [1, 1],
                                                cpu_variables=cpu_variables,
                                                biases_initializer=tf.zeros_initializer(),
                                                scope='tt_conv2.2'))
	
	layers.append(tensornet.layers.batch_normalization(layers[-1],
                                                       train_phase,
                                                       cpu_variables=cpu_variables,
                                                       scope='bn2.2'))

	layers.append(tf.nn.relu(layers[-1],
                             name='relu2.2'))
	
	# Second 3x3/stride-2 max pool.
	layers.append(tf.nn.max_pool(layers[-1],
                                 [1, 3, 3, 1],
                                 [1, 2, 2, 1],
                                 'SAME',
                                 name='max_pool2'))

	# conv 3.1: TT conv with VALID padding (shrinks spatial extent).
	layers.append(tensornet.layers.tt_conv_t3f(layers[-1],
                                                [3, 3],
                                                np.array([4,4,4,4],dtype=np.int32),
                                                np.array([4,4,4,4],dtype=np.int32),
                                                np.array([tt_rank,tt_rank,tt_rank,tt_rank,1],dtype=np.int32),
                                                [1, 1],
												padding='VALID',
                                                cpu_variables=cpu_variables,
                                                biases_initializer=tf.zeros_initializer(),
                                                scope='tt_conv3.1'))
	
	layers.append(tensornet.layers.batch_normalization(layers[-1],
                                                       train_phase,
                                                       cpu_variables=cpu_variables,
                                                       scope='bn3.1'))

	layers.append(tf.nn.relu(layers[-1],
                             name='relu3.1'))

	# conv 3.2: second TT conv with VALID padding.
	layers.append(tensornet.layers.tt_conv_t3f(layers[-1],
                                                [3, 3],
                                                np.array([4,4,4,4],dtype=np.int32),
                                                np.array([4,4,4,4],dtype=np.int32),
                                                np.array([tt_rank,tt_rank,tt_rank,tt_rank,1],dtype=np.int32),
                                                [1, 1],
												padding='VALID',
                                                cpu_variables=cpu_variables,
                                                biases_initializer=tf.zeros_initializer(),
                                                scope='tt_conv3.2'))
	
	layers.append(tensornet.layers.batch_normalization(layers[-1],
                                                       train_phase,
                                                       cpu_variables=cpu_variables,
                                                       scope='bn3.2'))

	layers.append(tf.nn.relu(layers[-1],
                             name='relu3.2'))

	# 4x4 average pool, stride 4 — collapses the remaining spatial grid.
	layers.append(tf.nn.avg_pool(layers[-1],
                                 [1,4,4,1],
                                 [1,4,4,1],
                                 'SAME',
                                  name='avg_pool_full'))

	# Flatten all non-batch dimensions for the linear classifier head.
	sz = np.prod(layers[-1].get_shape().as_list()[1:])

	# Final linear layer (one of the 2 FC layers; no bias) -> logits.
	layers.append(tensornet.layers.linear(tf.reshape(layers[-1], [-1, sz]),
                                          NUM_CLASSES,
                                          cpu_variables=cpu_variables,
                                          biases_initializer=None,
                                          scope='linear4.1'))
	
	return layers[-1]
	

# Loss function
def losses(logits, labels):
	"""Compute the mean softmax cross-entropy loss over the batch.

	`logits` must be the network's final-layer output WITHOUT softmax,
	because sparse_softmax_cross_entropy_with_logits applies softmax
	internally before computing the cross-entropy.

	Note: reduce_mean is called without an axis, so it averages over all
	elements of `xentropy`, yielding the mean per-example loss.

	Args:
		logits: unnormalized class scores, shape [batch, num_classes].
		labels: integer class ids, shape [batch].

	Returns:
		A single-element list containing the scalar mean loss tensor.
	"""
	xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy')
	loss = tf.reduce_mean(xentropy, name='loss')
	return [loss]


# Evaluation
def evaluation(logits, labels):
	"""Count top-1 correct predictions for a batch.

	`logits` is the network's final-layer output without softmax — the
	raw class scores for each input.

	Returns:
		An int32 tensor with 1 where the true label is the top-1
		prediction and 0 otherwise.
	"""
	# in_top_k yields booleans; cast to int32 so callers can sum them.
	hits = tf.nn.in_top_k(logits, labels, 1)
	return tf.cast(hits, tf.int32)
