import sys
sys.path.append('../../../')
import tensorflow as tf
import numpy as np
import tensornet

# Learning-rate schedule hyper-parameters (consumed by the training script).
opts = {
    'initial_learning_rate': 0.03,
    'num_epochs_per_decay': 30.0,
    'learning_rate_decay_factor': 0.1,
}


def aug_train(image, aux, app_target=False):
    """Augment a training image to enlarge the effective sample size.

    Pipeline: zero-pad, random crop, random horizontal flip, (optionally)
    random contrast jitter, then standardize to "standard scores".

    Args:
        image: input image tensor; assumed 32x32 HWC — TODO confirm with caller.
        aux: dict holding the dataset-global 'mean' and 'std'.
        app_target: when True the image is treated as single-channel and the
            contrast jitter is skipped.

    Returns:
        The augmented image, standardized as (x - mean) / std.
    """
    # Pad 4 zero pixels on every side: 32x32 -> 40x40.
    out = tf.pad(image, [[4, 4], [4, 4], [0, 0]])

    # Random 32x32 crop, keeping every channel.
    channels = 1 if app_target else 3
    out = tf.random_crop(out, [32, 32, channels])

    # Random left-right flip.
    out = tf.image.random_flip_left_right(out)

    # Contrast jitter only for the 3-channel (non-target) images.
    if not app_target:
        out = tf.image.random_contrast(out, 0.75, 1.25)

    # Standard score: subtract the global mean, divide by the global std.
    return (out - aux['mean']) / aux['std']


def aug_eval(image, aux):
    """Standardize an evaluation image (no augmentation).

    Unlike the training path, validation images only need to be converted
    to "standard scores": subtract the global mean, divide by the std.
    """
    return (image - aux['mean']) / aux['std']


# Loss function
def losses(logits, labels):
	"""Mean softmax cross-entropy loss for integer class labels.

	*logits* must be the raw (pre-softmax) output of the network:
	sparse_softmax_cross_entropy_with_logits applies softmax internally
	before computing the cross-entropy. The per-example losses are then
	averaged over the batch (reduce_mean with no axis reduces over all
	elements).

	Returns:
		A single-element list containing the scalar loss tensor.
	"""
	per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
		logits=logits, labels=labels, name='xentropy')
	mean_loss = tf.reduce_mean(per_example, name='loss')
	return [mean_loss]


# Evaluation
def evaluation(logits, labels):
	"""Per-example top-1 correctness flags.

	*logits* is the raw (pre-softmax) network output. Each example scores
	1 when its highest-scoring class equals its label, else 0.

	Returns:
		An int32 tensor of 0/1 flags (bool cast to int32).
	"""
	hits = tf.nn.in_top_k(logits, labels, 1)
	return tf.cast(hits, tf.int32)


def inference_original(images, train_phase, cpu_variables=False, app_target=False):
	"""Baseline VGG-style network: no dropout and no tensor-train layers.

	Serves as the reference model for comparing regularization schemes.

	Args:
		images: input image batch.
		train_phase: unused here; kept so all inference_* share one signature.
		cpu_variables: forwarded to the tensornet layer constructors.
		app_target: True selects the 5-class target task, else 10 classes.

	Returns:
		Pre-softmax logits of shape [batch, num_classes].
	"""
	num_classes = 5 if app_target else 10

	def conv_relu(net, filters, conv_scope, relu_name):
		# 3x3 convolution (bias disabled) followed by ReLU.
		net = tensornet.layers.conv(net, filters, [3, 3],
									cpu_variables=cpu_variables,
									biases_initializer=None, scope=conv_scope)
		return tf.nn.relu(net, name=relu_name)

	with tf.variable_scope('original'):
		net = images

		# Stage 1: two 64-filter convs, then pool.
		net = conv_relu(net, 64, 'conv1.1', 'relu1.1')
		net = conv_relu(net, 64, 'tt_conv1.2', 'relu1.2')
		net = tf.nn.max_pool(net, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', name='max_pool1')

		# Stage 2: two 128-filter convs, then pool.
		net = conv_relu(net, 128, 'tt_conv2.1', 'relu2.1')
		net = conv_relu(net, 128, 'tt_conv2.2', 'relu2.2')
		net = tf.nn.max_pool(net, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', name='max_pool2')

		# Stage 3: two 256-filter convs (no pool).
		net = conv_relu(net, 256, 'tt_conv3.1', 'relu3.1')
		net = conv_relu(net, 256, 'tt_conv3.2', 'relu3.2')

		# Flatten and classify: 2187-unit hidden layer, then the logits.
		flat_size = np.prod(net.get_shape().as_list()[1:])
		net = tensornet.layers.linear(tf.reshape(net, [-1, flat_size]), 2187,
									  cpu_variables=cpu_variables,
									  biases_initializer=None, scope='linear4.1')
		net = tf.nn.relu(net, name='relu4.1')
		net = tensornet.layers.linear(net, num_classes,
									  cpu_variables=cpu_variables, scope='linear4.2')

	return net


def inference_droput(images, train_phase, cpu_variables=False, app_target=False):
	"""VGG-style network with dropout before the final classifier.

	NOTE(review): the misspelled name ("droput") is kept because external
	callers reference it.

	Args:
		images: input image batch.
		train_phase: bool tensor (or None). Dropout keep-probability is 0.8
			when it evaluates to True and 1.0 otherwise; passing None
			disables dropout entirely.
		cpu_variables: forwarded to the tensornet layer constructors.
		app_target: True selects the 5-class target task, else 10 classes.

	Returns:
		Pre-softmax logits of shape [batch, NUM_CLASSES].
	"""
	if (app_target):
		NUM_CLASSES = 5
	else:
		NUM_CLASSES = 10

	# Keep-probability schedule: p while training, 1.0 at eval time.
	# BUG FIX: the original referenced an undefined name `tfv_train_phase`;
	# the parameter is called `train_phase`.
	if train_phase is not None:
		dropout_rate = lambda p: (p - 1.0) * tf.to_float(train_phase) + 1.0
	else:
		dropout_rate = lambda p: p * 0.0 + 1.0

	with tf.variable_scope('dropout'):
		layers = []
		layers.append(images)

		# conv 1.1
		layers.append(tensornet.layers.conv(layers[-1], 64, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='conv1.1'))
		layers.append(tf.nn.relu(layers[-1], name='relu1.1'))

		# conv 1.2 with pooling
		layers.append(tensornet.layers.conv(layers[-1], 64, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv1.2'))
		layers.append(tf.nn.relu(layers[-1], name='relu1.2'))
		layers.append(tf.nn.max_pool(layers[-1], [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', name='max_pool1'))

		# conv 2.1
		layers.append(tensornet.layers.conv(layers[-1], 128, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv2.1'))
		layers.append(tf.nn.relu(layers[-1], name='relu2.1'))

		# conv 2.2 with pooling
		layers.append(tensornet.layers.conv(layers[-1], 128, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv2.2'))
		layers.append(tf.nn.relu(layers[-1], name='relu2.2'))
		layers.append(tf.nn.max_pool(layers[-1], [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', name='max_pool2'))

		# conv 3.1
		layers.append(tensornet.layers.conv(layers[-1], 256, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv3.1'))
		layers.append(tf.nn.relu(layers[-1], name='relu3.1'))

		# conv 3.2 (no pooling in this variant)
		layers.append(tensornet.layers.conv(layers[-1], 256, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv3.2'))
		layers.append(tf.nn.relu(layers[-1], name='relu3.2'))

		# Flatten, hidden layer, dropout, then classifier.
		sz = np.prod(layers[-1].get_shape().as_list()[1:])
		layers.append(tensornet.layers.linear(tf.reshape(layers[-1], [-1, sz]), 2187, cpu_variables=cpu_variables, biases_initializer=None, scope='linear4.1'))
		layers.append(tf.nn.relu(layers[-1], name='relu4.1'))
		# BUG FIX: the original referenced an undefined name `l_layers`.
		layers.append(tf.nn.dropout(layers[-1], dropout_rate(0.8), name='dropout_4.1'))

		layers.append(tensornet.layers.linear(layers[-1], NUM_CLASSES, cpu_variables=cpu_variables, scope='linear4.2'))

	return layers[-1]


def inference_tt(images, train_phase, cpu_variables=False, app_target=False):
	if (app_target):
		NUM_CLASSES = 5
	else:
		NUM_CLASSES = 10

	with tf.variable_scope('tt'):
		layers = []
		layers.append(images)
		
		# conv 1.1
		layers.append(tensornet.layers.conv(layers[-1], 64, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='conv1.1'))
		layers.append(tf.nn.relu(layers[-1], name='relu1.1'))

		# conv 1.2 带 pooling
		layers.append(tensornet.layers.conv(layers[-1], 64, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv1.2'))
		layers.append(tf.nn.relu(layers[-1], name='relu1.2'))
		layers.append(tf.nn.max_pool(layers[-1], [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', name='max_pool1'))

		# conv 2.1
		layers.append(tensornet.layers.conv(layers[-1], 128, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv2.1'))
		layers.append(tf.nn.relu(layers[-1], name='relu2.1'))

		# conv 2.2 带 pooling
		layers.append(tensornet.layers.conv(layers[-1], 128, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv2.2'))
		layers.append(tf.nn.relu(layers[-1], name='relu2.2'))
		layers.append(tf.nn.max_pool(layers[-1], [1, 3, 3, 1], [1, 2, 2, 1], 'SAME', name='max_pool2'))

		# conv 3.1
		layers.append(tensornet.layers.conv(layers[-1], 256, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv3.1'))
		layers.append(tf.nn.relu(layers[-1], name='relu3.1'))

		# conv 3.2 带 pooling
		layers.append(tensornet.layers.conv(layers[-1], 256, [3,3], cpu_variables=cpu_variables, biases_initializer=None, scope='tt_conv3.2'))
		layers.append(tf.nn.relu(layers[-1], name='relu3.2'))
		
		sz = np.prod(layers[-1].get_shape().as_list()[1:])
		layers.append(tensornet.layers.tt_t3f(tf.reshape(layers[-1], [-1, sz]),
										np.array([4,4,4,4,4,4,4], dtype=np.int32),
										np.array([3,3,3,3,3,3,3], dtype=np.int32),
										np.array([1,12,12,12,12,12,12,1], dtype=np.int32),
										biases_initializer=None, cpu_variables=cpu_variables, scope='tt4.1'))
		layers.append(tf.nn.relu(layers[-1], name='relu4.1'))

		layers.append(tensornet.layers.linear(layers[-1], NUM_CLASSES, cpu_variables=cpu_variables, scope='linear4.2'))
	
	return layers[-1]
