import tensorflow as tf
import Layers
import GetData_ImageNet


# common ResNeKt V1
def _network(data, labels, b_reuse, flag_depth, tfv_train_phase = None):
	split_depth = []
	if flag_depth == 50:
		split_depth = [3,4,6,3]
	elif flag_depth == 101:
		split_depth = [3,4,23,3]
	elif flag_depth == 152:
		split_depth = [3,8,36,3]
	else:
		return None

	_WEIGHT_DECAY = 0.00004
	
	name = 'network' + '_' + str(flag_depth)

	with tf.variable_scope(name, reuse = b_reuse):
		l_layers = []
		l_layers.append(data)
		l_layers.append(Layers.conv_2d(l_layers[-1], 64, [7, 7], strides = [2, 2], tfv_train_phase = tfv_train_phase, act_first = False, name_scope = 'conv_1'))
		l_layers.append(Layers.maxpool_2d(l_layers[-1], [3, 3], [2, 2], name_scope = 'maxpool_1'))

		for i in range(split_depth[0]):
			if i == 0:
				l_layers.append(Layers.resnekt_bottleneck(l_layers[-1], 160, 160, 256, tfv_train_phase = tfv_train_phase, downsample_first = False, name_scope = 'bottleneck_1_%d' % (i + 1)))
			else:
				l_layers.append(Layers.resnekt_bottleneck(l_layers[-1], 160, 160, 256, tfv_train_phase = tfv_train_phase, name_scope = 'bottleneck_1_%d' % (i + 1)))
		for i in range(split_depth[1]):
			l_layers.append(Layers.resnekt_bottleneck(l_layers[-1], 288, 288, 512, tfv_train_phase = tfv_train_phase, name_scope = 'bottleneck_2_%d' % (i + 1)))
		for i in range(split_depth[2]):
			l_layers.append(Layers.resnekt_bottleneck(l_layers[-1], 544, 544, 1024, tfv_train_phase = tfv_train_phase, name_scope = 'bottleneck_3_%d' % (i + 1)))
		for i in range(split_depth[3]):
			if i != split_depth[3] - 1:
				l_layers.append(Layers.resnekt_bottleneck(l_layers[-1], 1088, 1088, 2048, tfv_train_phase = tfv_train_phase, name_scope = 'bottleneck_4_%d' % (i + 1)))
			else:
				l_layers.append(Layers.resnekt_bottleneck(l_layers[-1], 1088, 1088, 2048, tfv_train_phase = tfv_train_phase, act_last = True, name_scope = 'bottleneck_4_%d' % (i + 1)))

		l_layers.append(Layers.avgpool_2d(l_layers[-1], [7, 7], [1, 1], padding = 'VALID', is_gap = True, name_scope = 'avgpool'))
		l_layers.append(Layers.fc(l_layers[-1], GetData_ImageNet._LABEL_CLASSES, act_last = False, name_scope = 'fc_out'))
		outputs = tf.identity(l_layers[-1], 'final_dense')

	xentropy = tf.losses.softmax_cross_entropy(logits = outputs, onehot_labels = labels)
	l_var = [v for v in tf.trainable_variables() if 'batch_norm' not in v.name]
	l_var2 = [v for v in l_var if 'shortcut' not in v.name]
	loss = xentropy + _WEIGHT_DECAY * tf.add_n([tf.nn.l2_loss(v) for v in l_var2 if 'var_biases' not in v.name])
	correct_flags = tf.nn.in_top_k(outputs, tf.argmax(labels, axis = 1), 1, name = 'eval' + name)
	evaluation = tf.cast(correct_flags, tf.int32)

	return loss, evaluation


# Build the training and validation towers and return their loss/evaluation ops.
def get_network_output(i, t_data, t_labels, v_data, v_labels, flag_depth, flag_asymmetric, flag_depthwise, tfv_train_phase):
	"""Return (loss_train, eval_train, loss_validation, eval_validation).

	Variables are created on the first tower (i == 0) and reused on every
	subsequent call; the validation tower always reuses.  flag_asymmetric and
	flag_depthwise are accepted for signature compatibility but unused here.
	"""
	train_ops = _network(t_data, t_labels, i > 0, flag_depth, tfv_train_phase)
	valid_ops = _network(v_data, v_labels, True, flag_depth, tfv_train_phase)

	return train_ops + valid_ops
