from __future__ import generators, division, absolute_import, with_statement, print_function, unicode_literals

#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()



import numpy as np
import matplotlib.pyplot as plt
#import tensorflow.contrib.slim as slim

# TF1-style command-line flag registry; FLAGS is populated when flags are
# parsed (e.g. at tf.app.run() time). Kept at module level by convention.
flags = tf.app.flags
FLAGS = flags.FLAGS			


# Helper: create and initialize a weight variable.
def weight_variable(shape, name, reuse=True):
	"""Create (or reuse) a truncated-normal weight variable.

	Args:
		shape: list of ints, the shape of the weight tensor.
		name: string or tf.VariableScope the variable lives under.
		reuse: when True, share an already-created variable with the same
			scoped name instead of creating a new one.

	Returns:
		A float32 variable named "weights" inside scope `name`.
	"""
	# BUG FIX: the original used tf.Variable, which IGNORES the enclosing
	# scope's `reuse` flag and silently created a fresh (uniquified) variable
	# on every call — so the two siamese branches never actually shared
	# weights. tf.get_variable honors the scope; AUTO_REUSE creates on first
	# use and reuses afterwards, so reuse=True is safe even on the first call.
	with tf.variable_scope(name, reuse=tf.AUTO_REUSE if reuse else False):
		w = tf.get_variable(
			"weights", shape=shape, dtype=tf.float32,
			initializer=tf.truncated_normal_initializer(stddev=0.1))
	return w
# Helper: create and initialize a bias variable.
def bias_variable(shape, name, reuse=True):
	"""Create (or reuse) a constant-initialized (0.1) bias variable.

	Args:
		shape: list of ints, the shape of the bias tensor.
		name: string or tf.VariableScope the variable lives under.
		reuse: when True, share an already-created variable with the same
			scoped name instead of creating a new one.

	Returns:
		A float32 variable named "biases" inside scope `name`.
	"""
	# BUG FIX: same defect as weight_variable — tf.Variable ignores the
	# scope's `reuse` flag, so biases were never shared between the siamese
	# branches. tf.get_variable + AUTO_REUSE implements the intended sharing.
	with tf.variable_scope(name, reuse=tf.AUTO_REUSE if reuse else False):
		b = tf.get_variable(
			"biases", shape=shape, dtype=tf.float32,
			initializer=tf.constant_initializer(0.1))
	return b
# Convolution layer.
def convLayer(x, kHeight, kWidth, strideX, strideY,
			featureNum, name, relu=True, padding="SAME", reuse=True):
	"""2-D convolution: conv + bias, with an optional ReLU on top.

	The kernel is (kHeight, kWidth, in_channels, featureNum); strides are
	applied height-first ([1, strideY, strideX, 1], NHWC).

	NOTE(review): callers pass `name` produced by tf.name_scope, which is
	uniquified on re-entry — confirm the two siamese branches really end up
	in the same variable scope when weight sharing is expected.
	"""
	in_channels = int(x.get_shape()[-1])
	with tf.variable_scope(name) as scope:
		kernel = weight_variable(
			shape=[kHeight, kWidth, in_channels, featureNum],
			name=scope, reuse=reuse)
		bias = bias_variable(shape=[featureNum], name=scope, reuse=reuse)
		out = tf.nn.conv2d(
			x, kernel, strides=[1, strideY, strideX, 1], padding=padding)
		out = out + bias
		tf.summary.histogram('featureMap', out)
		return tf.nn.relu(out) if relu else out
# Pooling layer.
def maxPoolLayer(x, kHeight, kWidth, strideX, strideY, name, padding = "SAME"):
	"""Max-pooling over an NHWC input.

	CONSISTENCY FIX: ksize is [1, height, width, 1], so strides must also be
	height-first — [1, strideY, strideX, 1] — matching convLayer's stride
	convention. The original passed [1, strideX, strideY, 1]; every call site
	in this file uses strideX == strideY, so their results are unchanged.
	"""
	return tf.nn.max_pool(x, ksize = [1, kHeight, kWidth, 1],
						strides = [1, strideY, strideX, 1], padding = padding, name = name)
def dropout(x, keepPro, name = None):
	"""Apply dropout, keeping each unit with probability `keepPro`.

	BUG FIX: the third positional parameter of TF1's tf.nn.dropout is
	`noise_shape`, not `name` — the original passed `name` positionally, so
	any non-None name would have been misinterpreted as a noise shape and
	crashed the op. Pass it by keyword instead.
	"""
	return tf.nn.dropout(x, keepPro, name=name)

def fcLayer(x, inputD, outputD, reluFlag, name):
	"""Fully-connected layer: x @ w + b, optionally followed by ReLU.

	Args:
		x: 2-D input tensor of shape (batch, inputD).
		inputD: input feature dimension.
		outputD: output feature dimension.
		reluFlag: apply tf.nn.relu to the output when truthy.
		name: variable scope holding the layer's weights and biases.

	Returns:
		A (batch, outputD) tensor.
	"""
	# (Removed a dead, commented-out tf.layers.dense alternative.)
	with tf.variable_scope(name) as scope:
		w = weight_variable(shape=[inputD, outputD], name=scope)
		b = bias_variable(shape=[outputD], name=scope)
		out = tf.matmul(x, w) + b  # affine transform
		if reluFlag:
			return tf.nn.relu(out)
		return out

def mnist_model(input, reuse=False):
	"""Build the five-conv-layer embedding network for one siamese branch.

	Args:
		input: NHWC image batch; H, W and C must be statically known for the
			final flatten/reshape.
		reuse: forwarded to convLayer so a second branch can share weights.

	Returns:
		A 2-D tensor (batch, features): the flattened conv5 output.

	NOTE(review): tf.name_scope uniquifies on re-entry ("model_1/..."), so a
	second call may not land in the same variable scope as the first —
	confirm weight sharing actually happens between branches.
	"""
	with tf.name_scope("model"):
		with tf.name_scope("conv1") as scope:
			net = convLayer(input, 7, 7, 1, 1, 32, name=scope, padding="SAME", reuse=reuse)
			net = maxPoolLayer(net, 2, 2, 2, 2, name=scope)  # 2x2 max-pooling
		with tf.name_scope("conv2") as scope:
			net = convLayer(net, 5, 5, 1, 1, 64, name=scope, padding="SAME", reuse=reuse)
			net = maxPoolLayer(net, 2, 2, 2, 2, name=scope)  # 2x2 max-pooling
		with tf.name_scope("conv3") as scope:
			net = convLayer(net, 3, 3, 1, 1, 128, name=scope, padding="SAME", reuse=reuse)
			net = maxPoolLayer(net, 2, 2, 2, 2, name=scope)  # 2x2 max-pooling
		with tf.name_scope("conv4") as scope:
			net = convLayer(net, 1, 1, 1, 1, 256, name=scope, padding="SAME", reuse=reuse)
			net = maxPoolLayer(net, 2, 2, 2, 2, name=scope)  # 2x2 max-pooling
		with tf.name_scope("conv5") as scope:
			net = convLayer(net, 1, 1, 1, 1, 2, name=scope, relu=False, padding="SAME", reuse=reuse)
			net = maxPoolLayer(net, 2, 2, 2, 2, name=scope)  # 2x2 max-pooling

		# Flatten everything but the batch dimension. Convert the static
		# tf.Dimension entries to plain ints so tf.reshape receives a concrete
		# size (the original multiplied Dimension objects directly).
		h, w, c = (int(d) for d in net.get_shape()[1:])
		net = tf.reshape(net, [-1, h * w * c])

	return net

def mnist_model2(input, reuse=False):
	"""Same architecture as mnist_model, built with the tf.layers API.

	BUG FIX: the original called tf.contrib.layers.*, but `tf.contrib` does
	not exist under tensorflow.compat.v1 when running on TF 2.x (which this
	file targets — see the compat.v1 import at the top), so every call raised
	AttributeError. Rewritten with the equivalent tf.layers ops and the
	glorot-uniform initializer (the same as Xavier-uniform).

	Args:
		input: NHWC image batch.
		reuse: when True, reuse the variables created by a previous call.

	Returns:
		A 2-D tensor (batch, features): the flattened conv5 output.
	"""
	def _conv_block(net, filters, ksize, scope_name, activation=tf.nn.relu):
		# One conv (Xavier-uniform init, SAME padding) + 2x2/stride-2 max-pool.
		with tf.variable_scope(scope_name):
			net = tf.layers.conv2d(
				net, filters, ksize, padding='same', activation=activation,
				kernel_initializer=tf.glorot_uniform_initializer(),
				name='conv', reuse=reuse)
			return tf.layers.max_pooling2d(net, pool_size=[2, 2], strides=2, padding='same')

	with tf.name_scope("model"):
		net = _conv_block(input, 32, [7, 7], "conv1")
		net = _conv_block(net, 64, [5, 5], "conv2")
		net = _conv_block(net, 128, [3, 3], "conv3")
		net = _conv_block(net, 256, [1, 1], "conv4")
		net = _conv_block(net, 2, [1, 1], "conv5", activation=None)
		net = tf.layers.flatten(net)

	return net

def contrastive_loss(model1, model2, y, margin):
	"""Contrastive loss between two batches of embeddings.

	y == 1 marks a similar pair (pulled together); y == 0 marks a dissimilar
	pair (pushed apart until their distance exceeds `margin`).
	"""
	with tf.name_scope("contrastive-loss"):
		# Row-wise Euclidean distance between the paired embeddings.
		d = tf.sqrt(tf.reduce_sum(tf.pow(model1 - model2, 2), 1, keepdims=True))
		# Similar pairs: any separation is penalized (squared distance).
		pull = y * tf.square(d)
		# Dissimilar pairs: penalized only while closer than the margin.
		push = (1 - y) * tf.square(tf.maximum(margin - d, 0))
		return tf.reduce_mean(push + pull) / 2



