import tensorflow as tf
import t3f
import numpy as np

def tt_conv_t3f(inp,
                window,
                inp_ch_modes,
                out_ch_modes,
                ranks,
                strides=(1, 1),
                padding='SAME',
                filters_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
                filters_regularizer=None,
                cores_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
                cores_regularizer=None,
                biases_initializer=tf.zeros_initializer,
                biases_regularizer=None,
                trainable=True,
                cpu_variables=False,
                scope=None):
    """tt-conv layer: convolve the full input tensor with TT-format filters.

    The TT filter tensor is materialized into a dense tf.Tensor via t3f
    (a matmul against an identity matrix, which is cheap because the
    input channel modes are typically small) and then fed to tf.nn.conv2d.

    Args:
        inp: input tensor, float - [batch_size, H, W, C]
        window: convolution window size, list [wH, wW]
        inp_ch_modes: input channel modes, np.array (int32) of size d;
            prod(inp_ch_modes) must equal C
        out_ch_modes: output channel modes, np.array (int32) of size d
        ranks: tt-filter ranks, np.array (int32) of size (d + 1)
        strides: spatial strides, sequence of 2 ints - [sx, sy]
        padding: 'SAME' or 'VALID', string
        filters_initializer: UNUSED here (kept for interface compatibility
            with sibling tt-conv implementations); the TT cores are
            initialized with t3f.glorot_initializer instead
        filters_regularizer: UNUSED, kept for interface compatibility
        cores_initializer: UNUSED, kept for interface compatibility
        cores_regularizer: UNUSED, kept for interface compatibility
        biases_initializer: bias init function (if None, no biases are used)
        biases_regularizer: bias regularizer function
        trainable: trainable-variables flag, bool (applies to both the
            TT filter cores and the biases)
        cpu_variables: UNUSED, kept for interface compatibility
        scope: layer variable scope name, string
    Returns:
        out: output tensor, float - [batch_size, H', W', prod(out_ch_modes)],
            where H', W' depend on strides and padding
    """
    with tf.variable_scope(scope):
        # Make the NHWC layout explicit (collapses any leading batch
        # dims into one) before the convolution.
        tmp = tf.reshape(inp, [-1, inp.shape[1], inp.shape[2], inp.shape[3]])

        # TT filters: a TT-matrix with row modes [wH, c_1, ..., c_d]
        # and column modes [wW, s_1, ..., s_d].
        filters_shape = [[window[0]] + inp_ch_modes.tolist(),
                         [window[1]] + out_ch_modes.tolist()]
        initializer = t3f.glorot_initializer(filters_shape,
                                             tt_rank=[1] + ranks.tolist())
        # BUG FIX: propagate `trainable` to the TT cores; previously they
        # were always trainable regardless of the flag.
        tt_filters = t3f.get_variable('tt_filters',
                                      initializer=initializer,
                                      trainable=trainable)

        # Densify: left-multiply by an identity matrix (cheap since the
        # input channel modes are small), yielding a tf.Tensor of shape
        # [wH*c_1*...*c_d, wW*s_1*...*s_d].
        identity_matrix = tf.eye(np.prod(filters_shape[0]))
        filters = t3f.matmul(identity_matrix,
                             t3f.renormalize_tt_cores(tt_filters))

        # Reshape to wH x c_1 x ... x c_d x wW x s_1 x ... x s_d.
        filters = tf.reshape(filters,
                             [window[0]] + inp_ch_modes.tolist() +
                             [window[1]] + out_ch_modes.tolist())

        # Transpose to wH x wW x c_1 x ... x c_d x s_1 x ... x s_d.
        d = inp_ch_modes.size
        inch_orders = [1 + i for i in range(d)]
        outch_orders = [2 + d + i for i in range(d)]
        filters = tf.transpose(filters, [0, d + 1] + inch_orders + outch_orders)

        # Collapse the channel modes into the wH x wW x C x S layout
        # expected by tf.nn.conv2d (C = prod(c_i), S = prod(s_i)).
        filters = tf.reshape(filters, [window[0], window[1],
                                       np.prod(inp_ch_modes),
                                       np.prod(out_ch_modes)])

        tmp = tf.nn.conv2d(tmp, filters, [1] + list(strides) + [1],
                           padding, name='conv2d')
        if biases_initializer is not None:
            biases = tf.get_variable('biases',
                                     shape=[np.prod(out_ch_modes)],
                                     initializer=biases_initializer,
                                     regularizer=biases_regularizer,
                                     trainable=trainable)
            out = tf.add(tmp, biases, name='out')
        else:
            out = tf.identity(tmp, name='out')

        return out
