# -*- coding: UTF-8 -*-

import numpy as np
import tensorflow as tf
import os
import sys
# from utils import *
import pdb

# db4 wavelet analysis filters (low-pass / high-pass taps) loaded once at import.
# The directory can be overridden via the WAVE_FILTER_DIR environment variable;
# otherwise the original hard-coded path is used so existing setups keep working.
_FILTER_DIR = os.environ.get('WAVE_FILTER_DIR', 'D:/MyPycharm/Predict_Module_win/wave/db4')
lp_filter = np.load(os.path.join(_FILTER_DIR, 'lp.npy'))
hp_filter = np.load(os.path.join(_FILTER_DIR, 'hp.npy'))


# Build the low-pass and high-pass analysis matrices
def get_wave_kernel(shape, lp=None, hp=None):
    """Build the low-pass and high-pass wavelet analysis matrices.

    Column ``i`` of each matrix carries the filter taps at rows ``2*i - j``.
    Negative row indices wrap around via numpy negative indexing — presumably
    intentional periodic boundary extension of the signal (TODO confirm).

    Args:
      shape: [n_rows, n_cols] of the matrices; for one decomposition level
        this is typically [len_input, len_input // 2].
      lp: optional low-pass filter taps; defaults to the module-level
        `lp_filter` loaded at import time.
      hp: optional high-pass filter taps; defaults to the module-level
        `hp_filter`.

    Returns:
      (mat_lp, mat_hp): two numpy arrays of the given shape.
    """
    lp = lp_filter if lp is None else np.asarray(lp)
    hp = hp_filter if hp is None else np.asarray(hp)

    mat_lp = np.zeros((shape[0], shape[1]))
    mat_hp = np.zeros((shape[0], shape[1]))

    # Generalized from the original hard-coded 8 taps (db4 length) to any
    # filter length, keeping identical placement for 8-tap filters.
    for i in range(shape[1]):
        for j, tap in enumerate(lp):
            mat_lp[2 * i - j, i] = tap
        for j, tap in enumerate(hp):
            mat_hp[2 * i - j, i] = tap

    return mat_lp, mat_hp


def variable_on_cpu(name, shape, initializer, use_fp16=False):
    """Create (or reuse) a variable pinned to CPU memory.

    Args:
      name: variable name.
      shape: list of ints giving the variable shape.
      initializer: initializer for the variable.
      use_fp16: if True, create the variable as float16 instead of float32.

    Returns:
      The variable tensor.
    """
    dtype = tf.float16 if use_fp16 else tf.float32
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer, dtype=dtype)


def wave_op(input, len_input, scope, l1_value, weight_decay, sim_reg=0, activation=None):
    """One level of a learnable discrete wavelet decomposition.

    Multiplies the input by trainable low-pass / high-pass matrices that are
    initialized from the wavelet analysis filters, adds per-channel biases,
    and optionally applies an activation to both branches.

    Args:
      input: batch of signals; squeezed before use. NOTE(review): assumes the
        squeezed result is 2-D of shape (batch, len_input) — confirm callers.
      len_input: signal length; outputs have length len_input // 2.
      scope: variable scope name (reused via tf.AUTO_REUSE).
      l1_value: L1 regularization coefficient on the transform weights.
      weight_decay: L2 weight-decay coefficient on the transform weights.
      sim_reg: coefficient of the penalty keeping trained weights close to
        their wavelet initialization.
      activation: optional activation function applied to both outputs.

    Returns:
      (lp_out, hp_out): low-pass and high-pass branch outputs, each of shape
      (?, len_input // 2, 1).
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # `input` shadows the builtin but is kept in the signature for
        # backward compatibility; work with a local alias instead.
        x = tf.squeeze(input)
        # Low-pass and high-pass initialization matrices.
        lp_mat, hp_mat = get_wave_kernel([len_input, len_input // 2])
        lp_weight = wave_variable_with_l1(lp_mat, 'lp_weight', wd=weight_decay, l1_value=l1_value, sim_reg=sim_reg)
        hp_weight = wave_variable_with_l1(hp_mat, 'hp_weight', wd=weight_decay, l1_value=l1_value, sim_reg=sim_reg)
        # Bias vectors, one per output coefficient.
        biases_lp = variable_on_cpu('biases_lp', [len_input // 2], tf.constant_initializer(0.0))
        biases_hp = variable_on_cpu('biases_hp', [len_input // 2], tf.constant_initializer(0.0))
        lp_out = tf.nn.bias_add(tf.matmul(x, lp_weight), biases_lp)  # (?, len_input/2)
        hp_out = tf.nn.bias_add(tf.matmul(x, hp_weight), biases_hp)  # (?, len_input/2)
        # Idiom fix: was `if not activation == None`.
        if activation is not None:
            hp_out = activation(hp_out)
            lp_out = activation(lp_out)

        hp_out = tf.expand_dims(hp_out, -1)  # (?, len_input/2, 1)
        lp_out = tf.expand_dims(lp_out, -1)
        return lp_out, hp_out


def _wave_variable_on_cpu(matr, name, trainable):
    """Create a float32 Variable on the CPU, initialized from a numpy matrix.

    Args:
      matr: numpy array used as the initial value (fixes the shape).
      name: requested name of the variable.
      trainable: whether the variable participates in training.

    Returns:
      The Variable tensor.
    """
    with tf.device('/cpu:0'):
        return tf.Variable(matr, trainable=trainable, name=name, dtype=tf.float32)


def wave_variable_with_l1(matr, name, wd, l1_value, sim_reg=None):
    """Create a wavelet-initialized trainable Variable with regularization losses.

    The variable is initialized from `matr` (a wavelet analysis matrix), not
    from a random distribution. Up to three loss terms are registered in the
    'losses' collection:
      * L1 penalty on the weights (if `l1_value` is not None),
      * L2 weight decay (if `wd` is not None),
      * L2 penalty keeping the weights close to their wavelet initialization
        (if `sim_reg` is not None).

    Args:
      matr: numpy matrix used as the initial value.
      name: name of the trainable variable.
      wd: L2 weight-decay coefficient, or None to skip.
      l1_value: L1 loss coefficient, or None to skip.
      sim_reg: similarity-penalty coefficient, or None to skip.

    Returns:
      The trainable Variable tensor.
    """
    var = _wave_variable_on_cpu(matr, name=name, trainable=True)

    if l1_value is not None:
        l1_loss = tf.multiply(tf.reduce_mean(tf.abs(var)), l1_value, name='l1_loss')
        tf.add_to_collection('losses', l1_loss)
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    if sim_reg is not None:
        # The frozen reference copy is only needed by the similarity penalty,
        # so create it lazily. It previously requested the same name as `var`
        # (TF silently uniquified it to `<name>_1`); an explicit suffix keeps
        # the graph readable. NOTE(review): this changes the saved-variable
        # name of the (non-trainable) reference — confirm no old checkpoints
        # rely on the auto-generated `<name>_1`.
        var_ref = _wave_variable_on_cpu(matr, name=name + '_ref', trainable=False)
        similar_reg = tf.multiply(tf.nn.l2_loss(var - var_ref), sim_reg, name='similar_reg_loss')
        tf.add_to_collection('losses', similar_reg)
    return var


def tf_concat(axis, values):
    """Concatenate `values` along `axis`.

    Thin wrapper preserving the legacy (axis-first) tf.concat argument order.
    """
    joined = tf.concat(values=values, axis=axis)
    return joined
