import math
from difflib import get_close_matches

import numpy as np

import tensorflow as tf
from tensorflow.keras.initializers import VarianceScaling, RandomUniform, Constant
from tensorflow.keras.layers import BatchNormalization, LayerNormalization, LeakyReLU, Activation, Dense, \
    DepthwiseConv2D, ZeroPadding2D, Conv2D, Layer, Reshape, Flatten, Add, Concatenate


def Num2Bit(Num, B):
    """Convert integer-valued codes to their B-bit binary expansion.

    Args:
        Num: tensor of shape (batch, n) holding integer-valued codes,
            expected in [0, 2**B) (values outside wrap modulo 2**B).
        B: bits per code (at most 8).

    Returns:
        float32 tensor of shape (batch, n * B) holding the bits of each
        code, most-significant bit first.
    """
    Num_ = tf.cast(Num, tf.int32)
    # Powers of two selecting the B lowest bits, MSB first.
    m = tf.constant([128, 64, 32, 16, 8, 4, 2, 1], Num_.dtype)[-B:]
    # Integer division by each power shifts the target bit to position 0;
    # `% 2` extracts it.  (The original `out - (out % 1)` was a no-op on
    # integer tensors, and the `[:, :, -B:]` slice was redundant because
    # the last axis already has size B; both removed.)
    bit = (tf.expand_dims(Num_, -1) // m) % 2
    bit = tf.reshape(bit, [-1, Num_.shape[1] * B])
    return tf.cast(bit, tf.float32)


def Bit2Num(Bit, B):
    """Reassemble B-bit expansions (MSB first) back into numeric codes.

    Inverse of Num2Bit: takes a (batch, n * B) bit tensor and returns a
    (batch, n) float32 tensor of the decoded values.
    """
    bits = tf.cast(Bit, tf.float32)
    # Regroup the flat bit stream into one length-B vector per code.
    bits = tf.reshape(bits, [-1, bits.shape[1] // B, B])
    weights = tf.constant([128, 64, 32, 16, 8, 4, 2, 1], bits.dtype)[-B:]
    # Weighted sum over the bit axis reconstructs each value.
    return tf.reduce_sum(bits * weights, -1)


@tf.custom_gradient
def QuantizationOp(x, B):
    """Quantize x (expected in [0, 1)) to B bits per value.

    Forward: maps each value to the integer level round(x * 2**B - 0.5)
    and returns its binary expansion via Num2Bit, shape (batch, n * B).

    Backward (straight-through style): the incoming gradient over the B
    bits of each value is averaged back into a single gradient per value;
    B itself receives no gradient (None).
    """
    dtype = x.dtype
    x = tf.cast(x, tf.float32)
    step = tf.cast(2 ** B, tf.float32)
    result = tf.round(x * step - 0.5)

    result = Num2Bit(result, B)

    def custom_grad(dy):
        # dy has shape (batch, n * B); average each group of B bit-gradients
        # into one gradient for the corresponding pre-quantization value.
        grad = tf.reduce_mean(tf.reshape(dy, [tf.shape(dy)[0], -1, B]), axis=2)
        return grad, None

    result = tf.cast(result, dtype)
    return result, custom_grad


class QuantizationLayer(Layer):
    """Keras wrapper around QuantizationOp (B-bit quantization)."""

    def __init__(self, B, **kwargs):
        super().__init__(**kwargs)
        self.B = B  # bits per quantized value

    def call(self, x):
        return QuantizationOp(x, self.B)

    def get_config(self):
        # Include B so the layer round-trips through model serialization.
        config = super().get_config()
        config['B'] = self.B
        return config


@tf.custom_gradient
def DequantizationOp(x, B):
    """Inverse of QuantizationOp: map B-bit expansions back into (0, 1).

    Forward: decodes the (batch, n * B) bit tensor with Bit2Num and
    returns the level midpoint (num + 0.5) / 2**B per value.

    Backward: each value's gradient is replicated to all B of its bit
    positions; B itself receives no gradient (None).
    """
    dtype = x.dtype
    x = tf.cast(x, tf.float32)
    out = Bit2Num(x, B)
    step = tf.cast(2 ** B, tf.float32)
    result = (out + 0.5) / step

    def custom_grad(dy):
        # Broadcast each per-value gradient to its B bit positions, then
        # flatten back to the (batch, n * B) bit layout.
        grad_bit = tf.tile(tf.expand_dims(dy, -1), [1, 1, B])
        grad = tf.reshape(grad_bit, [tf.shape(grad_bit)[0], -1])
        return grad, None
    result = tf.cast(result, dtype)
    return result, custom_grad


class DequantizationLayer(Layer):
    """Keras wrapper around DequantizationOp (B-bit dequantization)."""

    def __init__(self, B, **kwargs):
        super().__init__(**kwargs)
        self.B = B  # bits per quantized value

    def call(self, x):
        return DequantizationOp(x, self.B)

    def get_config(self):
        # Include B so the layer round-trips through model serialization.
        config = super().get_config()
        config['B'] = self.B
        return config


class Swish(Layer):
    """Elementwise swish activation layer (tf.nn.swish)."""

    def call(self, x):
        return tf.nn.swish(x)

    def get_config(self):
        # No extra state beyond the base Layer config.
        return super().get_config()


class ReZero(Layer):
    """Scale the input by a single trainable scalar, initialized near zero.

    Used as a learnable residual gate: with init_val=0 the branch starts
    as the identity mapping.
    """

    def __init__(self, init_val=0., **kwargs):
        super().__init__(**kwargs)
        self.init_val = init_val
        # One scalar weight shared across the whole tensor.
        self.res_weight = self.add_weight(
            name='res_weight', shape=(), dtype=tf.float32,
            trainable=True, initializer=Constant(init_val))

    def call(self, x):
        return self.res_weight * x

    def get_config(self):
        config = super().get_config()
        config["init_val"] = self.init_val
        return config


# Shared hyper-parameter defaults, resolved by Norm()/Act() when callers
# pass type='def'/'default'.
DEFAULTS = {
    'norm': 'bn',  # default normalization kind
    'act': 'swish',  # default activation kind
    'bn': {
        'momentum': 0.9,  # BatchNormalization momentum
    },
    'leaky_relu': {
        'alpha': 0.1,  # LeakyReLU negative slope
    },
}


def Norm(x, type='def'):
    """Apply a normalization layer to x.

    Args:
        x: input tensor.
        type: 'bn' (BatchNormalization), 'ln' (LayerNormalization), or
            'def'/'default' to use DEFAULTS['norm'].

    Returns:
        The normalized tensor.

    Raises:
        ValueError: if `type` names an unknown normalization.
    """
    if type in ['def', 'default']:
        type = DEFAULTS['norm']
    if type == 'bn':
        return BatchNormalization(momentum=DEFAULTS['bn']['momentum'], epsilon=1e-5)(x)
    elif type == 'ln':
        return LayerNormalization(epsilon=1e-5)(x)
    else:
        # Name the offending value so callers can see what went wrong
        # (the original message was just "Not supported").
        raise ValueError("Not supported norm type: %r" % (type,))


def Act(x, type='def'):
    """Apply an activation to x.

    'def'/'default' resolves via DEFAULTS['act']; 'swish' and 'leaky_relu'
    get dedicated layers, anything else is passed to keras Activation.
    """
    if type in ('def', 'default'):
        type = DEFAULTS['act']
    if type == 'swish':
        return Swish()(x)
    if type == 'leaky_relu':
        return LeakyReLU(alpha=DEFAULTS['leaky_relu']['alpha'])(x)
    return Activation(type)(x)


def Linear(x, out_channels, act=None):
    """Dense layer with uniform variance-scaling initialization.

    Kernel: uniform variance scaling (scale 1/3, fan_in); bias: uniform in
    [-1/sqrt(fan_in), 1/sqrt(fan_in)] — presumably mirroring
    torch.nn.Linear's defaults (TODO confirm).

    Args:
        x: input tensor with a static last dimension.
        out_channels: output feature count.
        act: optional activation type forwarded to Act(); None = linear.

    Returns:
        The transformed tensor.
    """
    in_channels = x.shape[-1]
    kernel_initializer = VarianceScaling(1.0 / 3, 'fan_in', 'uniform')
    bound = math.sqrt(1 / in_channels)
    bias_initializer = RandomUniform(-bound, bound)
    x = Dense(out_channels,
              kernel_initializer=kernel_initializer,
              bias_initializer=bias_initializer)(x)
    if act is not None:
        # Bug fix: the original called Act(x), dropping the requested
        # activation and always applying the default; forward `act`.
        x = Act(x, act)
    return x


def calc_same_padding(kernel_size, dilation):
    """Return symmetric ((top, bottom), (left, right)) zero-padding that
    preserves spatial size for a stride-1 convolution with the given
    (kh, kw) kernel and (dh, dw) dilation."""
    return tuple(
        # Effective kernel extent is k + (k - 1) * (d - 1); pad half of
        # (extent - 1) on each side.
        ((k + (k - 1) * (d - 1) - 1) // 2,) * 2
        for k, d in zip(kernel_size, dilation)
    )


def _tuple(x):
    if isinstance(x, list):
        x = tuple(x)
    elif not isinstance(x, tuple):
        x = (x, x)
    return x


def Conv2d(x, out_channels: int, kernel_size, stride=1, padding='same', groups=1, dilation=1,
           bias=None, norm=None, act=None, pre=False):
    """2-D convolution with explicit symmetric padding and optional norm/act.

    Padding is applied via ZeroPadding2D followed by a 'valid' convolution,
    so 'same' padding is symmetric and independent of stride.  When
    `groups == in_channels` a DepthwiseConv2D is built; otherwise a dense
    Conv2D.  NOTE(review): any other `groups` value is silently ignored by
    the Conv2D branch (no `groups` argument is passed) — confirm callers
    only use groups in {1, in_channels}.

    Args:
        x: NHWC input tensor with a static channel dimension.
        out_channels: number of output channels.
        kernel_size, stride, dilation: int or (h, w) pair.
        padding: 'same' or 0.
        groups: channel groups; only 1 or in_channels take effect.
        bias: force bias on/off; default (None) enables bias iff no norm
            follows (the norm supplies its own shift).
        norm, act: optional Norm()/Act() type names.
        pre: if True, apply norm/act before the convolution (pre-activation
            ordering); otherwise after.

    Returns:
        The convolved (and optionally normalized/activated) tensor.

    Raises:
        ValueError: if `padding` is neither 'same' nor 0.
    """
    in_channels = x.shape[-1]
    kernel_size, stride, dilation = [_tuple(v) for v in [kernel_size, stride, dilation]]

    # Explicit validation instead of the original `assert`, which would be
    # stripped under `python -O`.
    if padding != 'same' and padding != 0:
        raise ValueError("padding must be 'same' or 0, got %r" % (padding,))
    if padding == 'same':
        padding = calc_same_padding(kernel_size, dilation)
    if padding == 0:
        padding = ((0, 0), (0, 0))

    # Depthwise convolutions scale init variance by fan_in, dense by fan_out.
    mode = 'fan_in' if in_channels == groups else 'fan_out'

    kernel_initializer = VarianceScaling(1.0 / 3, mode, 'uniform')
    bound = math.sqrt(1 / (kernel_size[0] * kernel_size[1] * (in_channels // groups)))
    bias_initializer = RandomUniform(-bound, bound)

    if bias is None:
        use_bias = norm is None
    else:
        use_bias = bias

    if in_channels == groups:
        depth_multiplier = out_channels // in_channels
        conv = DepthwiseConv2D(kernel_size=kernel_size, strides=stride, padding='valid',
                               use_bias=use_bias, dilation_rate=dilation, depth_multiplier=depth_multiplier,
                               depthwise_initializer=kernel_initializer, bias_initializer=bias_initializer)
    else:
        conv = Conv2D(out_channels, kernel_size=kernel_size, strides=stride,
                      padding='valid', dilation_rate=dilation, use_bias=use_bias,
                      kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)

    if padding != ((0, 0), (0, 0)):
        x = ZeroPadding2D(padding)(x)

    if pre:
        if norm:
            x = Norm(x, norm)
        if act:
            x = Act(x, act)
        x = conv(x)
    else:
        x = conv(x)
        if norm:
            x = Norm(x, norm)
        if act:
            x = Act(x, act)
    return x


def CRBlock(x, expand_ratio=1, rezero=False):
    """Two-branch residual block.

    Branch A: 3x3 conv then a factorized 9x9 (1x9 followed by 9x1).
    Branch B: factorized 5x5 (1x5 followed by 5x1).
    The branches are concatenated, projected back to the input channel
    count with a 1x1 conv, optionally gated by ReZero, and added to the
    identity shortcut.  Pre-activation (norm then act) throughout.
    """
    channels = x.shape[-1]
    identity = x
    f_channels = channels * expand_ratio

    out = Act(Norm(x))

    branch_a = Conv2d(out, f_channels, kernel_size=3)
    branch_a = Conv2d(branch_a, f_channels, kernel_size=(1, 9), norm='def', act='def', pre=True)
    branch_a = Conv2d(branch_a, f_channels, kernel_size=(9, 1), norm='def', act='def', pre=True)

    branch_b = Conv2d(out, f_channels, kernel_size=(1, 5))
    branch_b = Conv2d(branch_b, f_channels, kernel_size=(5, 1), norm='def', act='def', pre=True)

    merged = Concatenate()([branch_a, branch_b])
    merged = Act(Norm(merged))
    merged = Conv2d(merged, channels, kernel_size=1)
    if rezero:
        # Learnable scalar gate so the residual branch starts near identity.
        merged = ReZero()(merged)
    return Add()([merged, identity])

def Encoder(x, channels, num_layers, expand_ratio=1, rezero=False, feedback_bits=432, B=3, return_rx=True):
    """Encoder: conv trunk -> CRBlocks -> dense -> sigmoid -> B-bit quantizer.

    Produces feedback_bits quantized bits (feedback_bits // B sigmoid values
    at B bits each).  When return_rx is True, also returns the
    pre-quantization sigmoid output.
    """
    # Input standardization; 0.0236 is presumably the dataset's std — TODO confirm.
    x = (x - 0.5) / 0.0236
    x = Conv2d(x, channels, kernel_size=5)
    for _ in range(num_layers):
        x = CRBlock(x, expand_ratio=expand_ratio, rezero=rezero)
    x = Conv2d(x, channels // 4, kernel_size=1, norm='def', act='def', pre=True)
    x = Act(Norm(x))
    x = Flatten()(x)
    x = Linear(x, feedback_bits // B)
    x = Act(x, 'sigmoid')
    rx = x
    x = QuantizationLayer(B)(x)
    return (x, rx) if return_rx else x


def OffsetNet(x, channels, feedback_bits, B):
    """Residual MLP predicting a small correction to the dequantized values.

    The hidden widths come from `channels`; the final layer's output is
    scaled by 1 / 2**(B+1) (half a quantization step) and added back to the
    input.  NOTE(review): the scale relies on Linear honoring act='tanh' to
    bound the offset — verify against Linear's implementation.
    """
    identity = x
    out = x - 0.5
    for width in channels:
        out = Act(Linear(out, width), 'def')
    out = Linear(out, feedback_bits // B, act='tanh') / (2 ** (B + 1))
    return out + identity

def rsigmoid(x):
    """Inverse sigmoid (logit): log(x / (1 - x)); x must lie in (0, 1)."""
    odds = x / (1 - x)
    return tf.math.log(odds)

def Decoder(x, channels, num_layers, expand_ratio=1, rezero=False, feedback_bits=432, B=3, return_rx=True):
    """Decoder: dequantize -> offset net -> inverse sigmoid -> conv trunk.

    Reconstructs a (24, 16, 2) sigmoid-activated map from the feedback
    bits.  When return_rx is True, also returns the clipped dequantized
    values taken just before the inverse sigmoid.
    """
    x = DequantizationLayer(B)(x)
    x = OffsetNet(x, [512, 256], feedback_bits, B)
    # Clip away from {0, 1} so rsigmoid stays finite.
    x = tf.maximum(tf.minimum(x, 0.99), 0.01)
    rx = x
    x = rsigmoid(x)
    # Hard-coded 24x16 spatial layout — presumably the CSI matrix size.
    x = Linear(x, 24 * 16 * channels // 4)
    x = Reshape((24, 16, channels // 4))(x)
    x = Conv2d(x, channels, kernel_size=5, norm='def', act='def', pre=True)
    for _ in range(num_layers):
        x = CRBlock(x, expand_ratio=expand_ratio, rezero=rezero)
    x = Act(Norm(x))
    x = Conv2d(x, 2, kernel_size=3, act='sigmoid')
    return (x, rx) if return_rx else x

def NMSE(x, x_hat):
    """Normalized MSE between complex channels stored as (real, imag) maps.

    Both inputs are (N, H, W, 2) arrays whose values are offset by +0.5;
    channel 0 holds the real part and channel 1 the imaginary part.
    Returns mean over samples of ||x - x_hat||^2 / ||x||^2.
    """
    def to_complex(a):
        # Flatten each sample and undo the +0.5 offset on both parts.
        re = np.reshape(a[:, :, :, 0], (len(a), -1)) - 0.5
        im = np.reshape(a[:, :, :, 1], (len(a), -1)) - 0.5
        return re + 1j * im

    x_c = to_complex(x)
    x_hat_c = to_complex(x_hat)
    power = np.sum(np.abs(x_c) ** 2, axis=1)
    mse = np.sum(np.abs(x_c - x_hat_c) ** 2, axis=1)
    return np.mean(mse / power)


def get_custom_objects():
    """Registry of custom layers needed to deserialize saved models."""
    return {
        "QuantizationLayer": QuantizationLayer,
        "DequantizationLayer": DequantizationLayer,
        "ReZero": ReZero,
        "Swish": Swish,
    }
