from __future__ import absolute_import as _abs
import ctypes
import numpy as np
from ._dylib import _LIB


def _pool(pool_type, input_data, kernel_shape, strides, padding_size, scale=None):
    """Run a quantized int8 pooling op through the C library.

    Args:
        pool_type: "ave" for average pooling or "max" for max pooling.
        input_data: int8 tensor of shape (N, C, H, W).
        kernel_shape: (kernel_h, kernel_w).
        strides: (stride_h, stride_w).
        padding_size: (pad_top, pad_bottom, pad_left, pad_right).
        scale: requantization scale; required (truthy) for "ave" pooling.

    Returns:
        int8 output tensor of shape (N, C, out_H, out_W).
    """
    assert input_data.dtype == np.int8
    assert len(input_data.shape) == 4
    assert len(kernel_shape) == 2
    assert len(strides) == 2
    assert len(padding_size) == 4
    input_n = input_data.shape[0]
    input_c = input_data.shape[1]
    input_h = input_data.shape[2]
    input_w = input_data.shape[3]
    kernel_h = kernel_shape[0]
    kernel_w = kernel_shape[1]
    stride_h = strides[0]
    stride_w = strides[1]
    pad_t = padding_size[0]
    pad_b = padding_size[1]
    pad_l = padding_size[2]
    pad_r = padding_size[3]
    output_n = input_n
    output_c = input_c
    output_h = (input_h + pad_t + pad_b - kernel_h) // stride_h + 1
    output_w = (input_w + pad_l + pad_r - kernel_w) // stride_w + 1
    input_data = np.ascontiguousarray(input_data, dtype=np.int8)
    assert input_data.flags['C_CONTIGUOUS']
    output_data = np.ndarray([output_n, output_c, output_h, output_w],
                             dtype=np.int8, order='C')
    if pool_type == "ave":
        assert scale
        # Fold the kernel area into the requantization scale.
        # BUGFIX: this block used to run inside the batch loop below, dividing
        # `scale` again for every sample and corrupting results for input_n > 1.
        scale = scale / (kernel_h * kernel_w)
        int_bits = int(np.ceil(np.log2(scale)))
        if int_bits < 0:
            int_bits = 0
        # Notice: Python only support s32 data type argument when invoke c func,
        # but c api should use u32: dec_bits = 32 - int_bits
        dec_bits = 31 - int_bits
        scale_int = int(scale * 2 ** dec_bits)
        out_shift = dec_bits
    for i in range(input_n):
        input_pointer = input_data[i].ctypes.data_as(ctypes.c_void_p)
        output_pointer = output_data[i].ctypes.data_as(ctypes.c_void_p)
        if pool_type == "ave":
            _LIB.avepool_op_v2(input_pointer, output_pointer, input_w, input_h, input_c,
                               kernel_h, kernel_w, stride_h, stride_w,
                               pad_t, pad_b, pad_l, pad_r, 0, scale_int, out_shift)
        elif pool_type == "max":
            # -127 is the padding fill value for max pooling (int8 minimum-ish).
            _LIB.maxpool_op_v2(input_pointer, output_pointer, input_w, input_h, input_c,
                               kernel_h, kernel_w, stride_h, stride_w,
                               pad_t, pad_b, pad_l, pad_r, -127)
        else:
            assert False
    return output_data


def _conv(conv_type, input_data, weight_data, bias_data, scale_data, strides, padding_size, activation_type="relu",
          lrelu_lmbda=None, out_shift=0):
    """Run a quantized int8 convolution through the C library.

    Args:
        conv_type: "conv2d" or depthwise "dw_conv2d".
        input_data: int8 tensor of shape (N, C, H, W).
        weight_data: int8 tensor of shape (O, C, kH, kW) for conv2d,
            (1, C, kH, kW) for dw_conv2d.
        bias_data: int32 per-output-channel bias, or None for no bias.
        scale_data: float64 per-output-channel requantization scales in (0, 1).
        strides: (stride_h, stride_w).
        padding_size: (pad_top, pad_bottom, pad_left, pad_right).
        activation_type: "none", "relu", or "leakyrelu".
        lrelu_lmbda: leaky-ReLU slope in (0, 1); required for "leakyrelu".
        out_shift: extra output shift passed through to the C op.

    Returns:
        int8 output tensor of shape (N, O, out_H, out_W).
    """
    assert input_data.dtype == np.int8
    assert weight_data.dtype == np.int8
    # BUGFIX: `np.float` was an alias for the builtin float (dtype float64)
    # and was removed in NumPy 1.20+; compare against np.float64 instead.
    assert scale_data.dtype == np.float64
    assert len(input_data.shape) == 4
    assert len(weight_data.shape) == 4
    assert len(scale_data.shape) == 1
    assert len(strides) == 2
    assert len(padding_size) == 4

    if bias_data is None:
        # If it doesn't use bias, create a dummy one
        if conv_type == "conv2d":
            o_c = weight_data.shape[0]
        else:
            o_c = weight_data.shape[1]
        bias_data = np.full(o_c, 0, dtype=np.int32)
    else:
        assert bias_data.dtype == np.int32
        assert len(bias_data.shape) == 1

    input_n = input_data.shape[0]
    input_c = input_data.shape[1]
    assert input_c == weight_data.shape[1]
    input_h = input_data.shape[2]
    input_w = input_data.shape[3]
    kernel_h = weight_data.shape[2]
    kernel_w = weight_data.shape[3]
    stride_h = strides[0]
    stride_w = strides[1]
    pad_t = padding_size[0]
    pad_b = padding_size[1]
    pad_l = padding_size[2]
    pad_r = padding_size[3]
    output_n = input_n
    if conv_type == "conv2d":
        output_c = weight_data.shape[0]
        assert bias_data.shape[0] == output_c
        assert scale_data.shape[0] == output_c
    elif conv_type == "dw_conv2d":
        assert weight_data.shape[0] == 1
        output_c = input_c
    else:
        assert False
    output_h = (input_h + pad_t + pad_b - kernel_h) // stride_h + 1
    output_w = (input_w + pad_l + pad_r - kernel_w) // stride_w + 1
    if activation_type == "none":
        activation_num = 0
    elif activation_type == "relu":
        activation_num = 1
    elif activation_type == "leakyrelu":
        activation_num = 2
        assert lrelu_lmbda
        assert lrelu_lmbda < 1.0
        # Quantize the leaky-ReLU slope to Q8 fixed point.
        lrelu_lmbda = int(2 ** 8 * lrelu_lmbda)
    else:
        # BUGFIX: an unknown activation previously fell through silently and
        # raised UnboundLocalError on `activation_num` at the call site below.
        assert False
    if not lrelu_lmbda:
        lrelu_lmbda = 0
    input_data = np.ascontiguousarray(input_data, dtype=np.int8)
    assert input_data.flags['C_CONTIGUOUS']
    weight_data = np.ascontiguousarray(weight_data, dtype=np.int8)
    assert weight_data.flags['C_CONTIGUOUS']
    bias_data = np.ascontiguousarray(bias_data, dtype=np.int32)
    assert bias_data.flags['C_CONTIGUOUS']
    # Quantize each per-channel scale to Q0.32 fixed point, saturating at the
    # maximum representable uint32 value.
    scale_data_uint32 = np.ndarray(output_c, dtype=np.uint32, order='C')
    for i in range(output_c):
        assert 1.0 > scale_data[i] > 0.0
        t = scale_data[i] * (2 ** 32)
        if t > 2 ** 32 - 1:
            scale_data_uint32[i] = 2 ** 32 - 1
        else:
            scale_data_uint32[i] = int(t)
    output_data = np.ndarray([output_n, output_c, output_h, output_w],
                             dtype=np.int8, order='C')
    for i in range(input_n):
        input_pointer = input_data[i].ctypes.data_as(ctypes.c_void_p)
        weight_pointer = weight_data.ctypes.data_as(ctypes.c_void_p)
        bias_pointer = bias_data.ctypes.data_as(ctypes.c_void_p)
        scale_pointer = scale_data_uint32.ctypes.data_as(ctypes.c_void_p)
        output_pointer = output_data[i].ctypes.data_as(ctypes.c_void_p)
        if conv_type == "conv2d":
            _LIB.conv_op(
                input_pointer, weight_pointer, bias_pointer, scale_pointer, output_pointer,
                input_w, input_h, input_c, output_c,
                kernel_h, kernel_w, stride_h, stride_w,
                pad_t, pad_b, pad_l, pad_r, 0,
                activation_num, lrelu_lmbda, out_shift)
        elif conv_type == "dw_conv2d":
            _LIB.dw_conv_op(
                input_pointer, weight_pointer, bias_pointer, scale_pointer, output_pointer,
                input_w, input_h, input_c,
                kernel_h, kernel_w, stride_h, stride_w,
                pad_t, pad_b, pad_l, pad_r, 0,
                activation_num, lrelu_lmbda, out_shift)
        else:
            assert False
    return output_data


def _activation(input_data, scale, activation_type="relu", lrelu_lmbda=None):
    """Apply a quantized activation (with rescale) through the C library.

    Args:
        input_data: int8 tensor of shape (N, C, H, W).
        scale: requantization scale in (0, 256).
        activation_type: "none", "relu", or "leakyrelu".
        lrelu_lmbda: leaky-ReLU slope in (0, 1); required for "leakyrelu".

    Returns:
        int8 output tensor with the same shape as the input.
    """
    assert input_data.dtype == np.int8
    assert len(input_data.shape) == 4
    input_n = input_data.shape[0]
    input_c = input_data.shape[1]
    input_h = input_data.shape[2]
    input_w = input_data.shape[3]
    output_n = input_n
    output_h = input_h
    output_w = input_w
    output_c = input_c
    if activation_type == "none":
        activation_num = 0
    elif activation_type == "relu":
        activation_num = 1
    elif activation_type == "leakyrelu":
        activation_num = 2
        assert lrelu_lmbda
        assert lrelu_lmbda < 1.0
        # Quantize the leaky-ReLU slope to Q8 fixed point.
        lrelu_lmbda = int(2 ** 8 * lrelu_lmbda)
    else:
        # BUGFIX: an unknown activation previously fell through silently and
        # raised UnboundLocalError on `activation_num` at the call site below.
        assert False
    if not lrelu_lmbda:
        lrelu_lmbda = 0
    input_data = np.ascontiguousarray(input_data, dtype=np.int8)
    assert input_data.flags['C_CONTIGUOUS']

    # Quantize `scale` into Q(int_bits).(31 - int_bits) fixed point.
    assert 0.0 < scale < 256.0
    int_bits = int(np.ceil(np.log2(scale)))
    if int_bits < 0:
        int_bits = 0
    dec_bits = 31 - int_bits
    scale_int = int(scale * 2 ** dec_bits)
    # NOTE(review): unlike the pooling ops this passes `dec_bits - 32`
    # (negative) as the shift — presumably the C activation op interprets it
    # differently; confirm against the C API before changing.
    out_shift = dec_bits - 32

    output_data = np.ndarray([output_n, output_c, output_h, output_w],
                             dtype=np.int8, order='C')
    for i in range(input_n):
        input_pointer = input_data[i].ctypes.data_as(ctypes.c_void_p)
        output_pointer = output_data[i].ctypes.data_as(ctypes.c_void_p)
        _LIB.activation_op(
            input_pointer, output_pointer, scale_int,
            input_w, input_h, input_c,
            activation_num, lrelu_lmbda, out_shift)
    return output_data


def _fc(input_data, weight_data, bias_data, scale_data, activation_type="relu", lrelu_lmbda=None, out_shift=0):
    """Run a quantized int8 fully-connected layer through the C library.

    Args:
        input_data: int8 tensor of shape (N, in_size).
        weight_data: int8 tensor of shape (out_size, in_size).
        bias_data: int32 vector of shape (out_size,), or None for no bias.
        scale_data: scalar requantization scale in (0, 1), shared by all
            output units (unlike `_conv`, which takes a per-channel array).
        activation_type: "none", "relu", or "leakyrelu".
        lrelu_lmbda: leaky-ReLU slope in (0, 1); required for "leakyrelu".
        out_shift: extra output shift passed through to the C op.

    Returns:
        int8 output tensor of shape (N, out_size).
    """
    assert input_data.dtype == np.int8
    assert weight_data.dtype == np.int8
    assert len(input_data.shape) == 2
    assert len(weight_data.shape) == 2

    if bias_data is None:
        # If it doesn't use bias, create a dummy one
        bias_data = np.full(weight_data.shape[0], 0, dtype=np.int32)
    else:
        assert bias_data.dtype == np.int32
        assert len(bias_data.shape) == 1

    input_n = input_data.shape[0]
    input_size = input_data.shape[1]
    assert input_size == weight_data.shape[1]
    output_n = input_n
    output_size = weight_data.shape[0]
    assert output_size == bias_data.shape[0]
    if activation_type == "none":
        activation_num = 0
    elif activation_type == "relu":
        activation_num = 1
    elif activation_type == "leakyrelu":
        activation_num = 2
        assert lrelu_lmbda
        assert lrelu_lmbda < 1.0
        # Quantize the leaky-ReLU slope to Q8 fixed point.
        lrelu_lmbda = int(2 ** 8 * lrelu_lmbda)
    else:
        # BUGFIX: an unknown activation previously fell through silently and
        # raised UnboundLocalError on `activation_num` at the call site below.
        assert False
    if not lrelu_lmbda:
        lrelu_lmbda = 0
    input_data = np.ascontiguousarray(input_data, dtype=np.int8)
    assert input_data.flags['C_CONTIGUOUS']
    weight_data = np.ascontiguousarray(weight_data, dtype=np.int8)
    assert weight_data.flags['C_CONTIGUOUS']
    bias_data = np.ascontiguousarray(bias_data, dtype=np.int32)
    assert bias_data.flags['C_CONTIGUOUS']
    # Quantize the scalar scale to Q0.32 fixed point once (the original loop
    # recomputed the identical value output_size times) and broadcast it.
    assert 1.0 > scale_data > 0.0
    t = scale_data * (2 ** 32)
    scale_q32 = 2 ** 32 - 1 if t > 2 ** 32 - 1 else int(t)
    scale_data_uint32 = np.full(output_size, scale_q32, dtype=np.uint32)
    output_data = np.ndarray([output_n, output_size],
                             dtype=np.int8, order='C')
    for i in range(input_n):
        input_pointer = input_data[i].ctypes.data_as(ctypes.c_void_p)
        weight_pointer = weight_data.ctypes.data_as(ctypes.c_void_p)
        bias_pointer = bias_data.ctypes.data_as(ctypes.c_void_p)
        scale_pointer = scale_data_uint32.ctypes.data_as(ctypes.c_void_p)
        output_pointer = output_data[i].ctypes.data_as(ctypes.c_void_p)
        _LIB.fc_op(
            input_pointer, weight_pointer, bias_pointer, scale_pointer, output_pointer,
            input_size, output_size, activation_num, lrelu_lmbda, out_shift)
    return output_data


def _ew(ew_type, input_1_data, input_2_data, scale, scale_2=None):
    """Run a quantized int8 element-wise op through the C library.

    Args:
        ew_type: "add" (requires both scales) or "mul" (uses only `scale`).
        input_1_data: int8 tensor; any shape.
        input_2_data: int8 tensor with the same shape as input_1_data.
        scale: requantization scale for the first input (truthy, > 0).
        scale_2: requantization scale for the second input; "add" only.

    Returns:
        int8 output tensor with the same shape as the inputs.
    """
    assert input_1_data.dtype == np.int8
    assert input_2_data.dtype == np.int8
    assert input_1_data.shape == input_2_data.shape
    assert scale

    def _fixed_point(value, total_bits):
        # Quantize `value` into a (mantissa, shift) fixed-point pair with
        # `total_bits` bits split between integer and fractional parts.
        whole_bits = max(int(np.ceil(np.log2(value))), 0)
        frac_bits = total_bits - whole_bits
        return int(value * 2 ** frac_bits), frac_bits

    out_shape = input_1_data.shape
    elem_count = input_1_data.size
    input_1_data = np.ascontiguousarray(input_1_data, dtype=np.int8)
    input_2_data = np.ascontiguousarray(input_2_data, dtype=np.int8)
    assert input_1_data.flags['C_CONTIGUOUS']
    assert input_2_data.flags['C_CONTIGUOUS']
    output_data = np.ndarray(out_shape,
                             dtype=np.int8, order='C')
    ptr_a = input_1_data.ctypes.data_as(ctypes.c_void_p)
    ptr_b = input_2_data.ctypes.data_as(ctypes.c_void_p)
    ptr_out = output_data.ctypes.data_as(ctypes.c_void_p)
    if ew_type == "add":
        # Add uses 16-bit fixed point for each input's scale.
        assert scale_2
        num_a, shift_a = _fixed_point(scale, 15)
        num_b, shift_b = _fixed_point(scale_2, 15)
        _LIB.ewadd_op(ptr_a, ptr_b, ptr_out, elem_count,
                      num_a, num_b, shift_a, shift_b)
    elif ew_type == "mul":
        # Mul uses a single 32-bit fixed-point scale.
        num, shift = _fixed_point(scale, 31)
        _LIB.ewmul_op(ptr_a, ptr_b, ptr_out, elem_count, num, shift)
    else:
        assert False
    return output_data


def _global_average_pooling(input_data, scale):
    """Run quantized int8 global average pooling through the C library.

    Args:
        input_data: int8 tensor of shape (N, C, H, W).
        scale: requantization scale (truthy, > 0).

    Returns:
        int8 output tensor of shape (N, C, 1, 1).
    """
    assert input_data.dtype == np.int8
    assert len(input_data.shape) == 4
    assert scale
    input_n = input_data.shape[0]
    input_c = input_data.shape[1]
    input_h = input_data.shape[2]
    input_w = input_data.shape[3]
    output_n = input_n
    output_c = input_c
    output_h = 1
    output_w = 1
    input_data = np.ascontiguousarray(input_data, dtype=np.int8)
    assert input_data.flags['C_CONTIGUOUS']
    output_data = np.ndarray([output_n, output_c, output_h, output_w],
                             dtype=np.int8, order='C')
    # Fold the spatial area into the requantization scale and quantize once.
    # BUGFIX: this block used to run inside the batch loop below, dividing
    # `scale` again for every sample and corrupting results for input_n > 1.
    scale = scale / (input_h * input_w)
    int_bits = int(np.ceil(np.log2(scale)))
    if int_bits < 0:
        int_bits = 0
    dec_bits = 31 - int_bits
    scale_int = int(scale * 2 ** dec_bits)
    out_shift = dec_bits
    for i in range(input_n):
        input_pointer = input_data[i].ctypes.data_as(ctypes.c_void_p)
        output_pointer = output_data[i].ctypes.data_as(ctypes.c_void_p)
        _LIB.gap_op(input_pointer, output_pointer, input_w, input_h, input_c,
                    scale_int, out_shift)
    return output_data


def _upsampling(input_data, h_factor, w_factor):
    """Run nearest-neighbour int8 upsampling through the C library.

    Args:
        input_data: int8 tensor of shape (N, C, H, W).
        h_factor: vertical scaling factor (< 4).
        w_factor: horizontal scaling factor (< 4).

    Returns:
        int8 output tensor of shape (N, C, H * h_factor, W * w_factor).
    """
    assert input_data.dtype == np.int8
    assert len(input_data.shape) == 4
    assert h_factor < 4 and w_factor < 4

    # Derive output geometry from the input NCHW shape.
    batch, channels, height, width = input_data.shape
    out_shape = [batch, channels, height * h_factor, width * w_factor]

    # The C API needs contiguous int8 buffers.
    input_data = np.ascontiguousarray(input_data, dtype=np.int8)
    assert input_data.flags['C_CONTIGUOUS']
    output_data = np.ndarray(out_shape, dtype=np.int8, order='C')

    # One C call per batch sample.
    for n in range(batch):
        src = input_data[n].ctypes.data_as(ctypes.c_void_p)
        dst = output_data[n].ctypes.data_as(ctypes.c_void_p)
        _LIB.upsampling_op(src, dst, height,
                           width, channels, h_factor, w_factor)

    return output_data
