import numpy as np

cimport numpy as np

from scipy import sparse

from libc.stdlib cimport malloc, free
from libc.math cimport exp

# NumPy dtype used for all real-valued arrays in this module (32-bit float).
REAL = np.float32
# Matching C-level typedef so Cython buffers/pointers agree with REAL.
ctypedef np.float32_t REAL_t

# Hard cap on the number of words taken from a single sentence.
DEF MAX_SENTENCE_LEN = 10000

# Resolution and domain of the precomputed sigmoid lookup table:
# EXP_TABLE discretizes sigmoid(x) for x in [-MAX_EXP, MAX_EXP).
DEF EXP_TABLE_SIZE = 1000
DEF MAX_EXP = 6

# Module-level sigmoid lookup table, filled by init() at import time.
cdef REAL_t[EXP_TABLE_SIZE] EXP_TABLE

def init():
    """
    Fill the module-level EXP_TABLE with precomputed values of
    `sigmoid(x) = 1 / (1 + exp(-x))`, discretized over x in
    [-MAX_EXP, MAX_EXP).
    """
    cdef unsigned int idx
    cdef REAL_t e
    for idx in range(EXP_TABLE_SIZE):
        # Map table slot to x in [-MAX_EXP, MAX_EXP) and store exp(x)...
        e = <REAL_t> exp((idx / <REAL_t> EXP_TABLE_SIZE * 2 - 1) * MAX_EXP)
        # ...then convert exp(x) into sigmoid(x) = exp(x) / (exp(x) + 1).
        EXP_TABLE[idx] = <REAL_t> (e / (e + 1))

# Populate the sigmoid lookup table once, at module import time.
init()

cdef void record_min(const REAL_t*arr, const unsigned int arr_length, REAL_t*min_value, unsigned int*min_index) nogil:
    """Scan `arr` (length `arr_length`, assumed >= 1) and write its smallest
    value and that value's index into `min_value[0]` / `min_index[0]`."""
    cdef unsigned int pos
    cdef REAL_t candidate
    # Seed with the first element, then challenge it with the rest.
    min_value[0] = arr[0]
    min_index[0] = 0
    for pos in range(1, arr_length):
        candidate = arr[pos]
        if candidate < min_value[0]:
            min_value[0] = candidate
            min_index[0] = pos

def min_1d(np.ndarray[REAL_t, ndim=1] arr):
    """
    Return ``(min_value, min_index)`` for the 1-d float32 array `arr`.

    Assumes `arr` is non-empty (the C helper reads arr[0] unconditionally).
    """
    # Stack-allocated outputs: the original malloc'd both and never freed
    # them, leaking a few bytes on every call.
    cdef REAL_t min_value
    cdef unsigned int min_index
    record_min(&arr[0], arr.shape[0], &min_value, &min_index)
    return min_value, min_index

cdef void find_k_max(const REAL_t*arr, const unsigned int arr_length, const unsigned int k,
                     REAL_t*k_max_value, unsigned int*k_max_index) nogil:
    """
    Write the k largest elements of `arr` (and their indices) into
    `k_max_value` / `k_max_index` (each of length k), in no particular order.

    Assumes 1 <= k <= arr_length; callers must guarantee this.
    """
    cdef unsigned int i
    # Stack scalars for the current minimum of the "box" (the original
    # malloc'd/freed these each call for no benefit).
    cdef unsigned int min_index_in_box
    cdef REAL_t min_value_in_box
    cdef REAL_t tmp
    # Seed the box with the first k elements.
    for i in range(k):
        k_max_value[i] = arr[i]
        k_max_index[i] = i
    record_min(k_max_value, k, &min_value_in_box, &min_index_in_box)
    # Scan the remainder, starting at k — NOT 0. The original started at 0,
    # re-inserting elements already seeded into the box and producing
    # duplicates (e.g. arr=[5,1,2], k=2 yielded values [5,5], indices [0,0]).
    for i in range(k, arr_length):
        tmp = arr[i]
        if tmp > min_value_in_box:
            # Evict the current minimum of the box.
            k_max_value[min_index_in_box] = tmp
            k_max_index[min_index_in_box] = i
            record_min(k_max_value, k, &min_value_in_box, &min_index_in_box)

def find_k_max_1d(np.ndarray[REAL_t, ndim=1] arr, const unsigned int k):
    """
    Return ``(values, indices)`` of the k largest elements of `arr` as a
    float32 array and a uint32 array (order follows the internal selection
    box, not sorted order).

    Assumes 1 <= k <= len(arr).
    """
    cdef unsigned int arr_length = arr.shape[0]
    cdef REAL_t*k_max_value = <REAL_t*> malloc(k * sizeof(REAL_t))
    cdef unsigned int*k_max_index = <unsigned int*> malloc(k * sizeof(unsigned int))
    find_k_max(&arr[0], arr_length, k, k_max_value, k_max_index)
    cdef np.ndarray[REAL_t, ndim=1] value = np.empty((k,), dtype=REAL)
    cdef np.ndarray[np.uint32_t, ndim=1] index = np.empty((k,), dtype=np.uint32)
    cdef unsigned int i
    for i in range(k):
        value[i] = k_max_value[i]
        index[i] = k_max_index[i]
    # The original leaked both scratch buffers on every call.
    free(k_max_value)
    free(k_max_index)
    return value, index

def k_max_filter_2d(np.ndarray[REAL_t, ndim=2] arr2d, const unsigned int k, dense=True):
    """
    Keep only the k largest entries of every row of `arr2d`, zeroing the rest.

    Returns a dense float32 ndarray when `dense` is true, otherwise a scipy
    CSC sparse matrix of the same shape.
    """
    cdef unsigned int n_rows = arr2d.shape[0]
    cdef unsigned int n_cols = arr2d.shape[1]
    cdef unsigned int row, slot
    cdef np.ndarray[REAL_t, ndim=2] filtered
    if dense:
        filtered = np.zeros((n_rows, n_cols), dtype=REAL)
        for row in range(n_rows):
            top_values, top_indices = find_k_max_1d(arr2d[row], k)
            for slot in range(k):
                filtered[row, top_indices[slot]] = top_values[slot]
        return filtered
    else:
        data, rows, cols = [], [], []
        for row in range(n_rows):
            top_values, top_indices = find_k_max_1d(arr2d[row], k)
            data.extend(top_values)
            rows.extend([row] * k)
            cols.extend(top_indices)
        return sparse.coo_matrix((data, (rows, cols)), shape=(n_rows, n_cols), dtype=REAL).tocsc()

cdef void sort_value_by_index(REAL_t*value, unsigned int*index, const unsigned int k) nogil:
    """Sort the parallel arrays `value`/`index` (length k) in place so that
    `index` is ascending, keeping each value paired with its index.

    Selection-style O(k^2) pass; k is expected to be small.
    """
    cdef unsigned int i, j, tmp_1, tmp_2
    cdef REAL_t tmp_v
    for i in range(k):
        tmp_1 = index[i]  # smallest index seen so far for slot i
        for j in range(i + 1, k):
            tmp_2 = index[j]
            if tmp_2 < tmp_1:
                # Swap the index pair and the paired values together.
                index[i] = tmp_2
                index[j] = tmp_1
                tmp_1 = tmp_2
                tmp_v = value[i]
                value[i] = value[j]
                value[j] = tmp_v

def sort_value_by_index_1d(np.ndarray[REAL_t, ndim=1] value, np.ndarray[np.uint32_t, ndim=1] index):
    """In place, reorder `value` and `index` together so `index` is ascending."""
    sort_value_by_index(&value[0], &index[0], value.shape[0])

cdef k_max_filter_1d(np.ndarray[REAL_t, ndim=1] arr, const np.uint8_t k):
    """Return the k largest entries of `arr` as (values, indices), ordered
    by ascending original index."""
    top_values, top_indices = find_k_max_1d(arr, k)
    sort_value_by_index_1d(top_values, top_indices)
    return top_values, top_indices

cdef void fast_sentence_sg_hs_(const unsigned int*word_point,
                               const np.uint8_t*word_code,
                               const unsigned int code_len,
                               REAL_t*syn0, REAL_t*syn1,
                               const unsigned int word2_index,
                               const unsigned int hidden_size,
                               const REAL_t alpha,
                               const np.uint8_t k,
                               REAL_t*work) nogil:
    """
    One skip-gram / hierarchical-softmax update, restricted to the k largest
    components of the context word's input vector syn0[word2_index].

    `word_point`/`word_code` (length `code_len`) describe the Huffman path of
    the center word; `work` is scratch of length >= k. Updates syn0 and syn1
    in place with learning rate `alpha`.
    """
    cdef unsigned int i, j, row1, row2
    cdef REAL_t *l1
    cdef unsigned int *k_max_index
    cdef REAL_t f, g
    row1 = word2_index * hidden_size

    # Gradient accumulator for the k active syn0 components.
    for j in range(k):
        work[j] = <REAL_t> 0.0

    # l1 holds the k largest components of syn0[word2]; k_max_index holds
    # their positions, sorted ascending.
    # (Removed the original's declared-but-unused local `l2a`.)
    l1 = <REAL_t*> malloc(k * sizeof(REAL_t))
    k_max_index = <unsigned int*> malloc(k * sizeof(unsigned int))
    find_k_max(&syn0[row1], hidden_size, k, l1, k_max_index)
    sort_value_by_index(l1, k_max_index, k)

    for i in range(code_len):
        f = <REAL_t> 0.0
        row2 = word_point[i] * hidden_size
        # f = dot(l1, syn1[node]) over the k active components only.
        for j in range(k):
            f += l1[j] * syn1[row2 + k_max_index[j]]
        # Skip saturated activations (outside the sigmoid table's domain).
        if f <= -MAX_EXP or f >= MAX_EXP:
            continue
        f = EXP_TABLE[<int> ((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
        g = (1 - word_code[i] - f) * alpha
        for j in range(k):
            work[j] += g * syn1[row2 + k_max_index[j]]
            # learn hidden -> output
        for j in range(k):
            syn1[row2 + k_max_index[j]] += g * l1[j]
    # learn input -> hidden: apply the accumulated gradient to syn0[word2].
    for i in range(k):
        syn0[row1 + k_max_index[i]] += work[i]

    free(l1)
    free(k_max_index)

def fast_sentence_sg_hs(np.ndarray[np.uint32_t, ndim=1] word_point,
                        np.ndarray[np.uint8_t, ndim=1] word_code,
                        const np.uint32_t code_len,
                        np.ndarray[REAL_t, ndim=2] syn0,
                        np.ndarray[REAL_t, ndim=2] syn1,
                        const np.uint32_t word2_index,
                        const REAL_t alpha,
                        const np.uint8_t k,
                        np.ndarray[REAL_t, ndim=1] work):
    """Python-callable wrapper around the nogil hierarchical-softmax kernel.

    Extracts raw C pointers from the numpy arrays and delegates to
    fast_sentence_sg_hs_. `work` must have length >= k; the kernel indexes
    rows as row * hidden_size, so syn0/syn1 are assumed C-contiguous float32.
    """
    cdef unsigned int hidden_size = syn0.shape[1]
    fast_sentence_sg_hs_(&word_point[0], &word_code[0], code_len, &syn0[0, 0], &syn1[0, 0], word2_index, hidden_size,
                         alpha, k, &work[0])

cdef unsigned long long fast_sentence_sg_neg_(const unsigned int negative, const np.uint32_t *table,
                                              unsigned long long table_len, REAL_t *syn0, REAL_t *syn1neg,
                                              const unsigned int hidden_size, const np.uint32_t word_index,
                                              const np.uint32_t word2_index, const REAL_t alpha,
                                              const np.uint8_t k, REAL_t *work,
                                              unsigned long long next_random) nogil:
    """
    One skip-gram / negative-sampling update, restricted to the k largest
    components of the context word's input vector syn0[word2_index].

    Draws `negative` noise words from `table` with a 48-bit linear
    congruential generator seeded by `next_random`; updates syn0 and syn1neg
    in place and returns the advanced generator state.
    """
    cdef unsigned int i, j, row1, row2
    cdef np.uint32_t target_index
    cdef REAL_t *l1
    cdef unsigned int *k_max_index
    cdef REAL_t f, g, label
    # 2**48 - 1: mask keeping the LCG state to 48 bits (java.util.Random style).
    cdef unsigned long long modulo = 281474976710655ULL
    row1 = word2_index * hidden_size

    # Gradient accumulator for the k active syn0 components.
    for j in range(k):
        work[j] = <REAL_t> 0.0

    # k largest components of syn0[word2] and their positions, sorted.
    # (Removed the original's declared-but-unused local `l2a`.)
    l1 = <REAL_t*> malloc(k * sizeof(REAL_t))
    k_max_index = <unsigned int*> malloc(k * sizeof(unsigned int))
    find_k_max(&syn0[row1], hidden_size, k, l1, k_max_index)
    sort_value_by_index(l1, k_max_index, k)

    # i == 0 is the positive sample; the remaining draws are negatives.
    for i in range(negative + 1):
        if i == 0:
            target_index = word_index
            label = <REAL_t> 1.0
        else:
            target_index = table[(next_random >> 16) % table_len]
            next_random = (next_random * <unsigned long long>25214903917ULL + 11)&modulo
            # A negative draw equal to the positive word is skipped.
            if target_index == word_index:
                continue
            label = <REAL_t> 0.0
        f = <REAL_t> 0.0
        row2 = target_index * hidden_size
        for j in range(k):
            f += l1[j] * syn1neg[row2 + k_max_index[j]]
        # Skip saturated activations (outside the sigmoid table's domain).
        if f <= -MAX_EXP or f >= MAX_EXP:
            continue
        f = EXP_TABLE[<unsigned int> ((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
        g = (label - f) * alpha
        for j in range(k):
            work[j] += g * syn1neg[<unsigned int> (row2 + k_max_index[j])]
            # learn hidden -> output
        for j in range(k):
            syn1neg[<unsigned int> (row2 + k_max_index[j])] += g * l1[j]
    # learn input -> hidden: apply the accumulated gradient to syn0[word2].
    for i in range(k):
        syn0[<unsigned int> (row1 + k_max_index[i])] += work[i]

    free(l1)
    free(k_max_index)
    return next_random

def fast_sentence_sg_neg(const unsigned int negative, np.ndarray[np.uint32_t, ndim=1] table,
                         unsigned long long table_len, np.ndarray[REAL_t, ndim=2] syn0,
                         np.ndarray[REAL_t, ndim=2] syn1neg,
                         const unsigned int hidden_size, const np.uint32_t word_index,
                         const np.uint32_t word2_index, const REAL_t alpha,
                         const np.uint8_t k, np.ndarray[REAL_t, ndim=1] work,
                         unsigned long long next_random):
    """Python-callable wrapper around the nogil negative-sampling kernel.

    Extracts raw C pointers from the numpy arrays and delegates to
    fast_sentence_sg_neg_, returning the advanced random-generator state.
    `work` must have length >= k; the kernel indexes rows as
    row * hidden_size, so syn0/syn1neg are assumed C-contiguous float32.
    """
    return fast_sentence_sg_neg_(negative, &table[0], table_len, &syn0[0, 0], &syn1neg[0, 0], hidden_size,
                                 word_index, word2_index, alpha, k, &work[0], next_random)

def train_sentence_sg(model, sentence, alpha, _work):
    """
    Train skip-gram on one `sentence` (a list of vocab objects, with None for
    out-of-vocabulary positions) at learning rate `alpha`, using the
    k-max-sparse hs/negative kernels. `_work` is a float32 scratch array of
    length >= model.k.

    Returns the number of effective (non-None) words seen.
    """
    cdef int hs = model.hs
    cdef unsigned int negative = model.negative

    cdef REAL_t *syn0 = <REAL_t *> (np.PyArray_DATA(model.syn0))
    cdef REAL_t *work
    cdef REAL_t _alpha = alpha
    cdef unsigned int k_in_model = model.k
    cdef unsigned int size = model.layer1_size

    cdef unsigned int codelens[MAX_SENTENCE_LEN]
    cdef np.uint32_t indexes[MAX_SENTENCE_LEN]
    cdef np.uint32_t reduced_windows[MAX_SENTENCE_LEN]
    cdef int sentence_len
    cdef int window = model.window

    # BUGFIX: i, j, k must be SIGNED. The window start `i - window + reduced`
    # goes negative for words near the sentence start; with the original
    # `unsigned int` declaration the value wrapped around, the `if j < 0`
    # guard was dead code, and left-context pairs were silently skipped.
    cdef int i, j, k
    cdef long result = 0

    # For hierarchical softmax
    cdef REAL_t *syn1
    cdef np.uint32_t *points[MAX_SENTENCE_LEN]
    cdef np.uint8_t *codes[MAX_SENTENCE_LEN]

    # For negative sampling
    cdef REAL_t *syn1neg
    cdef np.uint32_t *table
    cdef unsigned long long table_len
    cdef unsigned long long next_random

    if hs:
        syn1 = <REAL_t *> (np.PyArray_DATA(model.syn1))

    if negative:
        syn1neg = <REAL_t *> (np.PyArray_DATA(model.syn1neg))
        table = <np.uint32_t *> (np.PyArray_DATA(model.table))
        table_len = len(model.table)
        # 48-bit seed for the linear congruential generator used by the
        # negative-sampling kernel.
        next_random = (2 ** 24) * np.random.randint(0, 2 ** 24) + np.random.randint(0, 2 ** 24)

    # convert Python structures to primitive types, so we can release the GIL
    work = <REAL_t *> np.PyArray_DATA(_work)
    sentence_len = <int> min(MAX_SENTENCE_LEN, len(sentence))

    for i in range(sentence_len):
        word = sentence[i]
        if word is None:
            codelens[i] = 0  # OOV marker: position is skipped during training
        else:
            indexes[i] = word.index
            # Per-word random window shrink, as in standard word2vec.
            reduced_windows[i] = np.random.randint(window)
            if hs:
                codelens[i] = <int> len(word.code)
                codes[i] = <np.uint8_t *> np.PyArray_DATA(word.code)
                points[i] = <np.uint32_t *> np.PyArray_DATA(word.point)
            else:
                codelens[i] = 1

            result += 1

    # release GIL & train on the sentence
    with nogil:
        for i in range(sentence_len):
            if codelens[i] == 0:
                continue
            # Effective context window [j, k) around center word i; explicit
            # casts keep the arithmetic signed.
            j = i - window + <int> reduced_windows[i]
            if j < 0:
                j = 0
            k = i + window + 1 - <int> reduced_windows[i]
            if k > sentence_len:
                k = sentence_len
            for j in range(j, k):
                if j == i or codelens[j] == 0:
                    continue
                if hs:
                    fast_sentence_sg_hs_(points[i], codes[i], codelens[i],
                                         syn0, syn1, indexes[j], size,
                                         _alpha, k_in_model, work)
                if negative:
                    next_random = fast_sentence_sg_neg_(negative, table, table_len,
                                                        syn0, syn1neg, size, indexes[i],
                                                        indexes[j], _alpha, k_in_model,
                                                        work, next_random)
    return result


