#ifndef ACTIVATIONFUNCTION_AVX2_H
#define ACTIVATIONFUNCTION_AVX2_H
//#ifdef X86_64_ARCH
#include <cmath>
#include <immintrin.h>



inline void leakyReLU(const double* input, double* output, size_t size, double alpha = 0.01) {
    const size_t vectorSize = 4; // AVX2 supports 4 doubles per vector
    __m256d alphaVec = _mm256_set1_pd(alpha);
    __m256d zeroVec = _mm256_setzero_pd();

    size_t i;
    for (i = 0; i + vectorSize <= size; i += vectorSize) {
        __m256d inputVec = _mm256_loadu_pd(&input[i]);
        __m256d mask = _mm256_cmp_pd(inputVec, zeroVec, _CMP_LE_OQ);
        __m256d scaledVec = _mm256_mul_pd(inputVec, alphaVec);
        __m256d outputVec = _mm256_blendv_pd(inputVec, scaledVec, mask);
        _mm256_storeu_pd(&output[i], outputVec);
    }

    // 处理剩余的元素
    for (; i < size; ++i) {
        output[i] = input[i] > 0 ? input[i] : alpha * input[i];
    }
}

inline __m256d exp_pd(__m256d x) {
    // Constants for the polynomial approximation
    __m256d c12 = _mm256_set1_pd(0.00000000208767569878681);
    __m256d c11 = _mm256_set1_pd(0.00000002505210838544172);
    __m256d c10 = _mm256_set1_pd(0.0000002755731922398589);
    __m256d c9 = _mm256_set1_pd(0.000002755731922398589);
    __m256d c8 = _mm256_set1_pd(0.0000248015873015873);
    __m256d c7 = _mm256_set1_pd(0.0001984126984126984);
    __m256d c6 = _mm256_set1_pd(0.001388888888888889);
    __m256d c5 = _mm256_set1_pd(0.008333333333333333);
    __m256d c4 = _mm256_set1_pd(0.041666666666666664);
    __m256d c3 = _mm256_set1_pd(0.16666666666666666);
    __m256d c2 = _mm256_set1_pd(0.5);
    __m256d c1 = _mm256_set1_pd(1.0);
    __m256d c0 = _mm256_set1_pd(1.0);

    // Polynomial approximation for exp(x) using Horner's method
    __m256d result = c12;
    result = _mm256_fmadd_pd(result, x, c11);
    result = _mm256_fmadd_pd(result, x, c10);
    result = _mm256_fmadd_pd(result, x, c9);
    result = _mm256_fmadd_pd(result, x, c8);
    result = _mm256_fmadd_pd(result, x, c7);
    result = _mm256_fmadd_pd(result, x, c6);
    result = _mm256_fmadd_pd(result, x, c5);
    result = _mm256_fmadd_pd(result, x, c4);
    result = _mm256_fmadd_pd(result, x, c3);
    result = _mm256_fmadd_pd(result, x, c2);
    result = _mm256_fmadd_pd(result, x, c1);
    result = _mm256_fmadd_pd(result, x, c0);

    return result;
}

// AVX2 Relu function for double
inline __m256d Relu(__m256d x) { return _mm256_max_pd(x, _mm256_setzero_pd());}


//这个实现使用了五阶Padé近似Tanh
__m256d tanh_avx2(__m256d x) {
    constexpr double pade_a5 = 1.0 / 2310.0;
    constexpr double pade_a3 = 17.0 / 2310.0;
    constexpr double pade_a1 = 376.0 / 2310.0;
    constexpr double pade_b4 = 10.0 / 2310.0;
    constexpr double pade_b2 = 190.0 / 2310.0;
    constexpr double pade_b0 = 179.0 / 2310.0;

    __m256d x2 = _mm256_mul_pd(x, x);
    __m256d x4 = _mm256_mul_pd(x2, x2);

    __m256d a = _mm256_fmadd_pd(_mm256_set1_pd(pade_a5), x4,
                                _mm256_fmadd_pd(_mm256_set1_pd(pade_a3), x2,
                                                _mm256_set1_pd(pade_a1)));
    __m256d b = _mm256_fmadd_pd(_mm256_set1_pd(pade_b4), x4,
                                _mm256_fmadd_pd(_mm256_set1_pd(pade_b2), x2,
                                                _mm256_set1_pd(pade_b0)));
    __m256d tanh_x = _mm256_div_pd(a, b);
    return tanh_x;
}

// AVX2 Tanh function for double
inline __m256d Tanh(__m256d x) {
    __m256d exp_x = exp_pd(x);
    __m256d exp_neg_x = exp_pd(_mm256_sub_pd(_mm256_setzero_pd(), x));
    return _mm256_div_pd(_mm256_sub_pd(exp_x, exp_neg_x), _mm256_add_pd(exp_x, exp_neg_x));
}

// AVX2 logistic sigmoid for 4 doubles: 1 / (1 + e^-x).
// Accuracy is inherited entirely from exp_pd.
inline __m256d Sigmoid(__m256d x) {
    const __m256d one = _mm256_set1_pd(1.0);
    __m256d neg_x = _mm256_sub_pd(_mm256_setzero_pd(), x);
    __m256d denom = _mm256_add_pd(one, exp_pd(neg_x));
    return _mm256_div_pd(one, denom);
}

// AVX2 Softmax function for double
inline __m256d Softmax(__m256d x, __m256d sum) {
    __m256d exp_x = exp_pd(x);
    return _mm256_div_pd(exp_x, sum);
}

// AVX2 Derivative of Relu function for double
inline __m256d dRelu(__m256d x) {
    return _mm256_and_pd(_mm256_cmp_pd(x, _mm256_setzero_pd(), _CMP_GT_OQ), _mm256_set1_pd(1.0));
}

// Derivative of tanh for 4 doubles: 1 - tanh(x)^2.
// Uses separate multiply and subtract (not an FMA) to keep rounding
// identical to the straightforward formula.
inline __m256d dTanh(__m256d x) {
    __m256d t = Tanh(x);
    __m256d t_sq = _mm256_mul_pd(t, t);
    return _mm256_sub_pd(_mm256_set1_pd(1.0), t_sq);
}

// Derivative of the sigmoid for 4 doubles: s * (1 - s), with s = Sigmoid(x).
inline __m256d dSigmoid(__m256d x) {
    __m256d s = Sigmoid(x);
    __m256d one_minus_s = _mm256_sub_pd(_mm256_set1_pd(1.0), s);
    return _mm256_mul_pd(s, one_minus_s);
}

// AVX2 Derivative of Softmax function for double
inline __m256d dSoftmax(__m256d x, __m256d sum) {
    __m256d exp_x = exp_pd(x);
    return _mm256_div_pd(exp_x, sum);
}

//#endif
#endif //!ACTIVATIONFUNCTION_AVX2_H