#include "vector/cross_entropy_loss.h"

#include <immintrin.h>  // AVX intrinsics
#include "stdint.h"

#include <algorithm>   // std::max, std::transform
#include <cmath>       // std::log
#include <execution>   // parallel execution policies
#include <functional>  // std::plus
#include <numeric>     // std::reduce, std::transform_reduce

#include "vector/tensor.h"

/// @brief Cross-entropy loss for one sample: loss = -sum_i label[i] * log(max(input[i], eps)).
/// @param input predicted probabilities (expected in [0, 1]; clamped from below by epsilon)
/// @param label target distribution (typically one-hot), same length as input
/// @param loss  out-parameter receiving the scalar loss
void CrossEntropyLoss::forward(
    const std::vector<double>& input,
    const std::vector<double>& label,
    double& loss
) {
    const size_t n = input.size();
    const double epsilon = 1e-15;  // floor for input so log() never sees 0

#ifdef __AVX__
    // SIMD path: compute -label * log(max(input, eps)) element-wise into a
    // scratch buffer, then reduce once at the end.
    std::vector<double> output(n);
    constexpr size_t simd_width = 4;  // one AVX register holds 4 doubles
    const size_t vec_end = n - (n % simd_width);

    #pragma omp parallel for
    for (size_t i = 0; i < vec_end; i += simd_width) {
        __m256d input_vec = _mm256_loadu_pd(&input[i]);
        __m256d label_vec = _mm256_loadu_pd(&label[i]);

        // max(input, epsilon) guards against log(0).
        __m256d eps_vec = _mm256_set1_pd(epsilon);
        __m256d max_vec = _mm256_max_pd(input_vec, eps_vec);

        // Plain AVX has no _mm256_log_pd, so spill the lane values to a
        // stack array and take the four logs with scalar std::log.
        alignas(32) double max_arr[4];
        _mm256_store_pd(max_arr, max_vec);

        __m256d log_vec = _mm256_set_pd(
            std::log(max_arr[3]),
            std::log(max_arr[2]),
            std::log(max_arr[1]),
            std::log(max_arr[0])
        );
        __m256d result = _mm256_mul_pd(
            _mm256_mul_pd(log_vec, _mm256_set1_pd(-1.0)),
            label_vec
        );

        _mm256_storeu_pd(&output[i], result);
    }

    // Scalar tail: at most simd_width - 1 elements, so a parallel-for here
    // would be pure thread-dispatch overhead.
    for (size_t i = vec_end; i < n; ++i) {
        output[i] = -std::log(std::max(input[i], epsilon)) * label[i];
    }

    loss = std::reduce(output.begin(), output.end(), 0.0);
#else
    // Scalar fallback: fused map+sum, no temporary buffer needed.
    loss = std::transform_reduce(
        input.begin(), input.end(), label.begin(), 0.0,
        std::plus<>{},
        [epsilon](double x, double y) {
            return -std::log(std::max(x, epsilon)) * y;
        }
    );
#endif
}

/// @brief Gradient of the per-sample cross-entropy loss w.r.t. input:
///        d/d input[i] = -label[i] / max(input[i], eps).
/// @param input predicted probabilities for one sample
/// @param label target distribution, same length as input
/// @return element-wise gradient vector of the same length
std::vector<double> CrossEntropyLoss::backward(const std::vector<double>& input, const std::vector<double>& label) {
    const size_t n = input.size();
    const double epsilon = 1e-15;  // same floor as forward(), avoids division by 0
    std::vector<double> derivative(n);

#ifdef __AVX__
    constexpr size_t simd_width = 4;  // one AVX register holds 4 doubles
    const size_t vec_end = n - (n % simd_width);

    // Full-width SIMD iterations only; the tail is handled by the scalar
    // loop below, so there is no bounds branch inside the hot loop.
    #pragma omp parallel for
    for (size_t i = 0; i < vec_end; i += simd_width) {
        __m256d input_vec = _mm256_loadu_pd(&input[i]);
        __m256d label_vec = _mm256_loadu_pd(&label[i]);
        __m256d grad_vec = _mm256_div_pd(
            _mm256_mul_pd(label_vec, _mm256_set1_pd(-1.0)),
            _mm256_max_pd(input_vec, _mm256_set1_pd(epsilon))
        );
        _mm256_storeu_pd(&derivative[i], grad_vec);
    }

    // Scalar tail: at most simd_width - 1 elements.
    for (size_t i = vec_end; i < n; ++i) {
        derivative[i] = -label[i] / std::max(input[i], epsilon);
    }
#else
    #pragma omp parallel for
    for (size_t i = 0; i < n; ++i) {
        derivative[i] = -label[i] / std::max(input[i], epsilon);
    }
#endif

    return derivative;
}
void CrossEntropyLoss::forward(const std::vector<std::vector<double>>& inputs, const std::vector<std::vector<double>>& labels, double& loss)
{
    input_batch_ = inputs;
    label_batch_ = labels;

    loss = 0;
    auto n = inputs.size();
    for(int64_t index = 0; index < n; index++)
    {
        double single_loss{};
        forward(inputs[index], labels[index], single_loss);
        loss += single_loss;
    }
}

std::vector<std::vector<double>> CrossEntropyLoss::backward()
{
    const size_t n = input_batch_.size();
    const size_t d = input_batch_[0].size();
    std::vector<std::vector<double>> derivative(n, std::vector<double>(d));
    for(int64_t index = 0; index < n; index++)
    {
        derivative[index] = backward(input_batch_[index], label_batch_[index]); 
    }
    return Tensor::multiply(derivative, 1.0/n);
}