#include "vector/softmax.h"

#include <cmath>
#include <numeric>
#include <algorithm>
#include <immintrin.h> 
#include "log.h"

#if 0
// Disabled reference implementation (kept under #if 0): naive softmax.
// output[i] = exp(input[i]) / sum_k exp(input[k]).
// NOTE(review): exp() is evaluated twice per element, output is grown
// without reserve(), and there is no max-subtraction — exp() overflows to
// +inf for large inputs, yielding NaN after normalization. The active
// branch below addresses these.
void SoftMax::forward(const std::vector<double>& input, std::vector<double> &output)
{
    input_ = input;  // cache the input for use by backward()
    double sum{};
    // First pass: accumulate the normalizer sum_k exp(x_k).
    for(auto &item : input_)
    {
        sum += std::exp(item);
    }
    output.clear();
    // Second pass: recompute exp(x_i) and normalize.
    for(auto &item : input_)
    {
        auto y_ba = std::exp(item)/sum;
        output.push_back(y_ba);
    }
    output_ = output;  // cache the result for use by backward()
}

// Disabled reference implementation (kept under #if 0): softmax Jacobian
// of the cached output, J[i][j] = S_i * (delta_ij - S_j).
// NOTE(review): n and m are both output_.size() (redundant); the int64_t
// loop indices are compared against unsigned sizes; delta(i, j) is a
// helper declared elsewhere — presumably the Kronecker delta, to confirm.
std::vector<std::vector<double>> SoftMax::backward()
{
    auto n = output_.size();
    auto m = output_.size();  // same as n — square Jacobian

    std::vector<std::vector<double>> derivative;
    for(int64_t i = 0; i < n; i++)
    {
        std::vector<double> tmp{};
        for(int64_t j = 0; j < m; j++)
        {
            tmp.push_back(output_[i]*(delta(i, j) - output_[j]));
        }
        derivative.push_back(tmp);
    }
    return derivative;
}
#else
#if 1
/// Softmax over a single vector: output[i] = exp(input[i]) / sum_k exp(input[k]).
///
/// Numerical stability: softmax is shift-invariant, so the maximum input is
/// subtracted before exponentiating. This keeps every exp() argument <= 0 and
/// prevents overflow to +inf (which would turn the whole output into NaN) for
/// large inputs. The previous `input[i] + epsilon` had no effect at all: it
/// scaled numerator and denominator by the same exp(epsilon) factor.
///
/// @param input   logits; may be empty (output is then empty too)
/// @param output  resized to input.size() and filled with probabilities
void SoftMax::forward(const std::vector<double>& input, std::vector<double>& output) {
    input_ = input;  // cached for backward(); drop if not needed
    const size_t n = input.size();
    output.resize(n);

    if (n == 0) {
        output_ = output;
        return;
    }

    // Shift by the max input — see stability note above.
    const double max_val = *std::max_element(input.begin(), input.end());

    // Compute each exp(x_i - max) exactly once, accumulating the normalizer.
    std::vector<double> exp_values(n);
    double sum = 0.0;
    for (size_t i = 0; i < n; ++i) {
        exp_values[i] = std::exp(input[i] - max_val);
        sum += exp_values[i];
    }

    const double inv_sum = 1.0 / sum;  // one division instead of n
    for (size_t i = 0; i < n; ++i) {
        output[i] = exp_values[i] * inv_sum;
    }

    output_ = output;  // cached for backward(); drop if not needed
}
#else
// Disabled alternative (kept under #else): the same softmax expressed with
// STL algorithms instead of hand-written loops.
// NOTE(review): like the other disabled variant, this has no max-subtraction,
// so exp() can overflow for large inputs; it also does not cache input_.
void SoftMax::forward(const std::vector<double>& input, std::vector<double>& output) {
    output.resize(input.size());
    std::vector<double> exp_values(input.size());

    // exp_values[i] = exp(input[i]); then normalize by the accumulated sum.
    std::transform(input.begin(), input.end(), exp_values.begin(), [](double x) { return std::exp(x); });
    const double sum = std::accumulate(exp_values.begin(), exp_values.end(), 0.0);
    const double inv_sum = 1.0 / sum;  // single division, multiplied per element

    std::transform(exp_values.begin(), exp_values.end(), output.begin(), [inv_sum](double exp_x) { return exp_x * inv_sum; });
    output_ = output;  // cache the result for use by backward()
}
#endif

#if 0
// Disabled alternative (kept under #if 0): softmax Jacobian of the cached
// output, row-major traversal. J[i][j] = S_i * (delta_ij - S_j).
std::vector<std::vector<double>> SoftMax::backward() {
    const size_t n = output_.size();
    std::vector<std::vector<double>> derivative(n, std::vector<double>(n));  // preallocate the full n x n matrix

    for (size_t i = 0; i < n; ++i) {
        const double Si = output_[i];
        for (size_t j = 0; j < n; ++j) {
            derivative[i][j] = Si * ((i == j) - output_[j]);  // compute directly, avoiding a delta() call
        }
    }

    return derivative;
}
#else
/// Jacobian of the most recent forward() call: J[i][j] = S_i * (delta_ij - S_j)
/// where S is the cached output_.
///
/// Delegates to the stateless overload backward(const std::vector<double>&)
/// so the Jacobian formula lives in exactly one place — the previous body
/// duplicated that overload line for line.
///
/// @return n x n Jacobian matrix, n = output_.size()
std::vector<std::vector<double>> SoftMax::backward() {
    return backward(output_);
}

#endif
#endif

/// Jacobian of softmax for a given output vector S:
///   J[i][j] = S_i * (delta_ij - S_j)
///
/// The row index i runs in the OUTER loop so the inner loop writes one
/// contiguous row (derivative[i]); the previous j-outer order strode across
/// all n rows per inner iteration, which defeated both the cache and the
/// `omp simd` pragma. Element values are unchanged.
///
/// @param output  a softmax output vector S (probabilities)
/// @return        n x n Jacobian matrix, n = output.size()
std::vector<std::vector<double>> SoftMax::backward(const std::vector<double> &output)
{
    const size_t n = output.size();
    std::vector<std::vector<double>> derivative(n, std::vector<double>(n));

    for (size_t i = 0; i < n; ++i) {
        const double Si = output[i];
        double* row = derivative[i].data();  // contiguous destination for the simd loop

        #pragma omp simd
        for (size_t j = 0; j < n; ++j) {
            // (i == j) converts to 1.0 on the diagonal, 0.0 elsewhere.
            row[j] = Si * ((i == j) - output[j]);
        }
    }
    return derivative;
}

/// Batched softmax: one independent softmax per sample (row) of the input.
///
/// @param input    batch of logit vectors; rows may have different lengths
/// @param outputs  cleared and refilled with one probability vector per sample
void SoftMax::forward(const std::vector<std::vector<double>>& input, std::vector<std::vector<double>> &outputs)
{
    outputs.clear();
    outputs.reserve(input.size());  // single allocation for the whole batch
    for (const auto& sample : input)
    {
        std::vector<double> output;  // forward() resizes it; no need to pre-size
        forward(sample, output);
        outputs.push_back(std::move(output));  // move — avoid copying each row
    }
    output_batch_ = outputs;  // cached for the batched backward()
}

/// Batched backward pass: chains the upstream gradient of each sample through
/// that sample's softmax Jacobian.
///
/// For each sample: grad_in = (J * grad_out^T)^T where J is the n x n Jacobian
/// from backward(output_batch_[index]).
///
/// NOTE(review): assumes output_derivative has at least output_batch_.size()
/// rows, each matching its sample's width — TODO confirm against callers.
///
/// @param output_derivative  upstream gradients, one row per batched sample
/// @return                   input gradients, one row per batched sample
std::vector<std::vector<double>> SoftMax::backward(const std::vector<std::vector<double>> &output_derivative)
{
    const size_t n = output_batch_.size();
    std::vector<std::vector<double>> derivatives;
    derivatives.reserve(n);

    // size_t index: the original int64_t counter mixed signed/unsigned in the
    // comparison against the container size.
    for (size_t index = 0; index < n; ++index)
    {
        // Full Jacobian of this sample's softmax output.
        auto jacobian = backward(output_batch_[index]);
        // Upstream gradient viewed as a 1 x n row matrix.
        auto upstream = std::vector<std::vector<double>>(1, output_derivative[index]);
        // J (n x n) dot upstream^T (n x 1) -> n x 1 column vector...
        auto column = Tensor::dot(jacobian, Tensor::transpose(upstream));
        // ...transposed back into a flat row of n input gradients.
        derivatives.push_back(Tensor::transpose(column)[0]);
    }
    return derivatives;
}