#include "htf_ai_interface.h"
#include <stdio.h>
#include "htf_ai_model.h"   // Include layer_t, ACTIVATION_*, and activation function definitions
#include <string.h>
#include <math.h>

// softmax (used for output normalization)
static void softmax(float* data, int size) {
    float max_val = data[0];
    for (int i = 1; i < size; i++) {
        if (data[i] > max_val) max_val = data[i];
    }
    float sum = 0.0f;
    for (int i = 0; i < size; i++) {
        data[i] = __builtin_expf(data[i] - max_val);
        sum += data[i];
    }
    for (int i = 0; i < size; i++) {
        data[i] /= sum;
    }
}

// Forward propagation
int htf_ai_run_inference(const float* input_data, int input_size,
                         float* output_data, int output_size) {
    // Get the model
    extern const layer_t* htf_ai_get_model_layers(int* layer_count);
    extern int htf_ai_get_input_size(void);
    extern int htf_ai_get_output_size(void);

    if (!input_data || !output_data ||
        input_size != htf_ai_get_input_size() ||
        output_size != htf_ai_get_output_size()) {
        return -1;
    }

    int layer_count = 0;
    const layer_t* layers = htf_ai_get_model_layers(&layer_count);
    if (!layers || layer_count == 0) return -2;

    // Allocate intermediate buffer (maximum layer width)
    #define MAX_LAYER_WIDTH 16
    float buffer_a[MAX_LAYER_WIDTH];
    float buffer_b[MAX_LAYER_WIDTH];
    float* curr = buffer_a;
    float* next = buffer_b;

    // Copy input
    memcpy(curr, input_data, input_size * sizeof(float));

    // Perform layer-by-layer inference
    for (int l = 0; l < layer_count; l++) {
        const layer_t* layer = &layers[l];
        int in_size = layer->input_size;
        int out_size = layer->output_size;

        for (int i = 0; i < out_size; i++) {
            float sum = layer->bias[i];
            for (int j = 0; j < in_size; j++) {
                sum += curr[j] * layer->weight[j * out_size + i];
            }

            // Activation (using functions defined in htf_ai_model.h)
            switch (layer->act) {
                case ACTIVATION_RELU:   next[i] = relu(sum); break;
                case ACTIVATION_SIGMOID:next[i] = sigmoid(sum); break;
                case ACTIVATION_TANH:   next[i] = tanh_f(sum); break;
                default:                next[i] = sum; break;
            }
        }

        // Swap buffers
        float* tmp = curr;
        curr = next;
        next = tmp;
    }

    // Output the final layer to result (and apply softmax)
    memcpy(output_data, curr, output_size * sizeof(float));
    softmax(output_data, output_size);

    return 0;
}

// Initialize the AI engine. No resources are acquired; this simply
// announces readiness on stdout and reports success.
int htf_ai_init(void) {
    static const char ready_msg[] =
        "[HTF AI] Enhanced native AI engine ready (RISC-V LiteOS_M)\n";
    printf("%s", ready_msg);
    return 0;
}

// Tear down the AI engine. Nothing was allocated by htf_ai_init,
// so there is intentionally nothing to release here.
void htf_ai_deinit(void) {
    /* no-op: no resources to free */
}