#include "Model.hpp"

#include <algorithm>
#include <cstring>
#include <mutex>
#include <stdexcept>


// Human-readable activity names for the classifier's 6 output indices.
// NOTE(review): index i is presumably the model's output class i — keep this
// order in sync with the trained model's label encoding; confirm against training code.
const std::string Model::labels[6] = {
    "Walking",
    "Walking Upstairs",
    "Walking Downstairs",
    "Sitting",
    "Standing",
    "Laying"
};

/// Loads an MNN model from @p modelPath and prepares a session plus the
/// default input/output tensors.
///
/// @param modelPath  Filesystem path to the serialized MNN model.
/// @throws std::runtime_error if the interpreter, session, or either tensor
///         cannot be created.
///
/// If construction fails after the interpreter exists, ~Model() will NOT run
/// (destructors are only called for fully-constructed objects), so partially
/// acquired MNN resources are released here before rethrowing.
Model::Model(const std::string& modelPath) {
    interpreter = MNN::Interpreter::createFromFile(modelPath.c_str());
    if (interpreter == nullptr) {
        throw std::runtime_error("Failed to create interpreter");
    }
    // Ensure a defined value before the try block so the catch handler can
    // safely decide whether a session must be released.
    session = nullptr;
    try {
        MNN::ScheduleConfig config;  // default backend/thread configuration
        session = interpreter->createSession(config);
        if (session == nullptr) {
            throw std::runtime_error("Failed to create session");
        }
        // nullptr name selects the model's sole/default input and output.
        inputTensor = interpreter->getSessionInput(session, nullptr);
        if (inputTensor == nullptr) {
            throw std::runtime_error("Failed to get input tensor");
        }
        outputTensor = interpreter->getSessionOutput(session, nullptr);
        if (outputTensor == nullptr) {
            throw std::runtime_error("Failed to get output tensor");
        }
    } catch (...) {
        // Roll back in reverse acquisition order, then propagate the error.
        if (session != nullptr) {
            interpreter->releaseSession(session);
        }
        MNN::Interpreter::destroy(interpreter);
        interpreter = nullptr;
        throw;
    }
}

/// Releases the MNN session, model buffers, and interpreter (in that order).
Model::~Model() {
    // Nothing was acquired if the interpreter is absent.
    if (interpreter == nullptr) {
        return;
    }
    if (session != nullptr) {
        interpreter->releaseSession(session);
    }
    interpreter->releaseModel();
    MNN::Interpreter::destroy(interpreter);
}

/// Copies one input window into the model's input tensor.
///
/// @param inputs  128 time steps x 6 channels of float samples; the caller
///                must supply exactly this shape (the parameter decays to a
///                pointer, so the sizes are not enforced by the compiler).
/// @throws std::runtime_error if the tensor cannot be mapped.
void Model::fillInputTensor(const float inputs[128][6]) {
    // Total element count of the expected 128x6 window.
    constexpr std::size_t kInputFloats = 128 * 6;
    // Map the tensor for writing; MNN returns a host-visible pointer.
    float* input_map = static_cast<float*>(
        inputTensor->map(MNN::Tensor::MAP_TENSOR_WRITE, inputTensor->getDimensionType()));
    if (input_map == nullptr) {
        throw std::runtime_error("Failed to map input tensor");
    }
    // A 2-D float array is contiguous, so a single memcpy copies the window.
    std::memcpy(input_map, inputs, kInputFloats * sizeof(float));
    // Unmap to commit the written data back to the backend.
    inputTensor->unmap(MNN::Tensor::MAP_TENSOR_WRITE, inputTensor->getDimensionType(), input_map);
}

void Model::predict(std::array<float, 6>& output) {
    // run the session
    MNN::ErrorCode errorCode = interpreter->runSession(session);
    if (errorCode != MNN::ErrorCode::NO_ERROR) {
        throw std::runtime_error("Failed to run session");
    }
    // get the output tensor
    float* output_map = (float*) outputTensor->map(MNN::Tensor::MAP_TENSOR_READ, outputTensor->getDimensionType());
    if (output_map == nullptr) {
        throw std::runtime_error("Failed to map output tensor");
    }
    // copy the output data into the output array
    std::copy(output_map, output_map + 6, output.begin());
    // unmap the output tensor
    outputTensor->unmap(MNN::Tensor::MAP_TENSOR_READ, outputTensor->getDimensionType(), output_map);
}
