#pragma once

#include "blob.h"
#include "operator.h"
#include "vkinfer/core/tensor.h"
#include "vkinfer/runtime/context.h"

#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <vector>

namespace vkinfer
{
    /// A loaded inference graph: owns the operators and blobs parsed from a
    /// model file and runs them in topological order on a Vulkan context.
    ///
    /// Not copy/move-restricted explicitly; all members are value types or
    /// shared_ptr, so the compiler-generated copy and move operations are
    /// correct (Rule of Zero — the previous empty destructor suppressed moves).
    class Graph
    {
    private:
        // Execution context shared with operators/blobs (ownership shared).
        std::shared_ptr<Context> context;

        // input/output blob names, in model order.
        std::vector<std::string> input_names;
        std::vector<std::string> output_names;

        // network structure: name -> node / tensor storage.
        std::map<std::string, std::shared_ptr<Operator>> operators;
        std::map<std::string, std::shared_ptr<Blob>> blobs;

        // topologically sorted layer names; forward() executes in this order.
        std::vector<std::string> sorted_layers;

        // Set by the constructor after the model is parsed and validated.
        // Default-initialized so valid() never reads an indeterminate value
        // even if a constructor path forgets to assign it.
        bool is_valid = false;

    public:
        /// Parse and build the graph from a serialized model file.
        /// `explicit` prevents accidental implicit conversion from a string.
        /// Check valid() after construction; no exception contract is
        /// declared here — TODO confirm error reporting with the definition.
        explicit Graph(const std::string& model_file);

        /// Whether the model was loaded and validated successfully.
        bool valid() const { return is_valid; }

        // Accessors return const references to avoid copying the name lists
        // on every call; callers that need a copy can still take one.
        const std::vector<std::string>& get_input_names() const { return input_names; }
        const std::vector<std::string>& get_output_names() const { return output_names; }
        const std::vector<std::string>& get_layer_names() const { return sorted_layers; }

        // do model inference over all layers in topological order
        void forward();
#if ALLOW_PARTIAL_FORWARD
        // TODO: partial inference.
        std::string partial_forward(uint32_t op_index);
#endif
        // feed input data from host cpu; returns false on failure
        // (presumably unknown name or shape mismatch — confirm in definition)
        bool feed_input(const std::string& name, const std::vector<float>& data);
        bool feed_input(const std::string& name, const std::vector<float>& data, const std::vector<uint32_t>& shape);
        bool feed_input(const std::string& name, const std::shared_ptr<Tensor<float>>& tensor);
        // load output data to host; returns false on failure
        bool load_output(const std::string& name, std::vector<float>& data);
    };
}
