//
// Created by SongpingWang on 2025/2/14.
//
#include <iostream>
#include <fstream>
#include <string>
#include <unordered_map>
#include <stdexcept>
#include "utils.h"
#include <filesystem>


namespace {

/// Look up a required key in a config section and parse it as int.
/// Throws std::runtime_error naming the key when it is absent or not numeric,
/// instead of std::stoi's unhelpful "stoi" message. Does not mutate `section`
/// (unlike operator[], which default-inserts missing keys).
/// `Section` is assumed to be a map-like type of string -> string
/// (load_config's return type is declared in utils.h — confirm there).
template <typename Section>
int get_int(const Section& section, const std::string& key) {
    const auto it = section.find(key);
    if (it == section.end()) {
        throw std::runtime_error("Missing config key: " + key);
    }
    try {
        return std::stoi(it->second);
    } catch (const std::exception&) {
        throw std::runtime_error("Invalid integer for config key '" + key +
                                 "': '" + it->second + "'");
    }
}

/// Look up an optional boolean flag. Returns true only when the key exists
/// and its value is exactly "true" — same semantics as the original
/// `config[...] == "true"` comparison, but without inserting a missing key.
template <typename Section>
bool get_bool(const Section& section, const std::string& key) {
    const auto it = section.find(key);
    return it != section.end() && it->second == "true";
}

}  // namespace

int main(int argc, char* argv[]) {
    if (argc != 2) {
        std::cerr << "Usage: " << argv[0] << " <config_file>" << std::endl;
        return 1;
    }

    std::string config_file = argv[1];

    try {
        auto config = load_config(config_file);
        // operator[] is fine here: a missing "DEFAULT" section yields an
        // empty section, and the helpers below report the missing keys.
        const auto& section = config["DEFAULT"];

        int input_size = get_int(section, "input_size");
        int batch_size = get_int(section, "batch_size");
        int local_rank = get_int(section, "local_rank");
        int epochs = get_int(section, "epochs");
        bool train_mode = get_bool(section, "train");
        bool test_mode = get_bool(section, "test");

        // Simulate environment variables
        int world_size = 1;  // This should be set based on your environment
        bool distributed = world_size > 1;

        if (distributed) {
            // Initialize distributed process group (pseudo-code)
            // torch::cuda::set_device(local_rank);
            // torch::distributed::init_process_group(backend, init_method);
        }

        // Only rank 0 creates the output directory; create_directories is
        // idempotent, so no separate exists() check is needed.
        if (local_rank == 0) {
            std::filesystem::create_directories("weights");
        }

        // Load additional parameters from YAML (pseudo-code)
        // YAML::Node params = YAML::LoadFile("utils/args.yaml");

        // Setup seed and multi-processes (pseudo-code)
        // util::setup_seed();
        // util::setup_multi_processes();

        // Profile and train/test functions (pseudo-code)
        // profile(input_size, batch_size, local_rank, epochs, params);
        if (train_mode) {
            // train(input_size, batch_size, local_rank, epochs, params);
        }
        if (test_mode) {
            // test(input_size, batch_size, local_rank, epochs, params);
        }

        if (distributed) {
            // Destroy process group (pseudo-code)
            // torch::distributed::destroy_process_group();
        }

    } catch (const std::exception& e) {
        std::cerr << "Error: " << e.what() << std::endl;
        return 1;
    }

    return 0;
}
