#pragma once

#include <cstddef>
#include <functional>
#include <map>
#include <mutex>
#include <string>
#include <vector>

#include <nlohmann/json.hpp>

#include "model_types.h"

namespace lemon {

using json = nlohmann::json;

// Progress information for download operations.
// A terminal update has either complete == true (success) or a
// non-empty error message (failure).
struct DownloadProgress {
    std::string file;                 // Current file being downloaded
    int file_index = 0;               // Current file index (1-based)
    int total_files = 0;              // Total number of files to download
    std::size_t bytes_downloaded = 0; // Bytes downloaded for current file
    std::size_t bytes_total = 0;      // Total bytes for current file
    int percent = 0;                  // Overall percentage (0-100)
    bool complete = false;            // True when all downloads finished
    std::string error;                // Error message if failed (empty otherwise)
};

// Callback invoked with each progress update during a download.
using DownloadProgressCallback = std::function<void(const DownloadProgress&)>;

// Metadata for a single model known to the ModelManager, whether it comes
// from server_models.json, a user registration, or a local upload.
// Declaration order matters: callers may rely on aggregate initialization.
struct ModelInfo {
    std::string model_name;
    std::string checkpoint;      // Original checkpoint identifier (for downloads/display)
    std::string resolved_path;   // Absolute path to model file/directory on disk
    std::string recipe;          // NOTE(review): presumably selects the inference backend/recipe — confirm in .cpp
    std::vector<std::string> labels;  // Capability/category labels (contents not shown in this header)
    bool suggested = false;      // Whether the model is flagged as suggested to users
    std::string mmproj;          // Multimodal projector identifier, empty if none — see download/register params
    std::string source;  // "local_upload" for locally uploaded models
    bool downloaded = false;     // Whether model is downloaded and available
    double size = 0.0;   // Model size in GB
    
    // Multi-model support fields
    ModelType type = ModelType::LLM;      // Model type for LRU cache management
    DeviceType device = DEVICE_NONE;      // Target device(s) for this model
};

// Central registry for models.
//
// Combines the server-provided catalog (server_models.json) with
// user-registered models, and exposes download / delete / lookup
// operations on top of an internal ModelInfo cache.
//
// Thread-safety: models_cache_ is guarded by models_cache_mutex_; both are
// mutable (together with cache_valid_) so const member functions can
// rebuild the cache lazily.
class ModelManager {
public:
    ModelManager();
    
    // Get all supported models from server_models.json
    std::map<std::string, ModelInfo> get_supported_models();
    
    // Get downloaded models
    std::map<std::string, ModelInfo> get_downloaded_models();
    
    // Filter models by available backends.
    // NOTE(review): presumably drops entries whose recipe has no usable
    // backend on this host — confirm against the implementation.
    std::map<std::string, ModelInfo> filter_models_by_backend(
        const std::map<std::string, ModelInfo>& models);
    
    // Register a user model.
    // The capability flags (reasoning/vision/embedding/reranking) describe
    // the model; mmproj and source are optional metadata (see ModelInfo).
    void register_user_model(const std::string& model_name,
                            const std::string& checkpoint,
                            const std::string& recipe,
                            bool reasoning = false,
                            bool vision = false,
                            bool embedding = false,
                            bool reranking = false,
                            const std::string& mmproj = "",
                            const std::string& source = "");
    
    // Download a model.
    // Optional parameters mirror register_user_model; progress_callback,
    // if non-null, receives DownloadProgress updates during the transfer.
    void download_model(const std::string& model_name,
                       const std::string& checkpoint = "",
                       const std::string& recipe = "",
                       bool reasoning = false,
                       bool vision = false,
                       bool embedding = false,
                       bool reranking = false,
                       const std::string& mmproj = "",
                       bool do_not_upgrade = false,
                       DownloadProgressCallback progress_callback = nullptr);
    
    // Delete a model
    void delete_model(const std::string& model_name);
    
    // Get model info by name
    ModelInfo get_model_info(const std::string& model_name);
    
    // Check if model exists
    bool model_exists(const std::string& model_name);
    
    // Check if model is downloaded
    bool is_model_downloaded(const std::string& model_name);
    
    // Check if model is downloaded with optional FLM cache (optimization).
    // Pass the result of get_flm_installed_models() to avoid repeated FLM
    // queries when checking many models in a loop.
    bool is_model_downloaded(const std::string& model_name, 
                             const std::vector<std::string>* flm_cache);
    
    // Get list of installed FLM models (for caching)
    std::vector<std::string> get_flm_installed_models();
    
    // Get HuggingFace cache directory (respects HF_HUB_CACHE, HF_HOME, and platform defaults)
    std::string get_hf_cache_dir() const;
    
private:
    // Load the server-side model catalog (server_models.json).
    json load_server_models();
    // Load / persist the user-registered model catalog.
    json load_user_models();
    void save_user_models(const json& user_models);
    
    std::string get_cache_dir();
    std::string get_user_models_file();
    
    // Cache management
    void build_cache();
    void add_model_to_cache(const std::string& model_name);
    void update_model_in_cache(const std::string& model_name, bool downloaded);
    void remove_model_from_cache(const std::string& model_name);
    
    // Resolve model checkpoint to absolute path on disk
    std::string resolve_model_path(const ModelInfo& info) const;
    
    // Download from Hugging Face
    void download_from_huggingface(const std::string& repo_id, 
                                   const std::string& variant = "",
                                   const std::string& mmproj = "",
                                   DownloadProgressCallback progress_callback = nullptr);
    
    // Download from FLM.
    // NOTE(review): do_not_upgrade defaults to true here but to false in
    // download_model — confirm the asymmetry is intentional.
    void download_from_flm(const std::string& checkpoint, bool do_not_upgrade = true);
    
    json server_models_;
    json user_models_;
    
    // Cache of all models with their download status
    // (mutable so const accessors can populate it under the mutex).
    mutable std::mutex models_cache_mutex_;
    mutable std::map<std::string, ModelInfo> models_cache_;
    mutable bool cache_valid_ = false;
};

} // namespace lemon

