/*
 * Copyright 2025 Redpanda Data, Inc.
 *
 * Licensed as a Redpanda Enterprise file under the Redpanda Community
 * License (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * https://github.com/redpanda-data/redpanda/blob/master/licenses/rcl.md
 */

#pragma once

#include "cloud_topics/level_one/common/file_io.h"
#include "cloud_topics/level_one/compaction/committer.h"
#include "cloud_topics/level_one/compaction/log_collector.h"
#include "cloud_topics/level_one/compaction/log_info_collector.h"
#include "cloud_topics/level_one/compaction/meta.h"
#include "cloud_topics/level_one/compaction/scheduling_policies.h"
#include "cloud_topics/level_one/compaction/worker_manager.h"
#include "cloud_topics/level_one/metastore/replicated_metastore.h"
#include "cluster/metadata_cache.h"
#include "config/property.h"
#include "model/fundamental.h"
#include "ssx/semaphore.h"

class SchedulerTestFixture;

namespace cloud_topics::l1 {

/*
 * The `compaction_scheduler` contains all of the relevant objects required for
 * cloud topics compaction. It exists on shard0 and manages:
 * - A `log_collector`, which pushes logs this broker is responsible for
 *   compacting to both a set and an intrusive list of logs.
 * - A `log_info_collector`, which fetches metadata for managed logs from the
 *   metastore on a sampling interval in the main `scheduling_loop()`.
 * - A `log_compaction_queue`, which is populated with shared pointers to
 *   managed logs that require compaction, sorted by a heuristic provided by
 *   the `scheduling_policy`.
 * - A `worker_manager`, which manages a `compaction_worker` on each shard;
 *   the workers individually pull compaction work from the
 *   `log_compaction_queue` on shard0. Compaction updates generated by workers
 *   are pushed to the `metastore` and cloud storage using the
 *   `compaction_committer` managed here, which also has an instance unique to
 *   each shard. Workers are alerted of new work on the same sampling interval
 *   in `scheduling_loop()`.
 */
class compaction_scheduler {
public:
    // Main c-tor. The sharded `file_io` and `replicated_metastore` pointers
    // are non-owning and must outlive this scheduler.
    compaction_scheduler(
      compaction_cluster_state,
      std::unique_ptr<scheduling_policy>,
      ss::sharded<file_io>*,
      ss::sharded<l1::replicated_metastore>*);

    // Starts the contained `_log_collector`, `_worker_manager`, and the
    // backgrounded scheduling loop.
    ss::future<> start();

    // Shuts down concurrency primitives, thereby stopping the backgrounded
    // scheduling loop, stops the `_log_collector`, requests inflight compaction
    // jobs in the `_worker_manager` be stopped, drains the managed partition
    // log list, and finally shuts down the `_worker_manager` once it is safe to
    // do so.
    ss::future<> stop();

    // Returns `true` iff the partition identified by the provided `ntp` is
    // managed by this scheduler.
    bool is_managed(const model::ntp&) const noexcept;

    // Pushes a new partition to be managed by this scheduler to the list of
    // managed logs. It is the caller's responsibility to ensure the partition
    // is not already managed by this scheduler. The `ntp`/`tidp` pair allows
    // the `ntp`-keyed interface to coexist with `tidp`-keyed internals (see
    // the TODO on `_ntp_to_tidp`). NOTE(review): the trailing
    // `std::string_view` looks like a human-readable label for logging —
    // confirm against the implementation.
    void manage_partition(
      const model::ntp&, const model::topic_id_partition&, std::string_view);

    // Removes the partition identified by `ntp` from the list of managed
    // partitions. No-ops if the provided partition is not managed by this
    // scheduler. Because the partition may be undergoing an inflight
    // compaction, this function will block until it is complete (an early stop
    // is requested by this function).
    ss::future<> unmanage_partition(model::ntp, std::string_view);

private:
    // Starts the backgrounded scheduling loop.
    void start_bg_loop();

    // The main compaction loop. Invoked in a background fiber until `_as` has
    // an abort requested or the `_gate` is closed.
    ss::future<> scheduling_loop();

private:
    // Pointer to sharded `file_io` held by `app`. Used by the `worker_manager`
    // for writing to local files and by the `committer` for writing to cloud
    // storage. Non-owning.
    ss::sharded<file_io>* _io;

    // Pointer to the sharded metastore. Non-owning.
    ss::sharded<replicated_metastore>* _metastore;

private:
    // Responsible for pushing logs to manage/unmanage to this scheduler.
    std::unique_ptr<log_collector> _log_collector;

    // Responsible for collecting compaction metadata (see: `log_info` in
    // `meta.h`) for managed logs during a scheduling loop.
    log_info_collector _log_info_collector;

    // Responsible for sorting logs collected by `_log_info_collector` for
    // compaction (see `scheduling_policies.h`).
    std::unique_ptr<scheduling_policy> _scheduling_policy;

    // Responsible for dispatching compaction jobs to per-shard workers.
    worker_manager _worker_manager;

    // Responsible for committing updates from sharded jobs run on the
    // `worker_manager` to the metastore and uploading compacted objects to
    // cloud storage. Sharded: one instance per shard.
    ss::sharded<compaction_committer> _committer;

    // The interval on which the compaction loop is executed. A
    // `config::binding` so that runtime changes to the underlying config
    // property are observed (see `_sem` below).
    config::binding<std::chrono::milliseconds> _compaction_interval;

    // This semaphore is used as a way to signal a change to
    // `log_compaction_interval_ms` during the `wait()` operation in the main
    // scheduling loop.
    ssx::semaphore _sem{0, "cloud_topics::compaction::scheduling_loop"};

    // Abort source and gate guarding the backgrounded scheduling loop; see
    // `scheduling_loop()` and `stop()`.
    ss::abort_source _as;
    ss::gate _gate;

    // Set of logs this scheduler is responsible for issuing compaction jobs
    // for.
    log_set_t _logs;

    // Intrusive list of logs this scheduler is responsible for issuing
    // compaction jobs for.
    log_list_t _logs_list;

    // Container of pointers to logs in `_logs`/`_logs_list` which have sampled
    // metadata available and are available for compaction — i.e.
    // `log->info_and_ts` is guaranteed to have a value.
    log_compaction_queue _compaction_queue;

    // TODO: remove this once more cluster objects speak `topic_id_partition`.
    chunked_hash_map<model::ntp, model::topic_id_partition> _ntp_to_tidp;

private:
    friend class ::SchedulerTestFixture;

    // Testing c-tor. Takes only the `log_info_collector` and
    // `scheduling_policy`, omitting the `file_io`/metastore wiring of the main
    // c-tor so unit tests can construct the scheduler in isolation.
    compaction_scheduler(
      log_info_collector, std::unique_ptr<scheduling_policy>);
};

// Factory for a `compaction_scheduler` wired with the default
// `scheduling_policy` (see `scheduling_policies.h`); the concrete default
// policy is chosen in the implementation.
std::unique_ptr<compaction_scheduler> make_default_compaction_scheduler(
  compaction_cluster_state,
  ss::sharded<file_io>*,
  ss::sharded<replicated_metastore>*);

} // namespace cloud_topics::l1
