// Copyright (c) 2023-2025 ParadeDB, Inc.
//
// This file is part of ParadeDB - Postgres for Search and Analytics
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

use crate::index::merge_policy::LayeredMergePolicy;
use crate::index::mvcc::MvccSatisfies;
use crate::index::writer::index::{Mergeable, SearchIndexMerger};
use crate::postgres::ps_status::{set_ps_display_suffix, MERGING};
use crate::postgres::storage::block::{MVCCEntry, SegmentMetaEntry};
use crate::postgres::storage::buffer::{Buffer, BufferManager};
use crate::postgres::storage::fsm::FreeSpaceManager;
use crate::postgres::storage::merge::MergeLock;
use crate::postgres::storage::metadata::MetaPage;
use crate::postgres::PgSearchRelation;

use pgrx::bgworkers::*;
use pgrx::{check_for_interrupts, pg_sys, PgTryBuilder};
use pgrx::{pg_guard, FromDatum, IntoDatum};
use std::ffi::CStr;
use std::panic::AssertUnwindSafe;
use tantivy::index::SegmentMeta;

/// The context in which a merge was requested.
///
/// Only `Insert`-style merges perform foreground merging (see `do_merge`);
/// both styles may hand off to a background merger.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub(crate) enum MergeStyle {
    // merge requested from an insert/write path
    Insert,
    // merge requested from a vacuum path
    Vacuum,
}

impl TryFrom<u8> for MergeStyle {
    type Error = anyhow::Error;

    /// Decode a [`MergeStyle`] from its `#[repr(u8)]` discriminant value.
    ///
    /// Returns an error for any value other than 0 (`Insert`) or 1 (`Vacuum`).
    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(MergeStyle::Insert),
            1 => Ok(MergeStyle::Vacuum),
            _ => anyhow::bail!("invalid merge style: {value}"),
        }
    }
}

/// Arguments handed (packed into a single `int8` Datum) to the
/// `background_merge` background worker.
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq)]
struct BackgroundMergeArgs {
    // oid of the bm25 index to merge
    index_oid: pg_sys::Oid,
    // block number returned by `bgmerger().can_start()`; the worker uses it to
    // claim the merge via `bgmerger().try_starting()`
    blockno: pg_sys::BlockNumber,
}

impl BackgroundMergeArgs {
    pub fn new(index_oid: pg_sys::Oid, blockno: pg_sys::BlockNumber) -> Self {
        Self { index_oid, blockno }
    }

    pub fn index_oid(&self) -> pg_sys::Oid {
        self.index_oid
    }

    pub fn blockno(&self) -> pg_sys::BlockNumber {
        self.blockno
    }
}

impl IntoDatum for BackgroundMergeArgs {
    /// Pack both fields into one 64-bit Datum: the index oid occupies the high
    /// 32 bits and the block number the low 32 bits.
    fn into_datum(self) -> Option<pg_sys::Datum> {
        let hi = u64::from(u32::from(self.index_oid));
        // `blockno` is a u32, so it fits entirely in the low half — no mask needed
        let lo = u64::from(self.blockno);
        let packed = (hi << 32) | lo;
        Some(pg_sys::Datum::from(packed as i64))
    }

    fn type_oid() -> pg_sys::Oid {
        pg_sys::INT8OID
    }
}

impl FromDatum for BackgroundMergeArgs {
    /// Inverse of the `IntoDatum` packing: the index oid comes from the high
    /// 32 bits and the block number from the low 32 bits of the 64-bit Datum.
    unsafe fn from_polymorphic_datum(
        datum: pg_sys::Datum,
        is_null: bool,
        typoid: pg_sys::Oid,
    ) -> Option<Self> {
        if is_null {
            return None;
        }

        // a non-null int8 datum always converts, so the unwrap cannot fire
        let packed = i64::from_polymorphic_datum(datum, is_null, typoid).unwrap() as u64;

        Some(Self {
            index_oid: ((packed >> 32) as u32).into(),
            // `as u32` truncates to the low 32 bits
            blockno: packed as u32,
        })
    }
}

/// The effective foreground/background merge layer sizes for an index, after
/// clamping the configured values against the index's current size and target
/// segment count (computed by the `From<&PgSearchRelation>` impl).
#[derive(Debug, Clone)]
struct IndexLayerSizes {
    // true only when the index options configured background layer sizes AND the
    // index currently has more visible segments than its target segment count
    user_configured_bg_layers: bool,
    foreground_layer_sizes: Vec<u64>,
    background_layer_sizes: Vec<u64>,
}

impl From<&PgSearchRelation> for IndexLayerSizes {
    /// Compute the effective layer sizes for `index`, clamping the configured
    /// background layers against the index's current byte size and target segment count.
    fn from(index: &PgSearchRelation) -> Self {
        let index_options = index.options();

        // tally the count and total byte size of the currently-visible segments
        let mut segment_cnt = 0;
        let mut index_byte_size = 0;
        unsafe {
            MetaPage::open(index).segment_metas().for_each(|_, entry| {
                if entry.visible() {
                    index_byte_size += entry.byte_size();
                    segment_cnt += 1;
                }
            });
        }

        // already at (or below) the target segment count: report no layers at all,
        // which makes `do_merge` skip both foreground and background merging
        let target_segment_count = index_options.target_segment_count();
        if segment_cnt <= target_segment_count {
            return Self {
                user_configured_bg_layers: false,
                foreground_layer_sizes: Vec::new(),
                background_layer_sizes: Vec::new(),
            };
        }

        // how big each segment would be if the index had exactly `target_segment_count` segments
        // NOTE(review): assumes target_segment_count > 0 — a zero value would panic here
        // with a division by zero; presumably validated where the option is parsed — confirm
        let mut target_segment_byte_size = index_byte_size / target_segment_count as u64;

        // reduce by a third, which is what the LayeredMergePolicy does
        //
        // this is probably a terrible place to sneak in this adjustment but it's super important
        // as we inject this into the background layer sizes as a final layer and we don't want it to
        // be big enough that segments merge into it unnecessarily.  LayeredMergePolicy assumes that
        // a merged segment will be a third smaller, and that's what we account for here
        target_segment_byte_size -= target_segment_byte_size / 3;

        // clamp the highest layer size to be less than `target_segment_byte_size`:
        //
        // `index_byte_size` / `target_segment_count`
        //
        // i.e. how big should each segment be if we want to have exactly `target_segment_count` segments?
        //
        // for instance, imagine:
        // - layer sizes: [1mb, 10mb, 100mb]
        // - index size: 200mb
        // - target segment count: 10
        //
        // then our recomputed layer sizes would be:
        // - [1mb, 10mb]
        //
        // why? the 100mb layer gets excluded because the target segment size is 20mb

        let foreground_layer_sizes = index_options.foreground_layer_sizes();
        let mut background_layer_sizes = index_options.background_layer_sizes();
        let has_bg_layers = !background_layer_sizes.is_empty();

        if !background_layer_sizes.is_empty() {
            // additionally, ensure that the background layer sizes are <= to the target segment size
            background_layer_sizes.retain(|&layer_size| layer_size <= target_segment_byte_size);

            // NB:  Doing this can cause an existing, large, unbalanced index to end up merging down
            //      and this can cause operational problems.  It seems more practical to simply honor
            //      the user's configuration and not try to do anything clever.
            // // ensure the background layer sizes can merge down to the target segment size
            // background_layer_sizes.push(target_segment_byte_size);
        }

        // NB:  it's possible a user could configure "layer_sizes = '10TB'" or something ridiculous
        //      and that would cause us to merge the entire index into a single segment
        //      uncommenting this would prevent that, but light up some unit tests
        // ensure the foreground layer sizes are <= to the target segment size
        // foreground_layer_sizes.retain(|&layer_size| layer_size <= target_segment_byte_size);

        Self {
            user_configured_bg_layers: has_bg_layers,
            foreground_layer_sizes,
            background_layer_sizes,
        }
    }
}
impl IndexLayerSizes {
    /// True when background layer sizes were configured on the index (and the
    /// index currently exceeds its target segment count — see the `From` impl).
    fn user_configured_background_layers(&self) -> bool {
        self.user_configured_bg_layers
    }

    /// The layer sizes used for foreground merging.
    fn foreground(&self) -> &[u64] {
        &self.foreground_layer_sizes
    }

    /// All layer sizes — foreground and background — sorted ascending with
    /// duplicates removed.
    fn combined(&self) -> Vec<u64> {
        let mut all: Vec<u64> = self
            .foreground_layer_sizes
            .iter()
            .chain(self.background_layer_sizes.iter())
            .copied()
            .collect();
        all.sort_unstable();
        all.dedup();
        all
    }
}

/// Kick off a merge of the index, if needed.
///
/// First merge into the smaller layers in the foreground,
/// then launch a background worker to merge down the larger layers.
///
/// `current_xid`/`next_xid` must be `Some` whenever a foreground merge can actually
/// run, i.e. when `style == MergeStyle::Insert` and foreground layers are configured
/// — the `expect`s below will panic otherwise.
pub unsafe fn do_merge(
    index: &PgSearchRelation,
    style: MergeStyle,
    current_xid: Option<pg_sys::FullTransactionId>,
    next_xid: Option<pg_sys::FullTransactionId>,
) -> anyhow::Result<()> {
    let layer_sizes = IndexLayerSizes::from(index);
    let metadata = MetaPage::open(index);
    // take the shared cleanup lock and the merge lock while we decide what to do;
    // both are released early if we hand off to a background worker, otherwise they
    // are passed into (and dropped by) `merge_index`
    let cleanup_lock = metadata.cleanup_lock_shared();
    let merge_lock = metadata.acquire_merge_lock();

    // decide whether a background merge is worthwhile by simulating the layered
    // merge policy over the combined (foreground + background) layer sizes
    let (needs_background_merge, largest_layer_size) =
        if layer_sizes.user_configured_background_layers() {
            let combined_layers = layer_sizes.combined();
            let merger = SearchIndexMerger::open(MvccSatisfies::Mergeable.directory(index))?;
            let mut background_merge_policy = LayeredMergePolicy::new(combined_layers);

            background_merge_policy.set_mergeable_segment_entries(&metadata, &merge_lock, &merger);
            let (merge_candidates, largest_layer_size) = background_merge_policy.simulate();

            (!merge_candidates.is_empty(), largest_layer_size)
        } else {
            (false, 0)
        };

    if needs_background_merge {
        // if we need (and think we can do) a background merge then we prefer to do that
        // we no longer need to hold the [`MergeLock`] as we're not merging in the foreground
        drop(merge_lock);
        drop(cleanup_lock);

        try_launch_background_merger(index, largest_layer_size);
    } else if style == MergeStyle::Insert && !layer_sizes.foreground().is_empty() {
        // only insert-style callers merge in the foreground; vacuum-style callers
        // rely solely on the background path above
        let foreground_merge_policy = LayeredMergePolicy::new(layer_sizes.foreground_layer_sizes);
        merge_index(
            index,
            foreground_merge_policy,
            merge_lock,
            cleanup_lock,
            false,
            current_xid.expect("foreground merging requires a current transaction id"),
            next_xid.expect("foreground merging requires a next transaction id"),
        );
    }

    // merge_lock is dropped here
    Ok(())
}

/// Try to launch a background process to merge down the index.
/// Is not guaranteed to launch the process if there are not enough `max_worker_processes` available.
unsafe fn try_launch_background_merger(index: &PgSearchRelation, largest_layer_size: u64) {
    // ask the bgmerger state if a background merger may start for this layer size;
    // `None` means one should not be launched right now.
    //
    // `let else` here replaces the original `is_none()` guard plus a later `.unwrap()`,
    // eliminating the (unreachable, but still present) panic path
    let Some(blockno) = MetaPage::open(index)
        .bgmerger()
        .can_start(largest_layer_size)
    else {
        return;
    };

    // the worker reconnects by database name (passed via "extra"), so resolve ours now
    let dbname = CStr::from_ptr(pg_sys::get_database_name(pg_sys::MyDatabaseId))
        .to_string_lossy()
        .into_owned();

    let worker_name = format!(
        "background merger for {}.{}",
        index.namespace(),
        index.name()
    );

    // `load_dynamic()` fails when no bgworker slots are free; that's a soft failure —
    // we simply log and move on, a future call can try again
    if BackgroundWorkerBuilder::new(&worker_name)
        .enable_spi_access()
        .enable_shmem_access(None)
        .set_library("pg_search")
        .set_function("background_merge")
        .set_argument(BackgroundMergeArgs::new(index.oid(), blockno).into_datum())
        .set_extra(&dbname)
        .load_dynamic()
        .is_err()
    {
        pgrx::log!("not enough available `max_worker_processes` to launch a background merger");
    }
}

/// Actually do the merge
/// This function is called by the background worker.
///
/// `arg` is a packed [`BackgroundMergeArgs`]; the database to connect to arrives
/// via the worker's "extra" data (see `try_launch_background_merger`).
#[pg_guard]
#[no_mangle]
unsafe extern "C-unwind" fn background_merge(arg: pg_sys::Datum) {
    BackgroundWorker::attach_signal_handlers(SignalWakeFlags::SIGHUP | SignalWakeFlags::SIGTERM);
    BackgroundWorker::connect_worker_to_spi(Some(BackgroundWorker::get_extra()), None);
    BackgroundWorker::transaction(|| {
        // advertise what we're doing in `ps` output and pg_stat_activity
        set_ps_display_suffix(MERGING.as_ptr());
        pg_sys::pgstat_report_activity(pg_sys::BackendState::STATE_RUNNING, MERGING.as_ptr());

        pgrx::debug1!(
            "{}: starting background merge",
            BackgroundWorker::get_name()
        );

        let current_xid = pg_sys::GetCurrentFullTransactionId();
        let next_xid = current_xid;
        let args = BackgroundMergeArgs::from_datum(arg, false).unwrap();

        // `let else` replaces the original `is_none()` check plus `.unwrap()` pair
        let Some(index) = PgSearchRelation::try_open(
            args.index_oid(),
            pg_sys::AccessShareLock as pg_sys::LOCKMODE,
        ) else {
            pgrx::debug1!(
                "{}: index not found, suggesting it was just dropped",
                BackgroundWorker::get_name()
            );
            return;
        };

        // claim this merge.  NB: the returned buffer is intentionally kept bound
        // (not discarded with `is_none()`) so it stays alive until the end of this
        // closure, exactly as in the original code — presumably it pins our claim
        let Some(_sentinel_buffer) = MetaPage::open(&index)
            .bgmerger()
            .try_starting(args.blockno())
        else {
            return;
        };

        let metadata = MetaPage::open(&index);

        let layer_sizes = IndexLayerSizes::from(&index);
        let merge_policy = LayeredMergePolicy::new(layer_sizes.combined());

        let cleanup_lock = metadata.cleanup_lock_shared();
        let merge_lock = metadata.acquire_merge_lock();

        // run the merge under a PG try/catch so a Postgres ERROR unwinds safely
        PgTryBuilder::new(AssertUnwindSafe(|| {
            merge_index(
                &index,
                merge_policy,
                merge_lock,
                cleanup_lock,
                true,
                current_xid,
                next_xid,
            )
        }))
        .execute();
    })
}

/// Merge the index's segments according to `merge_policy`.
///
/// The caller must hand us an acquired [`MergeLock`] and a shared pin on the cleanup
/// lock (`cleanup_lock`); both are dropped by this function.  We keep the cleanup lock
/// held until this function is done so we can cause `ambulkdelete()` to block, waiting
/// for all merging to finish before it decides to find the segments it should vacuum.
/// The reason is that it needs to see the final merged segment, not the original
/// segments that will be deleted.
///
/// When `gc_after_merge` is true, garbage collection runs after each candidate merge;
/// when false, a single garbage collection pass runs after all candidates finish.
#[inline]
unsafe fn merge_index(
    indexrel: &PgSearchRelation,
    mut merge_policy: LayeredMergePolicy,
    merge_lock: MergeLock,
    cleanup_lock: Buffer,
    gc_after_merge: bool,
    current_xid: pg_sys::FullTransactionId,
    next_xid: pg_sys::FullTransactionId,
) {
    let metadata = MetaPage::open(indexrel);
    let merger = SearchIndexMerger::open(MvccSatisfies::Mergeable.directory(indexrel))
        .expect("should be able to open merger");

    // further reduce the set of segments that the LayeredMergePolicy will operate on by internally
    // simulating the process, allowing concurrent merges to consider segments we're not, only retaining
    // the segments it decides can be merged into one or more candidates
    merge_policy.set_mergeable_segment_entries(&metadata, &merge_lock, &merger);
    let (merge_candidates, _) = merge_policy.simulate();
    // before we start merging, tell the merger to release pins on the segments it won't be merging
    let mut merger = merger
        .adjust_pins(merge_policy.mergeable_segments())
        .expect("should be able to adjust merger pins");

    // `need_gc` tracks whether a final gc pass is still owed: it starts true only in
    // the once-at-the-end mode (`gc_after_merge == false`)
    let mut need_gc = !gc_after_merge;
    let ncandidates = merge_candidates.len();
    if ncandidates > 0 {
        // record all the segments the SearchIndexMerger can see, as those are the ones that
        // could be merged
        let merge_entry = merge_lock
            .merge_list()
            .add_segment_ids(merge_policy.mergeable_segments(), current_xid)
            .expect("should be able to write current merge segment_id list");
        drop(merge_lock);

        // we are NOT under the MergeLock at this point, which allows concurrent backends to also merge
        //
        // we defer raising a panic in the face of a merge error as we need to remove the created
        // `merge_entry` whether the merge worked or not

        let mut merge_result: anyhow::Result<Option<SegmentMeta>> = Ok(None);

        for candidate in merge_candidates {
            pgrx::debug1!("merging candidate with {} segments", candidate.0.len());

            // stop at the first failing candidate; the error is re-raised below, after cleanup
            merge_result = merger.merge_segments(&candidate.0);
            if merge_result.is_err() {
                break;
            }
            if gc_after_merge {
                garbage_collect_index(indexrel, current_xid, next_xid);
                need_gc = false;
            }
        }

        // re-acquire the MergeLock to remove the entry we made above
        let merge_lock = metadata.acquire_merge_lock();
        merge_lock
            .merge_list()
            .remove_entry(merge_entry)
            .expect("should be able to remove MergeEntry");
        drop(merge_lock);

        // we can garbage collect and return blocks back to the FSM without being under the MergeLock
        if need_gc {
            garbage_collect_index(indexrel, current_xid, next_xid);
        }

        // if merging was cancelled due to a legit interrupt we'd prefer that be provided to the user
        check_for_interrupts!();

        // now it is safe to surface a merge failure: the merge_entry has been removed.
        // an interrupt-driven failure is downgraded to a warning
        if let Err(e) = merge_result {
            if unsafe { pg_sys::InterruptPending } != 0 {
                pgrx::warning!("failed to merge: {e:?} because of interrupt");
            } else {
                panic!("failed to merge: {e:?}");
            }
        }
    } else {
        // nothing to merge; release the MergeLock immediately
        drop(merge_lock);
    }
    // release the cleanup lock last, unblocking any waiting `ambulkdelete()`
    drop(cleanup_lock);
}

///
/// Garbage collect the segments, removing any which are no longer visible in transactions
/// occurring in this process.
///
/// If physical replicas might still be executing transactions on some segments, then they are
/// moved to the `SEGMENT_METAS_GARBAGE` list until those replicas indicate that they are no longer
/// in use, at which point they can be freed by `free_garbage`.
///
pub unsafe fn garbage_collect_index(
    indexrel: &PgSearchRelation,
    current_xid: pg_sys::FullTransactionId,
    next_xid: pg_sys::FullTransactionId,
) {
    // Remove items which are no longer visible to active local transactions from SEGMENT_METAS,
    // and place them in SEGMENT_METAS_RECYLCABLE until they are no longer visible to remote
    // transactions either.
    //
    // SEGMENT_METAS must be updated atomically so that a consistent list is visible for consumers:
    // SEGMENT_METAS_GARBAGE need not be because it is only ever consumed on the physical
    // replication primary.
    let mut segment_metas_linked_list = MetaPage::open(indexrel).segment_metas();
    let mut segment_metas = segment_metas_linked_list.atomically();
    let entries = segment_metas.garbage_collect(next_xid);

    // Replication is not enabled: immediately free the entries. It doesn't matter when we
    // commit the segment metas list in this case.
    segment_metas.commit();
    free_entries(indexrel, entries, current_xid);
}

/// Chase down all the files in a segment and return them to the FSM
pub fn free_entries(
    indexrel: &PgSearchRelation,
    freeable_entries: Vec<SegmentMetaEntry>,
    current_xid: pg_sys::FullTransactionId,
) {
    let mut bman = BufferManager::new(indexrel);
    bman.fsm().extend_with_when_recyclable(
        &mut bman,
        current_xid,
        freeable_entries
            .iter()
            .flat_map(move |entry| entry.freeable_blocks(indexrel)),
    );
}

// Unit tests for layer-size reloption parsing and BackgroundMergeArgs datum round-tripping.
// These run inside a Postgres instance via pgrx's `#[pg_test]` harness.
#[cfg(any(test, feature = "pg_test"))]
#[pgrx::pg_schema]
mod tests {
    use super::*;
    use crate::postgres::options::{
        DEFAULT_BACKGROUND_LAYER_SIZES, DEFAULT_FOREGROUND_LAYER_SIZES,
    };
    use pgrx::prelude::*;

    // Which layer-size reloption (if any) to set when creating the test index.
    enum LayerSizes {
        Default,
        Foreground(String),
        Background(String),
    }

    impl LayerSizes {
        // Render the `WITH (...)` fragment for the chosen option; empty for Default.
        fn config_str(&self) -> String {
            match self {
                LayerSizes::Default => "".to_string(),
                LayerSizes::Foreground(sizes) => format!(", layer_sizes = '{sizes}'"),
                LayerSizes::Background(sizes) => format!(", background_layer_sizes = '{sizes}'"),
            }
        }
    }

    // Create table `t` with a bm25 index `t_idx` using the given layer sizes,
    // returning the new index's oid.
    fn create_index_with_layer_sizes(layer_sizes: LayerSizes) -> pg_sys::Oid {
        Spi::run("SET client_min_messages = 'debug1';").unwrap();
        Spi::run("CREATE TABLE IF NOT EXISTS t (id SERIAL, data TEXT);").unwrap();
        Spi::run("INSERT INTO t (data) VALUES ('test');").unwrap();
        Spi::run(
            format!(
                "CREATE INDEX t_idx ON t USING bm25(id, data) WITH (key_field = 'id'{})",
                layer_sizes.config_str()
            )
            .as_str(),
        )
        .unwrap();
        Spi::get_one::<pg_sys::Oid>(
            "SELECT oid FROM pg_class WHERE relname = 't_idx' AND relkind = 'i';",
        )
        .expect("spi should succeed")
        .unwrap()
    }

    // Explicit foreground sizes parse into their byte values, in order.
    #[pg_test]
    fn test_configured_foreground_layer_sizes() {
        let index_oid = create_index_with_layer_sizes(LayerSizes::Foreground(
            "1kb, 10kb, 100kb, 1mb".to_string(),
        ));
        let index = PgSearchRelation::open(index_oid);
        let layer_sizes = index.options().foreground_layer_sizes();
        assert_eq!(layer_sizes, vec![1024, 10240, 102400, 1048576]);
    }

    // Explicit background sizes parse into their byte values, in order.
    #[pg_test]
    fn test_configured_background_layer_sizes() {
        let index_oid = create_index_with_layer_sizes(LayerSizes::Background(
            "1kb, 10kb, 100kb, 1mb".to_string(),
        ));
        let index = PgSearchRelation::open(index_oid);
        let layer_sizes = index.options().background_layer_sizes();
        assert_eq!(layer_sizes, vec![1024, 10240, 102400, 1048576]);
    }

    // A "0" setting disables foreground layers entirely.
    #[pg_test]
    fn test_zeroed_foreground_layer_sizes() {
        let index_oid = create_index_with_layer_sizes(LayerSizes::Foreground("0".to_string()));
        let index = PgSearchRelation::open(index_oid);
        let layer_sizes = index.options().foreground_layer_sizes();
        assert!(layer_sizes.is_empty());
    }

    // A "0" setting disables background layers entirely.
    #[pg_test]
    fn test_zeroed_background_layer_sizes() {
        let index_oid = create_index_with_layer_sizes(LayerSizes::Background("0".to_string()));
        let index = PgSearchRelation::open(index_oid);
        let layer_sizes = index.options().background_layer_sizes();
        assert!(layer_sizes.is_empty());
    }

    // Omitting the option falls back to the compiled-in foreground defaults.
    #[pg_test]
    fn test_default_foreground_layer_sizes() {
        let index_oid = create_index_with_layer_sizes(LayerSizes::Default);
        let index = PgSearchRelation::open(index_oid);
        let layer_sizes = index.options().foreground_layer_sizes();
        assert_eq!(layer_sizes, DEFAULT_FOREGROUND_LAYER_SIZES.to_vec());
    }

    // Omitting the option falls back to the compiled-in background defaults.
    #[pg_test]
    fn test_default_background_layer_sizes() {
        let index_oid = create_index_with_layer_sizes(LayerSizes::Default);
        let index = PgSearchRelation::open(index_oid);
        let layer_sizes = index.options().background_layer_sizes();
        assert_eq!(layer_sizes, DEFAULT_BACKGROUND_LAYER_SIZES.to_vec());
    }

    // Datum packing round-trips for typical, all-zero, and all-ones field values.
    #[pg_test]
    fn test_background_merge_args() {
        let args = BackgroundMergeArgs::new(pg_sys::Oid::from(100), 200);
        let datum = args.into_datum().unwrap();
        let args2 = unsafe { BackgroundMergeArgs::from_datum(datum, false).unwrap() };
        assert_eq!(args, args2);

        let args = BackgroundMergeArgs::new(pg_sys::Oid::from(0), 0);
        let datum = args.into_datum().unwrap();
        let args2 = unsafe { BackgroundMergeArgs::from_datum(datum, false).unwrap() };
        assert_eq!(args, args2);

        let args = BackgroundMergeArgs::new(pg_sys::Oid::from(u32::MAX), pg_sys::BlockNumber::MAX);
        let datum = args.into_datum().unwrap();
        let args2 = unsafe { BackgroundMergeArgs::from_datum(datum, false).unwrap() };
        assert_eq!(args, args2);
    }
}
