//! Tying the Merkle tree implementation to the problem domain.

use rayon::{ThreadPool, ThreadPoolBuilder};
use zksync_crypto_primitives::hasher::blake2::Blake2Hasher;
use zksync_prover_interface::inputs::{StorageLogMetadata, WitnessInputMerklePaths};
use zksync_types::{L1BatchNumber, StorageKey};

use crate::{
    consistency::ConsistencyError,
    storage::{PatchSet, Patched, RocksDBWrapper},
    types::{
        Key, NodeKey, RawNode, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry,
        ValueHash, TREE_DEPTH,
    },
    BlockOutput, HashTree, MerkleTree, MerkleTreePruner, MerkleTreePrunerHandle, NoVersionError,
    PruneDatabase,
};

impl TreeInstruction<StorageKey> {
    /// Converts the `StorageKey` preimage in this instruction into the hashed-key form
    /// used by the Merkle tree.
    pub fn with_hashed_key(self) -> TreeInstruction {
        match self {
            Self::Write(entry) => TreeInstruction::Write(TreeEntry {
                key: entry.key.hashed_key_u256(),
                value: entry.value,
                leaf_index: entry.leaf_index,
            }),
            Self::Read(key) => TreeInstruction::Read(key.hashed_key_u256()),
        }
    }
}

/// Metadata for the current tree state.
#[derive(Debug, Clone)]
pub struct TreeMetadata {
    /// Current root hash of the tree.
    pub root_hash: ValueHash,
    /// 1-based index of the next leaf to be inserted in the tree.
    pub rollup_last_leaf_index: u64,
    /// Witness information. As with `repeated_writes`, no-op updates will be omitted from Merkle paths.
    /// `None` when the batch was processed in lightweight mode.
    pub witness: Option<WitnessInputMerklePaths>,
}

/// Processing mode of a `ZkSyncTree`.
#[derive(Debug, PartialEq, Eq)]
enum TreeMode {
    /// Only applies writes; no witness data is produced (`TreeMetadata::witness` is `None`).
    Lightweight,
    /// Additionally produces Merkle-path witness data for each processed batch.
    Full,
}

/// Domain-specific wrapper of the Merkle tree.
///
/// This wrapper will accumulate changes introduced by [`Self::process_l1_batch()`],
/// [`Self::process_l1_batches()`] and [`Self::revert_logs()`] in RAM without saving them
/// to RocksDB. The accumulated changes can be saved to RocksDB via [`Self::save()`]
/// or discarded via [`Self::reset()`].
#[derive(Debug)]
pub struct ZkSyncTree {
    /// Merkle tree over a patched RocksDB wrapper; the patch buffers unsaved changes in RAM.
    tree: MerkleTree<Patched<RocksDBWrapper>>,
    /// Dedicated `rayon` pool used for parallel operations, if configured via
    /// [`Self::use_dedicated_thread_pool()`].
    thread_pool: Option<ThreadPool>,
    /// Full vs lightweight processing; determines whether witness data is produced.
    mode: TreeMode,
    /// Set once [`Self::pruner()`] is called; guards against creating a second pruner.
    pruning_enabled: bool,
}

impl ZkSyncTree {
    /// Returns the hash of a completely empty tree; this value is a constant.
    pub fn empty_tree_hash() -> ValueHash {
        let hasher = Blake2Hasher;
        hasher.empty_tree_hash()
    }

    /// Builds the dedicated `rayon` thread pool used for parallel tree operations.
    fn create_thread_pool(thread_count: usize) -> ThreadPool {
        let builder = ThreadPoolBuilder::new()
            .num_threads(thread_count)
            .thread_name(|idx| format!("new-merkle-tree-{idx}"));
        builder
            .build()
            .expect("failed initializing `rayon` thread pool")
    }

    /// Returns metadata based on `storage_logs` generated by the genesis L1 batch. This does not
    /// create a persistent tree.
    #[allow(clippy::missing_panics_doc)] // false positive
    pub fn process_genesis_batch(storage_logs: &[TreeInstruction]) -> BlockOutput {
        let kvs = Self::filter_write_instructions(storage_logs);
        // Fixed a stray double space in the log message below.
        tracing::info!(
            "Creating Merkle tree for genesis batch with {instr_count} writes",
            instr_count = kvs.len()
        );

        // `unwrap()`s are safe: in-memory trees never raise I/O errors
        let mut in_memory_tree = MerkleTree::new(PatchSet::default()).unwrap();
        let output = in_memory_tree.extend(kvs).unwrap();

        tracing::info!(
            "Processed genesis batch; root hash is {root_hash}, {leaf_count} leaves in total",
            root_hash = output.root_hash,
            leaf_count = output.leaf_count
        );
        output
    }

    /// Creates a tree with the full processing mode, i.e. Merkle path witnesses
    /// are produced for processed batches (see `TreeMetadata::witness`).
    ///
    /// # Errors
    ///
    /// Errors if sanity checks fail.
    pub fn new(db: RocksDBWrapper) -> anyhow::Result<Self> {
        Self::new_with_mode(db, TreeMode::Full)
    }

    /// Creates a tree with the lightweight processing mode, i.e. no Merkle path witnesses
    /// are produced (`TreeMetadata::witness` will be `None`).
    ///
    /// # Errors
    ///
    /// Errors if sanity checks fail.
    pub fn new_lightweight(db: RocksDBWrapper) -> anyhow::Result<Self> {
        Self::new_with_mode(db, TreeMode::Lightweight)
    }

    /// Shared constructor backing [`Self::new()`] and [`Self::new_lightweight()`].
    fn new_with_mode(db: RocksDBWrapper, mode: TreeMode) -> anyhow::Result<Self> {
        let tree = MerkleTree::new(Patched::new(db))?;
        Ok(Self {
            tree,
            mode,
            thread_pool: None,
            pruning_enabled: false,
        })
    }

    /// Returns tree pruner and a handle to stop it.
    ///
    /// # Panics
    ///
    /// Panics if this method was already called for the tree instance; it's logically unsound to run
    /// multiple pruners for the same tree concurrently.
    pub fn pruner(&mut self) -> (MerkleTreePruner<RocksDBWrapper>, MerkleTreePrunerHandle) {
        assert!(
            !self.pruning_enabled,
            "pruner was already obtained for the tree"
        );
        self.pruning_enabled = true;
        // The pruner operates directly on the underlying RocksDB wrapper.
        MerkleTreePruner::new(self.tree.db.inner().clone())
    }

    /// Returns a readonly handle to the tree. The handle **does not** see uncommitted changes to the tree,
    /// only ones flushed to RocksDB.
    pub fn reader(&self) -> ZkSyncTreeReader {
        let underlying_db = self.tree.db.inner().clone();
        ZkSyncTreeReader(MerkleTree::new_unchecked(underlying_db))
    }

    /// Sets the chunk size for multi-get operations. Requested keys are split into chunks
    /// of this size and fetched in parallel using `rayon`; a very large chunk size
    /// (e.g., `usize::MAX`) effectively disables parallelism.
    ///
    /// # Panics
    ///
    /// Panics if `chunk_size` is zero.
    pub fn set_multi_get_chunk_size(&mut self, chunk_size: usize) {
        assert!(chunk_size > 0, "Multi-get chunk size must be positive");
        let db = self.tree.db.inner_mut();
        db.set_multi_get_chunk_size(chunk_size);
    }

    /// Switches the tree to a dedicated `rayon` thread pool for parallel operations
    /// (for now, hash computations).
    ///
    /// A `thread_count` of 0 lets `rayon` pick the default number of threads; see `rayon` docs
    /// for details.
    pub fn use_dedicated_thread_pool(&mut self, thread_count: usize) {
        let pool = Self::create_thread_pool(thread_count);
        self.thread_pool = Some(pool);
    }

    /// Returns the current root hash of this tree, i.e., the hash at the latest tree version.
    pub fn root_hash(&self) -> ValueHash {
        self.tree.latest_root_hash()
    }

    /// Returns the root hash and leaf count at the specified L1 batch, or `None` if the tree
    /// has no root for that batch.
    pub fn root_info(&self, l1_batch_number: L1BatchNumber) -> Option<(ValueHash, u64)> {
        self.tree
            .root(u64::from(l1_batch_number.0))
            .map(|root| (root.hash(&Blake2Hasher), root.leaf_count()))
    }

    /// Checks whether this tree is empty: either it has no versions at all, or the root
    /// at the latest version is missing or empty.
    pub fn is_empty(&self) -> bool {
        match self.tree.latest_version() {
            None => true,
            Some(version) => self
                .tree
                .root(version)
                .map_or(true, |root| matches!(root, Root::Empty)),
        }
    }

    /// Returns the next L1 batch number that should be processed by the tree.
    #[allow(clippy::missing_panics_doc)]
    pub fn next_l1_batch_number(&self) -> L1BatchNumber {
        match self.tree.latest_version() {
            None => L1BatchNumber(0),
            Some(version) => L1BatchNumber(
                u32::try_from(version + 1).expect("integer overflow for L1 batch number"),
            ),
        }
    }

    /// Verifies tree consistency. `l1_batch_number` specifies the version of the tree
    /// to be checked, expressed as the number of latest L1 batch applied to the tree.
    ///
    /// # Errors
    ///
    /// Errors if an inconsistency is detected.
    pub fn verify_consistency(
        &self,
        l1_batch_number: L1BatchNumber,
    ) -> Result<(), ConsistencyError> {
        self.tree
            .verify_consistency(u64::from(l1_batch_number.0), true)
    }

    /// Processes an iterator of storage logs comprising a single L1 batch, dispatching
    /// to the full or lightweight implementation depending on the tree mode.
    ///
    /// # Errors
    ///
    /// Proxies database I/O errors.
    pub fn process_l1_batch(
        &mut self,
        storage_logs: &[TreeInstruction],
    ) -> anyhow::Result<TreeMetadata> {
        if self.mode == TreeMode::Full {
            self.process_l1_batch_full(storage_logs)
        } else {
            self.process_l1_batch_lightweight(storage_logs)
        }
    }

    /// Full-mode batch processing: extends the tree with `instructions` and builds Merkle-path
    /// witness data for them. No-op updates (writes of the value already stored under the key)
    /// are omitted from the witness via the `continue` below.
    fn process_l1_batch_full(
        &mut self,
        instructions: &[TreeInstruction],
    ) -> anyhow::Result<TreeMetadata> {
        let l1_batch_number = self.next_l1_batch_number();
        let starting_leaf_count = self.tree.latest_root().leaf_count();
        let starting_root_hash = self.tree.latest_root_hash();

        tracing::info!(
            "Extending Merkle tree with batch #{l1_batch_number} with {instr_count} ops in full mode",
            instr_count = instructions.len()
        );

        // Run hashing on the dedicated `rayon` pool if one was configured via
        // `use_dedicated_thread_pool()`.
        let output = if let Some(thread_pool) = &self.thread_pool {
            thread_pool.install(|| self.tree.extend_with_proofs(instructions.to_vec()))
        } else {
            self.tree.extend_with_proofs(instructions.to_vec())
        }?;

        // `starting_leaf_count + 1` is the 1-based index of the next leaf to be inserted
        // (cf. `TreeMetadata::rollup_last_leaf_index`).
        let mut witness = WitnessInputMerklePaths::new(starting_leaf_count + 1);
        witness.reserve(output.logs.len());
        // NOTE(review): the `zip` assumes `output.logs` corresponds 1:1, in order,
        // to `instructions` — confirm against `extend_with_proofs()` contract.
        for (log, instruction) in output.logs.iter().zip(instructions) {
            // `log.merkle_path` may be shorter than the tree depth; prepend the constant
            // empty-subtree hashes so each stored path covers all `TREE_DEPTH` levels.
            let empty_levels_end = TREE_DEPTH - log.merkle_path.len();
            let empty_subtree_hashes =
                (0..empty_levels_end).map(|i| Blake2Hasher.empty_subtree_hash(i));
            let merkle_paths = log.merkle_path.iter().copied();
            let merkle_paths = empty_subtree_hashes
                .chain(merkle_paths)
                .map(|hash| hash.0)
                .collect();

            // Reads write nothing; a zero value is used as the placeholder.
            let value_written = match instruction {
                TreeInstruction::Write(entry) => entry.value.0,
                TreeInstruction::Read(_) => [0_u8; 32],
            };
            let log = StorageLogMetadata {
                root_hash: log.root_hash.0,
                is_write: !log.base.is_read(),
                first_write: matches!(log.base, TreeLogEntry::Inserted),
                merkle_paths,
                leaf_hashed_key: instruction.key(),
                leaf_enumeration_index: match instruction {
                    TreeInstruction::Write(entry) => entry.leaf_index,
                    TreeInstruction::Read(_) => match log.base {
                        TreeLogEntry::Read { leaf_index, .. } => leaf_index,
                        // A read of a missing key has no leaf; 0 serves as the sentinel index.
                        TreeLogEntry::ReadMissingKey => 0,
                        _ => unreachable!("Read instructions always transform to Read / ReadMissingKey log entries"),
                    }
                },
                value_written,
                value_read: match log.base {
                    TreeLogEntry::Updated { previous_value, .. } => {
                        if previous_value.0 == value_written {
                            // A no-op update that must be omitted from the produced `witness`.
                            continue;
                        }
                        previous_value.0
                    }
                    TreeLogEntry::Read { value, .. } => value.0,
                    TreeLogEntry::Inserted | TreeLogEntry::ReadMissingKey => [0_u8; 32],
                },
            };
            witness.push_merkle_path(log);
        }

        // Fall back to the pre-batch root hash when the output doesn't provide one
        // (presumably when the batch contained no writes — TODO confirm).
        let root_hash = output.root_hash().unwrap_or(starting_root_hash);

        tracing::info!(
            "Processed batch #{l1_batch_number}; root hash is {root_hash}, \
             {leaf_count} leaves in total",
            leaf_count = output.leaf_count,
        );

        Ok(TreeMetadata {
            root_hash,
            rollup_last_leaf_index: output.leaf_count + 1,
            witness: Some(witness),
        })
    }

    /// Lightweight-mode batch processing: applies only the write instructions and produces
    /// no witness data.
    fn process_l1_batch_lightweight(
        &mut self,
        instructions: &[TreeInstruction],
    ) -> anyhow::Result<TreeMetadata> {
        let l1_batch_number = self.next_l1_batch_number();
        let kvs = Self::filter_write_instructions(instructions);
        tracing::info!(
            "Extending Merkle tree with batch #{l1_batch_number} with {kv_count} writes \
             in lightweight mode",
            kv_count = kvs.len()
        );

        let batch_output = match &self.thread_pool {
            Some(thread_pool) => thread_pool.install(|| self.tree.extend(kvs)),
            None => self.tree.extend(kvs),
        }?;

        tracing::info!(
            "Processed batch #{l1_batch_number}; root hash is {root_hash}, \
             {leaf_count} leaves in total",
            root_hash = batch_output.root_hash,
            leaf_count = batch_output.leaf_count,
        );

        Ok(TreeMetadata {
            root_hash: batch_output.root_hash,
            rollup_last_leaf_index: batch_output.leaf_count + 1,
            witness: None,
        })
    }

    /// Drops read instructions, retaining only the written tree entries.
    fn filter_write_instructions(instructions: &[TreeInstruction]) -> Vec<TreeEntry> {
        instructions
            .iter()
            .filter_map(|instruction| {
                if let TreeInstruction::Write(entry) = instruction {
                    Some(*entry)
                } else {
                    None
                }
            })
            .collect()
    }

    /// Rolls back this tree to a previous state. This method will overwrite all unsaved changes in the tree.
    ///
    /// # Errors
    ///
    /// Proxies database I/O errors.
    pub fn roll_back_logs(&mut self, last_l1_batch_to_keep: L1BatchNumber) -> anyhow::Result<()> {
        self.tree.db.reset();
        // Widen to `u64` *before* adding 1: `last_l1_batch_to_keep.0 + 1` would overflow in `u32`
        // (panic in debug builds, wrap to 0 in release) when the batch number is `u32::MAX`.
        let retained_version_count = u64::from(last_l1_batch_to_keep.0) + 1;
        // Since `Patched<_>` doesn't implement `PruneDatabase`, we borrow the underlying DB, which is safe
        // because the in-memory patch was reset above.
        MerkleTree::new_unchecked(self.tree.db.inner_mut())
            .truncate_recent_versions(retained_version_count)
    }

    /// Saves the accumulated changes in the tree to RocksDB.
    ///
    /// # Errors
    ///
    /// Proxies database I/O errors.
    pub fn save(&mut self) -> anyhow::Result<()> {
        let mut patched_versions = self.tree.db.patched_versions();
        patched_versions.sort_unstable();
        tracing::info!(
            "Flushing L1 batches #{l1_batch_numbers:?} to RocksDB",
            l1_batch_numbers = patched_versions
        );
        self.tree.db.flush()
    }

    /// Resets the tree to the latest database state, discarding any changes accumulated in RAM.
    pub fn reset(&mut self) {
        self.tree.db.reset();
    }
}

/// Readonly handle to a [`ZkSyncTree`].
///
/// Only sees data flushed to RocksDB, not uncommitted in-memory changes.
#[derive(Debug)]
pub struct ZkSyncTreeReader(MerkleTree<RocksDBWrapper>);

// While cloning `MerkleTree` is logically unsound, cloning a reader is reasonable since it is readonly.
impl Clone for ZkSyncTreeReader {
    fn clone(&self) -> Self {
        let db_handle = self.0.db.clone();
        Self(MerkleTree::new_unchecked(db_handle))
    }
}

impl ZkSyncTreeReader {
    /// Creates a tree reader based on the provided database.
    ///
    /// # Errors
    ///
    /// Errors if sanity checks fail.
    pub fn new(db: RocksDBWrapper) -> anyhow::Result<Self> {
        MerkleTree::new(db).map(Self)
    }

    /// Returns a reference to the database this reader is based on.
    pub fn db(&self) -> &RocksDBWrapper {
        &self.0.db
    }

    /// Consumes this reader, converting it into the underlying DB.
    pub fn into_db(self) -> RocksDBWrapper {
        self.0.db
    }

    /// Returns the root hash and leaf count at the specified L1 batch, or `None` if the tree
    /// has no root for that batch.
    pub fn root_info(&self, l1_batch_number: L1BatchNumber) -> Option<(ValueHash, u64)> {
        self.0
            .root(u64::from(l1_batch_number.0))
            .map(|root| (root.hash(&Blake2Hasher), root.leaf_count()))
    }

    /// Returns the next L1 batch number that should be processed by the tree.
    #[allow(clippy::missing_panics_doc)]
    pub fn next_l1_batch_number(&self) -> L1BatchNumber {
        match self.0.latest_version() {
            None => L1BatchNumber(0),
            Some(version) => L1BatchNumber(
                u32::try_from(version + 1).expect("integer overflow for L1 batch number"),
            ),
        }
    }

    /// Returns the minimum L1 batch number retained by the tree, or `None` if no versions
    /// are retained.
    #[allow(clippy::missing_panics_doc)]
    pub fn min_l1_batch_number(&self) -> Option<L1BatchNumber> {
        let version = self.0.first_retained_version()?;
        let number = u32::try_from(version).expect("integer overflow for L1 batch number");
        Some(L1BatchNumber(number))
    }

    /// Reads entries together with Merkle proofs with the specified keys from the tree. The entries are returned
    /// in the same order as requested.
    ///
    /// # Errors
    ///
    /// Returns an error if the tree `version` is missing.
    pub fn entries_with_proofs(
        &self,
        l1_batch_number: L1BatchNumber,
        keys: &[Key],
    ) -> Result<Vec<TreeEntryWithProof>, NoVersionError> {
        self.0
            .entries_with_proofs(u64::from(l1_batch_number.0), keys)
    }

    /// Returns raw nodes for the specified `keys`; `None` for keys with no stored node.
    pub fn raw_nodes(&self, keys: &[NodeKey]) -> Vec<Option<RawNode>> {
        self.0
            .db
            .raw_nodes(keys)
            .into_iter()
            .zip(keys)
            .map(|(maybe_slice, key)| {
                let slice = maybe_slice?;
                // The root node has a dedicated serialization format.
                if key.is_empty() {
                    Some(RawNode::deserialize_root(&slice))
                } else {
                    Some(RawNode::deserialize(&slice))
                }
            })
            .collect()
    }

    /// Returns raw stale keys obsoleted in the specified version of the tree.
    pub fn raw_stale_keys(&self, l1_batch_number: L1BatchNumber) -> Vec<NodeKey> {
        self.0.db.stale_keys(u64::from(l1_batch_number.0))
    }

    /// Verifies consistency of the tree at the specified L1 batch number.
    ///
    /// # Errors
    ///
    /// Returns the first encountered verification error, should one occur.
    pub fn verify_consistency(
        &self,
        l1_batch_number: L1BatchNumber,
    ) -> Result<(), ConsistencyError> {
        self.0
            .verify_consistency(u64::from(l1_batch_number.0), true)
    }
}
