// Block consensus: the algorithm that weaves the blockchain together, including the genesis block and network-wide mining-difficulty adjustment.
// Note on "orphan blocks": blocks not adopted by the main chain are discarded, and their transactions go back to the transaction pool to await repackaging.
//      Orphan-block transaction state: some transactions may already be included in the main chain; those not included re-enter the transaction pool.
//      Transaction broadcast mechanism: nodes automatically reclaim and rebroadcast unconfirmed transactions; miners do not need to do this manually.

use std::{collections::VecDeque, sync::Arc};

use num_bigint::BigUint;
use tokio::sync::Mutex;
use tracing::{debug, error, info, trace, warn};

use bc_base::{
    BlockHeight, BlockId, TransactionValue,
    block::{self, Block},
    constants,
};
use bc_lib::top_storage::{
    BlockAndIndexStorageWithCache, MiscItemStorageWithCache, TransactionAndIndexStorageWithCache,
};

use crate::{
    block::{BlockConsensusError, block_index::BlockAndIndexManager, orphan_blocks::OrphanBlocks},
    transaction::TransactionConsensus,
};

// Block consensus engine: accepts incoming blocks (from peers or the local
// miner), parks blocks whose parent is unknown in an orphan pool, and
// delegates storage, indexing, and best-chain bookkeeping to the manager.
#[derive(Debug)]
pub(crate) struct BlockConsensus {
    // Blocks whose parent block is not yet known; re-processed once the
    // parent is accepted (see process_block's work queue).
    orphan_blocks: OrphanBlocks,

    // Owns block/tx storage (with caches), indexes, and best-chain state.
    block_and_index_manager: BlockAndIndexManager,
}

impl BlockConsensus {
    // Construct the consensus engine: builds the block-and-index manager over
    // the supplied storage handles and starts with an empty orphan pool.
    pub async fn new(
        block_and_index_storage_with_cache: Arc<Mutex<BlockAndIndexStorageWithCache>>,
        tx_and_index_storage_with_cache: Arc<Mutex<TransactionAndIndexStorageWithCache>>,
        misc_item_storage_with_cache: Arc<Mutex<MiscItemStorageWithCache>>,
        tx_consensus: Arc<Mutex<TransactionConsensus>>,
    ) -> Self {
        let block_and_index_manager = BlockAndIndexManager::new(
            block_and_index_storage_with_cache,
            tx_and_index_storage_with_cache,
            misc_item_storage_with_cache,
            tx_consensus,
        )
        .await;

        Self {
            orphan_blocks: OrphanBlocks::new(),
            block_and_index_manager,
        }
    }

    // Process the received block, or the block generated by the miner.
    // bool ProcessBlock(CNode* pfrom, CBlock* pblock)
    //
    // Returns every block accepted into storage as a result of this call:
    // `block` itself plus any previously-orphaned descendants it unblocked.
    // Errors:
    //  - BlockAlreadyExists / OrphanBlockAlreadyExists on duplicates,
    //  - NeedGetBlockFromNetwork(start, end) when the parent is unknown
    //    (the block is parked in the orphan pool; the caller should fetch
    //    the missing range from the network),
    //  - any failure from the preliminary check or from accept_block.
    pub async fn process_block(
        &mut self,
        block: &Block,
    ) -> Result<Vec<Block>, BlockConsensusError> {
        let block_id = block.calc_block_id();
        debug!("Process Block({})", block_id);

        // Check if the block is already in the pool
        if self
            .block_and_index_manager
            .block_index_exists_in_cache(&block_id)
            .await
        {
            return Err(BlockConsensusError::BlockAlreadyExists(block_id));
        }

        // Check if the block is already in the orphan pool
        if self.orphan_blocks.contains(&block_id) {
            return Err(BlockConsensusError::OrphanBlockAlreadyExists(block_id));
        }

        block.preliminary_check()?;
        trace!("Preliminary check passed");

        // Check if the block is a valid extension of the current chain
        let prev_block_id = block.get_prev_block_id();
        if !self
            .block_and_index_manager
            .block_index_exists_in_cache(prev_block_id)
            .await
        {
            // Parent unknown: park the block and report the range to request
            // from the network (current best-chain tip .. root of this
            // orphan chain).
            self.orphan_blocks
                .add_orphan_block(&block_id, block, prev_block_id);

            let start_blk = self
                .block_and_index_manager
                .read_latest_block_id_in_best_chain()
                .await;
            let end_blk = self.get_orphan_root(block);

            return Err(BlockConsensusError::NeedGetBlockFromNetwork(
                start_blk, end_blk,
            )); // TODO: ask the peer that sent this block to supply the missing range
        }

        self.accept_block(&block_id, block).await?;
        let mut accepted_blocks = vec![block.clone()];

        // Recursively process any orphan blocks that depended on this one.
        // Breadth-first: each newly accepted block may in turn unlock further
        // orphans, so accepted ids are fed back into the queue.
        let mut v_work_queue: VecDeque<BlockId> = VecDeque::new();
        v_work_queue.push_back(block_id);
        while let Some(prev_block_id) = v_work_queue.pop_front() {
            if let Some(orphan_block_ids) = self
                .orphan_blocks
                .remove_orphan_block_by_prev(&prev_block_id)
            {
                for orphan_block_id in orphan_block_ids {
                    if let Some(orphan_block) =
                        self.orphan_blocks.remove_orphan_block(&orphan_block_id)
                    {
                        self.accept_block(&orphan_block_id, &orphan_block).await?;
                        v_work_queue.push_back(orphan_block_id);
                        accepted_blocks.push(orphan_block);
                    }
                }
            }
        }

        Ok(accepted_blocks)
    }

    // Contextual consensus checks run just before a block is accepted:
    //  - its timestamp must be strictly later than the median time past of
    //    the previous block,
    //  - every transaction must be final at the current best height,
    //  - its nBits must equal the difficulty required for the next block.
    //
    // Precondition (asserted by the caller, accept_block): the block index
    // for `prev_block_id` exists in the cache.
    async fn check_block_before_accept(
        &self,
        block: &Block,
        prev_block_id: &BlockId,
    ) -> Result<(), BlockConsensusError> {
        let block_time = block.get_timestamp();
        let median_time_past = self
            .block_and_index_manager
            .get_median_time_past(prev_block_id)
            .await
            // The caller has already asserted the prev block index exists, so
            // a missing median-time-past is a broken invariant, not a
            // recoverable error — state that instead of a bare unwrap().
            .expect("median time past must exist for a known prev block index");
        if block_time <= median_time_past {
            error!(
                "Timestamp is too old. block_time: {}, median_time_past: {}",
                block_time, median_time_past
            );
            return Err(BlockConsensusError::TimestampTooOld);
        }

        if !block.is_all_tx_final(
            block_time as i64,
            self.block_and_index_manager.read_best_height().await as u32,
        ) {
            return Err(BlockConsensusError::ContainsNonFinalTransaction);
        }

        // check proof of work
        let blk_nbits = block.get_nbits();
        let next_work_required = self.get_next_work_required(prev_block_id).await;
        if blk_nbits != next_work_required {
            error!(
                "Incorrect proof of work(nbits: {:#x}, work required: {:#x})",
                blk_nbits, next_work_required
            );
            return Err(BlockConsensusError::IncorrectProofOfWork);
        }

        Ok(())
    }

    // conditions:
    //  - block and block-index not exist
    //  - not orphan block
    //  - block preliminary check passed
    //
    // Runs contextual checks, persists the block/txs and their indexes, then
    // hands the block to the manager to connect it into the chain. If any
    // step after the write phase fails, the partially-written block/tx data
    // is rolled back via delete_block_tx_and_index_sync before returning.
    async fn accept_block(
        &mut self,
        block_id: &BlockId,
        block: &Block,
    ) -> Result<(), BlockConsensusError> {
        let prev_block_id = block.get_prev_block_id();
        debug!(
            "AcceptBlock: block_id = {}, prev_block_id = {}",
            block_id, prev_block_id
        );

        // Preconditions: parent index must exist, this block must not.
        assert!(
            self.block_and_index_manager
                .block_index_exists_in_cache(prev_block_id)
                .await
        );
        assert!(
            !(self
                .block_and_index_manager
                .block_index_exists_in_cache(block_id)
                .await),
        );

        self.check_block_before_accept(block, prev_block_id).await?;

        // check fee @ BlockAndIndexManager::connect_block()

        // Write phase: on failure, best-effort rollback of whatever was
        // written, then propagate the original error.
        // NOTE(review): if the rollback itself fails, its error replaces the
        // write error via `?` — confirm this is intended.
        let current_block_height = match self.write_block_tx_and_index_sync(block, block_id).await {
            Err(e) => {
                error!("Write block tx and index failed: {:?}", e);
                self.delete_block_tx_and_index_sync(block, block_id).await?;
                return Err(e);
            }
            Ok(current_block_height) => current_block_height,
        };

        if let Err(e) = self
            .block_and_index_manager
            .process_block_bottom_half(block_id, block, current_block_height)
            .await
        {
            // IMPORTANT: roll back the already-written block/tx data before
            // bailing out, so storage does not keep a half-connected block.
            self.delete_block_tx_and_index_sync(block, block_id).await?;
            error!("Process block bottom half failed: {:?}", e);
            return Err(e.into());
        }

        debug!(
            "Latest block in best chain: {}",
            self.block_and_index_manager
                .read_latest_block_id_in_best_chain()
                .await
        );

        // TODO: Notify UI to display prev block's coinbase if it was ours
        if *block_id
            == self
                .block_and_index_manager
                .read_latest_block_id_in_best_chain()
                .await
        {
            warn!("TODO: Notify UI to display prev block's coinbase if it was ours");
        }

        // TODO: Relay inventory, but don't relay old inventory during initial block download
        // if *block_id == self.hash_best_chain {
        //     for node in self.p2p_app_client.get_connected_nodes() {
        //         if node.is_need_update_inventory(self.best_hight) {
        //             // node.send_inventory(CInv(MSG_BLOCK, *block_id));
        //         }
        //     }
        // }

        Ok(())
    }

    // Persist `block`, its block index, and all of its transactions (plus tx
    // indexes), and record the block's height derived from its parent.
    // Returns the height assigned to this block. The caller is responsible
    // for rolling back via delete_block_tx_and_index_sync on failure.
    async fn write_block_tx_and_index_sync(
        &mut self,
        block: &Block,
        block_id: &BlockId,
    ) -> Result<BlockHeight, BlockConsensusError> {
        debug!("Store Block({}) To Disk.", block_id);
        // 1.1 store block to db
        // 1.2 store block-index to db and cache
        self.block_and_index_manager
            .write_block_and_index_sync(block_id, block)
            .await?;

        // set right block height
        let prev_blk_id = block.get_prev_block_id();
        let current_block_height = self
            .block_and_index_manager
            .calc_block_index_node_height(prev_blk_id)
            .await?;
        self.block_and_index_manager
            .update_block_height_sync(block_id, current_block_height)
            .await?;

        // 2.1 store tx to db
        // 2.2 store tx-index to db and cache
        // The first transactions.len() entries of the merkle tree are used as
        // the tx ids for the corresponding transactions (see indexing below).
        let merkle_tree = block.get_merkle_tree();
        let transactions = block.get_transactions();
        assert!(transactions.len() == 1 || merkle_tree.len() > transactions.len()); // transactions.len() == 1 --> only contain coinbase tx

        for (index, tx) in transactions.iter().enumerate() {
            let tx_id = &merkle_tree[index];
            self.block_and_index_manager
                .write_tx_and_index_sync(tx_id, tx, Some(*block_id))
                .await?;
            // Fixed typo in log message ("successed" -> "succeeded").
            trace!("Store Tx({}) & tx-index To Disk succeeded.", tx_id);
        }

        Ok(current_block_height)
    }

    // Rollback counterpart of write_block_tx_and_index_sync: removes the
    // block, its block index, and all of its transactions/tx-indexes from
    // storage and caches.
    async fn delete_block_tx_and_index_sync(
        &mut self,
        block: &Block,
        block_id: &BlockId,
    ) -> Result<(), BlockConsensusError> {
        debug!("Delete Block({}) from Disk.", block_id);

        // 1.1 delete block from db
        // 1.2 delete block-index from db and cache
        self.block_and_index_manager
            .delete_block_and_index_sync(block_id)
            .await?;

        // 2.1 delete tx-index from db and cache
        // 2.2 delete tx from db
        let merkle_tree = block.get_merkle_tree();
        let transactions = block.get_transactions();
        assert!(transactions.len() == 1 || merkle_tree.len() > transactions.len()); // transactions.len() == 1 --> only contain coinbase tx

        for (index, _tx) in transactions.iter().enumerate() {
            let tx_id = &merkle_tree[index];
            self.block_and_index_manager
                .delete_tx_and_index_from_db_and_cache(tx_id)
                .await?;
            // Fixed copy-pasted log message: this routine deletes, it does
            // not store.
            trace!("Delete Tx({}) & tx-index from Disk succeeded.", tx_id);
        }

        Ok(())
    }

    // Compute the compact difficulty (nBits) required for the block that
    // extends `prev_block_id`, Bitcoin-style:
    //  - off retarget boundaries, reuse the previous block's nBits;
    //  - every N_INTERVAL blocks, rescale the target by the actual time the
    //    last interval took, clamped to [timespan/4, timespan*4] and capped
    //    at the proof-of-work limit.
    // Panics if `prev_block_id` is null or has no cached block index.
    pub async fn get_next_work_required(&self, prev_block_id: &BlockId) -> u32 {
        const N_TARGET_TIMESPAN: u32 = 14 * 24 * 60 * 60; // two weeks
        const N_TARGET_SPACING: u32 = 10 * 60;
        const N_INTERVAL: u32 = N_TARGET_TIMESPAN / N_TARGET_SPACING;

        assert!(!prev_block_id.is_null());

        let prev_block_index = self
            .block_and_index_manager
            .read_block_index_node_in_cache(prev_block_id)
            .await;
        assert!(prev_block_index.is_some());

        // Only change the difficulty on retarget boundaries; otherwise keep
        // the previous block's nBits.
        let block_index_last = prev_block_index.unwrap();
        if (block_index_last.get_height() + 1) % N_INTERVAL as BlockHeight != 0 {
            return block_index_last.get_nbits();
        }

        // Walk back N_INTERVAL - 1 blocks to find the first block of this
        // retarget interval (stops early if an ancestor index is missing,
        // e.g. near the genesis block).
        let mut pindex_first = block_index_last.clone();
        for _ in 0..N_INTERVAL - 1 {
            let prev_block_id = pindex_first.get_prev_block_id();
            let prev_block_index = self
                .block_and_index_manager
                .read_block_index_node_in_cache(prev_block_id)
                .await;
            if let Some(prev_block_index) = prev_block_index {
                pindex_first = prev_block_index;
            } else {
                break;
            }
        }

        // NOTE(review): assumes timestamps are non-decreasing across the
        // interval — this subtraction would underflow otherwise; confirm
        // upstream timestamp checks guarantee that.
        let n_actual_timespan = block_index_last.get_timestamp() - pindex_first.get_timestamp();
        debug!("  nActualTimespan = {}  before bounds", n_actual_timespan);
        // Limit the adjustment step to a factor of 4 in either direction.
        let n_actual_timespan = std::cmp::max(n_actual_timespan, N_TARGET_TIMESPAN / 4);
        let n_actual_timespan = std::cmp::min(n_actual_timespan, N_TARGET_TIMESPAN * 4);

        // new_target = old_target * actual_timespan / target_timespan,
        // capped at the proof-of-work limit (minimum difficulty).
        let mut bn_new = block::nbits_to_target_threshold(block_index_last.get_nbits()).unwrap();
        bn_new *= n_actual_timespan as u64;
        bn_new /= N_TARGET_TIMESPAN as u64;

        let bn_new = std::cmp::min(
            bn_new,
            BigUint::from_bytes_be(&constants::PROOF_OF_WORK_LIMIT),
        );
        let nbits_new = block::target_threshold_to_nbits(&bn_new);

        info!("GetNextWorkRequired RETARGET");
        info!(
            "nTargetTimespan = {}    nActualTimespan = {}",
            N_TARGET_TIMESPAN, n_actual_timespan
        );
        info!(
            "Before: {:08x}  {:064x}",
            block_index_last.get_nbits(),
            block::nbits_to_target_threshold(block_index_last.get_nbits()).unwrap()
        );
        info!("After:  {:08x}  {:064x}", nbits_new, bn_new);

        nbits_new
    }

    // Walk backwards through the orphan pool from `block` until the parent is
    // no longer an orphan; the id of that oldest known ancestor is the root
    // of this orphan chain (the first block we need from the network).
    fn get_orphan_root(&self, block: &Block) -> BlockId {
        let mut current = block;
        loop {
            match self
                .orphan_blocks
                .get_orphan_block(current.get_prev_block_id())
            {
                Some(parent) => current = parent,
                None => break,
            }
        }
        current.calc_block_id()
    }

    // Id of the tip block of the current best chain (delegates to the manager).
    pub async fn read_latest_block_id_in_best_chain(&self) -> BlockId {
        self.block_and_index_manager
            .read_latest_block_id_in_best_chain()
            .await
    }

    // Height of the current best chain (delegates to the manager).
    pub async fn read_best_height(&self) -> BlockHeight {
        self.block_and_index_manager.read_best_height().await
    }

    // Debug helper: dump all stored blocks (delegates to the manager).
    pub async fn dump_all_block(&self) {
        self.block_and_index_manager.dump_all_block().await;
    }

    // Current block subsidy for the miner's coinbase (delegates to the manager).
    pub async fn get_subsidy_for_mining(&self) -> TransactionValue {
        self.block_and_index_manager.get_subsidy_for_mining().await
    }

    // Load the full block (header plus transactions) for `block_id` from
    // storage; manager errors are converted via `?`.
    pub async fn read_block_and_txs(
        &self,
        block_id: &BlockId,
    ) -> Result<Block, BlockConsensusError> {
        Ok(self
            .block_and_index_manager
            .read_block_and_txs(block_id)
            .await?)
    }
}
