use std::sync::Arc;

use tokio::sync::Mutex;
use tracing::{debug, error, info, trace, warn};

use bc_base::{
    BlockHeight, BlockId, TransactionId, TransactionValue,
    block::{Block, BlockIndexNode},
    constants,
    transaction::Transaction,
};
use bc_lib::top_storage::{
    BlockAndIndexStorageWithCache, MiscItemStorageWithCache, TransactionAndIndexStorageWithCache,
};

use crate::{
    TransactionConsensus,
    block::block_index::{BlockIndexError, data_manager::BlockIndexDataManager},
};

/// Coordinates the block index: connects blocks onto the best chain,
/// rolls them back on failure, and reorganizes onto a longer branch.
/// Owns the data layer and shares the transaction-consensus handle.
#[derive(Debug)]
pub struct BlockAndIndexManager {
    // NOTE(review): field name has a typo ("manger" vs "manager"); renaming
    // would touch every use site in this file, so it is kept as-is.
    data_manger: BlockIndexDataManager,
    // Shared with the rest of the node; locked per-transaction when
    // connecting/disconnecting inputs.
    tx_consensus: Arc<Mutex<TransactionConsensus>>,
}

impl BlockAndIndexManager {
    /// Number of trailing blocks sampled when computing the median time past.
    const N_MEDIAN_TIME_SPAN: usize = 11;

    /// Builds the manager on top of the three storage caches, sharing the
    /// transaction-consensus handle with the rest of the node.
    pub async fn new(
        block_and_index_storage_with_cache: Arc<Mutex<BlockAndIndexStorageWithCache>>,
        tx_and_index_storage_with_cache: Arc<Mutex<TransactionAndIndexStorageWithCache>>,
        misc_item_storage_with_cache: Arc<Mutex<MiscItemStorageWithCache>>,
        tx_consensus: Arc<Mutex<TransactionConsensus>>,
    ) -> Self {
        Self {
            data_manger: BlockIndexDataManager::new(
                block_and_index_storage_with_cache,
                tx_and_index_storage_with_cache,
                misc_item_storage_with_cache,
            ),
            tx_consensus,
        }
    }

    /// Mining subsidy at the current best height: the base subsidy is
    /// halved once every `SUBSIDY_ADJUSTMENT_INTERVAL` blocks ("cut in
    /// half every 4 years").
    pub async fn get_subsidy_for_mining(&self) -> TransactionValue {
        let halvings =
            self.data_manger.read_best_height().await / constants::SUBSIDY_ADJUSTMENT_INTERVAL;
        constants::SUBSIDY >> halvings
    }

    /// Median of the timestamps of up to `N_MEDIAN_TIME_SPAN` blocks ending
    /// at `block_id`; `None` when no timestamps were found.
    pub async fn get_median_time_past(&self, block_id: &BlockId) -> Option<u32> {
        let mut timestamps = self
            .data_manger
            .get_prev_n_timestame(block_id, Self::N_MEDIAN_TIME_SPAN)
            .await;
        match timestamps.len() {
            0 => None,
            n => {
                timestamps.sort_unstable();
                trace!("time_seq: {:?}, len: {}", timestamps, n);
                // Middle element of the sorted sequence (upper middle when
                // the count is even).
                Some(timestamps[n / 2])
            }
        }
    }

    /// Reads the block `block_id` together with its transactions.
    pub async fn read_block_and_txs(&self, block_id: &BlockId) -> Result<Block, BlockIndexError> {
        self.data_manger.read_block_and_txs(block_id).await
    }

    /// Debug helper: dumps every block known to the data manager.
    pub async fn dump_all_block(&self) {
        self.data_manger.dump_all_block().await;
    }
}

// for add_block_index_node
impl BlockAndIndexManager {
    // IMPORTANT: main logic of block index
    // Before executing this function, all data related to the block has been stored.
    // bool CBlock::AddToBlockIndex(unsigned int nFile, unsigned int nBlockPos) {
    /// Wires an already-persisted block into the chain. If it extends the
    /// current best chain it is connected in place (and rolled back + deleted
    /// on failure); if it tops a side branch that is now longer, the chain is
    /// reorganized onto that branch. Blocks not exceeding the best height are
    /// currently ignored (see TODO in the else branch).
    pub async fn process_block_bottom_half(
        &mut self,
        block_id: &BlockId,
        block: &Block,
        current_block_height: BlockHeight,
    ) -> Result<(), BlockIndexError> {
        let prev_blk_id = block.get_prev_block_id();

        self.check_preconditions_for_add_block_index_node(block_id, prev_blk_id)
            .await;

        let current_best_height = self.data_manger.read_best_height().await;
        debug!(
            "current_best_height: {}, current_block_height: {}",
            current_best_height, current_block_height
        );

        if current_block_height > current_best_height {
            let latest_block_id_in_best_chain =
                self.data_manger.read_latest_block_id_in_best_chain().await;
            debug!(
                "latest_block_id_in_best_chain: {}, prev_blk_id of current block: {}",
                latest_block_id_in_best_chain, prev_blk_id
            );

            if prev_blk_id == &latest_block_id_in_best_chain {
                // Adding to current best branch
                if let Err(e) = self.connect_block(block_id, block, prev_blk_id).await {
                    error!("connect block failed: {:?}", e);
                    // Best-effort rollback: undo any partially connected
                    // inputs, then drop the block and its index entry.
                    if let Err(e) = self.disconnect_block(block, prev_blk_id).await {
                        error!("disconnect block failed: {:?}", e);
                    }
                    self.data_manger
                        .delete_block_and_index_sync(block_id)
                        .await?;
                    return Err(e);
                }

                // remove TX from mempool @ bc_consensus::Consensus::process_block
            } else {
                // New best branch
                info!(
                    "[New best branch] prev_blk_id of current block: {}, current block: {}",
                    prev_blk_id, block_id
                );
                self.reorganize_block_chain(block_id).await?;
            }

            self.update_misc_item_after_add_block_index_node(block_id, current_block_height)
                .await?;
        } else {
            // Not better than the current tip: nothing to do for now.
            // TODO: remember this block as a potential main-chain candidate.
        }

        Ok(())
    }

    /// Sanity checks before indexing: the parent id must be non-null, and
    /// both the parent and the new block must already be persisted (db) and
    /// indexed (cache). Panics on violation — callers guarantee storage
    /// happened before calling `process_block_bottom_half`.
    async fn check_preconditions_for_add_block_index_node(
        &mut self,
        block_id: &BlockId,
        prev_blk_id: &BlockId,
    ) {
        assert_ne!(*prev_blk_id, BlockId::new(constants::NULL_BLOCK_ID));
        assert!(self.data_manger.block_exists_in_db(prev_blk_id).await);
        assert!(
            self.data_manger
                .block_index_exists_in_cache(prev_blk_id)
                .await
        );

        assert!(
            self.data_manger.block_exists_in_db(block_id).await,
            "block index({})",
            block_id
        );
        assert!(self.data_manger.block_index_exists_in_cache(block_id).await);
    }

    /// Records `block_id` as the new best tip: updates the best-chain head,
    /// the best height, and the "best received" timestamp in the misc store.
    async fn update_misc_item_after_add_block_index_node(
        &mut self,
        block_id: &BlockId,
        blk_index_node_height: BlockHeight,
    ) -> Result<(), BlockIndexError> {
        self.data_manger
            .write_latest_blk_id_in_best_chain(block_id)
            .await?;
        self.data_manger
            .write_best_height(blk_index_node_height)
            .await?;
        self.data_manger.write_time_best_received().await?;

        Ok(())
    }

    /// Height a block whose parent is `prev_blk_id` would have
    /// (parent height + 1).
    ///
    /// # Errors
    /// `PrevBlockIndexNodeNotFound` when the parent is not in the index cache.
    pub async fn calc_block_index_node_height(
        &self,
        prev_blk_id: &BlockId,
    ) -> Result<BlockHeight, BlockIndexError> {
        if let Some(prev_index) = self
            .data_manger
            .read_block_index_node_in_cache(prev_blk_id)
            .await
        {
            Ok(prev_index.get_height() + 1)
        } else {
            Err(BlockIndexError::PrevBlockIndexNodeNotFound(*prev_blk_id))
        }
    }

    // bool CBlock::ConnectBlock(CTxDB& txdb, CBlockIndex* pindex) {
    /// Applies `block` on top of `prev_blk_id`: spends the inputs of all of
    /// its transactions, then links the parent's `next` pointer to this block
    /// on disk.
    async fn connect_block(
        &mut self,
        block_id: &BlockId,
        block: &Block,
        prev_blk_id: &BlockId,
    ) -> Result<(), BlockIndexError> {
        self.connect_inputs_of_txs_in_block(block).await?;

        // update next hash of prev block in Disk
        self.data_manger
            .set_next_block_id(prev_blk_id, block_id)
            .await?;

        // Watch for transactions paying to me:
        //      process at bc_manager::BcManager::process_block

        Ok(())
    }

    /// Spends the inputs of every non-coinbase transaction, accumulating the
    /// fees, and verifies the coinbase payout does not exceed
    /// subsidy + fees.
    async fn connect_inputs_of_txs_in_block(&self, block: &Block) -> Result<(), BlockIndexError> {
        // check block fee in disk
        let mut tx_fees = 0;
        for tx in block.get_transactions() {
            if tx.is_coinbase() {
                continue;
            }

            let tx_id = tx.calc_tx_id();
            // Lock is taken per transaction so other tasks can interleave.
            let mut tx_consensus = self.tx_consensus.lock().await;
            let tx_fee = tx_consensus.connect_inputs_of_tx(&tx_id, tx, 0).await?;
            tx_fees += tx_fee;
        }

        let block_fee_max = self.get_subsidy_for_mining().await + tx_fees;
        // Assumes transactions[0] is the coinbase — panics on an empty block.
        if block.get_transactions()[0].get_output_value() > block_fee_max {
            return Err(BlockIndexError::BlockFeeIsTooLarge);
        }

        Ok(())
    }

    // bool CBlock::DisconnectBlock(CTxDB& txdb, CBlockIndex* pindex) {
    /// Undoes `connect_block`: unspends the block's inputs and clears the
    /// parent's `next` pointer on disk.
    async fn disconnect_block(
        &mut self,
        block: &Block,
        prev_blk_id: &BlockId,
    ) -> Result<(), BlockIndexError> {
        self.disconnect_inputs_of_txs_in_block(block).await?;

        // Update block index on disk without changing it in memory.
        // The memory index structure will be changed after the db commits.
        self.data_manger.reset_next_block_id(prev_blk_id).await?;

        Ok(())
    }

    /// Unspends the inputs of every non-coinbase transaction, walking the
    /// block in reverse so intra-block dependencies are unwound correctly.
    async fn disconnect_inputs_of_txs_in_block(
        &self,
        block: &Block,
    ) -> Result<(), BlockIndexError> {
        for tx in block.get_transactions().iter().rev() {
            if tx.is_coinbase() {
                continue;
            }

            let tx_id = tx.calc_tx_id();
            let mut tx_consensus = self.tx_consensus.lock().await;
            tx_consensus.disconnect_inputs_of_tx(&tx_id, tx).await?;
        }

        Ok(())
    }
}

// for reorganize
impl BlockAndIndexManager {
    // bool Reorganize(CTxDB& txdb, CBlockIndex* pindexNew)
    // Before executing this function, all data related to the block(block, block-index, tx, tx-index) has been stored.
    // NOTE: There are at least two nodes behind the fork node in new chain.
    /// Switches the best chain over to the branch ending at `block_id`:
    /// finds the fork point, disconnects the old (shorter) branch, connects
    /// the new (longer) branch, then reconciles the in-memory transaction
    /// pool (resurrect txs from the old branch, drop txs already in the new).
    async fn reorganize_block_chain(&mut self, block_id: &BlockId) -> Result<(), BlockIndexError> {
        info!("Reorganize block chain from block({}) ...", block_id);
        let block_index_node_new = self
            .data_manger
            .read_block_index_node_in_cache(block_id)
            .await
            .unwrap();

        // step 1. find the fork
        let fork = self
            .find_fork_block_index_node(&block_index_node_new)
            .await?;
        let fork_blk_id = *fork.get_block_id();
        info!("The fork block is {}", &fork_blk_id);

        // step 2. List of what to disconnect
        let block_id_to_disconnect = self.get_disconnect_block_id(&fork).await?;
        trace!("Block id to disconnect: {:?}", &block_id_to_disconnect);

        // step 3. List of what to connect
        let block_id_to_connect = self
            .get_connect_block_id(&block_index_node_new, &fork)
            .await?;
        trace!("Block id to connect: {:?}", &block_id_to_connect);

        // step 4. Disconnect shorter branch in memory and disk
        let txs_to_resurrect = self
            .disconnect_shorter_branch(&block_id_to_disconnect)
            .await?;
        debug!("Disconnect shorter branch in memory and disk done.");
        trace!("txs_to_resurrect: {:?}", txs_to_resurrect);

        // step 5. Connect longer branch in memory and disk, return redundant transactions
        let txs_to_delete = self
            .connect_longer_branch(&fork_blk_id, &block_id_to_connect)
            .await?;
        debug!("Connect longer branch in memory and disk done.");
        trace!("txs_to_delete: {:?}", txs_to_delete);

        // step 6. Commit now because resurrecting could take some time
        // self.flush_db()?;

        // A tx present in both branches must not be resurrected: it is
        // already confirmed by the newly connected branch.
        let txs_to_resurrect: Vec<TransactionId> = txs_to_resurrect
            .into_iter()
            .filter(|tx_id| !txs_to_delete.contains(tx_id))
            .collect();
        trace!("real txs_to_resurrect: {:?}", txs_to_resurrect);

        // step 7. Resurrect memory transactions that were in the disconnected branch
        self.resurrect_txs_in_memory(&txs_to_resurrect)?;

        // step 8. Delete redundant memory transactions that are in the connected branch
        self.delete_redundant_txs_in_memory(&txs_to_delete)?;

        Ok(())
    }

    /// Finds the common ancestor of the current best tip and the new tip
    /// (`block_index_node_new`) by walking both branches backwards.
    ///
    /// Each outer iteration steps `fork` (old branch) back by one, then
    /// catches `longer` (new branch) down until its height is <= fork's;
    /// the loop ends when both cursors sit on the same block.
    ///
    /// # Errors
    /// `PrevOfForkIsNone` / `PrevOfLongerIsNone` when a parent index node is
    /// missing from the cache.
    async fn find_fork_block_index_node(
        &self,
        block_index_node_new: &BlockIndexNode,
    ) -> Result<BlockIndexNode, BlockIndexError> {
        let latest_block_id_in_best_chain =
            self.data_manger.read_latest_block_id_in_best_chain().await;
        let mut fork = self
            .data_manger
            .read_block_index_node_in_cache(&latest_block_id_in_best_chain)
            .await
            .unwrap();
        // Owns the node `longer` points at, so the reference stays valid
        // across loop iterations.
        let mut tmp_prev_index_longer;
        let mut longer = block_index_node_new;
        while fork.get_block_id() != longer.get_block_id() {
            fork = match self
                .data_manger
                .read_block_index_node_in_cache(fork.get_prev_block_id())
                .await
            {
                None => return Err(BlockIndexError::PrevOfForkIsNone),
                Some(prev_index_fork) => prev_index_fork,
            };

            while longer.get_height() > fork.get_height() {
                longer = match self
                    .data_manger
                    .read_block_index_node_in_cache(longer.get_prev_block_id())
                    .await
                {
                    None => return Err(BlockIndexError::PrevOfLongerIsNone),
                    Some(prev_index_longer) => {
                        tmp_prev_index_longer = prev_index_longer;
                        &tmp_prev_index_longer
                    }
                };
            }
        }

        Ok(fork)
    }

    /// Collects the block ids of the current best branch above the fork,
    /// walking from the best tip down to (but excluding) `fork`.
    ///
    /// Returned tail-before-head (tip first), matching the order in which
    /// blocks must be disconnected. Panics if the branch is empty.
    async fn get_disconnect_block_id(
        &self,
        fork: &BlockIndexNode,
    ) -> Result<Vec<BlockId>, BlockIndexError> {
        let mut block_id_to_disconnect = Vec::new();
        let latest_block_id_in_best_chain =
            self.data_manger.read_latest_block_id_in_best_chain().await;
        let mut blk_index_node = self
            .data_manger
            .read_block_index_node_in_cache(&latest_block_id_in_best_chain)
            .await
            .unwrap();

        while blk_index_node.get_height() > fork.get_height() {
            block_id_to_disconnect.push(*blk_index_node.get_block_id());
            blk_index_node = self
                .data_manger
                .read_block_index_node_in_cache(blk_index_node.get_prev_block_id())
                .await
                .unwrap();
        }

        assert!(!block_id_to_disconnect.is_empty());
        // tail before head
        Ok(block_id_to_disconnect)
    }

    /// Collects the block ids of the new branch above the fork, walking from
    /// the new tip down to (but excluding) `fork`, then reverses the list so
    /// blocks can be connected oldest-first.
    ///
    /// Panics if the branch has fewer than two blocks (see the NOTE on
    /// `reorganize_block_chain`).
    async fn get_connect_block_id(
        &self,
        block_index_node_new: &BlockIndexNode,
        fork: &BlockIndexNode,
    ) -> Result<Vec<BlockId>, BlockIndexError> {
        let mut block_id_to_connect = Vec::new();
        let mut blk_index_node = block_index_node_new;

        // Holds the owned node `blk_index_node` borrows from on each step.
        let mut t;
        while blk_index_node.get_height() > fork.get_height() {
            block_id_to_connect.push(*blk_index_node.get_block_id());
            t = self
                .data_manger
                .read_block_index_node_in_cache(blk_index_node.get_prev_block_id())
                .await
                .unwrap();
            blk_index_node = &t;
        }

        assert!(block_id_to_connect.len() >= 2);
        // head before tail
        block_id_to_connect.reverse();
        Ok(block_id_to_connect)
    }

    /// Transaction ids of the block stored in the db, including the coinbase
    /// at index 0 (callers typically slice `[1..]` to skip it).
    async fn read_txs_of_block(
        &self,
        block_id: &BlockId,
    ) -> Result<Vec<TransactionId>, BlockIndexError> {
        // let mut txs_of_block = Vec::new();

        let block_in_db = self.data_manger.read_block_from_db(block_id).await?;
        let ids = block_in_db.get_transaction_ids();
        // txs_of_block.extend_from_slice(&ids[1..]);

        Ok(ids.clone())
    }

    // resurrect transactions that were in the disconnected branch
    /// Disconnects every block of the losing branch (tip-first order as
    /// produced by `get_disconnect_block_id`), deletes its coinbase and its
    /// block/index records, and returns the non-coinbase tx ids so they can
    /// be resurrected into the mempool.
    async fn disconnect_shorter_branch(
        &mut self,
        block_id_to_disconnect: &Vec<BlockId>,
    ) -> Result<Vec<TransactionId>, BlockIndexError> {
        // tail before head
        // The first entry must be the branch tip: it has no successor.
        assert_eq!(
            self.data_manger
                .read_block_index_node_in_cache(block_id_to_disconnect.first().unwrap())
                .await
                .unwrap()
                .get_next_block_id(),
            None
        );

        let mut txs_to_resurrect = Vec::new();
        for block_id in block_id_to_disconnect {
            let mut block = self.data_manger.read_block_and_txs(block_id).await?;
            // NOTE(review): rebuilds the merkle tree after loading —
            // presumably required before disconnecting; confirm with Block.
            let _ = block.build_merkle_tree()?;
            let block_index_node = self
                .data_manger
                .read_block_index_node_in_cache(block_id)
                .await
                .unwrap();
            let prev_blk_id = block_index_node.get_prev_block_id();
            self.disconnect_block(&block, prev_blk_id).await?;

            let ids = self.read_txs_of_block(block_id).await?;
            txs_to_resurrect.extend_from_slice(&ids[1..]);

            // delete coinbase tx
            self.data_manger.delete_tx_and_index_sync(&ids[0]).await?;

            // delete block (not containing txs)
            self.data_manger
                .delete_block_and_index_sync(block_id)
                .await?;
        }

        Ok(txs_to_resurrect)
    }

    // collect transactions that were in the connected longer branch
    /// Connects every block of the winning branch (oldest-first), collecting
    /// the non-coinbase tx ids now confirmed on-chain. If any connect fails,
    /// that block and all remaining blocks are deleted, already-collected
    /// txs are dropped from memory, and `ConnectBlockFailed` is returned.
    async fn connect_longer_branch(
        &mut self,
        fork_blk_id: &BlockId,
        blk_id_to_connect: &Vec<BlockId>,
    ) -> Result<Vec<TransactionId>, BlockIndexError> {
        // head before tail
        // The last entry must be the new tip: it has no successor.
        assert_eq!(
            self.data_manger
                .read_block_index_node_in_cache(blk_id_to_connect.last().unwrap())
                .await
                .unwrap()
                .get_next_block_id(),
            None
        );

        let mut redundant_txs = Vec::new();

        let mut prev_blk_id = fork_blk_id;
        let mut connect_failed = false;
        for block_id in blk_id_to_connect {
            if connect_failed {
                // A predecessor failed to connect: the rest of the branch is
                // unusable, just purge it.
                self.data_manger.delete_block_and_txs(block_id).await?;
            } else {
                let block = self.data_manger.read_block_and_txs(block_id).await?;
                match self.connect_block(block_id, &block, prev_blk_id).await {
                    Err(r) => {
                        error!(
                            "Connect block failed({}), block_id:{} , prev_blk_id: {}",
                            r, block_id, prev_blk_id
                        );
                        self.data_manger.delete_block_and_txs(block_id).await?;
                        connect_failed = true;
                    }
                    Ok(_) => {
                        prev_blk_id = block_id;
                        let t = self.read_txs_of_block(block_id).await?;
                        // Skip the coinbase at index 0.
                        redundant_txs.extend_from_slice(&t[1..]);
                    }
                }
            }
        }

        // todo: partially connect failed?

        if connect_failed {
            self.delete_redundant_txs_in_memory(&redundant_txs)?;
            Err(BlockIndexError::ConnectBlockFailed)
        } else {
            Ok(redundant_txs)
        }
    }

    // step 7. Resurrect memory transactions that were in the disconnected branch
    /// Placeholder: should move txs from the disconnected branch back into
    /// the mempool. Currently a no-op (see TODO).
    fn resurrect_txs_in_memory(
        &self,
        txs_to_resurrect: &Vec<TransactionId>,
    ) -> Result<(), BlockIndexError> {
        for _tx_id in txs_to_resurrect {
            // TODO:
            // delete from DB
            // add to tx pool
        }

        Ok(())
    }

    // step 8. Delete redundant memory transactions that are in the connected branch
    /// Placeholder: should drop now-confirmed txs from the mempool.
    /// Currently a no-op (see TODO).
    fn delete_redundant_txs_in_memory(
        &self,
        redundant_txs: &Vec<TransactionId>,
    ) -> Result<(), BlockIndexError> {
        for _tx_id in redundant_txs {
            // TODO: delete from tx pool
        }

        Ok(())
    }
}

// simple wrapper for read
impl BlockAndIndexManager {
    /// Id of the current best-chain tip.
    pub async fn read_latest_block_id_in_best_chain(&self) -> BlockId {
        self.data_manger.read_latest_block_id_in_best_chain().await
    }

    /// Height of the current best chain.
    pub async fn read_best_height(&self) -> BlockHeight {
        self.data_manger.read_best_height().await
    }

    /// Whether `block_id` has an index node in the cache.
    pub async fn block_index_exists_in_cache(&self, block_id: &BlockId) -> bool {
        self.data_manger.block_index_exists_in_cache(block_id).await
    }

    /// Index node for `block_id`, or `None` when it is not cached.
    pub async fn read_block_index_node_in_cache(
        &self,
        block_id: &BlockId,
    ) -> Option<BlockIndexNode> {
        self.data_manger
            .read_block_index_node_in_cache(block_id)
            .await
    }
}

// simple wrapper for write
impl BlockAndIndexManager {
    /// Persists a block and its index entry synchronously.
    pub async fn write_block_and_index_sync(
        &mut self,
        block_id: &BlockId,
        block: &Block,
    ) -> Result<(), BlockIndexError> {
        self.data_manger
            .write_block_and_index_sync(block_id, block)
            .await
    }

    /// Updates the stored height of `block_id` synchronously.
    pub async fn update_block_height_sync(
        &mut self,
        block_id: &BlockId,
        block_height: BlockHeight,
    ) -> Result<(), BlockIndexError> {
        self.data_manger
            .update_block_height_sync(block_id, block_height)
            .await
    }

    /// Deletes a block and its index entry synchronously.
    pub async fn delete_block_and_index_sync(
        &mut self,
        block_id: &BlockId,
    ) -> Result<(), BlockIndexError> {
        self.data_manger.delete_block_and_index_sync(block_id).await
    }

    /// Persists a transaction and its index entry, optionally tagged with
    /// the block it belongs to. An existing tx is only warned about and then
    /// overwritten — presumably deliberate; confirm with the storage layer.
    pub async fn write_tx_and_index_sync(
        &mut self,
        tx_id: &TransactionId,
        tx: &Transaction,
        belong_to_block: Option<BlockId>,
    ) -> Result<(), BlockIndexError> {
        if self.data_manger.tx_exists_in_db(tx_id).await {
            warn!("tx({}) already exists in db", tx_id);
        }
        self.data_manger
            .write_tx_and_index_sync(tx_id, tx, belong_to_block)
            .await?;
        Ok(())
    }

    /// Deletes a transaction and its index entry from both db and cache.
    pub async fn delete_tx_and_index_from_db_and_cache(
        &mut self,
        tx_id: &TransactionId,
    ) -> Result<(), BlockIndexError> {
        self.data_manger.delete_tx_and_index_sync(tx_id).await?;
        Ok(())
    }
}
