use crate::{
    app::{
        FileProcessResult, PublishedFile, PublishedFileChunk, make_chunk_file_name,
        make_chunks_directory_name, make_file_metadata_path_name,
    },
    pb::PublishFileRequest,
};
use anyhow::{Context, anyhow};
use rs_merkle::{Hasher as _, MerkleProof, MerkleTree, algorithms::Sha256};
use rs_sha256::Sha256Hasher;
use std::{
    collections::HashMap,
    hash::{Hash, Hasher as _},
    path::{Path, PathBuf},
};
use tokio::{
    fs::{self, File},
    io::{AsyncReadExt, AsyncWriteExt, BufReader},
};
use tonic::Status;
use tracing::info;

/// Size of the stack-allocated scratch buffer used when reading (1 KiB).
/// Kept small on purpose: it lives on the stack (see `process_file`).
const BUFFER_SIZE: usize = 1024; // 1KB
/// Maximum size of a single published chunk file (1 MiB).
const CHUNK_SIZE: usize = 1024 * 1024; // 1MB

/// Tracing target used by this module's log statements.
const LOG_TARGET: &str = "app::file_processor";

impl FileProcessResult {
    /// Serializes this result into a storage `(key, value)` pair: the key is
    /// the big-endian [`Self::hash_id`] bytes and the value is the
    /// CBOR-encoded compact [`PublishedFile`] view of this result.
    ///
    /// # Errors
    /// Returns a `serde_cbor::Error` if CBOR serialization fails.
    pub fn key_value(&self) -> Result<(Vec<u8>, Vec<u8>), serde_cbor::Error> {
        let key = self.key();
        let small: PublishedFile = self.into();
        let value = serde_cbor::to_vec(&small)?;
        Ok((key, value))
    }

    /// Computes a 64-bit identifier by feeding this value's `Hash` impl
    /// through a SHA-256-backed hasher.
    pub fn hash_id(&self) -> u64 {
        let mut hasher = Sha256Hasher::default();
        self.hash(&mut hasher);
        hasher.finish()
    }

    /// Returns the storage key: the big-endian bytes of [`Self::hash_id`].
    pub fn key(&self) -> Vec<u8> {
        self.hash_id().to_be_bytes().to_vec()
    }

    /// Hashes the serialized merkle proof recorded for `chunk_id`, or `None`
    /// when no proof exists for that chunk.
    pub fn merkle_proof_hash(&self, chunk_id: usize) -> Option<u64> {
        let proof = self.merkle_proofs.get(&chunk_id)?;
        let mut hasher = Sha256Hasher::default();
        proof.hash(&mut hasher);
        Some(hasher.finish())
    }

    /// Builds the `(key, value)` storage pair for a single chunk, keyed by
    /// the chunk's merkle-proof hash. Returns `Ok(None)` when no proof is
    /// recorded for `chunk_id`.
    ///
    /// # Errors
    /// Returns a `serde_cbor::Error` if CBOR serialization fails.
    #[allow(clippy::type_complexity)]
    pub fn chunk_key_value(
        &self,
        chunk_id: usize,
    ) -> Result<Option<(Vec<u8>, Vec<u8>)>, serde_cbor::Error> {
        match self.merkle_proof_hash(chunk_id) {
            Some(key) => {
                let value = serde_cbor::to_vec(&PublishedFileChunk::new(chunk_id))?;
                Ok(Some((key.to_be_bytes().to_vec(), value)))
            }
            None => Ok(None),
        }
    }

    /// Verifies `chunk_data` against the stored merkle root using the proof
    /// recorded for `chunk_id`.
    ///
    /// Returns `false` when no proof exists for the chunk or when the stored
    /// proof bytes fail to deserialize — a corrupt proof must fail
    /// verification, not panic (the previous implementation `unwrap`ped).
    pub fn verify_chunk(&self, chunk_id: usize, chunk_data: impl AsRef<[u8]>) -> bool {
        let Some(proof_bytes) = self.merkle_proofs.get(&chunk_id) else {
            return false;
        };
        let Ok(proof) = MerkleProof::<Sha256>::from_bytes(proof_bytes.as_slice()) else {
            return false;
        };
        let chunk_hash = Sha256::hash(chunk_data.as_ref());
        proof.verify(
            self.merkle_root,
            &[chunk_id],
            &[chunk_hash],
            self.number_of_chunks,
        )
    }
}

/// CBOR-encodes a [`PublishedFileChunk`] record for `chunk_id`.
///
/// # Errors
/// Returns a `serde_cbor::Error` if serialization fails.
pub fn file_chunk_value(chunk_id: usize) -> Result<Vec<u8>, serde_cbor::Error> {
    let chunk = PublishedFileChunk::new(chunk_id);
    serde_cbor::to_vec(&chunk)
}

/// Stateless helper that splits files into fixed-size chunks with merkle
/// proofs (`process_file`) and reassembles downloaded chunks back into the
/// original file with integrity verification (`process_download_file`).
#[derive(Debug, Default)]
pub struct Processor {}

impl Processor {
    pub fn new() -> Self {
        Self::default()
    }

    pub async fn process_file(
        &self,
        request: &PublishFileRequest,
    ) -> Result<FileProcessResult, Status> {
        let file_path = &request.file_path;
        if !self.file_exists(file_path).await? {
            return Err(Status::invalid_argument("file does not exist"));
        }

        // creating chunks directory
        let file_name = self.file_name(file_path)?;
        let chunks_directory = self.create_chunks_dir(&file_name, file_path).await?;

        // 注意: 栈中的buffer分配不能太大，否则会溢出错误，堆分配可以很大
        let mut buffer = [0; BUFFER_SIZE];
        let mut reader = BufReader::new(self.read_file(file_path).await?);
        let mut chunk_number = 0;
        let mut merkle_tree = MerkleTree::<Sha256>::new();
        loop {
            let (chunk, n) = self.read_chunk(&mut reader, &mut buffer).await?;

            merkle_tree.insert(Sha256::hash(chunk.as_slice()));

            self.write_chunk_file(chunk_number, chunk, &chunks_directory)
                .await?;

            chunk_number += 1;

            if n == 0 {
                break;
            }
        }

        merkle_tree.commit();
        let merkle_root = merkle_tree
            .root()
            .ok_or(Status::internal("merkle tree root is none"))?;

        let mut merkle_proofs = HashMap::with_capacity(chunk_number + 1);
        for i in 0..chunk_number {
            let proof = merkle_tree.proof(&[i]);
            merkle_proofs.insert(i, proof.to_bytes());
        }

        let result = FileProcessResult {
            original_file_name: file_name,
            number_of_chunks: chunk_number,
            chunks_directory,
            merkle_root,
            merkle_proofs,
            public: request.public,
        };

        // save result to chunks dir
        self.save_file_process_result(&result).await?;

        Ok(result)
    }

    /// 处理下载文件, 将chunk文件合并为一个文件
    pub async fn process_download_file(
        &self,
        download_dir: impl AsRef<Path>,
        metadata: &FileProcessResult,
    ) -> anyhow::Result<()> {
        let result_file_path = download_dir.as_ref().join(&metadata.original_file_name);
        let mut merkle_tree = MerkleTree::<Sha256>::new();
        let mut result_file = File::create(result_file_path).await?;
        for chunk_id in 0..metadata.number_of_chunks {
            let chunk_file_path = download_dir.as_ref().join(make_chunk_file_name(chunk_id));
            let chunk_data = fs::read(chunk_file_path)
                .await
                .context(format!("failed to read chunk file: {}", chunk_id))?;
            merkle_tree.insert(Sha256::hash(&chunk_data));
            result_file.write_all(&chunk_data).await?;
        }
        result_file.flush().await?;

        // finalizing merkle tree
        merkle_tree.commit();
        let Some(current_merkle_root) = merkle_tree.root() else {
            return Err(anyhow!("merkle tree root is none"));
        };
        if current_merkle_root != metadata.merkle_root {
            return Err(anyhow!("merkle tree root not match"));
        }
        Ok(())
    }

    async fn save_file_process_result(&self, result: &FileProcessResult) -> Result<(), Status> {
        let result_file =
            std::fs::File::create(make_file_metadata_path_name(&result.chunks_directory))
                .map_err(|e| Status::internal(format!("cannot create file: {}", e)))?;
        serde_cbor::to_writer(result_file, result)
            .map_err(|e| Status::internal(format!("cannot write file: {}", e)))?;
        Ok(())
    }

    // reading file in chunks
    async fn read_file(&self, file_path: impl Into<String>) -> Result<File, Status> {
        let file = File::open(file_path.into())
            .await
            .map_err(|e| Status::internal(format!("cannot open file: {}", e)))?;
        Ok(file)
    }

    async fn write_chunk_file(
        &self,
        chunk_number: usize,
        chunk: impl AsRef<[u8]>,
        chunks_directory: impl AsRef<Path>,
    ) -> Result<(), Status> {
        let chunk_file_name = chunks_directory
            .as_ref()
            .join(make_chunk_file_name(chunk_number));
        fs::write(chunk_file_name, chunk)
            .await
            .map_err(|e| Status::internal(format!("failed to write chunk file: {}", e)))?;
        Ok(())
    }

    async fn read_chunk(
        &self,
        reader: &mut BufReader<File>,
        buffer: &mut [u8; BUFFER_SIZE],
    ) -> Result<(Vec<u8>, usize), Status> {
        let mut to_write = Vec::<u8>::with_capacity(CHUNK_SIZE);
        let mut n = 0;

        // 退出条件: to_write 被写满了或 读取到文件末尾(n == 0)
        // 读到文件末尾说明已经读完了
        while to_write.len() < CHUNK_SIZE {
            n = reader
                .read(buffer)
                .await
                .map_err(|e| Status::internal(format!("failed to read file: {}", e)))?;
            // 注意：使用n截断多余的0，否则多余的0会造成内容错误
            to_write.append(&mut buffer[0..n].to_vec());

            *buffer = [0; BUFFER_SIZE];
            if n == 0 {
                break;
            }
        }
        Ok((to_write, n))
    }

    async fn file_exists(&self, file_path: impl Into<String>) -> Result<bool, Status> {
        let metadata = fs::metadata(file_path.into())
            .await
            .map_err(|e| Status::invalid_argument(format!("cannot read file metadata: {}", e)))?;
        Ok(metadata.is_file())
    }

    async fn create_chunks_dir(
        &self,
        file_name: &str,
        file_path: impl Into<String>,
    ) -> Result<PathBuf, Status> {
        let file_path = PathBuf::from(file_path.into());
        let containing_dir = file_path.parent().ok_or(Status::invalid_argument(
            "cannot get file's parent directory!",
        ))?;

        let pieces_dir = make_chunks_directory_name(containing_dir, file_name);

        info!(target: LOG_TARGET, "chunks dir: {:?}", pieces_dir.as_path());
        let _ = fs::remove_dir_all(pieces_dir.clone()).await;
        fs::create_dir_all(pieces_dir.clone()).await.map_err(|e| {
            Status::internal(format!(
                "failed to create chunks directory for the file: {}",
                e
            ))
        })?;

        Ok(pieces_dir)
    }

    fn file_name(&self, file_path: impl Into<String>) -> Result<String, Status> {
        let file_path = PathBuf::from(file_path.into());
        let file_name = file_path
            .file_name()
            .ok_or(Status::internal("cannot get file's name!"))?
            .to_string_lossy();
        Ok(file_name.to_string())
    }
}

#[cfg(test)]
mod tests {
    use anyhow::Result;

    use super::*;

    /// End-to-end check: publish a fixture file, then verify every chunk
    /// against the recorded merkle proofs. The leftover `dbg!` debug macro
    /// was removed.
    #[tokio::test]
    async fn test_merkle_tree_proof() -> Result<()> {
        let processor = Processor::new();
        let result = processor
            .process_file(&PublishFileRequest {
                file_path: "./fixtures/word.txt".to_string(),
                public: true,
            })
            .await?;

        for i in 0..result.number_of_chunks {
            let chunk_content =
                fs::read(result.chunks_directory.join(make_chunk_file_name(i))).await?;
            assert!(result.verify_chunk(i, &chunk_content));
        }
        // Clean up the generated chunks directory.
        fs::remove_dir_all(result.chunks_directory).await?;

        Ok(())
    }
}
