use std::{collections::HashSet, path::PathBuf, sync::Arc, time::Duration};

use anyhow::{Context, anyhow};
use async_channel::{Receiver, TryRecvError};
use tokio::{
    fs,
    sync::{
        RwLock,
        mpsc::{self, UnboundedSender},
        oneshot,
    },
    time,
};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};

use crate::{
    app::{
        FileChunkDownloadRequest, FileProcessResult, P2pCommand, Processor,
        ProvideFileChunkRequest, ProvideFileRequest, Service, make_chunk_file_name,
        store::{self, PendingDownloadRecord, PublishedFileRecord},
    },
    error::ServerError,
};

const LOG_TARGET: &str = "app::file_download_service";
const INTERVAL_CHECK_PENDING_DOWNLOAD_SECS: u64 = 5;
const CHUNK_DOWNLOAD_WORKER_WAIT_SECS: u64 = 1;

/// A single chunk-download job handed to a download worker, bundling the
/// chunk's identity with the Merkle data needed to verify the bytes.
#[derive(Debug)]
pub struct FileChunkDownload {
    /// Identifier of the file this chunk belongs to.
    pub file_id: u64,
    /// Zero-based index of the chunk within the file.
    pub chunk_id: usize,
    /// Merkle root of the whole file.
    pub merkle_root: [u8; 32],
    /// Serialized Merkle proof for this chunk.
    pub merkle_proof: Vec<u8>,
}

impl FileChunkDownload {
    /// Creates a download job for one chunk of a file.
    pub fn new(
        file_id: u64,
        chunk_id: usize,
        merkle_root: [u8; 32],
        merkle_proof: Vec<u8>,
    ) -> Self {
        FileChunkDownload {
            merkle_proof,
            merkle_root,
            chunk_id,
            file_id,
        }
    }
}

/// Background service that watches the store for pending file downloads and
/// coordinates their chunk-by-chunk retrieval over the P2P layer.
///
/// Trait bounds are intentionally left off the struct definition and declared
/// on the `impl` blocks that actually need them, per Rust convention.
pub struct FileDownloadService<FileStore> {
    /// Persistent store holding pending-download and published-file records.
    file_store: FileStore,
    /// Maximum number of chunk-download workers spawned per file.
    worker_count_per_file: usize,
    /// Ids of files whose download is currently in progress; used to avoid
    /// starting the same download twice from the periodic poll.
    downloads: Arc<RwLock<HashSet<u64>>>,
    /// Channel used to issue commands to the P2P service.
    command_sender: mpsc::Sender<P2pCommand>,
}

impl<FileStore> FileDownloadService<FileStore>
where
    FileStore: store::Store + Send + Sync + Clone + 'static,
{
    /// Creates a download service with an initially empty set of in-progress
    /// downloads.
    ///
    /// `worker_count_per_file` caps how many chunk-download workers are
    /// spawned per file; `command_sender` forwards requests to the P2P layer.
    pub fn new(
        file_store: FileStore,
        worker_count_per_file: usize,
        command_sender: mpsc::Sender<P2pCommand>,
    ) -> Self {
        let downloads = Arc::new(RwLock::new(HashSet::new()));
        Self {
            command_sender,
            downloads,
            worker_count_per_file,
            file_store,
        }
    }

    async fn file_chunk_download_worker(
        worker_id: usize,
        metadata: FileProcessResult,
        chunk_download_notify: UnboundedSender<usize>,
        chunk_download_receiver: Receiver<FileChunkDownload>,
        command_sender: mpsc::Sender<P2pCommand>,
        download_path: PathBuf,
    ) {
        info!(target: LOG_TARGET, "File chunk download worker {} started", worker_id);

        loop {
            // 尝试立即接收消息（非阻塞）
            let recv_result = chunk_download_receiver.try_recv();
            if let Err(TryRecvError::Empty) = recv_result {
                // 通道为空，等待一段时间后继续循环检查
                time::sleep(Duration::from_secs(CHUNK_DOWNLOAD_WORKER_WAIT_SECS)).await;
                continue;
            }
            if let Err(TryRecvError::Closed) = recv_result {
                // 通道关闭，退出循环
                break;
            }
            let chunk = recv_result.unwrap();
            let chunk_id = chunk.chunk_id;
            info!(target: LOG_TARGET, "File chunk download worker {} is downloading chunk {}", worker_id, chunk_id);
            let (result_sender, result_receiver) = oneshot::channel();
            if let Err(e) = command_sender
                .send(P2pCommand::RequestFileChunk {
                    request: FileChunkDownloadRequest {
                        file_id: chunk.file_id,
                        chunk_id,
                    },
                    result: result_sender,
                })
                .await
            {
                error!(target: LOG_TARGET, "Failed to send p2p command: {}", e);
                continue;
            };
            match result_receiver.await {
                Ok(data) => {
                    info!(target: LOG_TARGET, "File chunk download worker {} downloaded chunk {}", worker_id, chunk_id);

                    // verify chunk data
                    if !metadata.verify_chunk(chunk.chunk_id, &data) {
                        error!(target: LOG_TARGET, "chunk {} data verification failed", chunk.chunk_id);
                        continue;
                    }
                    info!(target: LOG_TARGET, "chunk {} data verification passed", chunk.chunk_id);
                    // save chunk to local file
                    let chunk_file_name = make_chunk_file_name(chunk.chunk_id);
                    let chunk_file_path = download_path.join(chunk_file_name);
                    if let Err(e) = fs::write(chunk_file_path, data).await {
                        error!(target: LOG_TARGET, "Failed to write chunk file: {}", e);
                    }
                }
                Err(e) => {
                    error!(target: LOG_TARGET, "File chunk download worker {} failed to download chunk {}: {}", worker_id, chunk_id, e);
                }
            }
            // TODO: error case: log the error and retry the operation (so possibly other peer will be asked in P2P service)
            // TODO: success case: save the file to downloads folder and send back success
            if let Err(e) = chunk_download_notify.send(chunk_id) {
                error!(target: LOG_TARGET, "Failed to send chunk id {} to notify channel: {}", chunk_id, e);
            }
        }

        info!(target: LOG_TARGET, "File chunk download worker {} finished", worker_id);
    }

    /// Drives the full download of one pending file.
    ///
    /// Spawns up to `number_of_workers` chunk-download workers, queues every
    /// not-yet-downloaded chunk for them, records each completed chunk in
    /// `file_store`, announces each chunk (and finally the whole file) on the
    /// DHT via `command_sender`, reassembles the file, and promotes the record
    /// from "pending download" to "published file".
    async fn download_file(
        record: PendingDownloadRecord,
        number_of_workers: usize,
        file_store: FileStore,
        command_sender: mpsc::Sender<P2pCommand>,
    ) -> anyhow::Result<()>
    where
        FileStore: store::Store + Send + Sync + 'static,
    {
        info!(target: LOG_TARGET, "File {} is not being downloaded, start downloading..", record.file_id);
        // Load metadata to get the number of chunks (and the Merkle data used
        // for per-chunk verification).
        let metadata = record.load_metadata().context("Failed to load metadata")?;
        info!(target: LOG_TARGET, "File {} has {} chunks", metadata.original_file_name.clone(), metadata.number_of_chunks);
        // Work queue the workers pull chunk jobs from. async_channel is an
        // MPMC channel (similar to broadcast::channel in fan-out), but each
        // message is consumed by exactly one receiver, so every chunk is
        // downloaded by a single worker.
        let (chunk_download_sender, chunk_download_receiver) =
            async_channel::bounded::<FileChunkDownload>(100);
        // Each worker reports a finished chunk id here. Unbounded, because the
        // notifications are tiny and must never block the workers.
        let (chunk_download_notify_sender, mut chunk_download_notify_receiver) =
            mpsc::unbounded_channel::<usize>();

        // The file may have been partially (or fully) downloaded before, so
        // find out which chunks are already present to avoid re-downloading.
        let downloaded_chunk_ids =
            file_store.already_downloaded_chunks_in_pending_download(record.file_id)?;

        // Start N workers for this file download — never more workers than
        // there are chunks left to fetch.
        let number_of_workers =
            number_of_workers.min(metadata.number_of_chunks - downloaded_chunk_ids.len());
        for i in 0..number_of_workers {
            let chunk_download_notify_sender_clone = chunk_download_notify_sender.clone();
            let chunk_download_receiver_clone = chunk_download_receiver.clone();
            let command_sender_clone = command_sender.clone();
            let download_path_clone = record.download_path.clone();

            // TODO: cloning the full metadata per worker is memory-expensive
            let metadata_clone = metadata.clone();
            tokio::spawn(async move {
                Self::file_chunk_download_worker(
                    i,
                    metadata_clone,
                    chunk_download_notify_sender_clone,
                    chunk_download_receiver_clone,
                    command_sender_clone,
                    download_path_clone,
                )
                .await;
            });
        }

        // Drop the local handles now that the workers hold their own clones:
        // the notify receiver below only completes once every sender is gone,
        // and the workers only observe channel closure once every receiver
        // handle is dropped. Keeping these alive would deadlock the shutdown.
        drop(chunk_download_notify_sender);
        drop(chunk_download_receiver);

        // Iterate over chunks and send them to the work queue (including the
        // corresponding Merkle root/proof for validation).
        for chunk_id in 0..metadata.number_of_chunks {
            if downloaded_chunk_ids.contains(&chunk_id) {
                info!(target: LOG_TARGET, "File {} Chunk {} already downloaded, skip..", record.file_id, chunk_id);
                continue;
            }
            if let Some(merkle_proof) = metadata.merkle_proofs.get(&chunk_id) {
                if let Err(e) = chunk_download_sender
                    .send(FileChunkDownload::new(
                        record.file_id,
                        chunk_id,
                        metadata.merkle_root,
                        merkle_proof.clone(),
                    ))
                    .await
                {
                    error!(target: LOG_TARGET, "Error sending file chunk download: {}", e);
                    continue;
                }
                info!(target: LOG_TARGET, "File {} Chunk {} sent to download", record.file_id, chunk_id);
            }
        }
        // All jobs are queued; close the channel so the workers exit once it
        // is drained (convention: the sending side closes the channel).
        chunk_download_sender.close();

        // Wait for worker results until every remaining chunk is accounted for
        // (or until all workers have exited and the notify channel closes).
        let mut downloaded_chunks_count = 0;
        while let Some(chunk_id) = chunk_download_notify_receiver.recv().await {
            info!(target: LOG_TARGET, "File chunk {} downloaded", chunk_id);
            // Persist the chunk completion so a restart can skip it.
            file_store.add_downloaded_chunk_to_pending_download(record.file_id, chunk_id)?;
            // Immediately start providing this chunk on the DHT.
            if let Some(merkle_proof_hash) = metadata.merkle_proof_hash(chunk_id) {
                let (provide_sender, provide_receiver) = oneshot::channel();
                if let Err(e) = command_sender
                    .send(P2pCommand::ProvideFileChunk {
                        request: ProvideFileChunkRequest {
                            file_id: record.file_id,
                            chunk_id,
                            chunk_hash: merkle_proof_hash,
                        },
                        result: provide_sender,
                    })
                    .await
                {
                    error!(target: LOG_TARGET, "Failed to send p2p command: {}", e);
                    continue;
                }
                if !provide_receiver.await? {
                    return Err(anyhow!("Failed to provide file chunk!"));
                }
            }

            downloaded_chunks_count += 1;
            if downloaded_chunks_count == metadata.number_of_chunks - downloaded_chunk_ids.len() {
                info!(target: LOG_TARGET, "File {} downloaded", record.file_id);
                break;
            }
        }

        // Reassemble/process the downloaded chunks into the final file.
        let processor = Processor::new();
        processor
            .process_download_file(record.download_path.clone(), &metadata)
            .await?;

        // The file is complete: register it as published so this node serves it.
        let file_id = record.file_id;
        let original_file_name = record.original_file_name;
        let chunks_directory = record.download_path;
        let public = metadata.public;
        file_store.add_published_file_record({
            PublishedFileRecord {
                file_id,
                original_file_name,
                chunks_directory,
                public,
            }
        })?;

        file_store.remove_pending_download(file_id)?;

        // Start providing the whole file on the DHT.
        let (provide_command_tx, provide_command_rx) = oneshot::channel::<bool>();
        command_sender
            .send(P2pCommand::ProvideFile {
                request: ProvideFileRequest { file_id, metadata },
                result: provide_command_tx,
            })
            .await?;
        if !provide_command_rx.await? {
            return Err(anyhow!("Failed to provide file!"));
        }

        Ok(())
    }

    /// Returns `true` when `file_id` is already being downloaded; otherwise
    /// marks it as in-progress and returns `false`.
    ///
    /// The insert happens under the write lock, so exactly one caller wins the
    /// right to start the download even when several tasks race on the same
    /// file id. (Without the insert, the `downloads` set would never be
    /// populated and the periodic poll would start duplicate downloads of the
    /// same file every tick.)
    async fn check_file_downloaded(&self, file_id: u64) -> bool {
        // Fast path: a shared read lock suffices when the id is present.
        {
            let downloads = self.downloads.read().await;
            if downloads.contains(&file_id) {
                return true;
            }
        }

        // Slow path: take the write lock and insert. `insert` returns `false`
        // when the id was already present (another task raced us between the
        // two lock acquisitions), which means the file is being downloaded.
        let mut downloads = self.downloads.write().await;
        !downloads.insert(file_id)
    }

    /// Polls the store for pending downloads and spawns a background download
    /// task for every file that is not already in progress.
    async fn check_pending_downloads(&self) {
        info!(target: LOG_TARGET, "Checking pending downloads...");
        let pending_download_records = match self.file_store.fetch_all_pending_downloads() {
            Ok(records) => records,
            Err(e) => {
                error!(target: LOG_TARGET, "Error checking pending downloads: {}", e);
                return;
            }
        };

        for record in pending_download_records {
            info!(target: LOG_TARGET, "Still waiting for {:?} to be downloading..", record);
            if self.check_file_downloaded(record.file_id).await {
                // TODO: somehow check how many chunks we have been downloaded, so if it is successful, we can remove from pending list
                info!(target: LOG_TARGET, "File {} is already being downloaded, skip..", record.file_id);
                continue;
            }

            // Hand owned clones to the spawned task; `self` must not escape.
            let downloads = self.downloads.clone();
            let workers = self.worker_count_per_file;
            let store = self.file_store.clone();
            let commands = self.command_sender.clone();
            tokio::spawn(async move {
                let file_id = record.file_id;
                if let Err(e) = Self::download_file(record, workers, store, commands).await {
                    error!(target: LOG_TARGET, "Error starting file download: {:?}", e);
                    // Un-track the file so a later poll can retry it.
                    downloads.write().await.remove(&file_id);
                }
            });
        }
    }
}

#[async_trait::async_trait]
impl<FileStore> Service for FileDownloadService<FileStore>
where
    FileStore: store::Store + Send + Sync + Clone + 'static,
{
    /// Main service loop: re-checks pending downloads on a fixed interval
    /// until the cancellation token fires.
    async fn start(&mut self, cancel_token: CancellationToken) -> Result<(), ServerError> {
        let mut poll_timer =
            time::interval(Duration::from_secs(INTERVAL_CHECK_PENDING_DOWNLOAD_SECS));
        loop {
            // `select!` polls its branches in random order, so putting the
            // cancellation arm first does not change behavior.
            tokio::select! {
                _ = cancel_token.cancelled() => {
                    info!(target: LOG_TARGET, "File download service shutting down...");
                    return Ok(());
                }
                _ = poll_timer.tick() => self.check_pending_downloads().await,
            }
        }
    }
}
