pub mod read;
pub mod dedup;
pub mod delta;
pub mod write;
pub mod source;
pub mod error;

use read::*;
use dedup::*;
use delta::*;
use write::*;
pub use source::*;
pub use error::*;

use crate::common::*;

/// Categories used to group metric columns in a backup evaluation report.
#[derive(Debug, Clone, Copy)]
pub enum BackupEvaluation {
    /// Which dataset the measurement was taken on.
    Dataset,
    /// Which algorithm/configuration produced the measurement.
    Algorithm,
    /// Wall-clock / per-stage timing metrics.
    Time,
    /// Storage-size metrics.
    Space,
    /// NOTE(review): likely a typo for `Throughput`; kept as-is because
    /// renaming a public variant would break external callers.
    Throughout,
    /// Anything that does not fit the categories above.
    Other,
}

/// Prefer implementing `From` over a hand-written `Into` (clippy
/// `from_over_into`): the blanket impl still gives callers `.into()`.
impl From<BackupEvaluation> for usize {
    fn from(value: BackupEvaluation) -> usize {
        // Field-less enum: the discriminant is the 0-based declaration index.
        value as usize
    }
}

/// A file's metadata tagged with its sequence position in the backup stream.
#[derive(Debug, Clone)]
pub struct SeqFileInfo {
    /// Sequence number of the file within the current backup run.
    pub file_id: usize,
    /// Metadata of the file itself.
    pub file_info: FileInfo,
}

/// A chunk tagged with the file it came from and its position in that file.
#[derive(Debug, Clone)]
pub struct SeqChunk {
    /// Sequence number of the owning file within the backup run.
    pub file_id: usize,
    /// Position of this chunk within the owning file.
    pub chunk_id: usize,
    /// The chunk payload; mutated in place by `ChunkHasher::hash_chunk`.
    pub chunk: Chunk,
}

/// Identifies an already-stored chunk by (file, chunk) position, without the
/// payload; the actual pointer field is not implemented yet (see below).
#[derive(Debug, Clone)]
pub struct SeqChunkPointer {
    /// Sequence number of the owning file within the backup run.
    pub file_id: usize,
    /// Position of the referenced chunk within the owning file.
    pub chunk_id: usize,
    // pub chunk_pointer:..
}

/// A unit of work flowing toward the writer stage: a whole-file record, a
/// freshly split chunk, or a pointer to an already-stored chunk.
#[derive(Debug, Clone)]
pub enum WriteItem {
    /// Metadata record for a whole file; bypasses hashing/dedup in the pipelines below.
    File(SeqFileInfo),
    /// A chunk carrying its payload, still to be hashed and deduplicated.
    Chunk(SeqChunk),
    /// Reference to a chunk that already exists in storage.
    ChunkPointer(SeqChunkPointer),
}

impl WriteItem {
    /// Returns the id of the file this item belongs to, regardless of variant.
    pub fn file_id(&self) -> usize {
        // All three variants carry a `file_id`; an or-pattern keeps them in sync.
        match self {
            Self::File(SeqFileInfo { file_id, .. })
            | Self::Chunk(SeqChunk { file_id, .. })
            | Self::ChunkPointer(SeqChunkPointer { file_id, .. }) => *file_id,
        }
    }

    /// Consumes the item and returns the inner [`SeqChunk`].
    ///
    /// # Panics
    /// Panics if `self` is not the `Chunk` variant. The original used
    /// `unreachable!()`, but this branch IS reachable on caller error, so a
    /// descriptive panic message is more honest and easier to debug.
    pub fn chunk_unwrap(self) -> SeqChunk {
        match self {
            Self::Chunk(chunk) => chunk,
            _ => panic!("chunk_unwrap called on a non-Chunk WriteItem"),
        }
    }
}

/// Runs one end-to-end backup pipeline (read -> split -> hash -> dedup ->
/// write) over the paths in `input` inside a rayon scope, letting rayon
/// work-steal the hashing tasks (the "avg" schedule), then records per-stage
/// timings into `eva`.
///
/// `index` identifies the dataset (stored in the "dataset" column).
/// Configuration is loaded from `./chunk_io.toml` in the working directory.
fn test_dedup_process(
    index:usize,
    input:Vec<&'static str>,
    eva:&mut EvaluationInner,
) {
    use config::File;
    use std::sync::mpsc;
    use std::time::{Instant};
    use rayon::*;
    use std::ops::AddAssign;
    
    let path_vec = input;
    
    // One channel per stage boundary; a receiver's loop ends when every
    // sender for that channel has been dropped.
    let (split_tx, split_rx) = mpsc::channel();
    let (dedup_tx, dedup_rx) = mpsc::channel();
    let (write_tx, write_rx) = mpsc::channel();
    
    let config = config::Config::builder()
        .add_source(File::with_name("./chunk_io.toml"))
        .build()
        .unwrap();
    let source = SourcePool::from_config(config).unwrap();
    
    let mut reader = ReaderConfig::from_source(&source).unwrap().build();
    let mut splitor = SplitConfig::from_source(&source).unwrap().build();
    let hasher = ChunkHasher::from_source(&source);
    let mut dedup = ChunkDeduplicator::from_source(&source);
    let mut writer = ChunkWriter::from_source(&source);

    let start_time = Instant::now();

    // Per-stage counters; each is written by exactly one scoped task below.
    let mut data_size = 0;
    let mut read_time = 0;
    let mut split_time = 0;
    let mut dedup_time = 0;

    // Mutable borrows handed to the scoped tasks; sound because `scope`
    // joins every task before these borrows expire.
    let data_size_p = &mut data_size;
    let read_time_p = &mut read_time;
    let split_time_p = &mut split_time;
    let dedup_time_p = &mut dedup_time;
    scope(|s|{
        let hasher = &hasher;
        // Second sender for the dedup task; the original `write_tx` is moved
        // into the splitter task below.
        let write_tx2 = write_tx.clone();
        // let dedup_tx = &dedup_tx;
        // Stage 1: read every path and push file blocks to the splitter.
        s.spawn(move |_|{
            let produce_file_block = |b| {
                split_tx.send(b).expect("read send error");
            };
            
            for &path in path_vec.iter() {
                reader.read_path(path.to_string(), produce_file_block).expect("read error");
            }
            data_size_p.add_assign(reader.get_read_size() as u64);
            read_time_p.add_assign(reader.get_read_time());
            // `split_tx` is dropped here, signalling EOF to the splitter.
        });
        // Stage 2: split blocks into chunks; hashing is fanned out onto a
        // nested rayon scope so several chunks hash concurrently.
        s.spawn(move |s|{
            let dedup_tx = dedup_tx;
            let dedup_tx_r = &dedup_tx;
            rayon::scope(move |s1|{
                let dedup_chunk = |mut chunk:SeqChunk| {
                    hasher.hash_chunk(&mut chunk.chunk);
                    dedup_tx_r.send(chunk).unwrap();
                };
                let produce_write_item = |w| {
                    match w {
                        // File records skip hashing/dedup and go straight to the writer.
                        WriteItem::File(_) => { write_tx.send(w).unwrap();},
                        WriteItem::Chunk(mut chunk) => { 
                            // Spawned onto the nested scope: rayon work-steals these.
                            s1.spawn(move |_|dedup_chunk(chunk)); 
                        },
                        // The splitter never emits pointers; reaching this is a bug.
                        WriteItem::ChunkPointer(_) => { panic!("你是怎么出现在这个地方的") }, 
                    };
                };
                
                for b in split_rx.iter() {
                    splitor.split_file_block(b, produce_write_item).expect("split error");
                }
                split_time_p.add_assign(splitor.get_split_time());
            });
        });
        // Stage 3: deduplicate hashed chunks and forward write items.
        s.spawn(move|_|{
            let produce_write_item = |w| {
                write_tx2.send(w).unwrap();
            };
            for chunk in dedup_rx.iter() {
                dedup.search_chunk(chunk, produce_write_item).expect("dedup error");
            }
            dedup_time_p.add_assign(dedup.get_dedup_time());
        });
        // Stage 4: persist everything that reaches the write channel.
        s.spawn(move |_|{
            for w in write_rx.iter() {
                writer.write(w).expect("writer_error");
            }
        });
    });
    let exec_time = start_time.elapsed().as_micros();
    eva.set("fingerprint", source.get_config().get_string("fingerprint").unwrap());
    // NOTE(review): `index as u8` silently truncates above 255 — confirm
    // dataset indices stay small.
    eva.set("dataset", index as u8);
    eva.set("schedule", "avg");
    eva.set("data size(B)", data_size);
    eva.set("read time(us)", read_time);
    eva.set("split time(us)", split_time);
    // No per-chunk hash time is measurable in this schedule (hashing is
    // interleaved by rayon); see `test_dedup_process_stage` for that column.
    eva.set("dedup time(us)", dedup_time);
    eva.set("exec time(us)", exec_time);
}

/// Runs the same backup pipeline as `test_dedup_process`, but with one fixed
/// OS thread per stage (the "fixed" schedule): reader, splitter, hasher,
/// deduplicator and writer each own a dedicated thread chained with mpsc
/// channels. Per-stage timings are collected through shared `Arc<Mutex<_>>`
/// counters and recorded into `eva`.
///
/// `index` identifies the dataset (stored in the "dataset" column).
/// Configuration is loaded from `./chunk_io.toml` in the working directory.
fn test_dedup_process_stage(
    index:usize,
    input:Vec<&'static str>,
    eva:&mut EvaluationInner,
) {
    use config::File;
    use std::sync::mpsc;
    use std::thread;
    use std::time::{Duration, Instant};
    use std::sync::Arc;
    use parking_lot::Mutex;

    let path_vec = input;

    // Stage channels. The main thread drops its copies of the senders
    // explicitly below so every receiver eventually sees disconnect.
    let (split_tx, split_rx) = mpsc::channel();
    let (hash_tx, hash_rx) = mpsc::channel();
    let (dedup_tx, dedup_rx) = mpsc::channel();
    let (write_tx, write_rx) = mpsc::channel();

    let config = config::Config::builder()
        .add_source(File::with_name("./chunk_io.toml"))
        .build()
        .unwrap();
    let source = SourcePool::from_config(config).unwrap();

    let mut reader = ReaderConfig::from_source(&source).unwrap().build();
    let mut splitor = SplitConfig::from_source(&source).unwrap().build();
    let hasher = ChunkHasher::from_source(&source);
    let mut dedup = ChunkDeduplicator::from_source(&source);
    let mut writer = ChunkWriter::from_source(&source);

    let start_time = Instant::now();

    // Shared per-stage counters, each written once by its owning thread.
    let data_size = Arc::new(Mutex::new(0));
    let read_time = Arc::new(Mutex::new(0));
    let split_time = Arc::new(Mutex::new(0));
    let hash_time = Arc::new(Mutex::new(0));
    let dedup_time = Arc::new(Mutex::new(0));

    let data_size_p = data_size.clone();
    let read_time_p = read_time.clone();
    let split_time_p = split_time.clone();
    let hash_time_p = hash_time.clone();
    // Fix: dropped the spurious `mut` (this clone is never reassigned).
    let dedup_time_p = dedup_time.clone();


    // Reader thread (sole owner of split_tx).
    let reader_handle = thread::spawn(move || {
        let produce_file_block = |b| split_tx.send(b).expect("Failed to send file block");

        for path in path_vec {
            reader.read_path(path.to_string(), produce_file_block)
                .expect("File reading failed");
        }
        // Fix: lock the Arc directly instead of cloning it first.
        *data_size_p.lock() = reader.get_read_size() as u64;
        *read_time_p.lock() = reader.get_read_time();
        // split_tx is dropped here, signalling EOF to the splitter.
    });

    // Splitter thread (owns split_rx plus temporary clones of the senders).
    let splitor_handle = {
        let hash_tx = hash_tx.clone();
        let write_tx = write_tx.clone();
        thread::spawn(move || {
            for b in split_rx.iter() {
                splitor.split_file_block(b, |w| match w {
                    // File records bypass hashing/dedup and go straight to the writer.
                    WriteItem::File(f) => write_tx.send(WriteItem::File(f)).unwrap(),
                    WriteItem::Chunk(c) => hash_tx.send(c).unwrap(),
                    WriteItem::ChunkPointer(_) => panic!("Unexpected ChunkPointer"),
                }).expect("Block splitting failed");
            }
            *split_time_p.lock() = splitor.get_split_time();
            // The hash_tx and write_tx clones are dropped here.
        })
    };

    // Hasher thread (owns hash_rx plus a temporary clone of dedup_tx).
    let hasher_handle = {
        let dedup_tx = dedup_tx.clone();
        thread::spawn(move || {
            let mut hash_time_cnt = Duration::ZERO;
            // Retained for per-chunk average diagnostics if re-enabled.
            let mut chunk_count = 0u64;

            for mut chunk in hash_rx.iter() {
                let start_time = Instant::now();
                hasher.hash_chunk(&mut chunk.chunk);
                hash_time_cnt += start_time.elapsed();
                chunk_count += 1;
                dedup_tx.send(chunk).expect("Failed to send hashed chunk");
            }

            *hash_time_p.lock() = hash_time_cnt.as_micros();
            // The dedup_tx clone is dropped here.
        })
    };

    // Dedup thread (owns dedup_rx plus a temporary clone of write_tx).
    let dedup_handle = {
        let write_tx = write_tx.clone();
        thread::spawn(move || {
            for chunk in dedup_rx.iter() {
                dedup.search_chunk(chunk, |w| write_tx.send(w).unwrap())
                    .expect("Deduplication failed");
            }
            *dedup_time_p.lock() = dedup.get_dedup_time();
            // The write_tx clone is dropped here.
        })
    };

    // Writer thread (sole owner of write_rx).
    let writer_handle = thread::spawn(move || {
        for w in write_rx.iter() {
            writer.write(w).expect("Data writing failed");
        }
    });

    // The main thread must drop its original senders, otherwise the
    // receivers never disconnect and the joins below deadlock.
    drop(hash_tx);
    drop(dedup_tx);
    drop(write_tx);

    // Join every stage in pipeline order.
    reader_handle.join().unwrap();
    splitor_handle.join().unwrap();
    hasher_handle.join().unwrap();
    dedup_handle.join().unwrap();
    writer_handle.join().unwrap();
    let exec_time = start_time.elapsed().as_micros();
    eva.set("fingerprint", source.get_config().get_string("fingerprint").unwrap());
    // NOTE(review): `index as u8` silently truncates above 255 — confirm
    // dataset indices stay small.
    eva.set("dataset", index as u8);
    eva.set("schedule", "fixed");
    eva.set("data size(B)", *data_size.lock());
    eva.set("read time(us)", *read_time.lock());
    eva.set("split time(us)", *split_time.lock());
    eva.set("hash time(us)", *hash_time.lock());
    eva.set("dedup time(us)", *dedup_time.lock());
    eva.set("exec time(us)", exec_time);
}

/// Resets on-disk index state by running the `./clean.sh` helper script via `sh`.
fn clean_index() {
    let status = std::process::Command::new("sh")
        .arg("./clean.sh")
        .status()
        .expect("Failed to execute script");
    // Report the outcome either way; a failing script is not fatal here.
    match status.success() {
        true => println!("Script executed successfully"),
        false => eprintln!("Script exited with code: {:?}", status.code()),
    }
}

/// Drives the backup pipeline through the generic `crate::pipeline` rayon
/// implementation on a dedicated 6-thread pool (read -> split -> hash ->
/// dedup -> write). Each mutable stage is wrapped with the shared
/// `Evaluation` (`eva_wrap`) so stage metrics land in `eva`; total wall time
/// is then recorded under "exec time(us)". Returns the same `Evaluation`.
///
/// NOTE(review): unlike the other drivers, `index` is never used in the body
/// (the "dataset" column is not set here) — confirm whether intentional.
fn rayon_test_dedup_process(
    index:usize,
    input:Vec<&'static str>,
    eva:Evaluation,
) -> Evaluation
{
    use config::File;
    use crate::pipeline::*;
    let config = config::Config::builder()
        .add_source(File::with_name("./chunk_io.toml"))
        .build()
        .unwrap();
    let source = SourcePool::from_config(config).unwrap();

    let reader = ReaderConfig::from_source(&source).unwrap().build();
    let splitor = SplitConfig::from_source(&source).unwrap().build();
    let hasher = ChunkHasher::from_source(&source);
    let dedup = ChunkDeduplicator::from_source(&source);
    let writer = ChunkWriter::from_source(&source);
    // Fixed-size pool so stage parallelism is bounded and comparable across runs.
    let pool = rayon::ThreadPoolBuilder::new().num_threads(6).build().unwrap();
    let now = std::time::Instant::now();
    pool.scope(|s|{
        rayon_impl::from_scope_and_iter(s, input)
            .pipelined_mut(reader.eva_wrap(eva.clone()))
            .pipelined_mut(splitor.eva_wrap(eva.clone()))
            // The hasher stage is used immutably and is not eva-wrapped,
            // so no hash-time column is produced by this driver.
            .pipelined_immut(hasher)
            .pipelined_mut(dedup.eva_wrap(eva.clone()))
            .pipelined_mut(writer.eva_wrap(eva.clone()))
            .run();
    });
    let exec_time = now.elapsed().as_micros();
    eva.lock().register_and_set(BackupEvaluation::Time, "exec time(us)", exec_time);
    eva
}

/// End-to-end smoke test of the rayon pipeline scheduler over five Linux
/// source trees, appending the collected metrics to `./exec_result.csv`.
#[test]
fn test_rayon() {
    use std::sync::Arc;
    use parking_lot::Mutex;
    use std::fs::OpenOptions;
    use std::io::Write;
    let eva_inner = EvaluationInner::new();
    let path_vec = vec![
        "../linux_dir/src/linux-5.0.1",
        "../linux_dir/src/linux-5.0.2",
        "../linux_dir/src/linux-5.0.3",
        "../linux_dir/src/linux-5.0.4",
        "../linux_dir/src/linux-5.0.5",
    ];

    clean_index();
    let eva = Arc::new(Mutex::new(eva_inner));
    let eva = rayon_test_dedup_process(5, path_vec, eva);
    let eva = eva.lock().clone();

    // Fix: `.append(true)` already implies write access, so the redundant
    // `.write(true)` was removed (clippy: ineffective_open_options).
    let mut file = OpenOptions::new()
        .create(true)
        .append(true)
        .open("./exec_result.csv")
        .unwrap();
    // Only emit the CSV header when the file is brand new.
    let file_size = file.metadata().unwrap().len();
    if file_size == 0 {
        let csv_name = eva.csv_name_line(true);
        // Fix: `write_all` retries on partial writes; the original `write`
        // silently discarded the byte count.
        file.write_all(csv_name.as_bytes()).unwrap();
    }
    file.write_all(eva.csv_value_line(true).as_bytes()).unwrap();
    clean_index();
}

/// Compares the work-stealing ("avg") schedule against the fixed-thread
/// ("fixed") schedule on growing prefixes of the dataset list, appending raw
/// metrics to `./exec_result.csv` and derived ratios to `./improve.csv`.
#[test]
fn test_two_method() {
    use crate::common::*;
    use std::fs::OpenOptions;
    use std::io::Write;
    // Template for the raw metric rows produced by both pipeline variants.
    let mut eva_origin = EvaluationInner::new();
    eva_origin.register(BackupEvaluation::Algorithm, "fingerprint");
    eva_origin.register(BackupEvaluation::Dataset, "dataset");
    eva_origin.register(BackupEvaluation::Algorithm, "schedule");
    eva_origin.register(BackupEvaluation::Time, "data size(B)");
    eva_origin.register(BackupEvaluation::Time, "read time(us)");
    eva_origin.register(BackupEvaluation::Time, "split time(us)");
    eva_origin.register(BackupEvaluation::Time, "hash time(us)");
    eva_origin.register(BackupEvaluation::Time, "dedup time(us)");
    eva_origin.register(BackupEvaluation::Time, "exec time(us)");

    // Template for the derived comparison rows.
    let mut improve_origin = EvaluationInner::new();
    improve_origin.register(BackupEvaluation::Algorithm, "fingerprint");
    improve_origin.register(BackupEvaluation::Dataset, "dataset");
    improve_origin.register(BackupEvaluation::Space, "data size(B)");
    improve_origin.register(BackupEvaluation::Other, "limit");
    improve_origin.register(BackupEvaluation::Other, "improve");


    let path_vec = vec![
        "../linux_dir/src/linux-5.0.1",
        "../linux_dir/src/linux-5.0.2",
        "../linux_dir/src/linux-5.0.3",
        "../linux_dir/src/linux-5.0.4",
        "../linux_dir/src/linux-5.0.5",
    ];

    // Fix: `.append(true)` already implies write access, so the redundant
    // `.write(true)` was removed (clippy: ineffective_open_options).
    let mut file = OpenOptions::new()
        .create(true)
        .append(true)
        .open("./exec_result.csv")
        .unwrap();

    // Emit the header only for a freshly-created (empty) file.
    let file_size = file.metadata().unwrap().len();
    if file_size == 0 {
        let csv_name = eva_origin.csv_name_line(true);
        // Fix: `write_all` retries on partial writes; the original `write`
        // silently discarded the byte count.
        file.write_all(csv_name.as_bytes()).unwrap();
    }

    let mut improve_file = OpenOptions::new()
        .create(true)
        .append(true)
        .open("./improve.csv")
        .unwrap();

    let improve_file_size = improve_file.metadata().unwrap().len();
    if improve_file_size == 0 {
        let csv_name = improve_origin.csv_name_line(true);
        improve_file.write_all(csv_name.as_bytes()).unwrap();
    }

    // Run both schedules on each prefix [0, i) of the dataset list.
    for i in 1..=path_vec.len() {
        let mut eva1 = eva_origin.clone();
        let mut eva2 = eva_origin.clone();
        let mut improve_eva = improve_origin.clone();
        clean_index();
        test_dedup_process(i, path_vec[..i].to_vec(), &mut eva1);
        clean_index();
        test_dedup_process_stage(i, path_vec[..i].to_vec(), &mut eva2);
        let hash_time:u128 = eva2.get("hash time(us)").unwrap().try_into().unwrap();
        let exec_time1:u128 = eva1.get("exec time(us)").unwrap().try_into().unwrap();
        let exec_time2:u128 = eva2.get("exec time(us)").unwrap().try_into().unwrap();
        // Fraction of the fixed-schedule run spent hashing (upper bound on
        // what parallel hashing can recover).
        let limit = hash_time as f64 / exec_time2 as f64;
        // Speedup of the work-stealing run relative to the fixed run.
        let improve = exec_time2 as f64 / exec_time1 as f64;
        improve_eva.set("fingerprint", eva1.get("fingerprint").unwrap());
        improve_eva.set("dataset", eva1.get("dataset").unwrap());
        improve_eva.set("data size(B)", eva1.get("data size(B)").unwrap());
        improve_eva.set("limit", limit);
        improve_eva.set("improve", improve);
        file.write_all(eva1.csv_value_line(true).as_bytes()).unwrap();
        file.write_all(eva2.csv_value_line(true).as_bytes()).unwrap();
        improve_file.write_all(improve_eva.csv_value_line(true).as_bytes()).unwrap();
    }
    clean_index();
}