use std::cmp::{max, min};
use std::collections::HashMap;
use std::ops::Range;
use std::sync::{Arc, Mutex, MutexGuard, Weak};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::mpsc::Sender as StdSender;
use std::time::{Duration, Instant};

use egui::{UserAttentionType, ViewportCommand};
use lazy_static::lazy_static;
use regex::{Regex, RegexBuilder};
use sysinfo::{Pid, ProcessRefreshKind, System};
use tokio::runtime::Runtime;
use tokio::sync::watch::{Receiver, Sender};
use tokio::task::JoinHandle;

use crate::raw::{AllLog, DRAIN_LOG_SIZE, Log, MAXIMUM_LOG_SIZE, RawCtlSender, RawEventFromUI, SessionMetaData, start_collect_log};

/// Shared, mutex-guarded handle to one background filter task's state.
type TaskOwnerPtr = Arc<Mutex<FilterTaskOwner>>;

/// Which field of a log entry a keyword requirement is matched against.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum KeywordType {
    Process,
    Tag,
    Message,
}

/// Outcome of evaluating a single `FilterRequirement` against one log entry.
enum MatchResult {
    // The requirement is satisfied by this log entry.
    Match,
    // The requirement rejects this log entry outright.
    NotMatch,
    // The requirement neither accepts nor rejects (e.g. empty keyword, or a
    // non-excluding keyword that simply did not occur).
    Skip,
}

/// One user-facing filter condition.
#[derive(Debug, Clone)]
pub enum FilterRequirement {
    Keyword(KeywordRequirement),
    /// Allowed log levels, indexed by `log.get_level() as usize` (6 levels).
    LogLevel([bool; 6]),
}

/// A keyword-based filter condition entered by the user.
#[derive(Debug, Clone)]
pub struct KeywordRequirement {
    pub keyword_type: KeywordType,
    // For case-insensitive / whole-word matching this holds the regex pattern
    // text; otherwise the raw keyword used for plain substring search.
    pub text: String,
    pub is_case_insensitive: bool,
    pub is_whole_word: bool,
    pub is_exclude: bool,
    // Compiled only when regex matching is required (case-insensitive or whole-word).
    regex: Option<Regex>,
}

impl KeywordRequirement {
    pub fn new(keyword_type: KeywordType, keyword: String, is_case_insensitive: bool, is_whole_word: bool, is_exclude: bool) -> Self {
        if is_case_insensitive || is_whole_word {
            let regex_keyword= if is_whole_word {
                format!(r"\b{}\b", keyword)
            } else {
                keyword
            };
            let mut requirement = Self {
                keyword_type,
                text: regex_keyword,
                is_case_insensitive,
                is_whole_word,
                is_exclude,
                regex: None,
            };
            requirement.regex = Some(RegexBuilder::new(&requirement.text).case_insensitive(is_case_insensitive).build().unwrap());
            requirement
        } else {
            Self {
                keyword_type,
                text: keyword,
                is_case_insensitive,
                is_whole_word,
                is_exclude,
                regex: None,
            }
        }
    }
}

impl PartialEq for FilterRequirement {
    /// Structural equality; used by `LogFilter::update` to detect whether the
    /// requirement list actually changed before tearing down filter work.
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (FilterRequirement::Keyword(keyword_this), FilterRequirement::Keyword(keyword_that)) => {
                // Bug fix: keyword_type was previously compared against itself
                // (`keyword_this == keyword_this`), so two requirements differing
                // only in keyword type compared as equal.
                keyword_this.keyword_type == keyword_that.keyword_type &&
                    keyword_this.is_exclude == keyword_that.is_exclude &&
                    keyword_this.is_case_insensitive == keyword_that.is_case_insensitive &&
                    keyword_this.is_whole_word == keyword_that.is_whole_word &&
                    keyword_this.text == keyword_that.text
            }
            (FilterRequirement::LogLevel(level_this), FilterRequirement::LogLevel(level_that)) => {
                level_this == level_that
            }
            _ => { false }
        }
    }
}

impl FilterRequirement {
    /// Evaluate this requirement against a single log entry.
    ///
    /// Keyword requirements pick the relevant log field, test it either with
    /// the precompiled regex or with plain substring search, then interpret
    /// the hit according to the exclude flag. Empty keywords never influence
    /// the result (`Skip`).
    fn is_match(&self, log: &Log) -> MatchResult {
        match self {
            FilterRequirement::LogLevel(level_allowed) => {
                if level_allowed[log.get_level() as usize] {
                    MatchResult::Match
                } else {
                    MatchResult::NotMatch
                }
            }
            FilterRequirement::Keyword(keyword) => {
                if keyword.text.is_empty() {
                    return MatchResult::Skip;
                }
                let haystack = match keyword.keyword_type {
                    KeywordType::Process => log.get_process(),
                    KeywordType::Tag => log.get_tag(),
                    KeywordType::Message => log.get_content(),
                };
                let hit = match keyword.regex.as_ref() {
                    Some(regex) => regex.is_match(haystack),
                    None => haystack.contains(keyword.text.as_str()),
                };
                // Exclude inverts the verdict; a plain keyword that misses is
                // merely neutral (Skip), not a rejection.
                match (keyword.is_exclude, hit) {
                    (true, true) => MatchResult::NotMatch,
                    (true, false) => MatchResult::Match,
                    (false, true) => MatchResult::Match,
                    (false, false) => MatchResult::Skip,
                }
            }
        }
    }
}

// Equality is structural over bools, enums and strings only, so full `Eq` is sound.
impl Eq for FilterRequirement {}

/// Timing/size statistics for one batch of parallel filter tasks, used for logging.
#[derive(Copy, Clone)]
struct FilterStatistics {
    // Number of raw log entries covered by this batch.
    log_size: usize,
    // Number of parallel tasks the batch was split into.
    task_size: usize,
    // When the batch was scheduled; used to report total filtering cost.
    start_time: Instant,
}

/// Events delivered from the background runtime to the UI thread.
pub enum MainEvent {
    /// A new log session started; carries its ready-to-use filter handle.
    NewSession(LogFilter),
    /// The session receiver broke; the backend can no longer deliver sessions.
    EnvironmentBroken,
}

/// UI-side handle to a (possibly shared) background filter task.
pub struct LogFilter {
    filter_task: TaskOwnerPtr,
    pub session_id: i32,
    // Filtered output; shared with the background task that appends to it.
    pub all_ui_log: AllLog,
}

impl Clone for LogFilter {
    fn clone(&self) -> Self {
        let filter_task_cloned = self.filter_task.clone();
        filter_task_cloned.lock().unwrap().shared_count += 1;
        LogFilter {
            filter_task: filter_task_cloned,
            session_id: self.session_id,
            all_ui_log: self.all_ui_log.clone(),
        }
    }
}

/// A snapshot handle for reading the logs visible in one UI frame.
pub struct LogOneFrame {
    log: AllLog,
    // Absolute id of the first log entry that should be visible.
    start_id: usize,
}

impl LogOneFrame {
    /// Run `f` on the slice of logs whose id is at or after `start_id`.
    ///
    /// The log mutex is held for the duration of `f`, so keep the callback short.
    pub fn acquire_current_log<F>(&self, mut f: F)
    where F: FnMut(&[Arc<Log>])
    {
        let locked_log = self.log.lock().unwrap();
        let length = locked_log.len();
        // Translate the absolute log id into an index into the buffer. Clamp to
        // `length` so a start_id past the newest buffered entry (e.g. right
        // after a clear while stale entries are still buffered) yields an empty
        // slice instead of panicking in the range below.
        let start_index = locked_log
            .first()
            .map_or(0, |log| self.start_id.saturating_sub(log.id))
            .min(length);
        f(&locked_log[start_index..length])
    }
}

impl LogFilter {
    /// Snapshot which log list (raw or filtered) the UI should render this frame.
    pub fn get_log_one_frame(&self) -> LogOneFrame {
        let task_locked = self.filter_task.lock().unwrap();
        match task_locked.requirement_list.is_empty() {
            // No filter active: render the raw log, starting at the scan window start.
            true => LogOneFrame {
                log: task_locked.all_log.clone(),
                start_id: task_locked.scan_id_area.start,
            },
            // Filter active: render the filtered output from its beginning.
            false => LogOneFrame {
                log: task_locked.all_ui_log.clone(),
                start_id: 0,
            },
        }
    }

    /// Replace the filter conditions and trigger re-filtering. Called from the UI thread.
    pub fn update(&mut self, requirement_list: Vec<Vec<FilterRequirement>>) {
        let mut filter_locked = self.filter_task.lock().unwrap();
        if filter_locked.requirement_list == requirement_list {
            // Nothing changed; avoid throwing away in-flight filter work.
            return;
        }
        if filter_locked.main_handle.is_none() {
            // main_handle is initialized inside start_receiving(), and start_receiving()
            // always runs before control returns to the UI layer, so this cannot happen.
            panic!("impossible situation")
        }
        if filter_locked.shared_count == 1 {
            // This filter task is exclusively ours: update it in place.
            filter_locked.update_requirement_list_from_user(requirement_list);
        } else if filter_locked.shared_count > 1 {
            // First divergence from a shared task: fork a new filter task for ourselves.
            filter_locked.shared_count -= 1;
            let (new_filter, new_ui_log) = filter_locked.copy_from_self(Some(requirement_list));
            self.all_ui_log = new_ui_log;
            drop(filter_locked);
            self.filter_task = new_filter;
        } else {
            panic!("impossible situation")
        }
    }

    /// Stop filtering: abort all background tasks owned by this filter.
    /// Called by the UI thread when the filter's window is closed.
    ///
    /// We deliberately do not rely on a `Drop` impl / RAII here: background
    /// worker tasks may keep holding the filter, so its refcount might never
    /// reach zero on its own — the UI layer has to stop it explicitly.
    pub fn stop_filtering(&mut self) {
        let mut task_locked = self.filter_task.lock().unwrap();
        if task_locked.shared_count == 1 {
            // Sole owner: cancel every in-flight worker plus the dispatcher task.
            for handle in task_locked.task_inflight.values().flatten() {
                handle.abort();
            }
            if let Some(main) = task_locked.main_handle.take() {
                main.abort();
            }
        } else {
            log::info!("stop_filtering abort due to multiple shared_count {}", task_locked.shared_count);
        }
        task_locked.shared_count -= 1;
    }

    /// The user cleared the current log.
    ///
    /// After clearing this filter's own log, walk every other filter in the
    /// same session and ask the raw layer to drain raw log entries that now
    /// fall outside every filter's scan range.
    pub fn clear_from_ui(&mut self) {
        let from_session;
        {
            let mut filter_locked = self.filter_task.lock().unwrap();
            from_session = filter_locked.session_id;
            if filter_locked.shared_count == 1 {
                // Exclusive owner: clearing means advancing the scan window start to its end.
                filter_locked.scan_id_area.start = filter_locked.scan_id_area.end;
                log::info!("scan_id_area.start cleared: {}", filter_locked.scan_id_area.start);
                filter_locked.all_ui_log.lock().unwrap().clear();
            } else if filter_locked.shared_count > 1 {
                // Shared task: fork our own copy so other filters keep their view.
                filter_locked.shared_count -= 1;
                let (new_filter_task, new_ui_log) = filter_locked.copy_from_self(None);
                self.all_ui_log = new_ui_log;
                drop(filter_locked);
                self.filter_task = new_filter_task;
            } else {
                panic!("impossible situation");
            }
        }

        // Find the filter with the smallest scan start in this session, pruning
        // dead filters along the way; raw entries below that id are unreachable
        // by every filter and can be drained.
        let mut filter_task_with_minimum_scan_id: Option<TaskOwnerPtr> = None;
        {
            let mut map_locked = (*ALL_FILTER_TASK_MAP).lock().unwrap();
            if let Some(filter_list) = map_locked.get_mut(&from_session) {
                filter_list.retain(|filter_task_weak| {
                    if let Some(filter_task) = filter_task_weak.upgrade() {
                        if let Some(minimum) = filter_task_with_minimum_scan_id.as_mut() {
                            let filter_locked_scan_id = filter_task.lock().unwrap().scan_id_area.start;
                            let minimum_locked_scan_id = minimum.lock().unwrap().scan_id_area.start;
                            if filter_locked_scan_id < minimum_locked_scan_id {
                                *minimum = filter_task;
                            }
                        } else {
                            filter_task_with_minimum_scan_id = Some(filter_task);
                        }
                        true
                    } else {
                        // Filter was dropped by the UI; remove its weak entry.
                        false
                    }
                });
            }
        }
        if let Some(filter_task) = filter_task_with_minimum_scan_id {
            let filter_locked = filter_task.lock().unwrap();
            filter_locked.raw_ctl_tx.send(RawEventFromUI::DrainLog {
                session_id: filter_locked.session_id,
                least_id: filter_locked.scan_id_area.start,
            }).expect("send RawEventFromUI::DrainLog failed");
        }
    }

    /// Ask the raw-log collector to begin a brand new session.
    pub fn start_new_session(&self) {
        let task_locked = self.filter_task.lock().unwrap();
        let send_result = task_locked.raw_ctl_tx.send(RawEventFromUI::StartNewSession);
        if let Err(err) = send_result {
            log::error!("start_new_session failed {err}");
        }
    }
}

/// State owned by one background filter task, shared behind a `TaskOwnerPtr`.
pub struct FilterTaskOwner {
    // Weak self-reference handed to spawned tasks so they can call back in.
    shared_this: Weak<Mutex<Self>>,
    // Number of UI-side `LogFilter` handles currently sharing this task.
    shared_count: i32,

    raw_ctl_tx: RawCtlSender,
    // Watch channel signalled when new raw log data arrives.
    raw_rx: Receiver<()>,
    raw_tx: Sender<()>,
    all_log: AllLog,
    // Filtered output consumed by the UI.
    all_ui_log: AllLog,
    // Highest task sequence number already merged into `all_ui_log`.
    last_merged_seq: usize,
    // Out-of-order finished task results waiting for their predecessors.
    filter_log_pending_to_merge: HashMap<usize, Vec<Arc<Log>>>,
    // Dispatcher task started by `start_receiving()`.
    main_handle: Option<JoinHandle<()>>,

    // Next sequence number to assign to a filter task.
    filter_seq: usize,

    ui_ctx: egui::Context,
    requirement_list: Vec<Vec<FilterRequirement>>,

    // In-flight filter tasks by sequence number; `None` marks a synchronous task.
    task_inflight: HashMap<usize, Option<JoinHandle<()>>>,

    scan_id_area: Range<usize>,// Range of raw log ids still to be processed.
    session_id: i32,
}

lazy_static!(
    static ref FILTER_WORKER_COUNT: usize = {
        // Reserve one core for the raw collector and one for the UI thread,
        // then clamp to [2, 8]. saturating_sub avoids a usize underflow
        // (debug-build panic) on machines with fewer than 3 cores.
        max(2, min(8, num_cpus::get().saturating_sub(2)))
    };

    static ref TOKIO_RUNTIME: Runtime = {
        tokio::runtime::Builder::new_multi_thread()
            .worker_threads(*FILTER_WORKER_COUNT)
            .enable_all()
            .build()
            .unwrap()
    };

    // All live filter tasks, grouped by session id (weak refs; pruned lazily).
    static ref ALL_FILTER_TASK_MAP: Mutex<HashMap<i32, Vec<Weak<Mutex<FilterTaskOwner>>>>> = {
        Mutex::new(HashMap::new())
    };

    // Latest buffer statistics per session: (usage ratio, eta).
    static ref ALL_SESSION_METADATA: Mutex<HashMap<i32, (f32/*usage*/, u32/*eta*/)>> = {
        Mutex::new(HashMap::new())
    };
);

/// Process memory usage in megabytes, published by the sampler task in `start`.
static MEMORY_USAGE: AtomicU32 = AtomicU32::new(0);

/// Process CPU usage percentage multiplied by 10, published by the sampler task in `start`.
static CPU_USAGE_10: AtomicU32 = AtomicU32::new(0);

/// Current process memory usage in MB, for display in the UI.
pub fn get_mem_usage() -> u32 {
    MEMORY_USAGE.load(Ordering::Acquire)
}

/// Current process CPU usage ×10, for display in the UI.
pub fn get_cpu_usage() -> u32 {
    CPU_USAGE_10.load(Ordering::Acquire)
}

/// Latest buffer statistics reported for `session_id`, if any: (usage ratio, eta).
pub fn get_session_metadata(session_id: i32) -> Option<(f32/*usage*/, u32/*eta*/)> {
    // (f32, u32) is Copy, so `.copied()` replaces the manual clone closure.
    (*ALL_SESSION_METADATA).lock().unwrap().get(&session_id).copied()
}

/// Boot the background runtime: a resource-usage sampler task plus the
/// session-collector loop that turns incoming sessions into [`MainEvent`]s
/// sent over `ui_tx`.
pub fn start(ui_ctx: egui::Context, ui_tx: StdSender<MainEvent>) {
    let ui_ctx_clone = ui_ctx.clone();
    // Task 1: sample this process's memory/CPU usage once per second and
    // publish changes through the global atomics.
    (*TOKIO_RUNTIME).spawn(async move {
        let pid = Pid::from_u32(std::process::id());
        let mut system = System::new_all();
        let mut last_memory_in_mb = 0;
        let mut last_cpu_in_10 = 0;
        loop {
            tokio::time::sleep(Duration::from_secs(1)).await;
            system.refresh_process_specifics(pid, ProcessRefreshKind::new().with_memory().with_cpu());

            if let Some(process) = system.process(pid) {
                let usage_in_bytes = process.memory();
                let usage_in_cpu = process.cpu_usage();
                let mut updated = false;
                let memory_in_mb = (usage_in_bytes / (1024 * 1024)) as u32;
                if memory_in_mb != last_memory_in_mb {
                    last_memory_in_mb = memory_in_mb;
                    updated = true;
                    MEMORY_USAGE.store(last_memory_in_mb, Ordering::Release);
                }
                let cpu_in_10 = (usage_in_cpu * 10.0) as u32;
                if cpu_in_10 != last_cpu_in_10 {
                    last_cpu_in_10 = cpu_in_10;
                    updated = true;
                    CPU_USAGE_10.store(last_cpu_in_10, Ordering::Relaxed);
                }
                // Only wake the UI when a displayed value actually changed.
                if updated {
                    ui_ctx_clone.request_repaint();
                }
            }
        }
    });
    // Task 2: receive session events from the raw-log collector and forward
    // them to the UI thread.
    (*TOKIO_RUNTIME).spawn(async move {
        let (ctl_tx, mut session_rx) = start_collect_log();
        while let Some(session) = session_rx.recv().await {
            match session.meta_data {
                SessionMetaData::BufferUsageStatistics(usage, eta) => {
                    (*ALL_SESSION_METADATA).lock().unwrap().insert(session.session_id, (usage, eta));
                    ui_ctx.request_repaint();
                }
                SessionMetaData::NewSession(all_log, raw_tx, raw_rx) => {
                    // Build a fresh, initially unfiltered filter task for the session.
                    let all_ui_log = Arc::new(Mutex::new(Vec::with_capacity(1000)));
                    let filter_task = Arc::new(Mutex::new(FilterTaskOwner {
                        shared_this: Weak::new(),
                        shared_count: 1,
                        raw_ctl_tx: ctl_tx.clone(),
                        raw_rx,
                        raw_tx,
                        all_log,
                        all_ui_log: all_ui_log.clone(),
                        last_merged_seq: 0,
                        filter_log_pending_to_merge: HashMap::new(),
                        main_handle: None,
                        filter_seq: 1,
                        ui_ctx: ui_ctx.clone(),
                        requirement_list: Vec::new(),
                        task_inflight: HashMap::new(),
                        scan_id_area: 0..0,
                        session_id: session.session_id,
                    }));
                    let mut filter_locked = filter_task.lock().unwrap();
                    // Backfill the weak self-reference, then start the dispatcher.
                    filter_locked.shared_this = Arc::downgrade(&filter_task);
                    filter_locked.start_receiving();

                    {
                        let mut map_locked = (*ALL_FILTER_TASK_MAP).lock().unwrap();
                        map_locked.insert(filter_locked.session_id, vec![filter_locked.shared_this.clone()]);
                    }

                    if let Err(err) = ui_tx.send(MainEvent::NewSession(LogFilter {
                        filter_task: filter_task.clone(),
                        session_id: session.session_id,
                        all_ui_log,
                    })) {
                        log::warn!("notify new session {} to ui failed?! err {err}", session.session_id);
                    } else {
                        log::info!("notify new session {} to ui", session.session_id);
                        ui_ctx.send_viewport_cmd(ViewportCommand::RequestUserAttention(UserAttentionType::Informational));
                        ui_ctx.request_repaint();
                    }
                }
            }
        }
        // The session channel closed: tell the UI the environment is broken.
        if let Ok(()) = ui_tx.send(MainEvent::EnvironmentBroken) {
            log::error!("session receiver broken?!");
            ui_ctx.request_repaint();
        }
    });
}

impl FilterTaskOwner {

    /// Apply a new requirement list in place and restart filtering from scratch:
    /// drop all in-flight/pending results, rewind the scan window, reset the
    /// sequence counters, and poke the dispatcher via the watch channel.
    fn update_requirement_list_from_user(&mut self, filter_requirement: Vec<Vec<FilterRequirement>>) {
        self.requirement_list = filter_requirement;
        self.task_inflight.clear();
        self.filter_log_pending_to_merge.clear();
        // Rewind: everything from `start` on must be re-scanned with the new filter.
        self.scan_id_area.end = self.scan_id_area.start;
        self.filter_seq = 1;
        self.last_merged_seq = 0;
        self.raw_tx.send(()).unwrap();
    }

    /// Fork a new filter task from this one.
    ///
    /// `new_requirement == None` means "clear" (keep the current filter but drop
    /// already-scanned output); `Some(..)` means the filter conditions changed
    /// and everything must be re-filtered from the scan window start.
    fn copy_from_self(&self, new_requirement: Option<Vec<Vec<FilterRequirement>>>) -> (TaskOwnerPtr, AllLog) {
        let all_ui_log = Arc::new(Mutex::new(Vec::with_capacity(1000)));
        let filter_changes = new_requirement.is_some();
        let mut new_filter_task = FilterTaskOwner {
            shared_this: Weak::new(),
            shared_count: 1,
            raw_ctl_tx: self.raw_ctl_tx.clone(),
            raw_rx: self.raw_rx.clone(),
            raw_tx: self.raw_tx.clone(),
            requirement_list: new_requirement.unwrap_or_else(|| self.requirement_list.clone()),
            task_inflight: HashMap::new(),
            filter_log_pending_to_merge: HashMap::new(),
            main_handle: None,
            filter_seq: self.filter_seq,
            all_log: self.all_log.clone(),
            all_ui_log: all_ui_log.clone(),
            last_merged_seq: self.last_merged_seq,
            ui_ctx: self.ui_ctx.clone(),
            scan_id_area: self.scan_id_area.clone(),
            session_id: self.session_id,
        };
        if filter_changes {
            // New conditions: rewind the scan window and restart sequence numbering.
            new_filter_task.scan_id_area.end = new_filter_task.scan_id_area.start;
            new_filter_task.last_merged_seq = 0;
            new_filter_task.filter_seq = 0;
        } else {
            // Clear: skip everything already scanned.
            new_filter_task.scan_id_area.start = new_filter_task.scan_id_area.end;
        }
        let filter_wrapped = Arc::new(Mutex::new(new_filter_task));
        {
            let mut filter_locked = filter_wrapped.lock().unwrap();
            filter_locked.shared_this = Arc::downgrade(&filter_wrapped);
            filter_locked.start_receiving();
            if filter_changes {
                // Kick the dispatcher immediately so re-filtering starts now.
                filter_locked.raw_tx.send(()).unwrap();
            }
            {
                // Register the fork so session-wide scans (clear_from_ui) can see it.
                let mut map_locked = (*ALL_FILTER_TASK_MAP).lock().unwrap();
                let all_filter_in_session = map_locked.get_mut(&filter_locked.session_id);
                if let Some(all_filter) = all_filter_in_session {
                    all_filter.push(filter_locked.shared_this.clone());
                } else {
                    map_locked.insert(filter_locked.session_id, vec![filter_locked.shared_this.clone()]);
                }
            }
        }
        (filter_wrapped, all_ui_log)
    }

    /// Start listening for raw-log signals and dispatch filtering work on each one.
    /// Idempotent: does nothing if the dispatcher task is already running.
    fn start_receiving(&mut self) {
        if self.main_handle.is_some() {
            return;
        }
        let filter_weak = self.shared_this.clone();
        let session_id = self.session_id;
        let mut raw_rx = self.raw_rx.clone();
        self.main_handle = Some((*TOKIO_RUNTIME).spawn(async move {
            loop {
                if raw_rx.changed().await.is_ok() {
                    if let Some(filter) = filter_weak.upgrade() {
                        filter.lock().unwrap().schedule_filtering_task();
                        continue;
                    } else {
                        // Owner was dropped by the UI: stop this dispatcher task.
                        log::info!("filter from session:{session_id} is dropped by ui");
                    }
                } else {
                    log::warn!("raw rx meet error?!");
                }
                break;
            }
        }));
    }

    /// Slice the newly-arrived raw log into work items and run the filter over them.
    ///
    /// With no active filter, the new logs are forwarded as one synchronous
    /// "task"; otherwise the range is split into chunks that are filtered
    /// concurrently on the tokio runtime and merged back in sequence order by
    /// `report_filter_task_done`.
    fn schedule_filtering_task(&mut self) {
        let mut log_to_be_filter_list: Vec<(usize, Vec<Arc<Log>>)> = Vec::new();
        let log_size: usize;
        {
            let all_log = self.all_log.lock().unwrap();
            if all_log.is_empty() {
                return;
            }
            let log_id_range = all_log.first().unwrap().id..all_log.last().unwrap().id + 1;
            let old_scan_id = self.scan_id_area.end;
            // Only ids we have not scanned yet and that are still buffered.
            let filter_range = max(self.scan_id_area.end, log_id_range.start)..log_id_range.end;
            if filter_range.is_empty() {
                return;
            }
            log_size = filter_range.len();
            let start_id_offset = log_id_range.start;
            self.scan_id_area.end = log_id_range.end;

            // Convert absolute log ids into indices into `all_log`.
            let filter_id_range = filter_range.start - start_id_offset..filter_range.end - start_id_offset;
            if self.requirement_list.is_empty() {
                let seq = self.filter_seq;
                log_to_be_filter_list.push((seq, Vec::from(&all_log.as_slice()[filter_id_range])));
                self.filter_seq += 1;
            } else {
                // Chunk size between 100 and 2000, scaled to the worker count.
                let increment = max(100, min(2000, filter_range.len() / *FILTER_WORKER_COUNT));
                let mut filter_slice_range = filter_id_range.start..min(filter_id_range.end, filter_id_range.start + increment);
                while filter_slice_range.start < filter_id_range.end {
                    let seq = self.filter_seq;
                    self.filter_seq += 1;
                    log_to_be_filter_list.push((seq, all_log.iter()
                        .skip(filter_slice_range.start)
                        .take(filter_slice_range.len())
                        .map(|log| log.clone())
                        .collect()));

                    filter_slice_range.start += increment;
                    filter_slice_range.end = min(filter_id_range.end, filter_slice_range.end + increment);
                }
            }

            if log_to_be_filter_list.is_empty() {
                log::warn!("no log need to be filter?! old_scan_id {old_scan_id} log_id_range {log_id_range:?} filter_range {filter_range:?} start_id_offset {start_id_offset}");
                return;
            }
        }

        /// The actual filter: input is raw logs and the filter conditions,
        /// output is the logs that satisfy every requirement group.
        fn filter_task(input: Vec<Arc<Log>>, requirement_list: &Vec<Vec<FilterRequirement>>) -> Vec<Arc<Log>> {
            if requirement_list.is_empty() {
                return input;
            }
            input.iter().filter_map(|log| {
                for inner_list in requirement_list {
                    let mut is_matched: bool = false;
                    for requirement in inner_list {
                        match requirement.is_match(log) {
                            MatchResult::Match => { is_matched = true; }
                            MatchResult::NotMatch => { return None; }
                            MatchResult::Skip => {}
                        }
                    }
                    if !is_matched {
                        return None;
                    }
                }
                Some(log.clone())
            }).collect()
        }

        if log_to_be_filter_list.len() == 1 {
            // Single task: run it synchronously on this thread.
            let (seq, task) = log_to_be_filter_list.remove(0);
            let output = filter_task(task, &self.requirement_list);
            self.task_inflight.insert(seq, None);
            self.report_filter_task_done(seq, output, None);
        } else {
            // Multiple tasks: run them concurrently on the runtime.
            let requirement_list = Arc::new(self.requirement_list.clone());
            let shared_this_arc = self.shared_this.upgrade().unwrap();
            let start_info = Some(FilterStatistics {
                log_size,
                task_size: log_to_be_filter_list.len(),
                start_time: Instant::now(),
            });
            for (seq, task) in log_to_be_filter_list {
                let owner_in_task = shared_this_arc.clone();
                let requirement_list = requirement_list.clone();
                self.task_inflight.insert(seq, Some(tokio::spawn(async move {
                    let output = filter_task(task, &requirement_list);
                    owner_in_task.lock().unwrap().report_filter_task_done(seq, output, start_info);
                })));
            }
        }
    }

    /// Each finished filter task reports here; results are merged and forwarded
    /// to the UI layer in their original sequence order.
    fn report_filter_task_done(&mut self, seq: usize, mut filter_log: Vec<Arc<Log>>, start_info: Option<FilterStatistics>) {
        let expectation = self.task_inflight.remove(&seq);
        if expectation.is_none() {
            // Task was superseded (e.g. the filter changed meanwhile); drop it.
            log::warn!("filter task size:{} complete but seq {} not found?", filter_log.len(), seq);
            return;
        }
        if seq == self.last_merged_seq + 1 {
            // This is the next fragment in order.
            if self.task_inflight.is_empty() && start_info.is_some() {
                let FilterStatistics{log_size, task_size, start_time} = start_info.unwrap();
                let duration = Instant::now() - start_time;
                if duration > Duration::from_millis(5) {
                    log::info!("filter log size:{log_size} task size:{task_size} total cost {duration:?}");
                }
            }
            self.last_merged_seq = seq;

            // Drain any buffered successors that are now contiguous.
            let mut curr_merge_seq = seq + 1;
            while let Some(log_fragment) = self.filter_log_pending_to_merge.remove(&curr_merge_seq).as_mut() {
                filter_log.append(log_fragment);
                self.last_merged_seq = curr_merge_seq;
                curr_merge_seq += 1;
            }

            if filter_log.len() > 0 || seq == 1 {
                {
                    let mut all_ui_log = self.all_ui_log.lock().unwrap();
                    if seq == 1 {
                        // The very first task of a filter run always resets the output list.
                        all_ui_log.clear();
                    }
                    all_ui_log.append(&mut filter_log);
                    let curr_len = all_ui_log.len();
                    if curr_len > MAXIMUM_LOG_SIZE {
                        all_ui_log.drain(0..curr_len - DRAIN_LOG_SIZE);
                        log::info!("filtered log has increased to {} but drain to {}", curr_len, all_ui_log.len());
                    }
                }
                self.ui_ctx.request_repaint();
            }
        } else {
            // Out-of-order completion: park the fragment until predecessors arrive.
            if self.task_inflight.is_empty() && start_info.is_some() {
                let FilterStatistics{log_size, task_size, start_time} = start_info.unwrap();
                let duration = Instant::now() - start_time;
                log::error!("impossible last filter task completes with log size:{log_size} task size:{task_size} total cost {duration:?}?!");
            }
            self.filter_log_pending_to_merge.insert(seq, filter_log);
        }
    }
}