import os
import re
import json
import socket
import datetime
import threading
import gzip
from collections import deque
from typing import Dict, List, Optional, Tuple

try:
    import paramiko  # optional: used for remote collection over SSH
except Exception:
    paramiko = None


# Ordered (compiled pattern, level) pairs used to classify a log line.
# Order matters: the first matching pattern wins, so the most severe
# levels come first. Matching is case-insensitive and word-bounded.
SEVERITY_PATTERNS = [
    (re.compile(rf"\b({keywords})\b", re.IGNORECASE), level)
    for keywords, level in (
        ("critical|crit|fatal|panic", "CRITICAL"),
        ("error|err|fail|failed|exception|traceback", "ERROR"),
        ("warn|warning|deprecated", "WARN"),
        ("info|information|started|listening|connected", "INFO"),
        ("debug|trace", "DEBUG"),
    )
]


# Default local log files probed by collect_local()/collect_remote();
# paths that do not exist are silently skipped.
DEFAULT_LOG_FILES = [
    "/var/log/syslog",
    "/var/log/messages",
    "/var/log/auth.log",
    "/var/log/secure",
    "/var/log/dmesg",
    "/var/log/kern.log",
    "/var/log/daemon.log",
    "/var/log/audit/audit.log",
    "/var/log/cron",
    "/var/log/maillog",
    "/var/log/mail.log",
    # Common web and service logs
    "/var/log/nginx/access.log",
    "/var/log/nginx/error.log",
    "/var/log/httpd/access_log",
    "/var/log/httpd/error_log",
    "/var/log/apache2/access.log",
    "/var/log/apache2/error.log",
    # Containers and orchestration
    "/var/log/docker.log",
    "/var/log/containerd.log",
]


class LinuxLogCollector:
    def __init__(self, base_dir: Optional[str] = None) -> None:
        # 统一使用相对路径 backend/data/logs
        if base_dir is None:
            backend_dir = os.path.dirname(os.path.dirname(__file__))
            self.base_dir = os.path.join(backend_dir, "data", "logs")
        else:
            self.base_dir = base_dir
        
        # 确保目录存在
        os.makedirs(self.base_dir, exist_ok=True)
        
        # 创建分析结果存储目录
        self.analysis_dir = os.path.join(self.base_dir, "logsanalysis")
        os.makedirs(self.analysis_dir, exist_ok=True)

        # 等级顺序映射，便于过滤
        self._severity_rank: Dict[str, int] = {
            "CRITICAL": 0,
            "ERROR": 1,
            "WARN": 2,
            "INFO": 3,
            "DEBUG": 4,
        }

        # 从配置文件加载日志路径（可选）
        self.config_paths: List[str] = []
        try:
            import yaml  # type: ignore
            conf_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "conf.yaml")
            if os.path.exists(conf_path):
                with open(conf_path, "r", encoding="utf-8", errors="ignore") as f:
                    conf = yaml.safe_load(f) or {}
                lp = conf.get("log_paths") or {}
                if isinstance(lp, dict):
                    for _, v in lp.items():
                        if isinstance(v, str):
                            self.config_paths.append(v)
        except Exception:
            # 如果缺少 yaml 或解析失败，忽略即可
            pass

        # 项目本地日志（如 backend/log.txt）
        try:
            self.project_log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "log.txt")
        except Exception:
            self.project_log_path = None

        # 增量采集状态
        self._state_path = os.path.join(self.base_dir, "offsets.json")
        self._state_lock = threading.Lock()

    def _load_state(self) -> Dict[str, Dict[str, int]]:
        try:
            with open(self._state_path, "r", encoding="utf-8", errors="ignore") as f:
                data = json.load(f)
                if isinstance(data, dict):
                    return data
        except Exception:
            return {}
        return {}

    def _save_state(self, state: Dict[str, Dict[str, int]]) -> None:
        try:
            with open(self._state_path, "w", encoding="utf-8") as f:
                json.dump(state, f, ensure_ascii=False, indent=2)
        except Exception:
            pass

    def _read_local_file_incremental(self, host: str, path: str) -> List[str]:
        try:
            if not os.path.exists(path) or not os.path.isfile(path):
                return []
            key = f"{host}:{path}"
            with self._state_lock:
                state = self._load_state()
                rec = state.get(key, {})
            size = os.path.getsize(path)
            last_off = int(rec.get("offset", 0)) if isinstance(rec, dict) else 0
            if size < last_off:
                last_off = 0
            with open(path, "rb") as f:
                f.seek(last_off)
                content = f.read()
            new_off = size
            with self._state_lock:
                state[key] = {"offset": new_off}
                self._save_state(state)
            if not content:
                return []
            text = content.decode("utf-8", errors="ignore")
            return text.splitlines()
        except Exception:
            return []

    _ts_patterns: List[Tuple[re.Pattern, str]] = [
        (re.compile(r"(\d{4}-\d{2}-\d{2})[T\s](\d{2}:\d{2}:\d{2})"), "%Y-%m-%d %H:%M:%S"),
        (re.compile(r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+\d{1,2}\s+\d{2}:\d{2}:\d{2}"), "%b %d %H:%M:%S"),
    ]

    def _parse_line_time(self, line: str) -> Optional[datetime.datetime]:
        now = datetime.datetime.now()
        for pat, fmt in self._ts_patterns:
            m = pat.search(line)
            if not m:
                continue
            ts = m.group(0)
            try:
                if fmt == "%b %d %H:%M:%S":
                    dt = datetime.datetime.strptime(ts, fmt)
                    return dt.replace(year=now.year)
                return datetime.datetime.strptime(ts, fmt)
            except Exception:
                continue
        return None

    def _is_small_text_file(self, path: str, max_size_mb: int = 50) -> bool:
        try:
            if not os.path.isfile(path):
                return False
            if os.path.getsize(path) > max_size_mb * 1024 * 1024:
                return False
            # 简单尝试打开，若为二进制可能抛异常或读为空
            with open(path, "r", encoding="utf-8", errors="ignore") as f:
                _ = f.read(1024)
            return True
        except Exception:
            return False

    def _discover_log_files(self, limit_per_dir: int = 10) -> List[str]:
        candidates: List[str] = []
        known_dirs = [
            "/var/log",
            "/var/log/nginx",
            "/var/log/httpd",
            "/var/log/apache2",
            "/var/log/audit",
            "/var/log/mysql",
            "/var/log/postgresql",
            "/var/log/containers",
            "/var/log/pods",
        ]

        # 追加配置中的路径所在目录
        for p in self.config_paths:
            try:
                d = os.path.dirname(p)
                if d and d not in known_dirs:
                    known_dirs.append(d)
            except Exception:
                continue

        patterns = [
            ".log", "_log", "messages", "secure", "auth.log", "kern.log", "audit.log",
            "access.log", "error.log", "docker.log", "containerd.log",
        ]

        seen: set = set()
        for d in known_dirs:
            try:
                if not os.path.isdir(d):
                    continue
                matched = 0
                for name in sorted(os.listdir(d)):
                    if matched >= limit_per_dir:
                        break
                    fp = os.path.join(d, name)
                    if fp in seen:
                        continue
                    if os.path.isdir(fp):
                        continue
                    # 接受轮转与压缩后缀，压缩文件仅在本地处理中解压读取
                    if any(pat in name for pat in patterns) and (name.endswith('.gz') or self._is_small_text_file(fp)):
                        candidates.append(fp)
                        seen.add(fp)
                        matched += 1
            except Exception:
                continue

        # 追加配置文件中明确指定的日志路径
        for p in self.config_paths:
            if p not in seen and self._is_small_text_file(p):
                candidates.append(p)
                seen.add(p)

        return candidates

    def _classify_line(self, line: str) -> str:
        for pattern, level in SEVERITY_PATTERNS:
            if pattern.search(line):
                return level
        return "INFO"

    def _save_lines(self, host: str, source_file: str, lines: List[str], *,
                    min_level: str = "WARN",
                    include_keywords: Optional[List[str]] = None,
                    exclude_keywords: Optional[List[str]] = None,
                    save_raw: bool = False,
                    file_name_override: Optional[str] = None) -> Dict[str, str]:
        date_str = datetime.datetime.now().strftime("%Y-%m-%d")
        host_dir = os.path.join(self.base_dir, host, date_str)
        os.makedirs(host_dir, exist_ok=True)

        # 保存原始（可选，默认不保存，避免占用）
        src_name = file_name_override or os.path.basename(source_file) or "journal"
        raw_path = os.path.join(host_dir, f"{src_name}.log")
        if save_raw:
            with open(raw_path, "a", encoding="utf-8", errors="ignore") as f:
                for ln in lines:
                    f.write(ln.rstrip("\n") + "\n")

        # 分类保存
        buckets: Dict[str, List[str]] = {"CRITICAL": [], "ERROR": [], "WARN": [], "INFO": [], "DEBUG": []}
        min_rank = self._severity_rank.get(min_level.upper(), self._severity_rank["WARN"])
        inc = [k for k in (include_keywords or []) if isinstance(k, str) and k]
        exc = [k for k in (exclude_keywords or []) if isinstance(k, str) and k]

        for ln in lines:
            level = self._classify_line(ln)
            if self._severity_rank[level] > min_rank:
                continue
            if inc and not any(k in ln for k in inc):
                continue
            if exc and any(k in ln for k in exc):
                continue
            buckets[level].append(ln)

        out_files: Dict[str, str] = ({"raw": raw_path} if save_raw else {})
        for level, bucket in buckets.items():
            if not bucket:
                continue
            level_path = os.path.join(host_dir, f"{src_name}.{level.lower()}.log")
            with open(level_path, "a", encoding="utf-8", errors="ignore") as f:
                for ln in bucket:
                    f.write(ln.rstrip("\n") + "\n")
            out_files[level] = level_path

        # 索引文件
        index_path = os.path.join(host_dir, "index.json")
        index = {"host": host, "date": date_str, "source": src_name, "files": out_files}
        try:
            if os.path.exists(index_path):
                with open(index_path, "r", encoding="utf-8", errors="ignore") as r:
                    existing = json.load(r)
                if isinstance(existing, list):
                    existing.append(index)
                    data_to_write = existing
                else:
                    data_to_write = [existing, index]
            else:
                data_to_write = [index]
            with open(index_path, "w", encoding="utf-8") as w:
                json.dump(data_to_write, w, ensure_ascii=False, indent=2)
        except Exception:
            pass

        return out_files

    def get_analysis_dir(self) -> str:
        """获取分析结果存储目录"""
        return self.analysis_dir

    def get_storage_info(self) -> Dict[str, str]:
        """获取存储信息"""
        return {
            "logs_dir": self.base_dir,
            "analysis_dir": self.analysis_dir,
            "logs_dir_exists": str(os.path.exists(self.base_dir)),
            "analysis_dir_exists": str(os.path.exists(self.analysis_dir))
        }

    def _sanitize_filename(self, name: str) -> str:
        # Windows 与 Linux 兼容处理：替换路径分隔与非法字符
        return re.sub(r"[^A-Za-z0-9._-]", "_", name).strip("_") or "log"

    def collect_from_path_list(self, list_file: Optional[str] = None, *,
                               limit_per_source: int = 10,
                               min_level: str = "INFO",
                               include_keywords: Optional[List[str]] = None,
                               exclude_keywords: Optional[List[str]] = None,
                               save_raw: bool = False) -> Dict[str, Dict[str, str]]:
        host = socket.gethostname() or "localhost"
        results: Dict[str, Dict[str, str]] = {}
        try:
            list_path = list_file or os.path.join(os.path.dirname(os.path.dirname(__file__)), "log.txt")
            if not os.path.exists(list_path):
                return results
            with open(list_path, "r", encoding="utf-8", errors="ignore") as f:
                paths = [ln.strip() for ln in f.readlines() if ln.strip() and not ln.strip().startswith("#")]
        except Exception:
            return results

        def _tail_plain_text(path: str, n: int, chunk_size: int = 65536) -> List[str]:
            try:
                with open(path, 'rb') as f:
                    f.seek(0, os.SEEK_END)
                    file_size = f.tell()
                    if file_size == 0:
                        return []
                    buffer = bytearray()
                    bytes_to_read = 0
                    pos = file_size
                    newline_count = 0
                    while pos > 0 and newline_count <= n:
                        read_size = chunk_size if pos >= chunk_size else pos
                        pos -= read_size
                        f.seek(pos)
                        chunk = f.read(read_size)
                        if not chunk:
                            break
                        buffer[:0] = chunk
                        newline_count += chunk.count(b'\n')
                    text = buffer.decode('utf-8', errors='ignore')
                    lines = text.splitlines()
                    return lines[-n:] if n > 0 else lines
            except Exception:
                return []

        def _tail_gzip_text(path: str, n: int) -> List[str]:
            try:
                with gzip.open(path, 'rt', encoding='utf-8', errors='ignore') as gzf:
                    dq = deque(maxlen=max(1, n)) if n > 0 else None
                    if dq is None:
                        return gzf.read().splitlines()
                    for line in gzf:
                        dq.append(line.rstrip('\n'))
                    return list(dq)
            except Exception:
                return []

        for p in paths:
            try:
                # 既支持文件也支持压缩文件
                lines: List[str] = []
                if p.endswith('.gz') and os.path.exists(p):
                    lines = _tail_gzip_text(p, limit_per_source)
                elif os.path.exists(p) and os.path.isfile(p):
                    lines = _tail_plain_text(p, limit_per_source)
                if not lines:
                    continue
                safe_name = self._sanitize_filename(p)
                results[p] = self._save_lines(
                    host, p, [ln.rstrip("\n") for ln in lines],
                    min_level=min_level,
                    include_keywords=include_keywords,
                    exclude_keywords=exclude_keywords,
                    save_raw=save_raw,
                    file_name_override=safe_name,
                )
            except Exception:
                continue
        return results

    def _read_local_file(self, path: str) -> List[str]:
        try:
            if os.path.exists(path):
                if path.endswith('.gz'):
                    with gzip.open(path, 'rt', encoding='utf-8', errors='ignore') as f:
                        return f.readlines()
                else:
                    with open(path, "r", encoding="utf-8", errors="ignore") as f:
                        return f.readlines()
        except Exception:
            return []
        return []

    def collect_local(self, extra_files: Optional[List[str]] = None, use_journalctl: bool = True,
                      *,
                      min_level: str = "WARN",
                      since_hours: Optional[int] = 24,
                      include_keywords: Optional[List[str]] = None,
                      exclude_keywords: Optional[List[str]] = None,
                      max_lines_per_source: int = 10000,
                      max_total_lines: int = 50000,
                      incremental: bool = True,
                      save_raw: bool = False,
                      journal_units: Optional[List[str]] = None) -> Dict[str, Dict[str, str]]:
        host = socket.gethostname() or "localhost"
        results: Dict[str, Dict[str, str]] = {}
        collected_count = 0

        # journalctl 优先
        if use_journalctl:
            try:
                import subprocess
                cmd = ["journalctl", "-o", "short-iso"]
                # 时间窗口
                if isinstance(since_hours, int) and since_hours > 0:
                    since_dt = datetime.datetime.now() - datetime.timedelta(hours=since_hours)
                    cmd.extend(["--since", since_dt.strftime("%Y-%m-%d %H:%M:%S")])
                else:
                    # 回退到条数限制
                    cmd.extend(["-n", "5000"])  # 仍保留一个上限

                # 等级过滤映射到 journalctl 优先级
                priority_map = {
                    "CRITICAL": "crit",
                    "ERROR": "err",
                    "WARN": "warning",
                    "INFO": "info",
                    "DEBUG": "debug",
                }
                p = priority_map.get(min_level.upper(), "warning")
                cmd.extend(["-p", f"{p}.."])  # 最低优先级到更高严重性

                proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, timeout=25)
                if proc.returncode == 0 and proc.stdout:
                    lines = proc.stdout.splitlines()
                    # 只保留最后 max_lines_per_source 行
                    if isinstance(max_lines_per_source, int) and max_lines_per_source > 0:
                        lines = lines[-max_lines_per_source:]
                    if isinstance(max_total_lines, int) and max_total_lines > 0 and collected_count + len(lines) > max_total_lines:
                        lines = lines[: max(0, max_total_lines - collected_count)]
                    results["journalctl"] = self._save_lines(
                        host, "journalctl", lines,
                        min_level=min_level,
                        include_keywords=include_keywords,
                        exclude_keywords=exclude_keywords,
                        save_raw=save_raw,
                    )
                    collected_count += len(lines)
            except Exception:
                pass

            # 采集特定 systemd 单元
            if journal_units:
                for unit in [u for u in journal_units if isinstance(u, str) and u]:
                    try:
                        cmd_u = ["journalctl", "-o", "short-iso", "-u", unit]
                        if isinstance(since_hours, int) and since_hours > 0:
                            since_dt = datetime.datetime.now() - datetime.timedelta(hours=since_hours)
                            cmd_u.extend(["--since", since_dt.strftime("%Y-%m-%d %H:%M:%S")])
                        else:
                            cmd_u.extend(["-n", "5000"])
                        p = priority_map.get(min_level.upper(), "warning")
                        cmd_u.extend(["-p", f"{p}.."])
                        proc_u = subprocess.run(cmd_u, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, timeout=25)
                        if proc_u.returncode == 0 and proc_u.stdout:
                            u_lines = proc_u.stdout.splitlines()
                            if isinstance(max_lines_per_source, int) and max_lines_per_source > 0:
                                u_lines = u_lines[-max_lines_per_source:]
                            if isinstance(max_total_lines, int) and max_total_lines > 0 and collected_count + len(u_lines) > max_total_lines:
                                u_lines = u_lines[: max(0, max_total_lines - collected_count)]
                            results[f"journalctl:{unit}"] = self._save_lines(
                                host, f"journalctl.{unit}", u_lines,
                                min_level=min_level,
                                include_keywords=include_keywords,
                                exclude_keywords=exclude_keywords,
                                save_raw=save_raw,
                            )
                            collected_count += len(u_lines)
                    except Exception:
                        continue

        files = list(DEFAULT_LOG_FILES)
        if extra_files:
            files.extend([p for p in extra_files if isinstance(p, str)])
        # 追加项目本地日志
        if getattr(self, 'project_log_path', None) and os.path.exists(self.project_log_path):
            files.append(self.project_log_path)
        # 自动发现更多日志
        try:
            files.extend(self._discover_log_files())
        except Exception:
            pass

        # 去重保持顺序
        seen_local: set = set()
        uniq_files: List[str] = []
        for fp in files:
            if fp not in seen_local:
                uniq_files.append(fp)
                seen_local.add(fp)

        cutoff = None
        if isinstance(since_hours, int) and since_hours > 0:
            cutoff = datetime.datetime.now() - datetime.timedelta(hours=since_hours)

        for fp in uniq_files:
            if isinstance(max_total_lines, int) and max_total_lines > 0 and collected_count >= max_total_lines:
                break
            # 压缩文件不支持增量，统一走常规读取
            if incremental and not fp.endswith('.gz'):
                lines = self._read_local_file_incremental(host, fp)
            else:
                lines = self._read_local_file(fp)
            if lines:
                # 文件场景难以精准时间过滤，采取尾部截断减少体量
                if isinstance(max_lines_per_source, int) and max_lines_per_source > 0:
                    lines = lines[-max_lines_per_source:]
                # 行级时间过滤（可解析才比较）
                if cutoff is not None:
                    filtered = []
                    for ln in lines:
                        dt = self._parse_line_time(ln)
                        if dt is None or dt >= cutoff:
                            filtered.append(ln)
                    lines = filtered
                if isinstance(max_total_lines, int) and max_total_lines > 0 and collected_count + len(lines) > max_total_lines:
                    lines = lines[: max(0, max_total_lines - collected_count)]
                results[fp] = self._save_lines(
                    host, fp, lines,
                    min_level=min_level,
                    include_keywords=include_keywords,
                    exclude_keywords=exclude_keywords,
                    save_raw=save_raw,
                )
                collected_count += len(lines)

        return results

    def collect_remote(self, host: str, username: str, password: Optional[str] = None, port: int = 22, key_filename: Optional[str] = None, files: Optional[List[str]] = None,
                       *,
                       min_level: str = "WARN",
                       since_hours: Optional[int] = 24,
                       include_keywords: Optional[List[str]] = None,
                       exclude_keywords: Optional[List[str]] = None,
                       max_lines_per_source: int = 10000,
                       max_total_lines: int = 50000,
                       incremental: bool = True,
                       save_raw: bool = False) -> Dict[str, Dict[str, str]]:
        if paramiko is None:
            raise RuntimeError("paramiko 未安装，无法进行远程采集")

        results: Dict[str, Dict[str, str]] = {}
        collected_count = 0
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname=host, port=port, username=username, password=password, key_filename=key_filename, timeout=10)
        try:
            # 先尝试journalctl
            try:
                # 远程 journalctl，构造命令
                cmd = "journalctl -o short-iso"
                if isinstance(since_hours, int) and since_hours > 0:
                    since_dt = datetime.datetime.now() - datetime.timedelta(hours=since_hours)
                    cmd += f" --since '{since_dt.strftime('%Y-%m-%d %H:%M:%S')}'"
                else:
                    cmd += " -n 5000"

                priority_map = {
                    "CRITICAL": "crit",
                    "ERROR": "err",
                    "WARN": "warning",
                    "INFO": "info",
                    "DEBUG": "debug",
                }
                p = priority_map.get(min_level.upper(), "warning")
                cmd += f" -p {p}.."

                stdin, stdout, stderr = client.exec_command(cmd, timeout=25)
                output = stdout.read().decode(errors="ignore")
                if output:
                    lines = output.splitlines()
                    if isinstance(max_lines_per_source, int) and max_lines_per_source > 0:
                        lines = lines[-max_lines_per_source:]
                    if isinstance(max_total_lines, int) and max_total_lines > 0 and collected_count + len(lines) > max_total_lines:
                        lines = lines[: max(0, max_total_lines - collected_count)]
                    results["journalctl"] = self._save_lines(
                        host, "journalctl", lines,
                        min_level=min_level,
                        include_keywords=include_keywords,
                        exclude_keywords=exclude_keywords,
                        save_raw=save_raw,
                    )
                    collected_count += len(lines)
            except Exception:
                pass

            target_files = files or []
            if not target_files:
                target_files = list(DEFAULT_LOG_FILES)
            # 远程尝试发现常见目录下的日志文件（仅列目录，不递归）
            try:
                sftp = client.open_sftp()
                remote_dirs = [
                    "/var/log", "/var/log/nginx", "/var/log/httpd", "/var/log/apache2",
                    "/var/log/audit", "/var/log/mysql", "/var/log/postgresql",
                    "/var/log/containers", "/var/log/pods",
                ]
                patterns = [".log", "_log", "messages", "secure", "auth.log", "kern.log", "audit.log", "access.log", "error.log"]
                for d in remote_dirs:
                    try:
                        for name in sftp.listdir(d):
                            if any(name.endswith(suf) for suf in [".1", ".2", ".gz", ".xz", ".zip"]):
                                continue
                            if any(pat in name for pat in patterns):
                                target_files.append(os.path.join(d, name))
                    except Exception:
                        continue
                sftp.close()
            except Exception:
                pass

            # 去重
            seen_remote: set = set()
            target_files = [fp for fp in target_files if not (fp in seen_remote or seen_remote.add(fp))]
            sftp = client.open_sftp()
            try:
                for fp in target_files:
                    try:
                        with sftp.file(fp, "r") as remote_file:
                            content: Optional[str]
                            if incremental:
                                try:
                                    key = f"{host}:{fp}"
                                    with self._state_lock:
                                        state = self._load_state()
                                        rec = state.get(key, {})
                                    size = sftp.stat(fp).st_size
                                    last_off = int(rec.get("offset", 0)) if isinstance(rec, dict) else 0
                                    if size < last_off:
                                        last_off = 0
                                    remote_file.seek(last_off)
                                    data = remote_file.read()
                                    with self._state_lock:
                                        state[key] = {"offset": size}
                                        self._save_state(state)
                                    content = data.decode("utf-8", errors="ignore") if data else ""
                                except Exception:
                                    data = remote_file.read()
                                    content = data.decode("utf-8", errors="ignore") if data else ""
                            else:
                                data = remote_file.read()
                                content = data.decode("utf-8", errors="ignore") if data else ""
                            if content:
                                lines = content.splitlines()
                                if isinstance(max_lines_per_source, int) and max_lines_per_source > 0:
                                    lines = lines[-max_lines_per_source:]
                                if isinstance(since_hours, int) and since_hours > 0:
                                    cutoff = datetime.datetime.now() - datetime.timedelta(hours=since_hours)
                                    filtered = []
                                    for ln in lines:
                                        # 复用本地解析方法
                                        dt = None
                                        for pat, fmt in self._ts_patterns:
                                            m = pat.search(ln)
                                            if m:
                                                ts = m.group(0)
                                                try:
                                                    if fmt == "%b %d %H:%M:%S":
                                                        dtx = datetime.datetime.strptime(ts, fmt)
                                                        dt = dtx.replace(year=datetime.datetime.now().year)
                                                    else:
                                                        dt = datetime.datetime.strptime(ts, fmt)
                                                except Exception:
                                                    dt = None
                                                break
                                        if dt is None or dt >= cutoff:
                                            filtered.append(ln)
                                    lines = filtered
                                if isinstance(max_total_lines, int) and max_total_lines > 0 and collected_count + len(lines) > max_total_lines:
                                    lines = lines[: max(0, max_total_lines - collected_count)]
                                results[fp] = self._save_lines(
                                    host, fp, lines,
                                    min_level=min_level,
                                    include_keywords=include_keywords,
                                    exclude_keywords=exclude_keywords,
                                    save_raw=save_raw,
                                )
                                collected_count += len(lines)
                    except Exception:
                        continue
            finally:
                sftp.close()
        finally:
            client.close()

        return results


class LogCollectScheduler:
    """Runs a LinuxLogCollector on a background daemon thread.

    With ``interval_seconds <= 0`` the worker performs a single collection
    pass and exits; otherwise it repeats until :meth:`stop` is called.
    """

    def __init__(self, collector: LinuxLogCollector, interval_seconds: int = 0, extra_files: Optional[List[str]] = None,
                 *,
                 min_level: str = "WARN",
                 since_hours: Optional[int] = 24,
                 include_keywords: Optional[List[str]] = None,
                 exclude_keywords: Optional[List[str]] = None,
                 max_lines_per_source: int = 10000,
                 save_raw: bool = False) -> None:
        self.collector = collector
        self.interval_seconds = interval_seconds
        self.extra_files = extra_files or []

        # Worker-thread machinery.
        self._thread: Optional[threading.Thread] = None
        self._stop = threading.Event()

        # Parameters forwarded verbatim to collect_local().
        self.min_level = min_level
        self.since_hours = since_hours
        self.include_keywords = include_keywords or []
        self.exclude_keywords = exclude_keywords or []
        self.max_lines_per_source = max_lines_per_source
        self.save_raw = save_raw

    def _loop(self) -> None:
        """Worker body: collect, then wait (interruptibly) and repeat."""
        while True:
            if self._stop.is_set():
                break
            try:
                self.collector.collect_local(
                    extra_files=self.extra_files,
                    min_level=self.min_level,
                    since_hours=self.since_hours,
                    include_keywords=self.include_keywords,
                    exclude_keywords=self.exclude_keywords,
                    max_lines_per_source=self.max_lines_per_source,
                    save_raw=self.save_raw,
                )
            except Exception:
                # Collection is best-effort; a failure never kills the worker.
                pass
            if self.interval_seconds <= 0:
                # One-shot mode: run once and exit.
                break
            self._stop.wait(self.interval_seconds)

    def start(self) -> None:
        """Start the background worker unless one is already running."""
        if self._thread is not None and self._thread.is_alive():
            return
        self._stop.clear()
        worker = threading.Thread(target=self._loop, daemon=True)
        self._thread = worker
        worker.start()

    def stop(self) -> None:
        """Signal the worker to stop and wait briefly for it to exit."""
        self._stop.set()
        if self._thread is not None:
            self._thread.join(timeout=2)

