import argparse
import os
import re
import subprocess
import glob
import multiprocessing
from datetime import datetime
from multiprocessing import Pool
import sys
import psutil
import gc
from tqdm import tqdm
from prettytable import PrettyTable
from natsort import natsorted
import csv
import tempfile
import pandas as pd
from typing import List, Dict, Optional, Union, Callable, Iterator, Generator

# Module-level switch controlling verbose diagnostic output.
verbose = False

def log_verbose(message):
    """Print a diagnostic message to stdout when verbose mode is enabled."""
    if not verbose:
        return
    print(f"[VERBOSE] {message}")

def get_memory_usage():
    """Return the resident set size (RSS) of the current process in MB."""
    proc = psutil.Process(os.getpid())
    return proc.memory_info().rss / 1024 / 1024

def log_memory_usage(message=""):
    """Log the current memory usage in MB when verbose mode is on."""
    if not verbose:
        return
    print(f"[MEMORY] {message} - 使用内存: {get_memory_usage():.2f} MB")

def get_timestamp():
    """Return the current local time formatted as %Y.%m.%d_%H.%M.%S."""
    now = datetime.now()
    return now.strftime("%Y.%m.%d_%H.%M.%S")

def find_files(input_paths, regex_pattern, file_filter='file', recursive=True):
    """Find files and/or directories under *input_paths* matching *regex_pattern*.

    Args:
        input_paths: list of file or directory paths to search.
        regex_pattern: regex searched (re.search) against the absolute path.
        file_filter: 'file', 'dir' or 'all' — which kind of entries to collect.
        recursive: when True, walk directories recursively.

    Returns:
        Naturally-sorted, de-duplicated list of absolute paths.

    Bug fix: several verbose log messages printed the literal text
    "(unknown)" instead of the path that was just added; they now
    interpolate the actual path.
    """
    files = []
    pattern = re.compile(regex_pattern)

    log_verbose(f"开始查找文件，正则表达式: {regex_pattern}, 文件类型: {file_filter}, 递归模式: {recursive}")

    for input_path in input_paths:
        log_verbose(f"处理输入路径: {input_path}")

        if os.path.isfile(input_path):
            # A single file was given directly
            log_verbose(f"检测到单个文件: {input_path}")
            if file_filter == 'dir':
                log_verbose(f"跳过文件（仅处理目录模式）: {input_path}")
                continue

            filename = os.path.abspath(input_path)
            if pattern.search(filename):
                files.append(filename)
                log_verbose(f"添加文件: {filename}")
        elif os.path.isdir(input_path):
            log_verbose(f"检测到目录: {input_path}")

            if recursive:
                # Walk the directory tree
                log_verbose(f"开始递归遍历目录: {input_path}")
                for root, dirs, filenames in os.walk(input_path):
                    log_verbose(f"遍历目录: {root}")

                    # Collect directories.
                    # NOTE(review): in recursive mode directories are added
                    # without testing the regex, unlike the non-recursive
                    # 'all' branch below — preserved as-is; confirm intent.
                    if file_filter in ['dir', 'all']:
                        for dirname in dirs:
                            full_dir_path = os.path.abspath(os.path.join(root, dirname))
                            files.append(full_dir_path)

                    # Collect files matching the pattern
                    if file_filter in ['file', 'all']:
                        for filename in filenames:
                            full_file_path = os.path.abspath(os.path.join(root, filename))
                            if pattern.search(full_file_path):
                                files.append(full_file_path)
                                log_verbose(f"添加文件: {full_file_path}")
            else:
                # Non-recursive: only look at the directory's direct entries
                for f in os.listdir(input_path):
                    filename = os.path.abspath(os.path.join(input_path, f))
                    if file_filter == 'dir' and os.path.isdir(filename):
                        files.append(filename)
                        log_verbose(f"添加目录: {filename}")
                    elif file_filter == 'file' and os.path.isfile(filename) and pattern.search(filename):
                        files.append(filename)
                        log_verbose(f"添加文件: {filename}")
                    elif file_filter == 'all' and pattern.search(filename):
                        files.append(filename)
                        log_verbose(f"添加文件/目录: {filename}")

    # De-duplicate, then natural-sort for stable, human-friendly ordering
    files = list(set(files))
    files = natsorted(files)
    log_verbose(f"总共找到 {len(files)} 个匹配的文件")
    return files

def _process_file_stream_wrapper(args):
    """Multiprocessing worker: stream-filter one CSV file into a temp CSV.

    Must live at module level so multiprocessing can pickle it.

    Args:
        args: tuple of (file_path, conditions, columns, limit, temp_dir,
              idx, include_path) — idx is this file's index, used only to
              name the temp file.

    Returns:
        Path of the temp CSV holding the matched rows, or None on failure.

    Bug fix: the periodic flush tested `idx % 10000` — but idx is the
    constant per-file index, so flushing happened on every row (idx a
    multiple of 10000) or never. It now uses a proper row counter.
    """
    file_path, conditions, columns, limit, temp_dir, idx, include_path = args
    scanner = CSVScanner(verbose=False)  # workers stay quiet

    temp_file = os.path.join(temp_dir, f"temp_{idx}.csv")

    try:
        with open(temp_file, 'w', encoding='utf-8', newline='') as f:
            writer = None
            rows_written = 0

            for row in scanner.process_single_file_stream(file_path, conditions, columns, limit, include_path):
                if writer is None:
                    # First matched row defines the header
                    writer = csv.DictWriter(f, fieldnames=list(row.keys()))
                    writer.writeheader()

                writer.writerow(row)
                rows_written += 1

                # Periodic flush + GC keeps worker memory bounded
                if rows_written % 10000 == 0:
                    f.flush()
                    gc.collect()

        return temp_file
    except Exception as e:
        log_verbose(f"处理文件 {file_path} 时出错: {e}")
        return None

class CSVScanner:
    """CSV finding / filtering / printing / exporting utility.

    All heavy processing is streamed row-by-row via generators so memory
    stays bounded even on very large files. Filtering supports equality,
    numeric comparison and substring operators; numeric comparison falls
    back to plain string comparison when a value cannot be parsed as a
    float.
    """

    # Operators ordered longest-first so that e.g. "age>=18" matches ">="
    # instead of being split on the bare ">" or "=" (the old shortest-first
    # order produced bogus field names like "age>").
    OPERATORS = ['>=', '<=', '==', '!=', 'startswith', 'endswith',
                 'contains', '>', '<', '=']

    def __init__(self, verbose: bool = False):
        self.verbose = verbose        # instance-level verbosity flag
        self.matched_files = []       # kept for backward compatibility
        self.filtered_data = []       # kept for backward compatibility
        self.temp_dir = None          # temp dir used by multiprocess merging

    def parse_filter_conditions(self, filter_conditions: List[str]) -> List[tuple]:
        """Parse raw condition strings into (field, operator, value) tuples.

        A surrounding pair of single or double quotes on the value is
        stripped. Conditions containing no known operator are ignored.
        """
        conditions = []

        for condition in filter_conditions:
            condition = condition.strip()
            for op in self.OPERATORS:
                if op in condition:
                    parts = condition.split(op, 1)
                    if len(parts) == 2:
                        field = parts[0].strip()
                        value = parts[1].strip()
                        # Strip matching quotes around string values
                        if (value.startswith('"') and value.endswith('"')) or \
                           (value.startswith("'") and value.endswith("'")):
                            value = value[1:-1]
                        conditions.append((field, op, value))
                        break

        return conditions

    def apply_filter(self, data: List[Dict], conditions: List[tuple]) -> List[Dict]:
        """Return the rows of *data* that satisfy every condition.

        Delegates the per-row check to _row_matches_conditions so batch and
        streaming filtering cannot drift apart (the old code duplicated the
        whole operator table here).
        """
        if not conditions:
            return data
        return [row for row in data if self._row_matches_conditions(row, conditions)]

    def read_csv_file(self, file_path: str) -> List[Dict]:
        """Read a whole CSV file into a list of row dicts (backward compat).

        Bug fix: the previous implementation materialized read_csv_stream,
        which yields *batches* (lists of rows), so it actually returned a
        List[List[Dict]] despite the declared List[Dict] return type.
        """
        return list(self.read_csv_single_stream(file_path))

    def _iter_csv_rows(self, file_path: str) -> Generator[Dict, None, None]:
        """Yield row dicts from *file_path*, trying UTF-8 then GBK.

        NOTE: a UnicodeDecodeError can surface mid-iteration; like the
        original code, the file is then re-read from the start in GBK, so
        rows decoded successfully before the failure may be seen twice.
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                yield from csv.DictReader(file)
            return
        except UnicodeDecodeError:
            pass
        with open(file_path, 'r', encoding='gbk') as file:
            yield from csv.DictReader(file)

    def read_csv_stream(self, file_path: str, batch_size: int = 1000) -> Generator[List[Dict], None, None]:
        """Stream a CSV file as batches (lists) of row dicts.

        Yields a single empty list if the file cannot be read, matching the
        original error behavior. The UTF-8/GBK fallback lives in
        _iter_csv_rows (previously this whole loop was duplicated per
        encoding).
        """
        try:
            log_memory_usage(f"开始读取文件: {file_path}")

            batch = []
            for i, row in enumerate(self._iter_csv_rows(file_path)):
                batch.append(row)
                if len(batch) >= batch_size:
                    log_memory_usage(f"处理批次 {len(batch)} 行")
                    yield batch
                    batch = []
                    # Periodic manual GC keeps peak memory down
                    if i % (batch_size * 10) == 0:
                        gc.collect()
                        log_memory_usage(f"垃圾回收后")

            # Flush the final partial batch
            if batch:
                yield batch

        except Exception as e:
            log_verbose(f"读取文件 {file_path} 失败: {e}")
            yield []

    def read_csv_single_stream(self, file_path: str) -> Generator[Dict, None, None]:
        """Stream a CSV file one row dict at a time, with periodic GC."""
        try:
            log_memory_usage(f"开始逐行读取文件: {file_path}")

            for i, row in enumerate(self._iter_csv_rows(file_path)):
                yield row
                # Periodic GC to keep long scans memory-stable
                if i % 10000 == 0:
                    gc.collect()
                    log_memory_usage(f"处理第 {i} 行后")

        except Exception as e:
            log_verbose(f"读取文件 {file_path} 失败: {e}")
            return

    def process_single_file(self, file_path: str, conditions: List[tuple],
                          columns: Optional[List[str]] = None,
                          limit: int = -1,
                          include_path: bool = False) -> List[Dict]:
        """Process one CSV file and return all matches (backward compat)."""
        return list(self.process_single_file_stream(file_path, conditions, columns, limit, include_path))

    def process_single_file_stream(self, file_path: str, conditions: List[tuple],
                                 columns: Optional[List[str]] = None,
                                 limit: int = -1,
                                 include_path: bool = False) -> Generator[Dict, None, None]:
        """Stream the rows of one CSV file that match *conditions*.

        Args:
            file_path: CSV file to read.
            conditions: parsed (field, operator, value) tuples.
            columns: optional subset of columns to keep in yielded rows.
            limit: stop after this many matches (<= 0 means unlimited).
            include_path: add a 'csv_path' column holding *file_path*.
        """
        log_verbose(f"流式处理文件: {file_path}")
        log_memory_usage(f"开始处理文件: {file_path}")

        total_processed = 0
        total_matched = 0

        for row in self.read_csv_single_stream(file_path):
            total_processed += 1

            if self._row_matches_conditions(row, conditions):
                total_matched += 1

                if include_path:
                    row = dict(row)  # copy so the reader's row is not mutated
                    row['csv_path'] = file_path

                if columns:
                    # Make sure the synthetic path column survives selection
                    if include_path and 'csv_path' not in columns:
                        columns = ['csv_path'] + columns
                    yield {col: row.get(col, '') for col in columns if col in row}
                else:
                    yield row

                # Per-file row limit
                if limit > 0 and total_matched >= limit:
                    log_verbose(f"达到限制行数 {limit}，停止处理")
                    break

            # Periodic progress output
            if total_processed % 10000 == 0:
                log_verbose(f"已处理 {total_processed} 行，匹配 {total_matched} 行")
                log_memory_usage(f"处理进度")

        log_verbose(f"文件 {file_path} 处理完成：共处理 {total_processed} 行，匹配 {total_matched} 行")
        log_memory_usage(f"文件处理完成")

    def _row_matches_conditions(self, row: Dict, conditions: List[tuple]) -> bool:
        """Return True if *row* satisfies every (field, operator, value) condition.

        Numeric operators compare as floats; when either side cannot be
        parsed as a float they fall back to lexicographic string
        comparison. A condition on a missing field never matches.
        """
        if not conditions:
            return True

        for field, operator, value in conditions:
            if field not in row:
                log_verbose(f"警告: 字段 '{field}' 不在CSV文件中，可用字段: {list(row.keys())}")
                return False

            field_value = str(row[field]).strip()
            target_value = str(value).strip()

            try:
                if operator in ['=', '==']:
                    if field_value != target_value:
                        return False
                elif operator == '!=':
                    if field_value == target_value:
                        return False
                elif operator == '>':
                    if float(field_value) <= float(target_value):
                        return False
                elif operator == '<':
                    if float(field_value) >= float(target_value):
                        return False
                elif operator == '>=':
                    if float(field_value) < float(target_value):
                        return False
                elif operator == '<=':
                    if float(field_value) > float(target_value):
                        return False
                elif operator == 'contains':
                    if target_value not in field_value:
                        return False
                elif operator == 'startswith':
                    if not field_value.startswith(target_value):
                        return False
                elif operator == 'endswith':
                    if not field_value.endswith(target_value):
                        return False
            except ValueError:
                # Not numeric -> fall back to string comparison
                if operator in ['>', '<', '>=', '<=']:
                    log_verbose(f"警告: 字段 '{field}' 的值 '{field_value}' 无法转换为数字，使用字符串比较")
                    if operator == '>':
                        if field_value <= target_value:
                            return False
                    elif operator == '<':
                        if field_value >= target_value:
                            return False
                    elif operator == '>=':
                        if field_value < target_value:
                            return False
                    elif operator == '<=':
                        if field_value > target_value:
                            return False

        return True

    def process_files_multiprocess_stream(self, file_paths: List[str], conditions: List[tuple],
                                        columns: Optional[List[str]] = None,
                                        limit: int = -1,
                                        output_file: Optional[str] = None,
                                        include_path: bool = False) -> Generator[Dict, None, None]:
        """Filter many CSV files in parallel and stream the merged rows.

        Each worker writes its matches to a temp CSV; the temp files are
        then streamed back in order and deleted. *output_file* is accepted
        for interface compatibility but writing is handled by the caller.
        (A dead computation of the union of field names was removed — it
        was never used for merging.)
        """
        if not file_paths:
            return

        log_memory_usage(f"开始多进程处理 {len(file_paths)} 个文件")

        # Scratch directory for per-worker output
        temp_dir = tempfile.mkdtemp(prefix="csv_scan_")
        log_verbose(f"创建临时目录: {temp_dir}")

        args_list = [(file_path, conditions, columns, limit, temp_dir, idx, include_path)
                    for idx, file_path in enumerate(file_paths)]

        process_count = min(len(file_paths), multiprocessing.cpu_count())
        log_verbose(f"使用 {process_count} 个进程处理 {len(file_paths)} 个文件")

        with Pool(processes=process_count) as pool:
            # imap preserves input order; tqdm shows a progress bar
            results = list(tqdm(
                pool.imap(_process_file_stream_wrapper, args_list),
                total=len(args_list),
                desc="处理文件"
            ))

        temp_files = [result for result in results if result is not None]

        try:
            if temp_files:
                log_memory_usage(f"开始合并 {len(temp_files)} 个临时文件")

            # Stream rows back out of each worker's temp file
            for temp_file in temp_files:
                try:
                    with open(temp_file, 'r', encoding='utf-8') as f:
                        yield from csv.DictReader(f)
                except Exception as e:
                    log_verbose(f"读取临时文件 {temp_file} 失败: {e}")
        finally:
            # Best-effort cleanup, even if the consumer stops early
            for temp_file in temp_files:
                try:
                    os.remove(temp_file)
                except OSError:
                    pass
            try:
                os.rmdir(temp_dir)
            except OSError:
                pass

        if temp_files:
            log_memory_usage(f"多进程处理完成")

    def save_to_csv(self, data: List[Dict], output_file: str):
        """Write *data* (a list of row dicts) to *output_file*; no-op if empty."""
        if not data:
            return

        try:
            # Create the destination directory if needed
            output_dir = os.path.dirname(output_file)
            if output_dir and not os.path.exists(output_dir):
                os.makedirs(output_dir)

            with open(output_file, 'w', encoding='utf-8', newline='') as f:
                writer = csv.DictWriter(f, fieldnames=data[0].keys())
                writer.writeheader()
                writer.writerows(data)

            log_verbose(f"成功保存 {len(data)} 行数据到 {output_file}")
        except Exception as e:
            log_verbose(f"保存文件 {output_file} 失败: {e}")

    def print_table(self, data: List[Dict], columns: Optional[List[str]] = None):
        """Render *data* as a left-aligned PrettyTable on stdout."""
        if not data:
            print("没有数据可显示")
            return

        # Columns to display: requested subset (that exists) or everything
        if columns:
            display_columns = [col for col in columns if col in data[0].keys()]
        else:
            display_columns = list(data[0].keys())

        table = PrettyTable()
        table.field_names = display_columns

        for field in display_columns:
            table.align[field] = 'l'

        for row in data:
            table.add_row([row.get(col, '') for col in display_columns])

        print(table)

    def scan_and_filter(self, input_paths: List[str], regex_pattern: str = r".*\.csv$",
                       conditions: Optional[List[str]] = None,
                       columns: Optional[List[str]] = None,
                       limit: int = -1,
                       multi_process: bool = False,
                       output_file: Optional[str] = None,
                       include_path: bool = False) -> Generator[Dict, None, None]:
        """Find matching CSV files, filter them, and stream the result rows.

        When *output_file* is given, rows are written to that CSV in the
        same pass while still being yielded. (Bug fix: the old
        implementation re-ran the *entire* scan a second time after the
        generator was exhausted just to produce the output file.)
        """
        if output_file:
            inner = self.scan_and_filter(input_paths, regex_pattern, conditions,
                                         columns, limit, multi_process, None, include_path)
            yield from self._tee_stream_to_file(inner, output_file, columns)
            return

        # Locate candidate files
        files = find_files(input_paths, regex_pattern)
        if not files:
            print("没有找到匹配的CSV文件")
            return
        else:
            print(f'match files:')
            for f in files:
                print(f'    {f}')

        # Parse the raw condition strings once
        filter_conditions = self.parse_filter_conditions(conditions) if conditions else []
        print(f'filter conditions: {filter_conditions}')

        if multi_process and len(files) > 1:
            source = self.process_files_multiprocess_stream(
                files, filter_conditions, columns, limit, None, include_path)
        else:
            source = self._iter_files_sequential(
                files, filter_conditions, columns, limit, include_path)

        total_processed = 0
        total_matched = 0

        for row in source:
            total_matched += 1
            yield row

            # Global row limit across all files
            if limit > 0 and total_matched >= limit:
                log_verbose(f"达到限制行数 {limit}，停止处理")
                return

            total_processed += 1

            # Periodic progress output
            if total_processed % 10000 == 0:
                log_verbose(f"已处理 {total_processed} 行，匹配 {total_matched} 行")
                log_memory_usage(f"处理进度")

        log_verbose(f"处理完成：共处理 {total_processed} 行，匹配 {total_matched} 行")

    def _iter_files_sequential(self, files: List[str], filter_conditions: List[tuple],
                               columns: Optional[List[str]], limit: int,
                               include_path: bool) -> Generator[Dict, None, None]:
        """Stream matching rows from each file in turn (single process)."""
        for file_path in tqdm(files, desc="处理文件"):
            yield from self.process_single_file_stream(
                file_path, filter_conditions, columns, limit, include_path)

    def _tee_stream_to_file(self, data_stream, output_file: str,
                            columns: Optional[List[str]] = None) -> Generator[Dict, None, None]:
        """Yield every row from *data_stream* while also writing it to a CSV."""
        # Create the destination directory if needed
        output_dir = os.path.dirname(output_file)
        if output_dir and not os.path.exists(output_dir):
            os.makedirs(output_dir)

        rows_written = 0

        with open(output_file, 'w', encoding='utf-8', newline='') as f:
            writer = None

            for row in data_stream:
                if writer is None:
                    # First row fixes the header
                    fieldnames = columns if columns else list(row.keys())
                    writer = csv.DictWriter(f, fieldnames=fieldnames)
                    writer.writeheader()

                if columns:
                    writer.writerow({col: row.get(col, '') for col in columns if col in row})
                else:
                    writer.writerow(row)

                rows_written += 1

                # Periodic flush + GC keeps memory bounded on long runs
                if rows_written % 10000 == 0:
                    f.flush()
                    gc.collect()

                yield row

        log_verbose(f"成功写入 {rows_written} 行数据到 {output_file}")

    def _write_stream_to_file(self, data_stream, output_file: str, columns: Optional[List[str]] = None):
        """Drain *data_stream* completely, writing every row to *output_file*.

        The header is taken from *columns* when given, otherwise from the
        keys of the first row. NOTE(review): when *columns* is given and
        the stream adds a synthetic 'csv_path' column, that column is
        dropped unless the caller listed it — preserved behavior, confirm
        intent.
        """
        # Create the destination directory if needed
        output_dir = os.path.dirname(output_file)
        if output_dir and not os.path.exists(output_dir):
            os.makedirs(output_dir)

        rows_written = 0

        with open(output_file, 'w', encoding='utf-8', newline='') as f:
            writer = None

            for row in data_stream:
                if writer is None:
                    # First row fixes the header
                    fieldnames = columns if columns else list(row.keys())
                    writer = csv.DictWriter(f, fieldnames=fieldnames)
                    writer.writeheader()

                # Restrict to the requested columns when given
                if columns:
                    filtered_row = {col: row.get(col, '') for col in columns if col in row}
                    writer.writerow(filtered_row)
                else:
                    writer.writerow(row)

                rows_written += 1

                # Periodic flush + GC
                if rows_written % 10000 == 0:
                    f.flush()
                    gc.collect()
                    log_verbose(f"已写入 {rows_written} 行到 {output_file}")

        log_verbose(f"成功写入 {rows_written} 行数据到 {output_file}")

def main():
    """CLI entry point: parse arguments, scan CSV files, print or save results."""
    global verbose

    # Build the argument parser
    parser = argparse.ArgumentParser(
        description="csv文件查询和阅读（内存优化版）",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog="""
示例用法:
  # 基本用法 - 查找当前目录下所有CSV文件
  python csv_scan.py .
  
  # 查找特定模式的CSV文件
  python csv_scan.py . -r "data_.*\.csv"
  
  # 过滤特定列
  python csv_scan.py . -c name age email
  
  # 添加过滤条件
  python csv_scan.py . -f "age>18 status=active"
  
  # 限制结果行数
  python csv_scan.py . -l 100
  
  # 保存结果到文件
  python csv_scan.py . -o result.csv
  
  # 使用多进程处理
  python csv_scan.py . -m
  
  # 详细日志模式
  python csv_scan.py . -v
"""
    )

    parser.add_argument('input',
                       nargs='+',
                       help='输入文件路径列表，支持文件、目录或通配符')

    parser.add_argument('-r', '--regex',
                       default=r".*\.csv$",
                       help='csv文件名正则表达式过滤器，默认匹配所有.csv文件')

    parser.add_argument('-m', '--multi-process',
                       action='store_true',
                       help='是否使用多进程执行命令')

    parser.add_argument('-c', '--columns',
                       nargs='+',
                       type=str,
                       default=None,
                       help='要显示的字段名，多个列用空格分隔, 默认None为显示全部字段')

    parser.add_argument('-f', '--filter',
                       nargs='+',
                       default=None,
                       type=str,
                       help='csv过滤条件，多个条件用空格分隔，例如：-f "col1=value1 col2>value2"')

    parser.add_argument('-l', '--limit',
                       type=int,
                       default=-1,
                       help='搜索过滤结果的最大行数，默认为-1表示不限制')

    parser.add_argument('-p', '--path_field',
                       action='store_true',
                       help='添加一个path字段打印或是输出到csv中')

    parser.add_argument('-o','--output', 
                        type=str,
                        default=None, 
                        help='搜索结果保存到新的csv路径')

    parser.add_argument('-v', '--verbose',
                       action='store_true',
                       help='打印详细的执行日志，包括文件遍历、正则匹配和命令执行过程')

    args = parser.parse_args()
    # Propagate the verbosity flag to the module-level switch
    verbose = args.verbose
    if verbose:
        log_verbose("启用详细日志模式")
        log_verbose(f"解析的参数: {vars(args)}")

    log_verbose("=== CSV文件扫描和过滤工具（内存优化版） ===")
    log_verbose(f"输入路径: {args.input}")
    log_verbose(f"正则表达式: {args.regex}")
    log_verbose(f"进程模式: {'多进程' if args.multi_process else '单进程'}")

    scanner = CSVScanner(verbose=verbose)

    # Baseline memory reading for the final report
    initial_memory = get_memory_usage()
    log_verbose(f"初始内存使用: {initial_memory:.2f} MB")

    result_count = 0

    if args.output:
        # Output file requested: stream rows straight into it
        scanner._write_stream_to_file(
            scanner.scan_and_filter(
                input_paths=args.input,
                regex_pattern=args.regex,
                conditions=args.filter,
                columns=args.columns,
                limit=args.limit,
                multi_process=args.multi_process,
                output_file=None,  # writing is handled by _write_stream_to_file
                include_path=args.path_field
            ),
            args.output,
            args.columns
        )
        print(f"\n流式处理完成，结果已保存到: {args.output}")
    else:
        # No output file: show a count plus a sample of the data
        sample_data = []
        for row in scanner.scan_and_filter(
            input_paths=args.input,
            regex_pattern=args.regex,
            conditions=args.filter,
            columns=args.columns,
            limit=args.limit,
            multi_process=args.multi_process,
            output_file=None,
            include_path=args.path_field
        ):
            result_count += 1

            # Keep only the first 100 rows for display, but keep consuming
            # the stream so the reported total is accurate. (Bug fix: the
            # old code broke out of the loop at 100 rows, so the printed
            # total was capped at 100 and the progress message below was
            # unreachable.)
            if len(sample_data) < 100:
                sample_data.append(row)

            # Periodic progress output
            if result_count % 10000 == 0:
                print(f"已处理 {result_count} 行数据...")

        print(f"\n处理完成，共找到 {result_count} 行匹配的数据")

        # Show the collected sample
        if sample_data:
            print(f"\n显示前 {len(sample_data)} 行数据作为示例:")
            scanner.print_table(sample_data, args.columns)
        else:
            print("\n没有找到匹配的数据")

    # Final memory report
    final_memory = get_memory_usage()
    memory_diff = final_memory - initial_memory
    log_verbose(f"最终内存使用: {final_memory:.2f} MB (变化: {memory_diff:+.2f} MB)")

if __name__ == "__main__":
    main()
