#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
高性能Oracle数据批量导入脚本
基于SQLAlchemy + oracledb，性能优先
"""

import gc
import json
import logging
import os
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Generator, List

import numpy as np
import oracledb
import pandas as pd
from sqlalchemy import create_engine, text

# ==================== Configuration constants ====================
# Database connection settings.
# NOTE(review): credentials are hardcoded here — move them to environment
# variables or a secrets store before sharing or deploying this script.
DB_USERNAME = "shineijiujia"          # database username
DB_PASSWORD = "123456"          # database password
DB_HOST = "localhost"     # database host
DB_PORT = 1521           # port
DB_SERVICE = "ORCL"      # service name or SID

# Data settings
TABLE_NAME = "tbldirty_费用明细0826"    # target table name (overridden per file when looping)
CSV_FILE_PATH = r".\dataSource\费用明细0826.csv"          # CSV file path (overridden when looping)
DELIMITER = ","                        # field delimiter
ENCODING = "utf-8"                       # file encoding
NEWLINE = "\n"                         # line terminator

# Performance settings — tuned for ~30GB input files
CHUNK_SIZE = 150000                     # rows per pandas chunk (smaller for huge files)
MAX_WORKERS = 64                       # maximum concurrent worker threads
MAX_MEMORY_GB = 25                     # maximum memory usage (GB)
BATCH_SIZE = 15000                     # rows per INSERT executemany batch

# NOTE: the column configuration is read dynamically for each import run
# rather than once globally, so multiple files with different schemas can
# be imported in one session.

# ==================== Logging configuration ====================
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(r'.\dataSource\log\bulk_import.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

class OracleBulkImporter:
    def __init__(self, column_count: int = None, table_name: str = None, delimiter: str = None, csv_path: str = None):
        """Initialize the bulk importer.

        Explicit arguments take precedence; any omitted argument falls back
        to the module-level constants.

        Args:
            column_count: number of columns in the target table; when None it
                is read from the per-file column-config JSON.
            table_name: target table name (defaults to TABLE_NAME).
            delimiter: CSV field delimiter (defaults to DELIMITER).
            csv_path: CSV file path (defaults to CSV_FILE_PATH).
        """
        self.connection_string = f"oracle+oracledb://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_SERVICE}"
        self.engine = None
        self.table_name = table_name or TABLE_NAME
        self.delimiter = delimiter or DELIMITER
        self.csv_path = csv_path or CSV_FILE_PATH
        self.total_rows_imported = 0
        self.total_chunks_processed = 0

        # Thread-safe progress statistics shared by worker threads.
        self.stats_lock = threading.Lock()
        self.global_stats = {
            'total_lines': 0,
            'start_time': 0,
            'processed_rows': 0,
            'active_chunks': 0,
            'completed_chunks': 0,
            'total_chunks': 0,
            'chunk_progress': {},
            'last_update_time': 0
        }

        # Determine the column count.
        if column_count is not None:
            self.column_count = column_count
        else:
            try:
                # Bug fix: strip the '.csv' extension so this path matches the
                # one built in insert_process() (csvColsDict_<stem>.json);
                # previously the extension was kept, producing a wrong path.
                base_name = os.path.splitext(os.path.basename(self.csv_path))[0]
                with open(rf".\dataSource\errorlog\csvColsDict_{base_name}.json", 'r', encoding='utf-8') as f:
                    # Only the number of fields is needed, so stdlib json
                    # suffices (no need for pandas here).
                    col_dict = json.load(f)
                    self.column_count = len(col_dict)
            except Exception as e:
                logger.error(f"读取字段配置失败: {e}")
                self.column_count = 10

        # Pre-build the positional INSERT statement once.
        placeholders = ', '.join([f':param{i+1}' for i in range(self.column_count)])
        self.insert_sql = f"INSERT INTO {self.table_name} VALUES ({placeholders})"

    def create_engine(self):
        """Build the SQLAlchemy engine with pooling sized for the worker count.

        Returns:
            bool: True when the engine was created, False on failure.
        """
        try:
            pool_options = {
                'pool_size': MAX_WORKERS * 3,
                'max_overflow': MAX_WORKERS * 2,
                'pool_pre_ping': False,
                'pool_recycle': 3600,
                # thick_mode deliberately not set; rely on driver defaults.
                'connect_args': {},
            }
            self.engine = create_engine(self.connection_string, **pool_options)
            logger.info("数据库引擎创建成功")
            return True
        except Exception as e:
            logger.error(f"创建数据库引擎失败: {e}")
            return False

    def test_connection(self):
        """Run a trivial query to verify the database is reachable.

        Returns:
            bool: True when `SELECT 1 FROM DUAL` succeeds, False otherwise.
        """
        try:
            with self.engine.connect() as conn:
                # Result intentionally discarded (was an unused local);
                # only the successful round-trip matters.
                conn.execute(text("SELECT 1 FROM DUAL"))
                logger.info("数据库连接测试成功")
                return True
        except Exception as e:
            logger.error(f"数据库连接测试失败: {e}")
            return False

    def clear_table(self):
        """Empty the target table before import.

        Strategy: verify the table exists, then TRUNCATE via a raw DB-API
        connection (fast; avoids SQLAlchemy's autobegin interfering with DDL),
        falling back to DELETE inside a transaction when TRUNCATE fails for a
        reason other than a missing table.

        Returns:
            bool: True when the table was cleared, False otherwise.
        """
        try:
            # First check that the table exists (plain connection; no explicit
            # transaction needed for this read-only lookup).
            with self.engine.connect() as conn:
                exists = conn.execute(
                    text("SELECT 1 FROM USER_TABLES WHERE TABLE_NAME = UPPER(:t)"),
                    {"t": self.table_name}
                ).fetchone()
            if not exists:
                logger.error(f"目标表不存在: {self.table_name} (ORA-00942) —— 请先建表或调整表名")
                return False

            # Run TRUNCATE on a raw connection so SQLAlchemy's autobegin
            # behavior cannot interfere with the DDL statement.
            try:
                raw_conn = self.engine.raw_connection()
                try:
                    cur = raw_conn.cursor()
                    cur.execute(f"TRUNCATE TABLE {self.table_name}")
                    # Oracle auto-commits DDL; commit again anyway to leave
                    # the connection in a clean state.
                    raw_conn.commit()
                    logger.info(f"已清空表 {self.table_name} (TRUNCATE)")
                    return True
                finally:
                    try:
                        cur.close()
                    except Exception:
                        pass
                    raw_conn.close()
            except Exception as e_trunc:
                if 'ORA-00942' in str(e_trunc):
                    logger.error(f"TRUNCATE 报表不存在: {self.table_name}")
                    return False
                logger.warning(f"TRUNCATE失败，尝试DELETE方式: {e_trunc}")

            # Fallback: DELETE everything (only reached when TRUNCATE failed
            # and the table is known to exist).
            try:
                with self.engine.begin() as conn_del:
                    result = conn_del.execute(text(f"DELETE FROM {self.table_name}"))
                    deleted = result.rowcount
                logger.info(f"✅ 已清空表 {self.table_name}，删除 {deleted:,} 行 (DELETE)")
                return True
            except Exception as e_del:
                logger.error(f"DELETE 清空表失败: {e_del}")
                return False
        except Exception as e:
            logger.error(f"清空表失败: {e}")
            return False

    def check_table_columns(self):
        """Verify the target table's actual column count against expectations.

        Queries USER_TAB_COLUMNS; when the actual count differs from
        self.column_count, adopts the actual count and regenerates the INSERT
        statement so the number of bind placeholders matches the table.

        Returns:
            bool: True when the check ran (even if the count was adjusted),
            False when the query failed.
        """
        try:
            with self.engine.connect() as conn:
                # Use a bind parameter for the table name — consistent with
                # clear_table() — instead of interpolating it into the SQL text.
                result = conn.execute(
                    text(
                        "SELECT COUNT(*) as column_count "
                        "FROM USER_TAB_COLUMNS "
                        "WHERE TABLE_NAME = UPPER(:t)"
                    ),
                    {"t": self.table_name}
                )
                actual_columns = result.fetchone()[0]
                logger.info(f"表 {self.table_name} 实际列数: {actual_columns}, 预期列数: {self.column_count}")

                if actual_columns != self.column_count:
                    logger.warning(f"列数不匹配！调整 column_count 从 {self.column_count} 到 {actual_columns}")
                    self.column_count = actual_columns
                    # Rebuild the INSERT SQL with the corrected placeholder count.
                    placeholders = ', '.join([f':param{i+1}' for i in range(self.column_count)])
                    self.insert_sql = f"INSERT INTO {self.table_name} VALUES ({placeholders})"
                    logger.info(f"已更新INSERT SQL: {self.insert_sql}")

                return True
        except Exception as e:
            logger.error(f"检查表列数失败: {e}")
            return False

    def count_total_lines(self, file_path: str) -> int:
        """Count the data rows in the CSV (total lines minus the header row).

        Returns:
            int: number of data rows, or 0 when the file cannot be read.
        """
        try:
            logger.info(f"正在计算文件总行数: {file_path}")
            t0 = time.time()

            # Iterate lines in binary mode; this matches text-line semantics
            # (a final line without a trailing newline still counts).
            line_count = 0
            with open(file_path, 'rb') as fh:
                for _ in fh:
                    line_count += 1

            elapsed = time.time() - t0
            logger.info(f"文件总行数: {line_count:,} 行，计算耗时: {elapsed:.2f} 秒")

            # Exclude the header line.
            return max(0, line_count - 1)
        except Exception as e:
            logger.error(f"计算文件行数失败: {e}")
            return 0

    def format_time_span(self, seconds: float) -> str:
        """Render a duration as 's', 'm s', or 'h m s' depending on magnitude."""
        seconds = round(seconds, 2)
        if seconds <= 60:
            return f"{seconds:.1f}s"
        minutes, secs = divmod(seconds, 60)
        if seconds <= 3600:
            return f"{int(minutes)}m {secs:.1f}s"
        hours, rem_minutes = divmod(int(minutes), 60)
        return f"{hours}h {rem_minutes}m {secs:.1f}s"

    def read_csv_chunks(self, file_path: str) -> Generator[tuple, None, None]:
        """
        Read the CSV file in chunks.

        The parser engine depends on the delimiter: a multi-character
        delimiter requires pandas' 'python' engine (which cannot take a
        custom lineterminator), while a single-character delimiter uses the
        faster 'c' engine with NEWLINE as the line terminator. All values are
        read as strings, and each chunk is padded or truncated to exactly
        self.column_count columns.

        Args:
            file_path: path to the CSV file.

        Yields:
            tuple: (chunk_id, DataFrame)
        """
        try:
            logger.info(f"开始读取CSV文件: {file_path}")

            # Chunked pandas read keeps memory bounded for very large files.
            # Engine choice: multi-char separators need the python engine.
            if len(self.delimiter) > 1:
                # Multi-character delimiter: python engine, default terminator.
                chunk_reader = pd.read_csv(
                    file_path,
                    encoding=ENCODING,
                    sep=self.delimiter,
                    chunksize=CHUNK_SIZE,
                    dtype=str,                    # read everything as strings
                    na_values=[''],              # treat empty fields as NA
                    keep_default_na=False,       # skip pandas' default NA tokens
                    engine='python',             # required for multi-char separators
                    header=0                     # first row is the header
                    # NOTE: the python engine does not accept lineterminator.
                )
            else:
                # Single-character delimiter: the faster C engine.
                chunk_reader = pd.read_csv(
                    file_path,
                    encoding=ENCODING,
                    sep=self.delimiter,
                    chunksize=CHUNK_SIZE,
                    dtype=str,                    # read everything as strings
                    na_values=[''],              # treat empty fields as NA
                    keep_default_na=False,       # skip pandas' default NA tokens
                    engine='c',                  # faster engine
                    header=0,                    # first row is the header
                    lineterminator=NEWLINE       # explicit line terminator
                )

            chunk_id = 0
            for chunk in chunk_reader:
                # Normalize: NA -> empty string, then force string dtype.
                chunk = chunk.fillna('')     # replace NA with empty strings
                chunk = chunk.astype(str)    # ensure every value is a str

                # Force the expected column count.
                if len(chunk.columns) < self.column_count:
                    # Pad missing columns with empty strings.
                    for i in range(len(chunk.columns), self.column_count):
                        chunk[f'col_{i}'] = ''
                elif len(chunk.columns) > self.column_count:
                    # Keep only the first column_count columns.
                    chunk = chunk.iloc[:, :self.column_count]

                yield chunk_id, chunk
                chunk_id += 1

                # Periodically force a GC pass to cap memory growth.
                if chunk_id % 10 == 0:
                    gc.collect()  # force garbage collection

        except Exception as e:
            logger.error(f"读取CSV文件失败: {e}")
            raise

    def insert_chunk_batch(self, chunk_data: tuple, global_stats: dict = None) -> int:
        """
        Insert one DataFrame chunk into the target table in sub-batches.

        Args:
            chunk_data: (chunk_id, DataFrame) pair from read_csv_chunks.
            global_stats: serial-mode running totals with keys
                {'total_lines', 'start_time', 'processed_rows'}; when None the
                call is assumed to come from the parallel path, which uses the
                thread-safe shared statistics instead.

        Returns:
            int: number of rows successfully inserted (0 on failure).
        """
        chunk_id, chunk = chunk_data
        rows_inserted = 0
        # A missing global_stats dict marks the parallel (thread-pool) path.
        is_parallel = global_stats is None

        try:
            start_time = time.time()
            chunk_size = len(chunk)

            # Register this chunk with the shared progress tracker (parallel only).
            if is_parallel:
                self.start_chunk_processing(chunk_id, chunk_size)

            # Build parameter tuples. Performance fix: itertuples() replaces
            # iterrows(), which constructed a pandas Series per row; the values
            # and their order are identical (all columns are str already), but
            # itertuples avoids the per-row overhead. Blank cells become None.
            data_rows = []
            for row in chunk.itertuples(index=False, name=None):
                row_tuple = tuple(
                    val if pd.notna(val) and str(val).strip() != ''
                    else None
                    for val in row
                )
                data_rows.append(row_tuple)

            # Insert in sub-batches inside one transaction to bound memory use.
            with self.engine.connect() as conn:
                trans = conn.begin()
                try:
                    for i in range(0, len(data_rows), BATCH_SIZE):
                        batch = data_rows[i:i + BATCH_SIZE]
                        # executemany-style insert — pad short rows so every
                        # statement binds exactly column_count parameters.
                        param_dicts = []
                        for row in batch:
                            padded_row = list(row) + [None] * (self.column_count - len(row))
                            row_dict = {f'param{j+1}': padded_row[j] for j in range(self.column_count)}
                            param_dicts.append(row_dict)

                        conn.execute(text(self.insert_sql), param_dicts)
                        rows_inserted += len(batch)

                    trans.commit()

                    elapsed = time.time() - start_time
                    chunk_speed = rows_inserted / elapsed if elapsed > 0 else 0

                    # Update/read progress statistics and build the ETA suffix.
                    if is_parallel:
                        self.update_global_stats(rows_inserted, chunk_id)
                        current_stats = self.get_global_stats_copy()
                        # Smart ETA accounts for parallel chunk throughput.
                        overall_speed, eta_str, progress_pct = self.calculate_smart_eta(current_stats)
                        eta_info = f"，进度 {progress_pct:.1f}%，整体速度 {overall_speed:.0f} 行/秒，剩余时间 {eta_str}"
                    else:
                        # Serial mode: compute progress directly from the
                        # caller-supplied running totals.
                        current_stats = global_stats
                        current_time = time.time()
                        total_elapsed = current_time - current_stats.get('start_time', start_time)
                        current_total_rows = current_stats.get('processed_rows', 0) + rows_inserted
                        total_lines = current_stats.get('total_lines', 0)

                        if current_total_rows > 0 and total_lines > 0:
                            overall_speed = current_total_rows / total_elapsed if total_elapsed > 0 else 0
                            remaining_rows = total_lines - current_total_rows
                            eta_seconds = remaining_rows / overall_speed if overall_speed > 0 else 0
                            eta_str = self.format_time_span(eta_seconds)
                            progress_pct = (current_total_rows / total_lines) * 100
                            eta_info = f"，进度 {progress_pct:.1f}%，剩余时间 {eta_str}"
                        else:
                            eta_info = ""

                    # Emit the per-chunk progress line.
                    loggerStr = f"块 {chunk_id}: 插入 {rows_inserted:,} 行，耗时 {elapsed:.2f}秒，块速度 {chunk_speed:.0f} 行/秒"
                    if eta_info:
                        loggerStr += eta_info
                    logger.info(loggerStr)

                except Exception as e:
                    trans.rollback()
                    logger.error(f"插入块 {chunk_id} 时发生错误，已回滚: {e}")
                    raise

            return rows_inserted

        except Exception as e:
            logger.error(f"插入块 {chunk_id} 失败: {e}")
            return 0
        finally:
            # Release the chunk's memory promptly — chunks can be ~150k rows.
            del chunk
            gc.collect()

    def update_global_stats(self, rows_processed: int, chunk_id: int = None):
        """线程安全地更新全局统计信息"""
        with self.stats_lock:
            self.global_stats['processed_rows'] += rows_processed
            self.global_stats['completed_chunks'] += 1
            self.global_stats['last_update_time'] = time.time()
            if chunk_id is not None:
                self.global_stats['chunk_progress'][chunk_id] = {
                    'rows': rows_processed,
                    'completed_time': time.time()
                }
    
    def start_chunk_processing(self, chunk_id: int, chunk_size: int):
        """标记块开始处理"""
        with self.stats_lock:
            self.global_stats['active_chunks'] += 1
            self.global_stats['chunk_progress'][chunk_id] = {
                'start_time': time.time(),
                'size': chunk_size,
                'rows': 0,
                'completed_time': None
            }
    
    def get_global_stats_copy(self):
        """线程安全地获取全局统计信息的副本"""
        with self.stats_lock:
            return self.global_stats.copy()
    
    def calculate_smart_eta(self, current_stats: dict, rows_inserted: int = 0) -> tuple:
        """智能计算ETA和速度，考虑并行处理的特点"""
        total_lines = current_stats.get('total_lines', 0)
        start_time = current_stats.get('start_time', time.time())
        processed_rows = current_stats.get('processed_rows', 0) + rows_inserted
        completed_chunks = current_stats.get('completed_chunks', 0)
        total_chunks = current_stats.get('total_chunks', 0)
        active_chunks = current_stats.get('active_chunks', 0)
        
        current_time = time.time()
        total_elapsed = current_time - start_time
        
        # 如果处理时间太短，返回预估值
        if total_elapsed < 5:
            return 0, "计算中...", 0
        
        # 计算速度：优先使用已完成块的平均速度
        chunk_progress = current_stats.get('chunk_progress', {})
        completed_chunk_speeds = []
        
        for chunk_info in chunk_progress.values():
            if chunk_info.get('completed_time') and chunk_info.get('start_time'):
                chunk_elapsed = chunk_info['completed_time'] - chunk_info['start_time']
                chunk_rows = chunk_info.get('rows', 0)
                if chunk_elapsed > 0 and chunk_rows > 0:
                    completed_chunk_speeds.append(chunk_rows / chunk_elapsed)
        
        # 如果有完成的块，使用平均速度；否则用总体速度
        if completed_chunk_speeds and len(completed_chunk_speeds) >= 2:
            # 使用最近完成的块的平均速度，更准确
            recent_speeds = completed_chunk_speeds[-min(3, len(completed_chunk_speeds)):]
            overall_speed = sum(recent_speeds) / len(recent_speeds)
        elif processed_rows > 0:
            overall_speed = processed_rows / total_elapsed
        else:
            overall_speed = 0
        
        # 计算进度百分比
        progress_pct = (processed_rows / total_lines) * 100 if total_lines > 0 else 0
        
        # 计算ETA
        if overall_speed > 0 and total_lines > 0:
            remaining_rows = total_lines - processed_rows
            eta_seconds = remaining_rows / overall_speed
            eta_str = self.format_time_span(eta_seconds)
        else:
            eta_str = "计算中..."
        
        return overall_speed, eta_str, progress_pct

    def bulk_import(self, use_parallel: bool = True) -> dict:
        """
        Run the bulk import.

        Counts the input rows, clears and validates the target table, then
        inserts every CSV chunk either through a streaming thread pool
        (use_parallel=True) or serially.

        Args:
            use_parallel: whether to process chunks with a thread pool.

        Returns:
            dict: import statistics on success; {'success': False, 'error': ...}
            on failure.
        """
        start_time = time.time()
        total_rows = 0
        total_chunks = 0
        
        logger.info("="*60)
        logger.info("开始批量导入数据")
        logger.info(f"文件路径: {self.csv_path}")
        logger.info(f"目标表: {self.table_name}")
        logger.info(f"块大小: {CHUNK_SIZE:,}")
        logger.info(f"批次大小: {BATCH_SIZE:,}")
        logger.info(f"最大并发: {MAX_WORKERS}")
        logger.info(f"并行处理: {use_parallel}")
        logger.info("="*60)

        # Count the total number of data rows up front (for progress/ETA).
        total_lines = self.count_total_lines(self.csv_path)
        if total_lines == 0:
            logger.error("无法获取文件行数或文件为空")
            return {'success': False, 'error': '文件为空或无法读取'}
        
        logger.info(f"预计需要导入 {total_lines:,} 行数据")
        
        # Clear the target table.
        logger.info("开始清空目标表...")
        if not self.clear_table():
            logger.error("清空表失败，终止导入")
            return {'success': False, 'error': '清空表失败'}
        
        # Validate (and, if needed, adopt) the table's actual column count.
        logger.info("检查表结构...")
        if not self.check_table_columns():
            logger.error("检查表列数失败，终止导入")
            return {'success': False, 'error': '检查表列数失败'}
        
        logger.info("="*60)
        
        try:
            if use_parallel:
                # Initialize the shared (thread-safe) statistics.
                with self.stats_lock:
                    self.global_stats['total_lines'] = total_lines
                    self.global_stats['start_time'] = start_time
                    self.global_stats['processed_rows'] = 0
                    self.global_stats['active_chunks'] = 0
                    self.global_stats['completed_chunks'] = 0
                    self.global_stats['chunk_progress'] = {}
                
                logger.info("🚀 启动流式并行处理（避免预读取大文件）...")
                
                # Streaming parallel processing — chunks are read lazily so the
                # whole file is never held in memory at once.
                with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
                    futures = {}
                    completed_chunks = 0
                    submitted_chunks = 0
                    
                    # Lazy chunk source; tasks are submitted as chunks arrive.
                    chunk_generator = self.read_csv_chunks(self.csv_path)
                    
                    try:
                        # Seed the pool so workers are never idle at startup.
                        for _ in range(MAX_WORKERS * 2):  # pre-submit 2x the worker count
                            try:
                                chunk_data = next(chunk_generator)
                                future = executor.submit(self.insert_chunk_batch, chunk_data)
                                futures[future] = {
                                    'chunk_id': chunk_data[0],
                                    'submit_time': time.time()
                                }
                                submitted_chunks += 1
                                logger.info(f"📤 提交块 {chunk_data[0]} 处理任务")
                            except StopIteration:
                                break
                        
                        logger.info(f"🔄 开始处理，已提交 {submitted_chunks} 个初始任务...")
                        
                        # Main loop: harvest finished tasks and submit new ones.
                        generator_exhausted = False
                        consecutive_timeouts = 0
                        
                        while futures:
                            # Wait for task completion; use a timeout to avoid deadlock.
                            try:
                                done_futures = []
                                # Short timeout: poll quickly for any finished task.
                                for future in as_completed(futures, timeout=1):
                                    done_futures.append(future)
                                    # Handle one finished task immediately; don't block
                                    # collecting more.
                                    break
                                
                                # A completed task resets the timeout counter.
                                if done_futures:
                                    consecutive_timeouts = 0
                                else:
                                    consecutive_timeouts += 1
                                    
                                # Repeated timeouts with nothing left to submit may
                                # indicate a problem — probe the pending futures.
                                if consecutive_timeouts > 10 and generator_exhausted:
                                    logger.warning(f"⚠️ 连续超时 {consecutive_timeouts} 次，当前还有 {len(futures)} 个任务未完成")
                                    # Force-check the state of the outstanding tasks.
                                    stuck_futures = []
                                    for future in list(futures.keys()):
                                        if future.done():
                                            done_futures.append(future)
                                        else:
                                            stuck_futures.append(future)
                                    if stuck_futures:
                                        logger.warning(f"⚠️ 发现 {len(stuck_futures)} 个可能卡住的任务")
                                
                            except TimeoutError:
                                # as_completed timing out is expected; keep looping.
                                # NOTE(review): on Python < 3.11 concurrent.futures.TimeoutError
                                # is not the builtin TimeoutError — confirm the target version.
                                consecutive_timeouts += 1
                                if consecutive_timeouts > 5:
                                    logger.debug(f"🔄 等待任务完成中...（{len(futures)} 个任务运行中）")
                                continue
                            except Exception as e:
                                logger.error(f"❌ 等待任务完成时发生错误: {e}")
                                consecutive_timeouts += 1
                                continue
                            
                            # Harvest the finished tasks collected above.
                            for future in done_futures:
                                if future not in futures:
                                    logger.warning(f"⚠️ 发现重复处理的future，跳过")
                                    continue
                                    
                                chunk_info = futures.pop(future)
                                chunk_id = chunk_info['chunk_id']
                                
                                try:
                                    rows_inserted = future.result()
                                    total_rows += rows_inserted
                                    completed_chunks += 1
                                    
                                    # Compute progress and timing for the log line.
                                    current_time = time.time()
                                    elapsed_total = current_time - start_time
                                    progress_pct = (total_rows / total_lines) * 100 if total_lines > 0 else 0
                                    
                                    # Estimate the remaining time.
                                    if total_rows > 0:
                                        current_stats = self.get_global_stats_copy()
                                        overall_speed, eta_str, _ = self.calculate_smart_eta(current_stats)
                                    else:
                                        overall_speed, eta_str = 0, "计算中..."
                                    
                                    # Emit the live progress line.
                                    logger.info(
                                        f"✅ 块 {chunk_id:3d} 完成 | "
                                        f"已完成: {completed_chunks} 块 | "
                                        f"数据进度: {total_rows:8,}/{total_lines:,} ({progress_pct:.1f}%) | "
                                        f"整体速度: {overall_speed:6,.0f} 行/秒 | "
                                        f"已用: {self.format_time_span(elapsed_total)} | "
                                        f"剩余: {eta_str}"
                                    )
                                    
                                except Exception as e:
                                    logger.error(f"❌ 块 {chunk_id} 处理失败: {e}")
                                    completed_chunks += 1
                            
                            # Submit new tasks to keep the pool saturated.
                            # NOTE(review): the second condition is redundant —
                            # MAX_WORKERS < MAX_WORKERS * 3 always; likely meant as a
                            # queue-depth cap.
                            if not generator_exhausted:
                                while len(futures) < MAX_WORKERS and len(futures) < MAX_WORKERS * 3:  # cap the queued tasks
                                    try:
                                        chunk_data = next(chunk_generator)
                                        future = executor.submit(self.insert_chunk_batch, chunk_data)
                                        futures[future] = {
                                            'chunk_id': chunk_data[0],
                                            'submit_time': time.time()
                                        }
                                        submitted_chunks += 1
                                        if submitted_chunks % 10 == 0:  # log every 10th submission
                                            logger.info(f"📤 已提交 {submitted_chunks} 个任务")
                                    except StopIteration:
                                        # No more chunks to read.
                                        generator_exhausted = True
                                        logger.info(f"📝 所有数据块已提交完毕，等待剩余 {len(futures)} 个任务完成...")
                                        break
                        
                        # Drain any tasks still outstanding after the main loop.
                        if futures:
                            logger.warning(f"⚠️ 主循环结束时还有 {len(futures)} 个未完成的任务，等待完成...")
                            remaining_futures = list(futures.keys())
                            completed_in_cleanup = 0
                            try:
                                for future in as_completed(remaining_futures, timeout=300):  # 5-minute timeout
                                    if future in futures:
                                        chunk_info = futures.pop(future)
                                        chunk_id = chunk_info['chunk_id']
                                        try:
                                            rows_inserted = future.result()
                                            total_rows += rows_inserted
                                            completed_chunks += 1
                                            completed_in_cleanup += 1
                                            logger.info(f"✅ 补充完成块 {chunk_id}: {rows_inserted:,} 行")
                                        except Exception as e:
                                            logger.error(f"❌ 补充处理块 {chunk_id} 失败: {e}")
                                            completed_chunks += 1
                                            completed_in_cleanup += 1
                            except Exception as e:
                                logger.error(f"❌ 清理剩余任务时发生错误: {e}")
                            
                            # Last resort: wait on each remaining task individually.
                            if futures:
                                logger.warning(f"⚠️ 超时后仍有 {len(futures)} 个任务未完成，强制等待...")
                                for future, chunk_info in futures.items():
                                    chunk_id = chunk_info['chunk_id']
                                    try:
                                        rows_inserted = future.result(timeout=60)  # at most 1 minute per task
                                        total_rows += rows_inserted
                                        completed_chunks += 1
                                        completed_in_cleanup += 1
                                        logger.info(f"✅ 强制完成块 {chunk_id}: {rows_inserted:,} 行")
                                    except Exception as e:
                                        logger.error(f"❌ 强制处理块 {chunk_id} 失败: {e}")
                                        completed_chunks += 1
                                        completed_in_cleanup += 1
                                futures.clear()
                                
                            logger.info(f"🧹 清理阶段完成了 {completed_in_cleanup} 个任务")
                    
                    except Exception as e:
                        logger.error(f"❌ 流式处理发生错误: {e}")
                        
                    total_chunks = completed_chunks
                    logger.info(f"🎯 并行处理完成，共处理 {total_chunks} 个块")
                    
            else:
                # Serial processing path.
                chunk_id = 0
                for chunk_data in self.read_csv_chunks(self.csv_path):
                    chunk_start_time = time.time()
                    
                    # Running totals handed to insert_chunk_batch for its ETA line.
                    global_stats = {
                        'total_lines': total_lines,
                        'start_time': start_time,
                        'processed_rows': total_rows  # rows processed so far
                    }
                    
                    rows_inserted = self.insert_chunk_batch(chunk_data, global_stats)
                    chunk_elapsed = time.time() - chunk_start_time
                    
                    total_rows += rows_inserted
                    total_chunks += 1
                    chunk_id = chunk_data[0]
                    
                    # Compute progress for the log line.
                    elapsed_total = time.time() - start_time
                    progress_pct = (total_rows / total_lines) * 100 if total_lines > 0 else 0
                    
                    # Estimate the remaining time.
                    if total_rows > 0:
                        avg_speed = total_rows / elapsed_total
                        remaining_rows = total_lines - total_rows
                        eta_seconds = remaining_rows / avg_speed if avg_speed > 0 else 0
                        eta_str = self.format_time_span(eta_seconds)
                    else:
                        avg_speed = 0
                        eta_str = "计算中..."
                    
                    # Emit the progress line.
                    logger.info(
                        f"📦 块 {chunk_id:3d} | "
                        f"本批: {rows_inserted:,} 行 ({self.format_time_span(chunk_elapsed)}) | "
                        f"累计: {total_rows:,}/{total_lines:,} ({progress_pct:.1f}%) | "
                        f"速度: {avg_speed:,.0f} 行/秒 | "
                        f"剩余: {eta_str}"
                    )
            
            elapsed = time.time() - start_time
            avg_speed = total_rows / elapsed if elapsed > 0 else 0
            
            # Final statistics returned to the caller.
            stats = {
                'total_rows': total_rows,
                'total_chunks': total_chunks,
                'elapsed_time': elapsed,
                'avg_speed': avg_speed,
                'success': True
            }
            
            logger.info("="*60)
            logger.info("🎉 导入完成!")
            logger.info(f"📊 最终统计:")
            logger.info(f"   总行数: {total_rows:,} / {total_lines:,}")
            logger.info(f"   总块数: {total_chunks}")
            logger.info(f"   总耗时: {self.format_time_span(elapsed)}")
            logger.info(f"   平均速度: {avg_speed:,.0f} 行/秒")
            logger.info(f"   数据完整性: {(total_rows/total_lines*100):.2f}%")
            logger.info("="*60)
            
            return stats
            
        except Exception as e:
            logger.error(f"批量导入失败: {e}")
            return {'success': False, 'error': str(e)}

def insert_process():
    """Run one CSV-to-Oracle import using the current module-level config.

    Reads the per-file column dictionary JSON
    (``.\\dataSource\\errorlog\\csvColsDict_<name>.json``), builds an
    :class:`OracleBulkImporter` from the module-level ``CSV_FILE_PATH`` /
    ``TABLE_NAME`` / ``DELIMITER`` globals, and executes the bulk import.
    Prints progress/statistics to stdout and returns ``None`` in every case
    (including on failure).
    """
    # Fail fast on missing credentials before touching the database.
    if not DB_USERNAME or not DB_PASSWORD:
        print("❌ 请先配置数据库用户名和密码")
        return

    if not os.path.exists(CSV_FILE_PATH):
        print(f"❌ CSV文件不存在: {CSV_FILE_PATH}")
        return

    # Derive the column-dict file name from the CSV base name.
    # splitext strips only the trailing extension; str.replace('.csv', '')
    # would also clobber any '.csv' occurring inside the name itself.
    csvFileName = os.path.splitext(os.path.basename(CSV_FILE_PATH))[0]
    colDict_file = rf".\dataSource\errorlog\csvColsDict_{csvFileName}.json"

    if not os.path.exists(colDict_file):
        print(f"❌ 字段配置文件不存在: {colDict_file}")
        return

    try:
        with open(colDict_file, 'r', encoding='utf-8') as f:
            # NOTE: the 'encoding' kwarg was deprecated in pandas 1.3 and
            # removed in 2.0; the handle above is already decoded as UTF-8.
            colDict = pd.read_json(f, typ='series')
        column_count = len(colDict)  # number of target table columns
        print(f"✅ 读取字段配置成功，字段数量: {column_count}")
    except Exception as e:
        print(f"❌ 读取字段配置失败: {e}")
        return

    # Build the importer with the dynamically-read per-file configuration.
    importer = OracleBulkImporter(column_count, table_name=TABLE_NAME, delimiter=DELIMITER, csv_path=CSV_FILE_PATH)

    if not importer.create_engine():
        print("❌ 创建数据库引擎失败")
        return

    if not importer.test_connection():
        print("❌ 数据库连接测试失败")
        return

    print("✅ 数据库连接正常，开始导入...")

    # Execute the import (parallel chunk loading).
    result = importer.bulk_import(use_parallel=True)

    if result.get('success'):
        print(f"\n🎉 导入成功!")
        print(f"📊 统计信息:")
        print(f"   总行数: {result['total_rows']:,}")
        print(f"   总耗时: {result['elapsed_time']:.2f} 秒")
        print(f"   平均速度: {result['avg_speed']:.0f} 行/秒")
    else:
        print(f"\n❌ 导入失败: {result.get('error')}")

def main():
    """Entry point: run the import once, swallowing all errors.

    Any exception (including Ctrl-C) is reported to stdout rather than
    propagated; the ``finally`` branch always runs a GC pass and prints
    the closing banner. Always returns ``None``.
    """
    try:
        insert_process()
    except KeyboardInterrupt:
        # User aborted with Ctrl-C.
        print("\n🔔 导入被中断")
    except Exception as e:
        # Top-level boundary: report and fall through to cleanup.
        print(f"❌ 发生错误: {e}")
    finally:
        gc.collect()  # encourage release of large pandas buffers
        print("🔚 程序结束")

if __name__ == "__main__":
    # main()

    # 有一个csvFileName列表，（修改CSV_FILE_PATH），然后遍历的去插入数据库
    csvFileNameList = [
        # '440703_H44070300295_江门蓬江孚昌门诊部_结算表.csv', '440703_H44070300295_江门蓬江孚昌门诊部_就诊表.csv', 
        # '440703_H44070300295_江门蓬江孚昌门诊部_明细表.csv', '440703_H44070300295_江门蓬江孚昌门诊部_诊断表.csv', 
        # '440703_H44070300297_江门康城血液透析中心_结算表.csv', '440703_H44070300297_江门康城血液透析中心_就诊表.csv', 
        # '440703_H44070300297_江门康城血液透析中心_明细表.csv', '440703_H44070300297_江门康城血液透析中心_诊断表.csv', 
        # '440703_H44070300331_江门蓬江宏仁中西医结合诊所_结算表.csv', '440703_H44070300331_江门蓬江宏仁中西医结合诊所_就诊表.csv', 
        # '440703_H44070300331_江门蓬江宏仁中西医结合诊所_明细表.csv', '440703_H44070300331_江门蓬江宏仁中西医结合诊所_诊断表.csv', 
        # '440784_H44078400312_鹤山市第三人民医院_结算表.csv', '440784_H44078400312_鹤山市第三人民医院_就诊表.csv', 
        # '440784_H44078400312_鹤山市第三人民医院_明细表.csv', '440784_H44078400312_鹤山市第三人民医院_诊断表.csv', 
        # '440785_H44078500035_恩平市恩城街道办事处江南社区卫生服务中心_结算表.csv', '440785_H44078500035_恩平市恩城街道办事处江南社区卫生服务中心_就诊表.csv', 
        # '440785_H44078500035_恩平市恩城街道办事处江南社区卫生服务中心_明细表.csv', '440785_H44078500035_恩平市恩城街道办事处江南社区卫生服务中心_诊断表.csv', 
        '440785_H44078500065_恩平市沙湖镇中心卫生院恩平市第二人民医院_结算表.csv', '440785_H44078500065_恩平市沙湖镇中心卫生院恩平市第二人民医院_就诊表.csv', 
        '440785_H44078500065_恩平市沙湖镇中心卫生院恩平市第二人民医院_明细表.csv', '440785_H44078500065_恩平市沙湖镇中心卫生院恩平市第二人民医院_诊断表.csv', 
        # '440785_H44078500113_恩平市大槐镇中心卫生院_结算表.csv', '440785_H44078500113_恩平市大槐镇中心卫生院_就诊表.csv', 
        # '440785_H44078500113_恩平市大槐镇中心卫生院_明细表.csv', '440785_H44078500113_恩平市大槐镇中心卫生院_诊断表.csv', 
        # '440785_H44078500128_恩平市横陂镇中心卫生院_结算表.csv', '440785_H44078500128_恩平市横陂镇中心卫生院_就诊表.csv', 
        # '440785_H44078500128_恩平市横陂镇中心卫生院_明细表.csv', '440785_H44078500128_恩平市横陂镇中心卫生院_诊断表.csv', 
        # '440785_H44078500147_恩平爱尔新希望眼耳鼻喉医院_结算表.csv', '440785_H44078500147_恩平爱尔新希望眼耳鼻喉医院_就诊表.csv', 
        # '440785_H44078500147_恩平爱尔新希望眼耳鼻喉医院_明细表.csv', '440785_H44078500147_恩平爱尔新希望眼耳鼻喉医院_诊断表.csv'
    ]
    for csvFileNameMain in csvFileNameList:
        # 统一使用文件名(去扩展名)作为表名后缀，避免出现 _csv 冗余
        base_name, ext = os.path.splitext(os.path.basename(csvFileNameMain))
        CSV_FILE_PATH = rf".\dataSource\{csvFileNameMain}"
        TABLE_NAME = f"tbldirty_{base_name}"  # 不再包含 _csv 后缀
        DELIMITER = ','  # 固定分隔符

        print(f"============ 开始导入文件: {CSV_FILE_PATH} (表: {TABLE_NAME}) ============")
        insert_process()
        print(f"============ 完成导入文件: {CSV_FILE_PATH} (表: {TABLE_NAME}) ============")