# -*- coding: utf-8 -*-

import argparse
import csv
import logging
import multiprocessing
import os
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager

import psycopg2
import psycopg2.extras

# Configure the root logger at ERROR level with no handlers so that noise
# from libraries does not interfere with this tool's own per-exporter
# file/console logging (handlers are attached per exporter instance).
logging.basicConfig(
    level=logging.ERROR,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[]  # remove all default handlers
)
logger = logging.getLogger(__name__)
# Stop propagation so this module's records do not bubble up to the root logger.
logger.propagate = False

# Fixed database configuration.
# NOTE(review): credentials are hard-coded in source; consider loading them
# from environment variables or a secrets store instead of committing them.
# NOTE(review): max_file_size / estimated_row_size / retry_count / max_workers
# are not referenced by the code visible in this file — presumably consumed
# elsewhere or vestigial; verify before removing.
DEFAULT_CONFIG = {
    "host": "192.1.0.33",
    "port": "5432",
    "database": "vsisdb",
    "user": "vsisuser",
    "password": "123Qwe!@#",
    "schema": "vsis",
    "batch_size": 100000,  # enlarged fetch batch size (rows per fetchmany)
    "max_file_size": 500 * 1024 * 1024,  # max 500MB per output file
    "estimated_row_size": 300,  # estimated bytes per row
    "retry_count": 3,
    "connection_timeout": 60,  # connection timeout, seconds
    "query_timeout": 7200,  # statement timeout raised to 2 hours, seconds
    "max_workers": min(4, multiprocessing.cpu_count())  # parallel worker threads
}

# 连接池管理
# Connection pool management
class ConnectionPool:
    """A small thread-safe pool of psycopg2 connections.

    Connections are handed out through the ``get_connection`` context
    manager. Fixed behavior vs. the previous version: connections that were
    closed/broken while in use are discarded instead of being recycled, and
    the pool never keeps more than ``pool_size`` idle connections.
    """

    def __init__(self, config, logger, pool_size=5):
        """
        config: dict with host/port/database/user/password plus
            connection_timeout and query_timeout settings.
        logger: logger (or LoggerAdapter) used for diagnostics.
        pool_size: maximum number of idle connections to retain.
        """
        self.config = config
        self.logger = logger
        self.pool_size = pool_size
        self.pool = []
        # NOTE(review): multiprocessing.Lock works for threads too, but
        # threading.Lock is the conventional choice for a thread pool;
        # kept as-is to preserve existing fork/pickling behavior.
        self.lock = multiprocessing.Lock()

    def initialize(self):
        """Pre-create up to pool_size connections (best effort)."""
        for _ in range(self.pool_size):
            conn = self._create_connection()
            if conn:
                self.pool.append(conn)
        self.logger.info(f"连接池初始化完成，创建了 {len(self.pool)}/{self.pool_size} 个连接")

    def _create_connection(self):
        """Create one database connection; return None on failure."""
        try:
            # Log connection parameters (password deliberately omitted).
            self.logger.info(f"尝试连接数据库: host={self.config['host']}, port={self.config['port']}, database={self.config['database']}, user={self.config['user']}")
            conn = psycopg2.connect(
                host=self.config['host'],
                port=self.config['port'],
                database=self.config['database'],
                user=self.config['user'],
                password=self.config['password'],
                connect_timeout=self.config['connection_timeout']
            )
            # Apply a per-session statement timeout so runaway queries abort.
            with conn.cursor() as cursor:
                cursor.execute(f"SET statement_timeout = '{self.config['query_timeout']}s'")
            self.logger.info("数据库连接创建成功")
            return conn
        except psycopg2.OperationalError as e:
            self.logger.error(f"数据库操作错误: {str(e)}")
            return None
        except Exception as e:
            self.logger.error(f"创建数据库连接失败: {str(e)}", exc_info=True)
            return None

    @contextmanager
    def get_connection(self):
        """Yield a connection; return it to the pool when the block exits.

        Raises RuntimeError when no connection can be obtained.
        Fix: a connection closed while in use is dropped rather than
        recycled, and surplus connections beyond pool_size are closed
        instead of letting the pool grow without bound.
        """
        with self.lock:
            if not self.pool:
                self.logger.warning("连接池为空，创建新连接")
                conn = self._create_connection()
                if not conn:
                    raise RuntimeError("无法获取数据库连接")
            else:
                conn = self.pool.pop()

        try:
            yield conn
        finally:
            with self.lock:
                # psycopg2 connections expose .closed (0 = open); duck-typed
                # via getattr so foreign connection objects default to "open".
                if getattr(conn, 'closed', 0):
                    self.logger.warning("连接已关闭，不归还连接池")
                elif len(self.pool) >= self.pool_size:
                    # Pool already full of idle connections; close the surplus.
                    try:
                        conn.close()
                    except Exception as e:
                        self.logger.warning(f"关闭连接时出错: {str(e)}")
                else:
                    self.pool.append(conn)

    def close_all(self):
        """Close every pooled connection and empty the pool."""
        with self.lock:
            for conn in self.pool:
                try:
                    conn.close()
                except Exception as e:
                    self.logger.warning(f"关闭连接时出错: {str(e)}")
            self.pool.clear()


class VolunteerDataExporter:
    """Export volunteer records joined with service-hour data to a CSV file.

    Reads from ``kdp01_<suffix>`` (volunteer master) LEFT JOINed against one
    or more ``kdx07_<suffix>`` tables (service hours), aggregates total
    service time per ID number, and streams the result to CSV in batches.
    """

    def __init__(self, table_suffix, csv_prefix, output_dir='.', config=None):
        """
        table_suffix: table-name suffix (e.g. "10"; "32*" triggers the
            Jiangsu multi-table handling in _get_table_names).
        csv_prefix: prefix for the generated CSV file name.
        output_dir: directory for the CSV and the logs/ subdirectory.
        config: configuration dict; None means DEFAULT_CONFIG. (Resolved at
            call time to avoid the shared-mutable-default-argument pitfall.)
        """
        self.db_config = DEFAULT_CONFIG if config is None else config
        self.table_suffix = table_suffix
        self.csv_prefix = csv_prefix
        self.output_dir = output_dir

        # Create the logger first so the connection pool can use it.
        base_logger = logging.getLogger(f'VolunteerDataExporter_{self.table_suffix}')
        base_logger.setLevel(logging.INFO)

        # Fix: logging.getLogger returns the same cached object per name, so
        # re-instantiating this class used to stack duplicate handlers and
        # emit every log line multiple times. Attach handlers only once.
        if not base_logger.handlers:
            # Formatter expects a 'table_suffix' field, injected by the
            # LoggerAdapter below.
            formatter = logging.Formatter('%(asctime)s - %(levelname)s - [%(table_suffix)s] - %(message)s')

            # File handler under <output_dir>/logs for a stable log location.
            log_dir = os.path.join(self.output_dir, 'logs')
            os.makedirs(log_dir, exist_ok=True)
            log_file = os.path.join(log_dir, f'volunteer_export_{self.table_suffix}.log')
            file_handler = logging.FileHandler(log_file)
            file_handler.setFormatter(formatter)
            base_logger.addHandler(file_handler)

            # Console output only when not running in the background.
            if os.environ.get('BACKGROUND_RUN', '0') == '0':
                stream_handler = logging.StreamHandler()
                stream_handler.setFormatter(formatter)
                base_logger.addHandler(stream_handler)

        # Adapter injects the table_suffix context into every record.
        self.logger = logging.LoggerAdapter(base_logger, {'table_suffix': self.table_suffix})

        # Initialize the connection pool with the resolved config.
        self.connection_pool = ConnectionPool(self.db_config, self.logger)

        # Ensure the output directory exists.
        os.makedirs(output_dir, exist_ok=True)

    def _get_table_names(self):
        """Return (kdp01_table, [kdx07_tables]) with schema-qualified names.

        Jiangsu province suffixes starting with '32' are special-cased:
        their service-hour data is spread over kdx07_3200 .. kdx07_3213.
        """
        kdp01_table = f"{self.db_config['schema']}.kdp01_{self.table_suffix}"

        if self.table_suffix.startswith('32'):
            # Jiangsu: enumerate kdx07_3200 through kdx07_3213.
            kdx07_tables = []
            for suffix in range(3200, 3214):
                kdx07_tables.append(f"{self.db_config['schema']}.kdx07_{suffix}")
            return kdp01_table, kdx07_tables
        else:
            kdx07_table = f"{self.db_config['schema']}.kdx07_{self.table_suffix}"
            return kdp01_table, [kdx07_table]

    def _build_query(self):
        """Build the aggregation SQL (kdp01 LEFT JOIN unioned kdx07 tables)."""
        kdp01_table, kdx07_tables = self._get_table_names()

        # UNION ALL over every kdx07 shard so the join sees all service rows.
        kdx07_union = []
        for i, table in enumerate(kdx07_tables):
            kdx07_union.append(f"SELECT axcp0005, albp0029, albx0702 FROM {table}")

        kdx07_union_query = " UNION ALL ".join(kdx07_union)

        # Index-friendly WHERE: excludes ID type '01' and status 99 rows.
        query = f"""            SELECT \
                k.axcp0004 AS 证件号类型, \
                k.axcp0005 AS 证件号码, \
                MAX(k.axcp0006) AS 性别, \
                MAX(k.axcp0002) AS 姓名, \
                MAX(k.axaa0003) AS 区域, \
                MAX(k.axcp0037) AS 手机号, \
                MAX(k.albp0029) AS 志愿者编号, \
                ROUND(COALESCE(SUM(k7.albx0702::NUMERIC), 0), 2) AS 服务总时长 \
            FROM {kdp01_table} k \
            LEFT JOIN ( \
                {kdx07_union_query} \
            ) k7 ON k.axcp0005 = k7.axcp0005 AND k.albp0029 = k7.albp0029 \
            WHERE k.axcp0004 != '01' and (k.axcp0077 !=99 or k.axcp0077 is null) \
            GROUP BY k.axcp0005, k.axcp0004 \
            ORDER BY k.axcp0005 \
        """
        return query.strip()

    def _execute_query(self):
        """Generator: yields the column-name list first, then row batches.

        Raises RuntimeError when the query produces no result-set
        description or a database error occurs.
        """
        query = self._build_query()
        # Terminate the statement with a single semicolon.
        query = query.rstrip(';') + ';'
        self.logger.info(f"执行查询: {query}")  # log the full statement

        # Log session/transaction diagnostics on a throwaway connection.
        with self.connection_pool.get_connection() as conn:
            isolation_level = conn.isolation_level
            self.logger.info(f"数据库事务隔离级别: {isolation_level}")

            # Record current user and search path for debugging.
            with conn.cursor() as info_cursor:
                try:
                    info_cursor.execute("SELECT current_user, current_schema, current_setting('search_path');")
                    db_info = info_cursor.fetchone()
                    self.logger.info(f"数据库用户: {db_info[0]}, 当前模式: {db_info[1]}, 搜索路径: {db_info[2]}")
                except Exception as e:
                    self.logger.error(f"获取数据库信息失败: {str(e)}")
                    # Fall back to a simpler diagnostic query.
                    try:
                        info_cursor.execute("SELECT current_user, current_schema;")
                        db_info = info_cursor.fetchone()
                        self.logger.info(f"数据库用户: {db_info[0]}, 当前模式: {db_info[1]}")
                        self.logger.warning("无法获取搜索路径信息")
                    except Exception as e2:
                        self.logger.error(f"获取基本数据库信息失败: {str(e2)}")

        with self.connection_pool.get_connection() as conn:
            self.logger.info(f"数据库连接信息: 服务器={conn.info.host}, 端口={conn.info.port}, 数据库={conn.info.dbname}, 用户={conn.info.user}")

            # Smoke-test with LIMIT 5. Fix: the previous version executed the
            # full (potentially hours-long) aggregation a second time just to
            # fetch 5 sample rows; the LIMIT keeps the probe cheap.
            sample_query = query.rstrip(';') + ' LIMIT 5;'
            try:
                with conn.cursor() as test_cursor:
                    self.logger.info("开始测试查询...")
                    test_cursor.execute(sample_query)
                    self.logger.info("测试查询执行完成")

                    if test_cursor.description is None:
                        self.logger.warning("测试查询无结果集描述")
                    else:
                        columns = [desc[0] for desc in test_cursor.description]
                        self.logger.info(f"测试查询结果列: {columns}")

                        # Grab a small sample for the log.
                        sample_rows = test_cursor.fetchmany(5)
                        self.logger.info(f"测试查询样本数据 (前5行): {sample_rows}")
            except Exception as e:
                self.logger.error(f"测试查询时发生错误: {str(e)}", exc_info=True)

            # Real run with a client-side DictCursor (no server-side cursor).
            try:
                with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor:
                    self.logger.info("开始正式查询...")
                    cursor.execute(query)
                    self.logger.info("正式查询执行完成")

                    if cursor.description is None:
                        self.logger.error(f"正式查询后cursor.description为None，查询语句: {query}")
                        # Surface any server notices before failing.
                        notices = conn.notices
                        if notices:
                            self.logger.warning(f"数据库通知: {notices}")
                        raise RuntimeError("查询执行失败，无法获取结果集描述")

                    # Yield the header first.
                    columns = [desc[0] for desc in cursor.description]
                    self.logger.info(f"正式查询结果列: {columns}")
                    yield columns

                    # Then stream rows in batches of batch_size.
                    batch_count = 0
                    total_db_rows = 0
                    while True:
                        rows = cursor.fetchmany(self.db_config['batch_size'])
                        if not rows:
                            break
                        batch_count += 1
                        current_batch_size = len(rows)
                        total_db_rows += current_batch_size
                        self.logger.info(f"获取第 {batch_count} 批数据，共 {current_batch_size} 行，累计: {total_db_rows}")
                        yield rows
                    self.logger.info(f"查询完成，共从数据库获取 {total_db_rows} 行数据")
            except psycopg2.Error as e:
                self.logger.error(f"执行正式查询时发生数据库错误: {str(e)}")
                self.logger.error(f"查询语句: {query}")
                raise RuntimeError(f"查询执行失败: {str(e)}\n查询语句: {query}") from e
            except Exception as e:
                self.logger.error(f"正式查询过程中发生异常: {str(e)}", exc_info=True)
                raise

    def export_to_csv(self):
        """Run the query and stream the results into a CSV file.

        Returns the CSV file path. Re-raises any failure after logging;
        the connection pool is always closed on exit.
        """
        try:
            # Initialize the connection pool.
            self.connection_pool.initialize()

            # Prepare the CSV file path.
            csv_filename = os.path.join(self.output_dir, f"{self.csv_prefix}_volunteer_{self.table_suffix}.csv")
            self.logger.info(f"开始导出数据到: {csv_filename}")

            start_time = time.time()
            result_generator = self._execute_query()

            # The generator yields the header row first.
            columns = next(result_generator)

            with open(csv_filename, 'w', newline='', encoding='utf-8') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(columns)

                batch_num = 1
                total_rows = 0

                # Write data batch by batch.
                for rows in result_generator:
                    if not rows:
                        self.logger.warning(f"批次 {batch_num} 为空")
                        batch_num += 1
                        continue

                    writer.writerows(rows)
                    batch_count = len(rows)
                    total_rows += batch_count
                    self.logger.info(f"已写入批次 {batch_num}，行数: {batch_count}，累计: {total_rows}")
                    batch_num += 1

                self.logger.info(f"CSV写入完成，共写入 {total_rows} 行数据")

            elapsed_time = time.time() - start_time
            self.logger.info(f"导出完成 - 共写入CSV {total_rows} 行，耗时: {elapsed_time:.2f}秒")

            # Cross-check: count the rows directly in the database.
            db_total_rows = None
            try:
                with self.connection_pool.get_connection() as conn:
                    with conn.cursor() as count_cursor:
                        count_query = f"SELECT COUNT(*) FROM ({self._build_query()}) AS subquery"
                        count_cursor.execute(count_query)
                        db_total_rows = count_cursor.fetchone()[0]
                        self.logger.info(f"数据库查询实际行数: {db_total_rows}")
            except Exception as e:
                self.logger.error(f"获取数据库实际行数失败: {str(e)}")

            # Compare the database count with the CSV row count.
            if db_total_rows is not None and db_total_rows != total_rows:
                self.logger.warning(f"警告: 数据库实际行数({db_total_rows})与CSV写入行数({total_rows})不匹配!")
            elif db_total_rows is not None:
                self.logger.info(f"数据校验成功: 数据库行数({db_total_rows})与CSV写入行数({total_rows})一致")

                # Diagnostic: compare against a single-table query on kdp01.
                try:
                    with self.connection_pool.get_connection() as conn:
                        with conn.cursor() as single_table_cursor:
                            # List duplicated ID numbers (axcp0005) with their
                            # occurrence counts, most frequent first, using the
                            # same WHERE condition as the main query.
                            kdp01_table = f"{self.db_config['schema']}.kdp01_{self.table_suffix}"
                            single_table_query = f"""SELECT k.axcp0005, COUNT(*) as count
                            FROM {kdp01_table} k
                            WHERE k.axcp0004 != '01' and (k.axcp0077 !=99 or k.axcp0077 is null)
                            GROUP BY k.axcp0005
                            HAVING COUNT(*) > 1
                            ORDER BY count DESC"""
                            single_table_cursor.execute(single_table_query)
                            duplicate_ids = single_table_cursor.fetchall()

                            if duplicate_ids:
                                self.logger.info(f"发现 {len(duplicate_ids)} 个重复的证件号:")
                                # Renamed from id/count to avoid shadowing builtins.
                                for dup_id, dup_count in duplicate_ids:
                                    self.logger.info(f"证件号: {dup_id}, 重复次数: {dup_count}")
                            else:
                                self.logger.info("未发现重复的证件号")

                            # Total (ungrouped) record count with the same filter.
                            total_count_query = f"""SELECT COUNT(*) FROM {kdp01_table} k
                            WHERE k.axcp0004 != '01' and (k.axcp0077 !=99 or k.axcp0077 is null)"""
                            single_table_cursor.execute(total_count_query)
                            single_table_count = single_table_cursor.fetchone()[0]
                            self.logger.info(f"单表查询(仅{kdp01_table})的总记录数: {single_table_count}")
                            if single_table_count != db_total_rows:
                                self.logger.info(f"多表连接查询比单表查询少 {single_table_count - db_total_rows} 行数据")
                except Exception as e:
                    self.logger.error(f"执行单表查询对比失败: {str(e)}")

            return csv_filename
        except Exception as e:
            self.logger.error(f"导出失败: {str(e)}", exc_info=True)
            raise
        finally:
            # Always release pooled connections, even on failure.
            self.connection_pool.close_all()
            self.logger.info("数据库连接池已关闭")

    def suggest_indexes(self):
        """Log and return suggested CREATE INDEX statements for the query.

        Fix: previously this suggested a single kdx07_<suffix> index even for
        Jiangsu '32*' suffixes, whose data actually lives in kdx07_3200..3213;
        now one index is suggested per table reported by _get_table_names.
        """
        kdp01_table, kdx07_tables = self._get_table_names()
        indexes = [
            f"CREATE INDEX idx_kdp01_axcp0004_axcp0005 ON {kdp01_table} (axcp0004, axcp0005);",
            f"CREATE INDEX idx_kdp01_axcp0005 ON {kdp01_table} (axcp0005);",
        ]
        for table in kdx07_tables:
            # Suffix makes the index name unique across shards.
            shard_suffix = table.rsplit('_', 1)[1]
            indexes.append(
                f"CREATE INDEX idx_kdx07_axcp0005_albp0029_{shard_suffix} ON {table} (axcp0005, albp0029);"
            )
        self.logger.info("为提高查询性能，建议创建以下索引:")
        for idx in indexes:
            self.logger.info(idx)
        return indexes

def main():
    """Command-line entry point: parse arguments and run the export.

    Returns 0 on success and 1 on failure (for use as a process exit code).
    """
    arg_parser = argparse.ArgumentParser(description="志愿者数据导出工具(优化版)")
    arg_parser.add_argument("-s", "--suffix", required=True, help="表名后缀（如：10）")
    arg_parser.add_argument("-p", "--prefix", required=True, help="CSV文件前缀")
    arg_parser.add_argument("-o", "--output", default=".", help="输出目录")
    arg_parser.add_argument("--suggest-indexes", action="store_true", help="仅显示建议的索引而不执行导出")
    opts = arg_parser.parse_args()

    exporter = None
    try:
        exporter = VolunteerDataExporter(
            table_suffix=opts.suffix,
            csv_prefix=opts.prefix,
            output_dir=opts.output
        )

        # Index-suggestion-only mode skips the export entirely.
        if opts.suggest_indexes:
            exporter.suggest_indexes()
            return 0

        csv_file = exporter.export_to_csv()
        exporter.logger.info(f"数据已成功导出到: {csv_file}")

        # Always print the index suggestions after a successful export.
        exporter.suggest_indexes()
    except Exception as e:
        # Prefer the exporter's contextual logger; fall back to the module
        # logger when construction failed before the exporter existed.
        active_logger = exporter.logger if exporter is not None else logger
        active_logger.critical(f"程序运行失败: {str(e)}", exc_info=True)
        return 1
    return 0


if __name__ == '__main__':
    # sys.exit is the documented way to exit a script; the bare exit()
    # builtin is an interactive helper injected by the site module and is
    # not guaranteed to exist (e.g. under `python -S`).
    sys.exit(main())