#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Volunteer activity data export script.

Queries the kdp01 and kde07 tables and outputs the fields: certificate
number, volunteer ID, activity name, team name, activity start time,
activity end time, activity location, activity duration.

No de-duplication is performed; all matching records are kept.
"""
import os
import csv
import logging
import time
import argparse
from datetime import datetime
from typing import List, Dict, Any



# Ensure the logs directory exists before any log handler is attached.
logs_dir = 'logs'
os.makedirs(logs_dir, exist_ok=True)

# Logging is configured in the __main__ block so the log file name can
# incorporate command-line arguments.

# PostgreSQL database driver (hard dependency of this script).
import psycopg2
import psycopg2.extensions
import psycopg2.extras
DB_TYPE = 'postgresql'



# Fixed database configuration and other settings.
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or a config file.
DEFAULT_CONFIG = {
    "host": "192.1.0.33",
    "port": 5432,
    "database": "vsisdb",
    "user": "vsisuser",
    "password": "123Qwe!@#",
    "schema": "vsis",
    "batch_size": 100000,  # intended batch size (read into the exporter but not applied in this script)
    "connection_timeout": 60,  # seconds to wait when establishing a connection
    "query_timeout": 7200,  # PostgreSQL statement_timeout, in seconds
    "retry_count": 3,  # number of query retry attempts
    "pool_size": 5  # number of pooled connections to pre-create
}

class ConnectionPool:
    """Simple LIFO pool of PostgreSQL connections.

    Connections are created eagerly in ``initialize`` and handed out via
    ``get_connection`` / ``return_connection``.  Not thread-safe: the pool
    is expected to be used from a single thread.
    """

    def __init__(self, config, logger, pool_size=5):
        """
        Args:
            config: dict providing host/port/database/user/password plus
                ``connection_timeout`` and ``query_timeout`` (seconds).
            logger: logging.Logger used for all pool diagnostics.
            pool_size: number of connections to pre-create.
        """
        self.config = config
        self.logger = logger
        self.pool_size = pool_size
        self.pool = []
        self.initialize()

    def initialize(self):
        """Pre-create up to ``pool_size`` connections.

        Failed creations are logged and skipped, so the pool may end up
        smaller than requested.
        """
        self.logger.info(f"开始初始化连接池，大小: {self.pool_size}")
        start_time = time.time()
        for i in range(self.pool_size):
            self.logger.debug(f"创建连接 {i+1}/{self.pool_size}")
            conn = self._create_connection()
            if conn:
                self.pool.append(conn)
            # Stagger creation to avoid opening too many connections at once.
            time.sleep(0.5)
        duration = time.time() - start_time
        self.logger.info(f"连接池初始化完成，创建了 {len(self.pool)}/{self.pool_size} 个连接，耗时: {duration:.2f} 秒")

    def _create_connection(self):
        """Create a single database connection.

        Returns:
            An open psycopg2 connection with ``statement_timeout`` applied,
            or None if the connection attempt failed (error is logged).
        """
        try:
            # Log connection parameters (password deliberately omitted).
            self.logger.info(f"尝试连接数据库: host={self.config['host']}, port={self.config['port']}, database={self.config['database']}, user={self.config['user']}")
            self.logger.info(f"检测到的数据库类型: {DB_TYPE}")

            # PostgreSQL connection
            conn = psycopg2.connect(
                host=self.config['host'],
                port=self.config['port'],
                dbname=self.config['database'],
                user=self.config['user'],
                password=self.config['password'],
                connect_timeout=self.config['connection_timeout']
            )
            # Apply the per-session query timeout.
            with conn.cursor() as cursor:
                cursor.execute(f"SET statement_timeout = '{self.config['query_timeout']}s'")
            self.logger.info("PostgreSQL数据库连接创建成功")
            return conn
        except Exception as e:
            self.logger.error(f"创建数据库连接失败: {str(e)}", exc_info=True)
            return None

    def get_connection(self):
        """Pop a connection from the pool, creating one if the pool is empty.

        Raises:
            RuntimeError: if the pool is empty and a new connection cannot
                be created.
        """
        if not self.pool:
            self.logger.warning("连接池为空，创建新连接")
            conn = self._create_connection()
            if not conn:
                raise RuntimeError("无法获取数据库连接")
            return conn
        else:
            return self.pool.pop()

    def return_connection(self, conn):
        """Return a connection to the pool.

        Connections that are closed or not in a ready (idle) state are
        discarded instead of being pooled again.
        """
        if conn:
            # Only pool connections that are open and idle (no pending txn).
            if not conn.closed and conn.status == psycopg2.extensions.STATUS_READY:
                self.pool.append(conn)
            else:
                self.logger.warning("尝试归还已关闭或处于异常状态的连接")
                # Best-effort close of the unusable connection.
                # (Narrowed from a bare `except:` which would also swallow
                # KeyboardInterrupt/SystemExit.)
                try:
                    conn.close()
                except Exception:
                    pass
        else:
            self.logger.warning("尝试归还空连接")

    def close_all(self):
        """Close every pooled connection and empty the pool."""
        for conn in self.pool:
            try:
                conn.close()
            except Exception as e:
                self.logger.warning(f"关闭连接时出错: {str(e)}")
        self.pool.clear()

class VolunteerActivityExporter:
    """Exports volunteer activity records to CSV.

    Joins the person table (kdp01_<suffix>) against the activity table(s)
    (kdx07_<suffix>) and writes every matching row — no de-duplication —
    to one or more CSV files capped at a configurable size.
    """

    def __init__(self, table_suffix: str, output_dir: str, prefix: str = ''):
        """
        Args:
            table_suffix: region code appended to table names (e.g. '32').
            output_dir: directory where the CSV file(s) are written.
            prefix: optional filename prefix (e.g. a region name).
        """
        # Module-level defaults: DB host/credentials plus tuning knobs.
        self.config = DEFAULT_CONFIG
        self.table_suffix = table_suffix
        self.output_dir = output_dir
        self.prefix = prefix

        # The pool logs through the module-level `logger` configured in
        # the __main__ block.
        self.connection_pool = ConnectionPool(self.config, logger, pool_size=self.config['pool_size'])
        self.batch_size = self.config['batch_size']
        self.retry_count = self.config['retry_count']

    def _build_query(self) -> str:
        """Build the export SQL for the configured table suffix.

        For suffix '32' (Jiangsu) the activity data is sharded across
        kdx07_3200 .. kdx07_3213, so those shards are combined with
        UNION ALL; any other suffix reads a single kdx07_<suffix> table.
        """
        kdp01_table = f"{self.config['schema']}.kdp01_{self.table_suffix}"

        if self.table_suffix == '32':
            # Jiangsu's activity table is split into 14 numbered shards
            # (kdx07_3200 through kdx07_3213).
            jiangsu_tables = [
                f"{self.config['schema']}.kdx07_32{i:02d}" for i in range(14)
            ]
            # UNION ALL keeps duplicates (the script intentionally does not
            # de-duplicate); the derived table needs the k7_sub alias.
            kde07_table = f'( {" UNION ALL ".join([f"SELECT * FROM {table}" for table in jiangsu_tables])} ) AS k7_sub'
        else:
            kde07_table = f"{self.config['schema']}.kdx07_{self.table_suffix} AS k7_sub"

        # NOTE(review): identifiers are interpolated directly into the SQL;
        # acceptable only because schema/suffix come from trusted config/CLI.
        query = f"""
            SELECT
                k.axcp0005 AS 证件号码,
                k.albp0029 AS 志愿者编号,
                k7_sub.albx0104 AS 活动名称,
                k7_sub.albe0302 AS 队伍名称,
                k7_sub.albx0703 AS 活动开始时间,
                k7_sub.albx0704 AS 活动结束时间,
                k7_sub.albx0198 AS 活动地点,
                COALESCE(k7_sub.albx0702::numeric, 0.00) AS 活动时长
            FROM
                {kdp01_table} k
            LEFT JOIN
                {kde07_table} ON TRIM(k.axcp0005) = TRIM(k7_sub.axcp0005) -- 去除两端空格后匹配
            WHERE
                k.axcp0004 != '01' and (k.axcp0077 !=99 or k.axcp0077 is null)
        """

        return query

    def _execute_query_with_retry(self, query: str) -> List[Dict[str, Any]]:
        """Run *query* and return all rows as dicts, retrying on failure.

        Retries up to ``self.retry_count`` times with linear backoff
        (2s, 4s, ...).  The connection is always returned to the pool.

        Raises:
            The last database exception if every attempt fails.
        """
        retry = 0
        while retry < self.retry_count:
            try:
                conn = self.connection_pool.get_connection()
                try:
                    # RealDictCursor yields each row as a dict keyed by the
                    # column aliases in the SELECT list.  (The original code
                    # opened a plain cursor in the `with` and then shadowed
                    # it with a second, never-closed RealDictCursor; opening
                    # the RealDictCursor directly fixes the leak.)
                    with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
                        logger.info(f"执行查询: {query[:100]}...")
                        start_query_time = time.time()
                        cursor.execute(query)
                        query_duration = time.time() - start_query_time
                        logger.info(f"查询执行完成，耗时: {query_duration:.2f} 秒")
                        results = cursor.fetchall()
                        logger.info(f"查询完成，共获取 {len(results)} 条记录")
                        # Commit to end the implicit transaction cleanly.
                        conn.commit()
                        return results
                except Exception:
                    # Roll back so the connection is reusable by the pool.
                    try:
                        conn.rollback()
                    except Exception:
                        pass
                    raise
                finally:
                    self.connection_pool.return_connection(conn)
            except Exception as e:
                retry += 1
                logger.error(f"查询执行失败 (重试 {retry}/{self.retry_count}): {str(e)}")
                if retry >= self.retry_count:
                    raise
                time.sleep(2 * retry)  # linear backoff: 2s, 4s, ...
        raise RuntimeError("查询重试次数超过限制")

    def export_to_csv(self, results: List[Dict[str, Any]], output_file: str, max_file_size_mb: int = 300):
        """Export *results* to CSV, splitting output across files by size.

        Files beyond the first get an ``_<index>`` suffix before the
        extension.  A header row is written to every new (empty) file.
        A file may slightly exceed the cap because size is checked after
        each row is written.

        Args:
            results: rows to export, keyed by the CSV column names.
            output_file: path of the first output file.
            max_file_size_mb: soft per-file size cap in MB (default 300).

        Returns:
            Number of rows written (0 when *results* is empty).
        """
        if not results:
            logger.warning("没有数据可导出")
            # Return an explicit 0 (the original returned None) so callers
            # can log a numeric row count unconditionally.
            return 0

        # Make sure the target directory exists.
        os.makedirs(os.path.dirname(output_file), exist_ok=True)

        # Fixed CSV header; must match the SELECT column aliases.
        fieldnames = ["证件号码", "志愿者编号", "活动名称", "队伍名称", "活动开始时间", "活动结束时间", "活动地点", "活动时长"]

        max_file_size_bytes = max_file_size_mb * 1024 * 1024
        file_index = 0
        current_output_file = output_file
        total_written = 0

        while total_written < len(results):
            # If the current target already exists and is full, move on to
            # the next numbered file before opening anything.
            if os.path.exists(current_output_file) and os.path.getsize(current_output_file) >= max_file_size_bytes:
                file_index += 1
                base_name, ext = os.path.splitext(output_file)
                current_output_file = f"{base_name}_{file_index}{ext}"

            # Append to an existing file, otherwise create a fresh one; the
            # context manager guarantees the handle is closed on error
            # (replaces the manual open/close + try/finally).
            mode = 'a' if os.path.exists(current_output_file) else 'w'
            with open(current_output_file, mode, newline='', encoding='utf-8') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

                # Only a brand-new (empty) file gets the header row.
                if os.path.getsize(current_output_file) == 0:
                    writer.writeheader()

                # Write rows until the size cap is hit or the data runs out.
                while total_written < len(results):
                    result = results[total_written]
                    # Missing keys become empty cells.
                    cleaned_row = {field: result.get(field, '') for field in fieldnames}
                    writer.writerow(cleaned_row)
                    total_written += 1
                    if csvfile.tell() >= max_file_size_bytes:
                        break

            # More data left: switch to the next numbered file.
            if total_written < len(results):
                file_index += 1
                base_name, ext = os.path.splitext(output_file)
                current_output_file = f"{base_name}_{file_index}{ext}"

        logger.info(f"数据导出完成，共写入 {file_index + 1} 个文件，最后一个文件: {current_output_file}")
        return len(results)

    def run(self):
        """Execute the full export: build query, fetch rows, write CSV.

        Returns:
            Total number of rows exported.

        Raises:
            Any database or I/O error after logging it; connections are
            always released via ``close_all``.
        """
        try:
            start_time = datetime.now()
            logger.info("开始导出志愿者活动数据")

            # Build and execute the query.
            query = self._build_query()
            logger.info(f"构建的查询语句: {query}")
            results = self._execute_query_with_retry(query)

            # Output name pattern: [<prefix>_]volunteer_activity_<suffix>.csv
            prefix_part = f'{self.prefix}_' if self.prefix else ''
            base_filename = f'{prefix_part}volunteer_activity_{self.table_suffix}.csv'
            output_file = os.path.join(self.output_dir, base_filename)
            total_rows = self.export_to_csv(results, output_file)

            end_time = datetime.now()
            duration = (end_time - start_time).total_seconds()
            logger.info(f"导出完成，共导出 {total_rows} 行数据，耗时: {duration:.2f} 秒")

            return total_rows
        except Exception as e:
            logger.error(f"导出过程中发生错误: {str(e)}")
            raise
        finally:
            # Always release database connections, even on failure.
            self.connection_pool.close_all()

def parse_args(argv=None):
    """Parse command-line options for the exporter.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            when None.  Passing an explicit list makes the function testable
            without touching process arguments (backward compatible).

    Returns:
        argparse.Namespace with ``prefix``, ``suffix`` and ``output``.
    """
    parser = argparse.ArgumentParser(description='导出志愿者活动数据')
    parser.add_argument('-p', '--prefix', default='', help='文件名前缀（对应文件名中的地区部分，如江苏）')
    parser.add_argument('-s', '--suffix', required=True, help='表后缀（对应文件名中的数字部分，如32）')
    parser.add_argument('-o', '--output', default='.', help='输出目录，默认为当前目录')
    return parser.parse_args(argv)

if __name__ == '__main__':
    args = parse_args()

    # Configure logging; the -s (suffix) value becomes part of the log file
    # name so runs for different regions don't overwrite each other.
    log_filename = f'volunteer_export_{args.suffix}.log'
    log_file_path = os.path.join(logs_dir, log_filename)

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_file_path, encoding='utf-8')
        ]
    )

    logger = logging.getLogger(__name__)
    logger.info(f"日志文件已创建: {log_file_path}")

    # Create the exporter and run the full export.
    exporter = VolunteerActivityExporter(args.suffix, args.output, args.prefix)
    exporter.run()