#!/usr/bin/env python
# -*- coding: utf-8 -*-

import psycopg2
import csv
import os
import time
import argparse
import logging

# Logging setup: mirror every message to a log file and to the console.
_log_handlers = [
    logging.FileHandler('volunteer_export.log'),
    logging.StreamHandler(),
]
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.INFO,
    handlers=_log_handlers,
)
logger = logging.getLogger(__name__)

# Fixed database / export configuration.
# NOTE(review): credentials are hard-coded in source; consider moving them
# to environment variables or a secrets store.
DEFAULT_CONFIG = dict(
    host="192.1.0.33",
    port="5432",
    database="vsisdb",
    user="vsisuser",
    password="123Qwe!@#",
    schema="vsis",
    batch_size=50000,                  # rows fetched per batch (50k)
    max_file_size=500 * 1024 * 1024,   # max size of a single file: 500 MB
    estimated_row_size=300,            # estimated bytes per row
    retry_count=3,
    connection_timeout=30,
    query_timeout=3600,                # statement timeout: one hour
)


class VolunteerDataExporter:
    """Export aggregated volunteer service-hour data from PostgreSQL to CSV.

    Joins the volunteer master table (kdp01_<suffix>) against one or more
    service-record tables (kdx07_<suffix>), aggregates total service hours
    per ID number, and streams the result into
    ``<prefix>_volunteer_<suffix>.csv`` via a server-side cursor so the
    full result set is never held in client memory.
    """

    def __init__(self, table_suffix, csv_prefix, output_dir='.'):
        """Validate arguments, prepare the output directory and connect.

        Args:
            table_suffix: region code appended to table names (e.g. "10").
            csv_prefix: prefix of the generated CSV file name.
            output_dir: directory to write the CSV into (created if absent).

        Raises:
            ValueError: if ``table_suffix`` contains characters other than
                letters, digits and underscores.
            RuntimeError: if the database connection cannot be established.
        """
        # The suffix is interpolated into SQL identifiers below; reject
        # anything that is not a plain identifier fragment to prevent SQL
        # injection from the command line.
        if not table_suffix or not table_suffix.replace('_', '').isalnum():
            raise ValueError(f"非法的表名后缀: {table_suffix!r}")

        # Copy the defaults so instances can never mutate the shared
        # module-level DEFAULT_CONFIG dict.
        self.db_config = dict(DEFAULT_CONFIG)
        self.table_suffix = table_suffix
        self.csv_prefix = csv_prefix
        self.output_dir = output_dir

        # Make sure the output directory exists before exporting.
        os.makedirs(output_dir, exist_ok=True)

        # Connect eagerly so configuration problems surface immediately.
        self.conn = None
        self._init_connection()

    def _get_table_names(self):
        """Return ``(kdp01_table, kdx07_tables)`` with the schema prefix.

        Jiangsu province data (suffix starting with "32") is sharded across
        kdx07_3200 .. kdx07_3213, so all 14 shard names are returned; any
        other region has exactly one kdx07 table.
        """
        schema = self.db_config['schema']
        kdp01_table = f"{schema}.kdp01_{self.table_suffix}"

        if self.table_suffix.startswith('32'):
            kdx07_tables = [f"{schema}.kdx07_{n}" for n in range(3200, 3214)]
        else:
            kdx07_tables = [f"{schema}.kdx07_{self.table_suffix}"]
        return kdp01_table, kdx07_tables

    def _init_connection(self):
        """Open the PostgreSQL connection and set the statement timeout.

        Raises:
            RuntimeError: wrapping the underlying driver error on failure.
        """
        try:
            self.conn = psycopg2.connect(
                host=self.db_config['host'],
                port=self.db_config['port'],
                database=self.db_config['database'],
                user=self.db_config['user'],
                password=self.db_config['password'],
                connect_timeout=self.db_config['connection_timeout']
            )
            # Abort any single statement running longer than the configured
            # query timeout (value is in seconds, hence the 's' suffix).
            with self.conn.cursor() as cursor:
                cursor.execute(f"SET statement_timeout = '{self.db_config['query_timeout']}s'")
            logger.info("数据库连接成功")
        except Exception as e:
            logger.error(f"数据库连接失败: {str(e)}")
            raise RuntimeError(f"无法连接到数据库: {str(e)}")

    def _build_query(self):
        """Build the aggregation SQL, merging all kdx07 shards via UNION ALL."""
        kdp01_table, kdx07_tables = self._get_table_names()

        # Collapse all service-record tables into one derived table.
        kdx07_union_query = " UNION ALL ".join(
            f"SELECT axcp0005, albp0029, albx0702 FROM {table}"
            for table in kdx07_tables
        )

        # Every non-grouped output column is wrapped in MAX(): the original
        # selected k.axcp0004 bare, which PostgreSQL rejects (error 42803)
        # unless axcp0005 happens to be the table's primary key.
        query = f"""
            SELECT
                MAX(k.axcp0004) AS 证件号类型,
                k.axcp0005 AS 证件号码,
                MAX(k.axcp0006) AS 性别,
                MAX(k.axcp0002) AS 姓名,
                MAX(k.axaa0003) AS 区域,
                MAX(k.axcp0037) AS 手机号,
                MAX(k.albp0029) AS 志愿者编号,
                ROUND(COALESCE(SUM(k7.albx0702::NUMERIC), 0), 2) AS 服务总时长
            FROM {kdp01_table} k
            LEFT JOIN (
                {kdx07_union_query}
            ) k7 ON k.axcp0005 = k7.axcp0005 AND k.albp0029 = k7.albp0029
            WHERE k.axcp0004 != '01'
            GROUP BY k.axcp0005
            ORDER BY k.axcp0005
        """
        return query

    def export_to_csv(self):
        """Run the export query and stream the result into a CSV file.

        Returns:
            The path of the written CSV file, or ``None`` when no database
            connection is available.

        Raises:
            Exception: re-raises any query/IO failure after logging it.
            The connection is always closed before returning.
        """
        if not self.conn:
            logger.error("未连接到数据库")
            return

        try:
            query = self._build_query()
            logger.info(f"执行查询: {query[:100]}...")

            start_time = time.time()
            # A named (server-side) cursor streams rows from the server
            # instead of materialising the full result set client-side.
            with self.conn.cursor(name='server_side_cursor') as cursor:
                cursor.itersize = self.db_config['batch_size']
                cursor.execute(query)

                # Header row comes from the cursor's column metadata.
                columns = [desc[0] for desc in cursor.description]

                csv_filename = os.path.join(self.output_dir, f"{self.csv_prefix}_volunteer_{self.table_suffix}.csv")

                with open(csv_filename, 'w', newline='', encoding='utf-8') as csvfile:
                    writer = csv.writer(csvfile)
                    writer.writerow(columns)

                    # Fetch and write in batches to bound memory usage.
                    batch_num = 1
                    total_rows = 0
                    while True:
                        rows = cursor.fetchmany(self.db_config['batch_size'])
                        if not rows:
                            break

                        writer.writerows(rows)
                        batch_count = len(rows)
                        total_rows += batch_count
                        logger.info(f"已写入批次 {batch_num}，行数: {batch_count}，累计: {total_rows}")
                        batch_num += 1

                elapsed_time = time.time() - start_time
                logger.info(f"导出完成 - 共 {total_rows} 行，耗时: {elapsed_time:.2f}秒")
                return csv_filename
        except Exception as e:
            logger.error(f"导出失败: {str(e)}", exc_info=True)
            raise
        finally:
            # The exporter is single-use: always release the connection.
            if self.conn:
                try:
                    self.conn.close()
                    logger.info("数据库连接已关闭")
                except Exception as e:
                    logger.warning(f"关闭数据库连接时出错: {str(e)}")

def main():
    """Parse CLI arguments, run one export, and return a process exit code."""
    arg_parser = argparse.ArgumentParser(description="志愿者数据导出工具")
    arg_parser.add_argument("-s", "--suffix", required=True, help="表名后缀（如：10）")
    arg_parser.add_argument("-p", "--prefix", required=True, help="CSV文件前缀")
    arg_parser.add_argument("-o", "--output", default=".", help="输出目录")
    opts = arg_parser.parse_args()

    try:
        result_path = VolunteerDataExporter(
            table_suffix=opts.suffix,
            csv_prefix=opts.prefix,
            output_dir=opts.output,
        ).export_to_csv()
        logger.info(f"数据已成功导出到: {result_path}")
    except Exception as e:
        logger.critical(f"程序运行失败: {str(e)}", exc_info=True)
        return 1
    return 0


if __name__ == '__main__':
    # Raise SystemExit directly: the exit() helper is injected by the site
    # module and is not guaranteed to exist (e.g. under `python -S`).
    raise SystemExit(main())