#!/usr/bin/python
# -*- coding: utf-8 -*-  
"""
@Project : hello 
@file : oracle_to_csv.py
@Author : shenj
@time : 2025/5/22 11:05
@func :
"""
import logging
import time

from com.cn.for_cdc.common.OracleQueryExecutor import OracleQueryExecutor
from com.cn.for_cdc.common.cdc_conmons import oracle_adm_connections
from com.cn.for_cdc.common.log_helper import configure_logging

import csv
import logging
from typing import List, Dict, Any

import pandas as pd
from io import StringIO

import csv
from typing import Generator
from collections import OrderedDict

def get_ordered_fieldnames(data: List[dict]) -> List[str]:
    """Extract the union of field names from *data*, preserving first-seen order.

    :param data: list of row dicts (rows may have heterogeneous key sets)
    :return: field names in the order they first appear across all rows
    """
    # Plain dicts preserve insertion order (Python 3.7+), so OrderedDict
    # is unnecessary; dict.fromkeys also dedupes in one pass.
    return list(dict.fromkeys(key for row in data for key in row))

def chunker(data: List[dict], size: int = 5000) -> Generator:
    """Yield successive slices of *data*, each at most *size* rows long.

    :param data: full list of row dicts to split
    :param size: maximum number of rows per yielded slice
    """
    start = 0
    total = len(data)
    while start < total:
        yield data[start:start + size]
        start += size


def write_large_csv(data: List[dict], filename: str) -> None:
    """Write *data* to *filename* as CSV in chunks for better throughput.

    Rows may have heterogeneous key sets; any field missing from a row is
    written as an empty string. Column order follows first appearance.

    :param data: list of row dicts to serialize
    :param filename: output CSV path (overwritten, UTF-8)
    """
    fieldnames = get_ordered_fieldnames(data)

    with open(filename, 'w', newline='', encoding='utf-8') as fh:
        writer = csv.DictWriter(fh, fieldnames=fieldnames)
        writer.writeheader()

        for batch in chunker(data):
            # Fill missing fields lazily; writerows accepts any iterable.
            writer.writerows(
                {name: row.get(name, '') for name in fieldnames}
                for row in batch
            )


def pandas_optimized_write(
        data: List[Dict],
        filename: str,
        mode: str = 'w',
        chunksize: int = 10000,
        compression: str = None
) -> None:
    """Write a list of dicts to CSV via pandas, in chunks.

    :param data: list of row dicts (consumed; released after DataFrame build)
    :param filename: output file path
    :param mode: initial write mode ('w' overwrite, 'a' append)
    :param chunksize: rows per to_csv call
    :param compression: optional compression ('gzip'/'bz2'/... or None)
    """
    df = pd.DataFrame(data)

    # Column dtype tuning: high-repetition ROW_ID as category, COMMENTS as
    # an explicit string dtype. errors='ignore' keeps the original frame if
    # conversion fails.
    type_rules = {
        'ROW_ID': 'category',  # high-repetition column -> category
        'COMMENTS': 'string'   # explicit string dtype
    }
    df = df.astype(type_rules, errors='ignore')

    # Release the source list early to reduce peak memory.
    del data

    for i in range(0, len(df), chunksize):
        chunk = df.iloc[i:i + chunksize]

        # Only the very first chunk of a fresh file carries the header.
        header = (i == 0) and (mode == 'w')

        # BUG FIX: the original passed the caller's mode for EVERY chunk, so
        # with mode='w' each to_csv call truncated the file and only the last
        # chunk survived. Chunks after the first must always append.
        chunk_mode = mode if i == 0 else 'a'

        chunk.to_csv(
            filename,
            mode=chunk_mode,
            header=header,
            index=False,
            compression=compression,
            encoding='utf-8-sig',          # BOM so Excel opens it correctly
            quoting=csv.QUOTE_NONNUMERIC,  # quote non-numeric fields
            chunksize=5000                 # pandas-internal write chunking
        )

        del chunk


def write_dicts_to_csv(
        data: List[Dict[str, Any]],
        filename: str,
        encoding: str = "utf-8",
        delimiter: str = ",",
        quote_char: str = '"',
        errors: str = "strict"
) -> None:
    """Write a list of dicts to a CSV file with dynamic, order-preserving
    field names and explicit error handling.

    :param data: list of row dicts (each dict is one row)
    :param filename: output file path
    :param encoding: file encoding (default UTF-8)
    :param delimiter: column separator (default comma)
    :param quote_char: text quoting character (default double quote)
    :param errors: codec error-handling strategy (default strict)

    :raises ValueError: if data is empty or not a list of dicts
    :raises IOError: if the CSV write or encoding fails
    """
    # Input validation
    if not data or not isinstance(data, list):
        raise ValueError("输入数据必须是非空列表")

    if not all(isinstance(item, dict) for item in data):
        raise ValueError("列表元素必须为字典类型")

    try:
        # BUG FIX: the original used a set comprehension here, which
        # destroyed field order (nondeterministic columns per run) despite
        # the docstring promising order preservation. dict.fromkeys dedupes
        # while keeping first-seen order.
        fieldnames = list(dict.fromkeys(k for d in data for k in d))

        with open(filename, "w", newline="", encoding=encoding, errors=errors) as csvfile:
            writer = csv.DictWriter(
                csvfile,
                fieldnames=fieldnames,
                delimiter=delimiter,
                quotechar=quote_char,
                quoting=csv.QUOTE_MINIMAL
            )

            writer.writeheader()
            # Missing keys in a row are filled with '' (DictWriter restval).
            writer.writerows(data)

        # BUG FIX: original logged a literal "(unknown)" placeholder instead
        # of the actual output filename.
        logging.info(f"成功生成CSV文件: {filename} (共{len(data)}行)")

    except csv.Error as e:
        error_msg = f"CSV写入失败: {str(e)}"
        logging.error(error_msg)
        raise IOError(error_msg) from e
    except UnicodeEncodeError as e:
        error_msg = f"编码错误: {e.reason} (位置:{e.start}-{e.end})"
        logging.error(error_msg)
        # Chain the cause (original dropped it).
        raise IOError(error_msg) from e

def main():
    """Example entry point: export Oracle diff rows to a local CSV file."""
    configure_logging()

    # Build the query executor from the configured connection profile.
    executor = OracleQueryExecutor(oracle_adm_connections['icibe_pro'])

    # Cross-dblink MINUS query comparing Siebel loyalty rows against the
    # staging table.
    complex_sql = """

SELECT /*+ parallel(16) */slri.ROW_ID, slt.comments
FROM siebel.S_LOY_TXN@SBL slt,siebel.s_loy_acrl_itm@SBL slri
WHERE slt.ROW_ID = slri.TXN_ID AND slt.BU_ID = '1-7V7V' AND slt.BU_ID = '1-7V7V' 
AND slri.last_upd > TO_DATE('2025-05-06','YYYY-MM-DD') 
AND slri.created > TO_DATE('2022-06-01','YYYY-MM-DD') 
AND slt.created > TO_DATE('2022-06-01','YYYY-MM-DD') 
AND slri.last_upd < SYSDATE - 1
MINUS
SELECT /*+ parallel(16) */sbmp.ROW_ID, sbmp.COMMENTS
FROM STG_B_MEM_POINT sbmp WHERE sbmp.BU_CODE = 'ICINL'

    """

    # Run the query, dump the result to CSV, and log the elapsed time.
    try:
        t0 = time.time()
        rows = executor.execute_query(complex_sql)
        logging.info(f"获取到{len(rows)}条差异数据")
        pandas_optimized_write(data=rows, filename="20250522tmp.csv")
        logging.info(f"总执行耗时: {time.time() - t0:.2f}s")
    except Exception as exc:
        # Top-level boundary: log and swallow so the script exits cleanly.
        logging.error(f"主流程执行异常: {exc}")


if __name__ == "__main__":
    main()
