import base64
import json
import logging
import os
import sys
import time
from contextlib import contextmanager
from datetime import datetime, timedelta
from typing import Optional, Dict, Any, List, Union, Tuple

import polars as pl
import psycopg2
import psycopg2.extras
import psycopg2.pool
import requests
import rsa
from deepdiff import DeepDiff

from datetime import datetime, date
from decimal import Decimal
def get_logger():
    """Configure root logging with a daily file handler plus a stdout handler
    and return this module's logger.

    Returns:
        logging.Logger: logger named after this module; records propagate to
        the root handlers installed here via ``logging.basicConfig``.
    """
    _, log_file = log_project_path()

    # Shared format so file and console lines look identical (the original
    # file handler was missing the datefmt the console handler used).
    fmt = '%(asctime)s [%(levelname)s] %(module)s-%(lineno)d:\t%(message)s'
    datefmt = "%Y/%m/%d %H:%M:%S"

    file_handler = logging.FileHandler(log_file, mode='a', encoding="utf8")
    file_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
    file_handler.setLevel(logging.INFO)

    # Console handler mirrors the file handler on stdout.
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
    console_handler.setLevel(logging.INFO)

    logging.basicConfig(
        level=logging.INFO,  # was min(logging.INFO, logging.INFO) — a no-op
        handlers=[file_handler, console_handler],
    )
    return logging.getLogger(__name__)


def log_project_path():
    """Build the log directory path and today's log-file path, creating the
    directory if needed.

    Returns:
        tuple[str, str]: ``(log_dir, log_file)`` where ``log_file`` is
        ``<log_dir>/YYYY-MM-DD.txt`` for the current date.
    """
    # Project root = parent of the directory containing this file.
    root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    log_path = os.path.join(root_path, 'logs')
    # Use os.path.join with two arguments instead of a hard-coded '\\' so the
    # path is correct on every OS (the original produced 'logs\\date.txt' on
    # POSIX systems).
    txt_name_time = os.path.join(log_path, '{}.txt'.format(datetime.now().strftime("%Y-%m-%d")))
    # exist_ok avoids the check-then-create race of os.path.exists + os.mkdir.
    os.makedirs(log_path, exist_ok=True)
    return log_path, txt_name_time


# Module-level logger, configured once at import time and shared file-wide.
logger = get_logger()


class GlobalExceptionHandler:
    """Process-wide ``sys.excepthook`` replacement that counts and logs
    otherwise-uncaught exceptions with their full traceback."""

    def __init__(self):
        self.exception_count = 0          # total uncaught exceptions seen
        self.last_exception_time = None   # datetime of the most recent one
        self.exception_details = []       # per-exception summary dicts

    def handle_exception(self, exc_type, exc_value, exc_traceback):
        """Log an uncaught exception and update running statistics.

        KeyboardInterrupt is delegated to the default hook *before* any
        counting or logging so Ctrl-C still terminates quietly (the original
        logged it as an error first and counted it).
        """
        if issubclass(exc_type, KeyboardInterrupt):
            sys.__excepthook__(exc_type, exc_value, exc_traceback)
            return

        self.exception_count += 1
        self.last_exception_time = datetime.now()

        self.exception_details.append({
            'timestamp': self.last_exception_time,
            'type': exc_type.__name__,
            'value': str(exc_value),
            'count': self.exception_count
        })

        logger.error(
            f"未捕获的异常 [{self.exception_count}]: {exc_type.__name__}: {exc_value}",
            exc_info=(exc_type, exc_value, exc_traceback)
        )
        logger.error(f"程序运行异常统计 - 总异常数: {self.exception_count}, 最后异常时间: {self.last_exception_time}")


# Create the global exception handler and install it as the process excepthook
# so any uncaught exception is logged before the interpreter exits.
global_exception_handler = GlobalExceptionHandler()
sys.excepthook = global_exception_handler.handle_exception


class PostgreSQLConnectionPool:
    """Thin wrapper over psycopg2's ThreadedConnectionPool that hands out
    connections wrapped in an explicit-transaction helper (TransactionContext)."""

    def __init__(self, min_conn: int, max_conn: int, **kwargs):
        # kwargs are forwarded to psycopg2.connect (dbname, user, password,
        # host, port, application_name, ...).
        self.pool = psycopg2.pool.ThreadedConnectionPool(
            min_conn, max_conn, **kwargs
        )

    @contextmanager
    def get_transaction(self):
        """Yield a TransactionContext bound to a pooled connection.

        Commits on normal exit, rolls back (and re-raises) on exception, and
        in all cases restores autocommit and returns the connection to the pool.
        """
        conn = None
        try:
            conn = self.pool.getconn()
            # Reset autocommit and transaction state each time a connection is taken.
            conn.autocommit = False
            if conn.status == psycopg2.extensions.STATUS_IN_TRANSACTION:
                conn.rollback()  # clear any transaction left over from a previous borrower
                logger.warning("清理了上一个未完成的事务")

            logger.info("✅ 事务开始")
            yield TransactionContext(conn)

            # Normal exit: commit the whole unit of work.
            conn.commit()
            logger.info("✅ 事务已提交")

        except Exception as e:
            if conn:
                conn.rollback()
                logger.error(f"❌ 事务回滚: {e}")
            raise e
        finally:
            if conn:
                # Restore autocommit before putconn so the next borrower starts clean.
                conn.autocommit = True
                self.pool.putconn(conn)
                logger.info("🔁 连接已归还连接池")

    def execute_in_transaction(self, func, *args, **kwargs):
        # Run func(tx, *args, **kwargs) inside one managed transaction and
        # return its result.
        with self.get_transaction() as tx:
            return func(tx, *args, **kwargs)

    def close_all(self):
        # Close every pooled connection; the pool is unusable afterwards.
        self.pool.closeall()
        logger.info("🔌 所有连接已关闭")


class TransactionContext:
    """Cursor-level helpers bound to one open psycopg2 connection/transaction.

    Usually yielded by ``PostgreSQLConnectionPool.get_transaction``; can also
    be used directly as a context manager (commit on success, rollback on
    error).
    """

    def __init__(self, connection):
        self.conn = connection
        self._has_error = False  # set once this context rolled back in __exit__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Direct context-manager use: roll back on exception, else commit.
        if exc_type is not None:
            self.conn.rollback()
            self._has_error = True
            logger.error(f"❌ 事务因异常回滚: {exc_val}")
        else:
            self.conn.commit()
            logger.info("✅ 事务已提交")

    def execute_values(self, table_name: str, sql: str, argslist: List[Tuple], template: Optional[str] = None,
                       page_size: int = 1000):
        """Bulk-execute ``sql`` via psycopg2.extras.execute_values and log a summary.

        Args:
            table_name: target table name (used for logging only).
            sql: an ``INSERT ... VALUES %s`` style statement.
            argslist: rows (tuples or dicts matching ``template``) to send.
            template: optional per-row placeholder template.
            page_size: rows per batch sent to the server.

        Raises:
            Exception: if the transaction already rolled back earlier.
        """
        if self._has_error:
            raise Exception("事务已处于错误状态，不能执行操作")
        with self.conn.cursor() as cursor:
            psycopg2.extras.execute_values(cursor, sql, argslist, template=template, page_size=page_size)
            logger.info(
                f"📊 向表 {table_name} 执行 execute_values 完成，影响行数: {cursor.rowcount}，处理数据条数: {len(argslist)}")
            logger.info("=" * 100)
            logger.info(f"📊 数据导入完成 - 统计汇总")
            logger.info(f"成功行数: {cursor.rowcount}")
            logger.info(f"✅ 累计落表总行数（新增+更新）: {len(argslist)}")
            logger.info("=" * 100)

    def execute_query_dict(self, query: str, params: Optional[tuple] = None) -> List[Dict[str, Any]]:
        """Run a query and return all rows as dicts (RealDictCursor).

        The ``with`` block closes the cursor itself; the original's extra
        ``finally: cursor.close()`` inside it was redundant and was removed.
        """
        try:
            with self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
                cursor.execute(query, params)
                return cursor.fetchall()
        except Exception as e:
            logger.error(f"执行查询失败: {e}")
            raise

    def commit(self):
        """Explicitly commit the current transaction."""
        self.conn.commit()
        logger.info("✅ 手动提交事务")

    def rollback(self):
        """Explicitly roll back the current transaction."""
        self.conn.rollback()
        logger.info("❌ 手动回滚事务")


class CJIndexTreeProcessor:
    """ETL pipeline for the CJ indicator API.

    Extracts paginated indicator data, transforms it with polars into
    tree-mapping / index-tree / metrics rows, and loads all three tables in
    one database transaction.
    """

    def __init__(self, api_config: dict, db_config: dict, executor: PostgreSQLConnectionPool):
        self.api_config = api_config
        self.executor = executor
        self.db_config = db_config        # table-name configuration (mapping/tree/metrics)
        self._cached_token = None         # auth token reused across page requests
        self.total_inserted_rows = 0      # 累计成功落表的总行数
        self.session = requests.session()

    def _call_api_with_retry(
            self,
            api_url: str,
            api_params: dict,
            headers: Optional[Dict[str, str]] = None,
            timeout: int = 10,
            retry_times: int = 3,
            retry_delay: float = 0.5,
            backoff_factor: float = 2.0,
            session: Optional[requests.Session] = None
    ) -> Union[requests.Response, None]:
        """POST ``api_params`` as JSON with exponential-backoff retries.

        Returns the Response from the first successful send, or None once all
        retries are exhausted. ``headers`` are merged on top of the session's
        own headers by requests, so session-level headers (e.g. 'token') are
        preserved.
        """
        # BUG FIX: the original replaced self.session with a *fresh* Session
        # whenever no session was passed, silently dropping headers that
        # callers had set on self.session (the auth token from
        # _extract_reader), and it ignored the `headers` argument entirely.
        if session is not None:
            self.session = session
        for attempt in range(retry_times + 1):
            try:
                logger.info(f"调用API: {api_url} (尝试 {attempt + 1}/{retry_times + 1})")
                response = self.session.post(
                    url=api_url,
                    json=api_params,  # requests serializes the dict to JSON
                    headers=headers,  # merged with self.session.headers by requests
                    timeout=timeout
                )
                return response
            except requests.exceptions.Timeout:
                logger.error(f"请求超时 (尝试 {attempt + 1}/{retry_times + 1})")
            except requests.exceptions.ConnectionError:
                logger.error(f"连接错误 (尝试 {attempt + 1}/{retry_times + 1})")
            except requests.exceptions.RequestException as e:
                logger.error(f"请求异常: {e} (尝试 {attempt + 1}/{retry_times + 1})")
            except Exception as e:
                logger.error(f"未知错误: {e} (尝试 {attempt + 1}/{retry_times + 1})")

            # Not the last attempt: back off exponentially before retrying.
            if attempt < retry_times:
                delay = retry_delay * (backoff_factor ** attempt)
                logger.info(f"等待 {delay} 秒后重试...")
                time.sleep(delay)

        logger.error(f"API调用失败，已重试 {retry_times} 次")
        return None

    def _generate_token(self, params: Optional[dict] = None) -> str:
        """Obtain an auth token from the API.

        Encrypts ``"username;YYYY-MM-DD;password"`` with the configured RSA
        public key, base64-encodes it, and POSTs it to ``params['auth_url']``.

        Raises:
            Exception: when the auth call fails or the response has no token.
        """
        try:
            message = params.get('username') + ";" + datetime.now().strftime("%Y-%m-%d") + ";" + params.get('password')
            public_key_rsa = rsa.PublicKey.load_pkcs1(params.get('public_key'))
            crypto = rsa.encrypt(message.encode("utf-8"), public_key_rsa)
            # base64 bytes -> utf-8 str so the payload is JSON-serializable.
            crypto64 = base64.b64encode(crypto).decode('utf-8')
            request_params = {'json': crypto64}
            headers = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.3",
            }
            response = self._call_api_with_retry(api_url=params['auth_url'], api_params=request_params,
                                                 headers=headers, )
            # BUG FIX: _call_api_with_retry returns None after exhausting
            # retries; the original dereferenced .text on it unconditionally.
            if response is None:
                raise Exception("认证接口无响应")
            token = json.loads(response.text)["token"]
            logger.info(f"成功生成token: {token}")
            return token
        except Exception as e:
            raise Exception(f"获取 token 失败: {e}")

    def _extract_reader(self, page_no: int, ) -> List[Dict]:
        """Fetch one page of indicator records; returns [] for an empty page.

        BUG FIX: the original never assigned ``self._cached_token``, so a new
        token was generated for every page; the token is now cached.
        """
        if not self._cached_token:
            self._cached_token = self._generate_token(self.api_config['token_param'])
        token = self._cached_token
        try:
            headers = {
                'Content-Type': 'application/json',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            }
            # Refresh the token header on the shared session.
            self.session.headers.update({'token': token})
            api_params = self.api_config['api_params'].copy()
            api_params['pageNo'] = page_no
            # BUG FIX: use self.api_config, not the module-level `api_config`
            # global that only exists when run as a script.
            response = self._call_api_with_retry(api_url=self.api_config['api_url'], api_params=api_params,
                                                 headers=headers, )
            try:
                data = response.json()
            except ValueError:
                raise Exception(
                    f"第 {page_no} 页返回非JSON响应，状态码: {response.status_code}, 文本前200: {response.text[:200]}")
            if 'message' not in data:
                raise KeyError("API响应缺少 message 字段")
            parsed_data = data['message']
            # Drops the trailing record of each page.
            # NOTE(review): presumably the API appends a sentinel/summary row —
            # confirm the last element really is non-data.
            return parsed_data[:-1] if parsed_data else []
        except Exception as e:
            raise e

    def _fetch_all_stream(self, ):
        """Generator yielding one page of records at a time until exhausted."""
        current_page = 1
        while True:
            logger.info("=" * 80 + f"正在请求第 {current_page} 页数据" + "=" * 80)
            data = self._extract_reader(current_page)
            if not data:
                break
            yield data
            # A short page means we just consumed the last one.
            # BUG FIX: read pageSize from self.api_config, not the global.
            if len(data) < self.api_config['api_params']['pageSize']:
                break
            current_page += 1

    def _transform_data(self, extract_reader_result: List[Dict], ) -> Dict:
        """Transform one page of raw API records into load-ready row dicts.

        Returns a dict with three keys:
            new_cj_tree_mapping: rows for the tree-mapping table.
            meta_public_data_index_tree_row: rows for the index-tree table.
            metrics_results_insert_row: rows for the metrics table.
        """
        try:
            logger.info("开始数据转换处理")
            json_input = extract_reader_result
            table_name = self.db_config['mapping_table']
            df = pl.DataFrame(json_input)

            # --- metrics rows: normalize columns, then project to the target schema.
            metrics_results_insert_row_df = (
                df.filter(pl.col('INDICATOR_ID').is_not_null())
                .with_columns([
                    pl.col('INDICATOR_ID').cast(pl.Utf8),
                    pl.col('SHOW_NAME').fill_null('').cast(pl.Utf8),
                    pl.col('UNIT').fill_null('').cast(pl.Utf8),
                    pl.col('FREQUENCY').fill_null('不定期'),
                    pl.col('DESCRIPTION').fill_null('').str.replace('无', ''),
                    pl.col('FIRST_CATEGORY').fill_null('').cast(pl.Utf8),
                    pl.col('UPDATE_TIME').fill_null(0),
                    pl.col('REGION').fill_null('').cast(pl.Utf8),
                ])
                .with_columns([
                    (pl.lit('CJ-') + pl.col('INDICATOR_ID')).alias('edb_metrics_code'),
                    pl.lit('行业报告').alias('edb_catagory'),
                    pl.col('INDICATOR_ID').alias('third_code'),
                    pl.lit('CJ').alias('third_type'),
                    pl.col('SHOW_NAME').alias('metrics_name'),
                    pl.col('UNIT').alias('metrics_unit'),
                    pl.col('FREQUENCY').replace_strict(
                        {'day': '日', 'ten-day': '十天', 'quarter': '季', 'half-year': '半年', 'month': '月',
                         'week': '周', 'year': '年'}, default='不定期').alias('metrics_frequence'),
                    pl.col('DESCRIPTION').alias('metrics_remark'),
                    pl.col('FIRST_CATEGORY').alias('metrics_data_source'),
                    pl.lit('').alias('metrics_startdate'),
                    pl.lit('').alias('metrics_enddate'),
                    # NOTE(review): UPDATE_TIME is divided by 1000 and then cast
                    # to Datetime('ms') — this assumes millisecond epochs in the
                    # source; confirm the unit against the API.
                    pl.col('UPDATE_TIME').cast(pl.Int64, strict=False)
                    .map_elements(lambda x: x // 1000 if x is not None else None).cast(pl.Datetime('ms'), strict=False)
                    .dt.strftime('%Y-%m-%d %H:%M:%S').fill_null('').alias('metrics_update'),
                    pl.col('REGION').alias('metrics_nation'),
                    pl.lit('').alias('available_date'),
                    pl.lit(0).cast(pl.Int64).alias('metrics_stopped'),
                ]).with_columns(
                    # Keep the full original record as lower-cased-key JSON.
                    org_data=pl.struct(pl.all()).map_elements(
                        lambda row: json.dumps({k.lower(): v for k, v in row.items()}, ensure_ascii=False, default=str),
                        return_dtype=pl.Utf8
                    )
                ).select([
                    'edb_metrics_code', 'edb_catagory', 'third_code', 'third_type', 'metrics_name', 'metrics_unit',
                    'metrics_frequence', 'metrics_remark',
                    'metrics_data_source', 'metrics_startdate', 'metrics_enddate', 'metrics_update', 'metrics_nation',
                    'available_date', 'metrics_stopped', 'org_data'
                ])
            )

            # --- tree rows: explode each PATHS string into one row per level.
            source_df = (
                df.with_columns(
                    tree=pl.col("PATHS").str.replace_all(r'\\', '').str.strip_chars('[]"'),
                    parts=pl.col("PATHS").str.replace_all(r'\\', '').str.strip_chars('[]"').str.split("/")
                    .list.eval(pl.element().filter(pl.element() != ""))
                )
                .with_columns(total_level=pl.col("parts").list.len())
                .explode("parts")
                .with_columns(
                    remark=pl.col("parts").str.replace(r"^\d+", "", literal=False),
                    tree_level=pl.col("parts").str.extract(r"^(\d)", 1).cast(pl.Int64) - 1,
                    total_level=pl.col("total_level"),
                    data_type=pl.lit("CJ"),
                    node_type=pl.lit("NODE"),
                    node_name=pl.col("SHOW_NAME"),
                    indicator_id=pl.col("INDICATOR_ID"),
                )
                # Parent of each node is the previous path segment; level-1
                # nodes hang off the fixed root.
                .with_columns(parent_remark=pl.col("remark").shift(1).over("PATHS"))
                .with_columns(parent_remark=pl.when(pl.col("tree_level") == 0).then(pl.lit("CJ-hyroot")).otherwise(
                    pl.col("parent_remark")))
                .filter(pl.col("tree_level") > 0)
                .select("remark", "tree_level", "tree", "total_level", "data_type", "node_type", "node_name",
                        "indicator_id", "parent_remark")
            )

            # Current max mapping code per level (BUG FIX: both lookups now use
            # self.executor instead of the module-level `executor` global).
            max_mapping_data = self.executor.execute_in_transaction(lambda tx: tx.execute_query_dict(
                f"""
                    SELECT level::VARCHAR AS tree_level,
                    COALESCE(MAX(ctm.mapping), '0') AS max_mapping
                    FROM (VALUES ('1'), ('2'), ('3'), ('4'), ('5')) AS level_list(level)
                    LEFT JOIN {table_name} ctm ON ctm.tree_level::VARCHAR = level_list.level::VARCHAR
                    GROUP BY level 
                """
            ))

            existing_mapping_data = self.executor.execute_in_transaction(lambda tx: tx.execute_query_dict(
                f"""
                    SELECT
                        remark,
                        tree_level::VARCHAR AS tree_level, 
                        mapping as existing_mapping
                    FROM {table_name}
                """
            ))

            max_mapping_df = (
                pl.DataFrame(max_mapping_data).with_columns(
                    tree_level=pl.col("tree_level").cast(pl.Int64),
                    max_mapping=pl.col("max_mapping").cast(pl.Int64)
                )
            )

            if existing_mapping_data and len(existing_mapping_data) > 0:
                existing_df = (pl.DataFrame(existing_mapping_data).with_columns([
                    pl.col("tree_level").cast(pl.Int64),
                    pl.col("existing_mapping").cast(pl.Utf8)
                ])
                )
            else:
                # Empty frame with the right schema so the joins below still work.
                existing_df = pl.DataFrame({
                    "remark": pl.Series([], dtype=pl.Utf8),
                    "tree_level": pl.Series([], dtype=pl.Int64),
                    "existing_mapping": pl.Series([], dtype=pl.Utf8)
                })

            # Join to find each node's existing mapping, or the per-level max
            # from which new mapping codes are derived.
            unique_remark_level = source_df.select("remark", "tree_level", "tree", "total_level", "indicator_id",
                                                   "parent_remark", )
            unique_remark_level = unique_remark_level.join(
                existing_df.select("remark", "tree_level", "existing_mapping"), on=["remark", "tree_level"], how="left")
            unique_remark_level = unique_remark_level.join(
                max_mapping_df.select("tree_level", "max_mapping"),
                on="tree_level",
                how="left"
            )

            # Assign a stable per-level ordinal to each new (tree_level, remark).
            unique_order_df = (
                unique_remark_level
                .select(["tree_level", "remark"])
                .unique(maintain_order=True)
                .with_columns(
                    unique_order=pl.col("remark").cum_count().over("tree_level")
                )
            )

            # mapping = existing code if present, else max + ordinal, zero-padded
            # to a level-specific width (2/5/6/7/8 chars for levels 1-5).
            unique_remark_level = (
                unique_remark_level.join(unique_order_df, on=["tree_level", "remark"], how="left")
                .with_columns(level_max_mapping=pl.col("max_mapping").cast(pl.Utf8).fill_null("0").cast(pl.Int64))
                .with_columns(raw_mapping=pl.col("level_max_mapping") + pl.col("unique_order"))
                .with_columns(mapping=pl.when(pl.col("existing_mapping").is_not_null())
                    .then(pl.col("existing_mapping"))
                    .otherwise(
                        pl.when(pl.col("tree_level") == 1)
                        .then(pl.col("raw_mapping").cast(pl.Utf8).str.zfill(2))
                        .when(pl.col("tree_level") == 2)
                        .then(pl.col("raw_mapping").cast(pl.Utf8).str.zfill(5))
                        .when(pl.col("tree_level") == 3)
                        .then(pl.col("raw_mapping").cast(pl.Utf8).str.zfill(6))
                        .when(pl.col("tree_level") == 4)
                        .then(pl.col("raw_mapping").cast(pl.Utf8).str.zfill(7))
                        .when(pl.col("tree_level") == 5)
                        .then(pl.col("raw_mapping").cast(pl.Utf8).str.zfill(8))
                        .otherwise(pl.col("raw_mapping").cast(pl.Utf8).str.zfill(2))
                    )
                ).drop("unique_order", "level_max_mapping", "raw_mapping")
            )

            # Self-join to resolve each node's parent mapping code.
            unique_remark_level = (
                unique_remark_level.join(
                    unique_remark_level.select([
                        pl.col("remark").alias("parent_remark"),
                        pl.col("mapping").alias("parent_mapping")
                    ]).unique(), on="parent_remark", how="left"
                ).with_columns(
                    parent_id=pl.when(pl.col("tree_level") == 1)
                    .then(pl.lit("CJ-hyroot"))
                    .when(pl.col("parent_mapping").is_null())
                    .then(pl.lit("CJ-hyroot"))
                    .otherwise(pl.col("parent_mapping"))
                )
            )

            new_cj_tree_mapping = (unique_remark_level
                                   .with_columns(pl.col("tree_level").cast(pl.Utf8))
                                   .unique(subset=["remark", "tree_level"], keep="first", maintain_order=True)
                                   .select(["mapping", "remark", "tree_level", "tree"]).to_dicts())

            # BUG FIX: was a set — column order of the selected frames (and so
            # the vstack alignment) depended on set iteration order; a list is
            # deterministic.
            meta_cols = ["one_id", "data_type", "node_id", "node_name", "node_type", "parent_id", "create_time",
                         "modify_time"]
            meta_public_data_index_tree_df = (
                unique_remark_level.with_columns([
                    pl.lit("CJ").alias("data_type"),
                    pl.col("tree").str.replace(r'\d+', '', literal=False).alias("node_name"),
                    pl.lit("NODE").alias("node_type"),
                    (pl.lit("QJIND_") + pl.col("mapping")).alias("node_id"),
                    (pl.lit("CJ-QJIND_") + pl.col("mapping")).alias("one_id"),
                    pl.lit(datetime.now()).alias("create_time"),
                    pl.lit(datetime.now()).alias("modify_time"),
                    (pl.lit("QJIND_") + pl.col("parent_id")).alias("parent_id"),
                ])
            )

            # Leaf indicators become DER_IND children of their deepest tree node.
            derived_child_nodes = (
                meta_public_data_index_tree_df.filter(
                    pl.col("tree_level").cast(pl.Int64) == (pl.col("total_level").cast(pl.Int64) - 1))
                .with_columns([(
                                       pl.lit("CJ-") + pl.col("indicator_id")).alias("one_id"),
                               pl.col("indicator_id").alias("node_id"),
                               pl.col("node_id").alias("parent_id"),
                               pl.lit("CJ").alias("data_type"),
                               pl.lit("DER_IND").alias("node_type"),
                               pl.col("node_name").alias("node_name"),
                               pl.lit(datetime.now()).alias("create_time"),
                               pl.lit(datetime.now()).alias("modify_time"), ])
                .select(meta_cols))
            combined_data_df = meta_public_data_index_tree_df.select(meta_cols).vstack(derived_child_nodes).unique(
                subset=["node_id"], keep="first", maintain_order=True)
            meta_public_data_index_tree_row = combined_data_df.to_dicts()
            return {
                'new_cj_tree_mapping': new_cj_tree_mapping,
                'meta_public_data_index_tree_row': meta_public_data_index_tree_row,
                'metrics_results_insert_row': metrics_results_insert_row_df.to_dicts(),
            }
        except Exception as e:
            logger.error(f"transform_data执行失败: {e}")
            raise

    def load_writer(self, upsert_load_data: dict) -> bool:
        """Write mapping/tree/metrics rows to their tables in ONE transaction.

        Returns True on success (including the trivial no-data case); raises
        on database or other errors after logging them.
        """
        if not upsert_load_data:
            # BUG FIX: use the configured module logger, not the root logger.
            logger.warning("没有数据需要导入")
            return True
        try:
            logger.info("开始数据写入处理")

            def _do_load(tx):
                table_name_config = self.db_config
                mapping_rows = upsert_load_data['new_cj_tree_mapping']
                tree_rows = upsert_load_data['meta_public_data_index_tree_row']
                metrics_rows = upsert_load_data['metrics_results_insert_row']
                # Step 1: mapping table — insert only (remark, tree_level)
                # combinations that do not exist yet.
                tx.execute_values(
                    table_name_config['mapping_table'],
                    sql=f"""
                        INSERT INTO {table_name_config['mapping_table']} (mapping, remark, tree_level, tree)
                        SELECT tmp.mapping, tmp.remark, tmp.tree_level, tmp.tree
                        FROM (VALUES %s) AS tmp(mapping, remark, tree_level, tree)
                        WHERE NOT EXISTS (
                            SELECT 1 FROM {table_name_config['mapping_table']} e 
                            WHERE e.remark = tmp.remark AND e.tree_level = tmp.tree_level
                        )
                    """,
                    argslist=mapping_rows,
                    template="(%(mapping)s, %(remark)s, %(tree_level)s, %(tree)s)",
                    page_size=self.api_config.get('batch_config', {}).get('batch_size', 1000),
                )
                # Step 2: tree table — upsert on one_id.
                tx.execute_values(
                    table_name_config['tree_table'],
                    f"""
                    INSERT INTO {table_name_config['tree_table']} 
                    (one_id,data_type,node_id,node_name,node_type,parent_id,create_time,modify_time) 
                    VALUES %s ON CONFLICT (one_id) DO 
                    UPDATE SET
                        data_type = EXCLUDED.data_type,
                        node_id = EXCLUDED.node_id,
                        node_name = EXCLUDED.node_name,
                        node_type = EXCLUDED.node_type,
                        parent_id = EXCLUDED.parent_id,
                        modify_time = CURRENT_TIMESTAMP
                    """,
                    argslist=tree_rows,
                    template="(%(one_id)s, %(data_type)s, %(node_id)s, %(node_name)s, %(node_type)s, %(parent_id)s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)",
                    page_size=self.api_config.get('batch_config', {}).get('batch_size', 1000)
                )
                # Step 3: metrics table — upsert on edb_metrics_code.
                tx.execute_values(
                    table_name_config['metrics_table'],
                    sql=f"""
                        INSERT INTO {table_name_config['metrics_table']} (
                            edb_metrics_code, edb_catagory, third_code, third_type, metrics_name,
                            metrics_unit, metrics_frequence, metrics_remark, metrics_data_source,
                            metrics_startdate, metrics_enddate, metrics_update, metrics_nation,
                            create_time, available_date, metrics_stopped, org_data
                        ) VALUES %s ON CONFLICT (edb_metrics_code) DO UPDATE SET
                            edb_catagory = EXCLUDED.edb_catagory,
                            third_code = EXCLUDED.third_code,
                            third_type = EXCLUDED.third_type,
                            metrics_name = EXCLUDED.metrics_name,
                            metrics_unit = EXCLUDED.metrics_unit,
                            metrics_frequence = EXCLUDED.metrics_frequence,
                            metrics_remark = EXCLUDED.metrics_remark,
                            metrics_data_source = EXCLUDED.metrics_data_source,
                            metrics_startdate = EXCLUDED.metrics_startdate,
                            metrics_enddate = EXCLUDED.metrics_enddate,
                            metrics_update = EXCLUDED.metrics_update,
                            metrics_nation = EXCLUDED.metrics_nation,
                            available_date = EXCLUDED.available_date,
                            metrics_stopped = EXCLUDED.metrics_stopped,
                            org_data = EXCLUDED.org_data,
                            modify_time = CURRENT_TIMESTAMP 
                    """,
                    argslist=metrics_rows,
                    template="(%(edb_metrics_code)s, %(edb_catagory)s, %(third_code)s, %(third_type)s, %(metrics_name)s, %(metrics_unit)s, %(metrics_frequence)s, %(metrics_remark)s, %(metrics_data_source)s, %(metrics_startdate)s, %(metrics_enddate)s, %(metrics_update)s, %(metrics_nation)s, CURRENT_TIMESTAMP, %(available_date)s, %(metrics_stopped)s, %(org_data)s)",
                    page_size=self.api_config.get('batch_config', {}).get('batch_size', 1000),
                )

            # All three steps share one transaction: all-or-nothing.
            self.executor.execute_in_transaction(_do_load)
            logger.info("数据写入处理完成")
            return True
        except psycopg2.Error as e:
            logger.error(f"数据写入失败(数据库错误): {str(e)}")
            raise
        except Exception as e:
            logger.error(f"数据写入失败(其他错误): {str(e)}")
            raise

    def _run_import(self) -> bool:
        """Stream every page through transform + load; return True if any
        page succeeded. Raises on the first failing page.

        The leftover debug block that appended each page to ``data.csv`` via
        an in-loop pandas import has been removed.
        """
        success_count = 0
        total_pages = 0
        failed_pages = 0
        for page_data in self._fetch_all_stream():
            total_pages += 1
            try:
                logger.info(f"第{total_pages}页转换和写入")
                transformed = self._transform_data(page_data)
                if self.load_writer(transformed):
                    success_count += 1
            except Exception as e:
                failed_pages += 1
                # Chain the cause so the original traceback is preserved.
                raise Exception(f"第 {total_pages} 页处理失败: {e}") from e
        if total_pages == 0:
            logger.info("未处理到任何页，可能API无数据或首次请求失败")
        logger.info(f"完成：{total_pages} 页，成功 {success_count} 页，失败 {failed_pages} 页")
        return success_count > 0

if __name__ == "__main__":
    # API extraction configuration (batching, pagination window, auth).
    api_config = {
        'batch_config': {
            'batch_size': 1000,  # rows per execute_values batch
            'max_workers': 4  # reserved for concurrent processing
        },
        'api_params': {
            # Pull the last 100 days of data.
            'ETL_DATE': (datetime.now() - timedelta(days=100)).strftime("%Y-%m-%d %H:%M:%S"),
            'pageSize': 1000,
            'pageNo': 1
        },
        # SECURITY NOTE(review): credentials and the RSA key are hardcoded in
        # source — move them to environment variables or a secrets store.
        'token_param': {
            'auth_url': 'http://u.95579.com/dataapi/auth',
            'username': "thfund@cjsc.com.cn",
            'password': "dt7mTNwA",
            'public_key': """
                                  -----BEGIN RSA PUBLIC KEY-----
                                  MIGJAoGBAJDhq0rhTN2YeAICdi5Zpl2sqQTCDXGCrCxY9gx1Jb4+etCZkfy/0PZQ
                                  ZHA7TBmrbert4PUZKbpy0qEYnxvWGGmZ/cJ0YYSgue2aPSfkq/QRS/Y58W+j+TcO
                                  lCv+dZ7Wi0k68Yu7wyH+ZbDo55ySwLpiI/o+IQvzOwRPHlQ0yIkVAgMBAAE=
                                  -----END RSA PUBLIC KEY-----
                                  """
        },
        'api_url': 'http://u.95579.com/dataapi/get_alp_api_data_info',
    }

    # Per-environment database configuration (table names + connection info).
    db_config = {
        'standalone': {
            'table_config': {
                'mapping_table': 'a_aa_lsy_cj_tree_mapping',
                'tree_table': 'a_aa_lsy_meta_public_data_index_tree',
                'metrics_table': 'a_aa_lsy_test_index_center_third_edb_metrics_main'
            },
            'sql_connect': {
                'dbname': 'postgres',
                'user': 'postgres',
                'password': 'postgres',
                'host': '127.0.0.1',
                'application_name': "third_edb_data",
                'port': '5432'
            }
        },
        'test_env': {
            'table_config': {
                'mapping_table': 'a_aa_lsy_cj_tree_mapping',
                'tree_table': 'a_aa_lsy_meta_public_data_index_tree',
                'metrics_table': 'a_aa_lsy_test_index_center_third_edb_metrics_main'
            },
            # SECURITY NOTE(review): test-environment credentials are committed
            # in plain text — rotate and externalize them.
            'sql_connect': {
                'dbname': 'odpstest',
                'user': 'LTAI5tQo9VtJ414iSEZrE8Vn',
                'password': 'BPBKqDiQ7JKPl6o3QDIFg6kh71nakS',
                'host': 'hgprecn-cn-v641lnkxm003-cn-shanghai.hologres.aliyuncs.com',
                'port': '80'
            },
            'application_name': "third_edb_data",
        },
        'prod_env': {
            'table_config': {
                'mapping_table': 'cj_tree_mapping',
                'tree_table': 'meta_public_data_index_tree',
                'metrics_table': 'index_center_third_edb_metrics_main'
            },
            # Production connection info is injected elsewhere (getParam);
            # intentionally left unconfigured here.
            # 'sql_connect': {
            #     'dbname': 'dataocean',
            #     'user': getParam(user),
            #     'password': getParam(password),
            #     'host': getParam(host),
            #     'port': getParam(port)
            # },
            # 'application_name': "third_edb_data",
        }
    }

    config_env = 'standalone'
    logger.info(f"配置环境选择: {config_env}")
    selected_config = db_config[config_env]
    logger.info("创建数据库连接池")
    executor = PostgreSQLConnectionPool(min_conn=5, max_conn=20, **selected_config['sql_connect'])
    dataImporter = CJIndexTreeProcessor(api_config, selected_config['table_config'], executor)
    try:
        dataImporter._run_import()
    except Exception as e:
        # Chain the cause so the original traceback survives the re-raise
        # (the original `raise Exception(f"...")` discarded it).
        raise Exception(f"❌ 执行失败: {e}") from e
    finally:
        # Always release pooled connections, even on failure.
        executor.close_all()
