#!/usr/bin/python
# -*- coding: utf-8 -*-  
"""
@Project : hello 
@file : async_batch_query_tmp.py
@Author : shenj
@time : 2025/5/14 16:48
@func : 批量异步查询返回行号

todo
直接从excel获取行号
汇总统计成功失败的sql
指定跑哪几条SQL
"""

import asyncio
import logging
from contextlib import asynccontextmanager
from datetime import datetime
from typing import List, Dict, Any, Optional

import oracledb
import pandas as pd
import sqlparse

from com.cn.for_cdc.check_data.optimized_concat import process_data
from com.cn.for_cdc.check_data.query_sql_detail import query_detail
from com.cn.for_cdc.common.log_helper import configure_logging

configure_logging()

# Database connection-pool configuration (passed to oracledb.create_pool).
# SECURITY NOTE(review): credentials are hard-coded in source; consider
# loading them from environment variables or a secrets store.
DB_CONFIG = {
    "user": "CRM_CDC_PP",
    "password": "xDzkcrmIM3cdcV4ppy",
    "dsn": "exa4-scan.uk.aswatson.net:1521/CRMBKVP",
    "min": 2,        # minimum pooled connections
    "max": 10,       # maximum pooled connections
    "increment": 2   # connections added when the pool grows
}

# Performance tuning parameters.
MAX_CONCURRENT = 8              # max queries in flight at once (semaphore size)
QUERY_TIMEOUT = 1500            # per-query timeout, seconds (Python-side timeout path)
RETRY_COUNT = 1                 # extra attempts after the first failure
BATCH_SIZE = 500                # rows per cursor.fetchmany() call
MAX_DISPLAY_ROWS = 10           # rows shown in the logged result table
MAX_SQL_DISPLAY_LENGTH = 50000  # max SQL length kept for display/logging

# Module-level accumulators for the run summary; each entry is a one-key
# dict keyed by the Excel row number of the SQL.
list_pass = []      # rows whose check returned 0 (passed)
list_fail = []      # rows whose check returned a non-zero count (failed)
list_abnormal = []  # rows whose query raised a database error


# Excel row number -> extracted inner SQL, used for detailed follow-up queries.
detail_row_sql={}

def extract_inner_sql(sql):
    """Return every parenthesised subquery found after the FROM keyword.

    Parses the first statement in *sql* with sqlparse, locates the FROM
    keyword, then recursively collects the contents of every parenthesised
    token that follows it (outer parentheses stripped).

    Raises:
        ValueError: if the SQL cannot be parsed, no FROM clause is found,
            or no parenthesised subquery follows FROM.
    """
    statements = sqlparse.parse(sql)
    if not statements:
        raise ValueError("无法解析 SQL 语句")

    statement = statements[0]  # only the first statement is inspected

    # Locate the FROM keyword token at the statement's top level.
    from_keyword = None
    for tok in statement.tokens:
        if tok.ttype is sqlparse.tokens.Keyword and tok.value.upper() == 'FROM':
            from_keyword = tok
            break

    if not from_keyword:
        raise ValueError("未找到 FROM 子句")

    def collect_parenthesised(tokens):
        """Recursively gather the inner text of every Parenthesis token."""
        found = []
        for tok in tokens:
            if isinstance(tok, sqlparse.sql.Parenthesis):
                # Drop the surrounding parentheses and trim whitespace.
                found.append(tok.value.strip()[1:-1].strip())
            elif hasattr(tok, 'tokens'):
                found.extend(collect_parenthesised(tok.tokens))
        return found

    start = statement.token_index(from_keyword)
    subqueries = collect_parenthesised(statement.tokens[start + 1:])

    if not subqueries:
        raise ValueError("未找到括号内的子查询语句")

    return subqueries


class OracleQueryExecutor:
    """Oracle async query executor (logs each result with its Excel row number).

    Wraps a synchronous oracledb connection pool; every blocking driver call
    is dispatched to the default thread-pool executor so it can be awaited.
    Also mutates the module-level summary lists (list_pass/list_fail/
    list_abnormal, detail_row_sql) as results come in.
    """

    def __init__(self):
        # Pool is created lazily by initialize_pool() on first use.
        self.pool = None
        # Connections currently checked out; consulted by safe_close().
        self.active_connections = 0
        # Whether ALTER SESSION SET STATEMENT_TIMEOUT is accepted by the DB.
        self.supports_timeout = False
        self._initialize_driver()
        self.query_counter = 0  # monotonically increasing query id

    def _initialize_driver(self):
        """Initialize the oracledb driver, preferring Thin mode.

        Falls back to Thick mode (init_oracle_client) if Thin setup fails.
        """
        try:
            oracledb.defaults.driver_name = "thin"
            logging.info("使用oracledb Thin模式")
        except Exception as e:
            logging.warning(f"无法使用Thin模式: {e}")
            oracledb.init_oracle_client()
            logging.info("使用oracledb Thick模式")

    def initialize_pool(self):
        """Create the connection pool and probe session capabilities.

        Probes whether ALTER SESSION SET STATEMENT_TIMEOUT is accepted
        (NOTE(review): STATEMENT_TIMEOUT is not a documented Oracle session
        parameter — confirm it exists on this database; python-oracledb
        normally exposes timeouts via Connection.call_timeout) and fixes the
        session date/timestamp formats.

        Raises:
            Exception: re-raised if pool creation or the session setup fails.
        """
        try:
            self.pool = oracledb.create_pool(**DB_CONFIG)

            with self.pool.acquire() as conn:
                with conn.cursor() as cursor:
                    try:
                        cursor.execute("ALTER SESSION SET STATEMENT_TIMEOUT=1000")
                        self.supports_timeout = True
                        logging.info("数据库支持STATEMENT_TIMEOUT参数")
                    except oracledb.DatabaseError:
                        self.supports_timeout = False
                        logging.warning("数据库不支持STATEMENT_TIMEOUT参数")

                    cursor.execute("""
                        ALTER SESSION SET
                        NLS_DATE_FORMAT='YYYY-MM-DD HH24:MI:SS'
                        NLS_TIMESTAMP_FORMAT='YYYY-MM-DD HH24:MI:SS.FF'
                    """)

            logging.info(f"连接池初始化成功 (min={DB_CONFIG['min']}, max={DB_CONFIG['max']})")
        except Exception as e:
            logging.error(f"连接池初始化失败: {e}")
            raise

    @asynccontextmanager
    async def get_connection(self):
        """Async context manager yielding a pooled connection.

        acquire()/close() are blocking driver calls, so both run in the
        default thread-pool executor; active_connections is kept in step.
        """
        conn = None
        try:
            conn = await asyncio.get_event_loop().run_in_executor(
                None,
                lambda: self.pool.acquire()
            )
            self.active_connections += 1
            yield conn
        finally:
            if conn:
                await asyncio.get_event_loop().run_in_executor(
                    None,
                    conn.close
                )
                self.active_connections -= 1

    async def execute_sql(
            self,
            sql: str,
            params: Optional[Dict[str, Any]] = None,
            query_id: Optional[int] = None,
            source_line: Optional[int] = None  # Excel row number the SQL came from
    ) -> Dict[str, Any]:
        """Execute one SQL query (with retries) and log the outcome.

        Args:
            sql: SQL text to run.
            params: Optional bind parameters.
            query_id: Caller-supplied id; NOTE(review): currently unused —
                an internal counter assigns the effective id.
            source_line: Excel row number, carried through for reporting.

        Returns:
            Result dict with keys: query_id, source_line, sql (truncated),
            status ('success'/'fail'/'timeout'), data, columns, elapsed,
            error, start_time.
        """
        self.query_counter += 1
        current_query_id = self.query_counter

        result = {
            "query_id": current_query_id,
            "source_line": source_line,  # row number the SQL originated from
            "sql": self._truncate_sql(sql, MAX_SQL_DISPLAY_LENGTH),
            "status": "fail",
            "data": None,
            "columns": None,
            "elapsed": None,
            "error": None,
            "start_time": datetime.now().strftime('%H:%M:%S')
        }
        start_time = datetime.now()

        # Start-of-query banner (with row number) — currently disabled.
        # self._print_query_start(current_query_id, sql, source_line)

        for attempt in range(RETRY_COUNT + 1):
            try:
                async with self.get_connection() as conn:
                    cursor = conn.cursor()
                    try:
                        await self._configure_session(cursor)

                        if self.supports_timeout:
                            result.update(await self._execute_with_native_timeout(cursor, sql, params, start_time))
                        else:
                            result.update(await self._execute_with_python_timeout(cursor, sql, params, start_time))

                        # Log the result table and update the summary state.
                        self._print_query_result(result)
                        return result
                    finally:
                        cursor.close()

            except asyncio.TimeoutError:
                error_msg = f"Timeout (attempt {attempt + 1}/{RETRY_COUNT})"
                result["error"] = error_msg
                if attempt == RETRY_COUNT:
                    result["status"] = "timeout"
                    logging.warning(f"查询超时: {result['sql']}")

            except oracledb.Error as e:
                # NOTE(review): keyed by current_query_id + 1, not source_line —
                # confirm this offset is intentional.
                list_abnormal.append({current_query_id + 1: 'SQl异常'})
                error_msg = str(e)
                result["error"] = error_msg
                if hasattr(e, 'code') and e.code == 1013:
                    # e.code == 1013 is treated as a transient condition (the
                    # log calls it a lock wait) — back off and let the loop retry.
                    delay = min(1 + attempt * 0.5, 3)
                    await asyncio.sleep(delay)
                    logging.info(f"锁等待，延迟 {delay}秒后重试 (查询ID: {current_query_id})")
                else:
                    logging.error(f"查询报错❌❌❌(查询ID: {current_query_id}),查询SQl:\n {sql}")
                    logging.error(f"错误信息: {error_msg}")

            except Exception as e:
                result["error"] = str(e)
                logging.error(f"未知错误 (查询ID: {current_query_id}): {e}")

        result["elapsed"] = (datetime.now() - start_time).total_seconds()
        return result

    def _print_query_start(self, query_id: int, sql: str, source_line: Optional[int]):
        """Log a banner for a query that is about to run (with row number)."""
        line_info = f"(来源行号: {source_line})" if source_line else ""
        truncated_sql = self._truncate_sql(sql, 100)

        logging.info(f"\n{'=' * 50}")
        logging.info(f"🚀 开始执行查询 ID: {query_id} {line_info}")
        logging.info(f"🕒 开始时间: {datetime.now().strftime('%H:%M:%S')}")
        logging.info(f"📝 SQL: {truncated_sql}")
        logging.info(f"{'=' * 50}")

    def _print_query_result(self, result: Dict):
        """Log one query's result and update the module-level summary state.

        Despite the name, this method has side effects: it appends to
        list_pass/list_fail, records failing rows in detail_row_sql, calls
        query_detail(), and — once the highest row number seen equals 59 —
        exports the consolidated status to an Excel file.
        """
        line_info = f"(来源行号: {result['source_line']})" if result.get('source_line') else ""

        # Section header for this query in the log.
        logging.info(f"{'-' * 50}开始查询{result['source_line']}{'-' * 50}")
        logging.info(
            f"查询ID: {result['query_id']} {line_info} - 状态: {'✅ 成功' if result['status'] == 'success' else '❌ 失败'}")

        if result["status"] != "success":
            logging.info(f"⛔ 错误: {result['error']}")
            logging.info(f"SQL: \n{result['sql']}")
            logging.info(f"⏱耗时: {result['elapsed']:.2f}秒")
            logging.info(f"{'-' * 50}查询⛔错误{'-' * 50}")
            return

        # Query metadata.
        logging.info(f"SQL:\n {result['sql']}")
        logging.info(f"耗时: {result['elapsed']:.2f}秒")
        logging.info(f"开始时间: {result['start_time']}")
        logging.info(f"结束时间: {datetime.now().strftime('%H:%M:%S')}")
        if result["data"]:
            # First column of the first row is treated as the mismatch count:
            # 0 means the data check passed (assumes the check SQLs are
            # written that way — TODO confirm).
            if result["data"][0][0] == 0:
                logging.info("数据校验通过:✅")
                list_pass.append({result['source_line']: 0})
            else:
                logging.info('-' * 50 + '数据校验不通过:❌')
                try:
                    logging.debug(f"{line_info}[inner_sqls]".center(100,'-'))
                    # Extract the first subquery after FROM so the failing
                    # rows can be examined in detail via query_detail().
                    inner_sql = extract_inner_sql(result["sql"])[0]
                    detail_row_sql[result['source_line']]=inner_sql
                    # logging.debug(f"\n{inner_sql}")
                    logging.debug(f"\n{detail_row_sql}")
                    query_detail(detail_row_sql)
                    logging.debug(f"[inner_sqls]".center(100,'-'))
                except ValueError as e:
                    print("❌ 提取inner_sqls错误：", e)

                list_fail.append({result['source_line']: result["data"][0][0]})
            # Build the table to log.
            headers = ["行号"] + result["columns"]  # prepend a row-number column
            data = result["data"][:MAX_DISPLAY_ROWS]

            # Compute each column's maximum display width.
            col_widths = [max(len(str(h)), 10) for h in headers]
            for i, row in enumerate(data, 1):
                for j, val in enumerate([i] + list(row)):
                    col_widths[j] = max(col_widths[j], len(str(val)))

            # Log the header row.
            header_row = " | ".join(f"{h:<{col_widths[i]}}" for i, h in enumerate(headers))
            logging.info(f"查询结果:")
            logging.info(header_row)
            logging.info("-" * len(header_row))

            # Log each data row with its 1-based display row number.
            for i, row in enumerate(data, 1):
                row_data = [str(i)] + [str(x) for x in row]
                logging.info(" | ".join(f"{d:<{col_widths[j]}}" for j, d in enumerate(row_data)))

            # Note how many rows were truncated from the display.
            total_rows = len(result["data"])
            if total_rows > MAX_DISPLAY_ROWS:
                logging.info(f"\n...共 {total_rows} 行，显示前 {MAX_DISPLAY_ROWS} 行...")

                # Optionally also show the last three rows.
                if total_rows > MAX_DISPLAY_ROWS * 2:
                    logging.info("\n最后几行数据:")
                    for i, row in enumerate(result["data"][-3:], total_rows - 2):
                        row_data = [str(i)] + [str(x) for x in row]
                        logging.info(" | ".join(f"{d:<{col_widths[j]}}" for j, d in enumerate(row_data)))



        else:
            logging.info("无数据返回")

        # Running summary: each list element is a one-key dict keyed by the
        # Excel row number, so sort by that single key.
        sorted_list_pass = sorted(list_pass, key=lambda d: list(d.keys())[0])
        sorted_list_fail = sorted(list_fail, key=lambda d: list(d.keys())[0])
        # sorted_list_abnormal = sorted(list_abnormal, key=lambda d: list(d.keys())[0])

        # De-duplicate abnormal entries by their dict key.
        unique_dicts = {}
        for d in list_abnormal:
            key = list(d.keys())[0]
            unique_dicts[key] = d

        # Back to a list, sorted by key.
        sorted_unique_list = sorted(unique_dicts.values(), key=lambda d: list(d.keys())[0])
        existing_keys = set()
        for d in list_pass + list_fail:
            existing_keys.update(d.keys())
        # NOTE(review): raises ValueError when both lists are still empty
        # (e.g. the very first finished query errored) — confirm acceptable.
        max_key = max(k for d in list_pass + list_fail for k in d.keys())
        # Row numbers start at 3 because excel_to_dict_alt drops rows 1-2.
        all_keys = set(range(3, max_key + 1))
        missing_keys = sorted(all_keys - existing_keys)
        logging.info(f"check_list_pass:\n{sorted_list_pass}")
        logging.info(f"check_list_fail:\n{sorted_list_fail}")
        logging.info(f"list_abnormal:\n{sorted_unique_list}")
        logging.info(f"missing_keys:\n{missing_keys}")
        logging.info(f"{'-' * 50} 查询结束 {'-' * 50}")
        df = process_data(list_pass, list_fail, missing_keys)
        logging.debug('\n' + str(df))
        # 59 appears to be the last SQL row of the workbook; when the highest
        # row number reaches it, export the consolidated pass/fail/missing
        # status to Excel. TODO(review): confirm 59 matches the workbook.
        if max_key == 59:
            all_keys = (
                    [list(d.keys())[0] for d in list_pass] + [list(d.keys())[0] for d in list_fail] + missing_keys
            )
            # De-duplicate and sort the row numbers.
            sorted_keys = sorted(set(all_keys))

            # Build the DataFrame (status merged into a single "Status" column).
            data = []
            for key in sorted_keys:
                try:
                    if any(key in d for d in list_pass):
                        status = "Pass"
                        value = next(d[key] for d in list_pass if key in d)
                    else:
                        status = "Fail"
                        value = next(d[key] for d in list_fail if key in d)
                except StopIteration:
                    status = "Missing"
                    value = ''
                data.append({
                    "Key": key,
                    "Status": status,
                    "Value": value
                })

            df = pd.DataFrame(data)
            # Export to Excel.
            df.to_excel(f"{datetime.now().strftime('%Y%m%d%H%M')}_results_icibe.xlsx", index=False,
                        sheet_name="Results")
            logging.info(f"Excel 文件已生成：{datetime.now().strftime('%Y%m%d%H%M')}_results_icibe.xlsx")

    async def _configure_session(self, cursor):
        """Set per-session optimizer mode and date format (blocking call
        dispatched to the executor)."""
        await asyncio.get_event_loop().run_in_executor(
            None,
            lambda: cursor.execute("""
                ALTER SESSION SET
                OPTIMIZER_MODE=ALL_ROWS
                NLS_DATE_FORMAT='YYYY-MM-DD HH24:MI:SS'
            """)
        )

    async def _execute_with_native_timeout(self, cursor, sql, params, start_time):
        """Execute using the database-side timeout path.

        NOTE(review): sets STATEMENT_TIMEOUT to QUERY_TIMEOUT * 1000 —
        confirm both that the parameter exists and the unit it expects.
        """
        await asyncio.get_event_loop().run_in_executor(
            None,
            lambda: cursor.execute(f"ALTER SESSION SET STATEMENT_TIMEOUT={QUERY_TIMEOUT * 1000}")
        )

        await asyncio.get_event_loop().run_in_executor(
            None,
            lambda: cursor.execute(sql, params or {})
        )

        return await self._fetch_results(cursor, start_time)

    async def _execute_with_python_timeout(self, cursor, sql, params, start_time):
        """Execute with an asyncio-level timeout, cancelling the DB call on expiry.

        On timeout, returns a 'timeout' result fragment instead of raising,
        so the caller records it without retrying.
        """
        try:
            await asyncio.wait_for(
                asyncio.get_event_loop().run_in_executor(
                    None,
                    lambda: cursor.execute(sql, params or {})
                ),
                timeout=QUERY_TIMEOUT
            )
            return await self._fetch_results(cursor, start_time)
        except asyncio.TimeoutError:
            # Ask Oracle to abort the still-running statement.
            await asyncio.get_event_loop().run_in_executor(
                None,
                lambda: cursor.connection.cancel()
            )
            # raise
            logging.info("查询超时，已取消查询")
            return {
                "status": "timeout",
                "data": [],
                "columns": [],
                "elapsed": (datetime.now() - start_time).total_seconds()
            }

    async def _fetch_results(self, cursor, start_time):
        """Fetch all rows in BATCH_SIZE chunks and return a success fragment."""
        columns = [col[0] for col in cursor.description]
        rows = []
        while True:
            batch = await asyncio.get_event_loop().run_in_executor(
                None,
                lambda: cursor.fetchmany(BATCH_SIZE)
            )
            if not batch:
                break
            rows.append(batch)

        return {
            "status": "success",
            "data": [item for sublist in rows for item in sublist],  # flatten batches
            "columns": columns,
            "elapsed": (datetime.now() - start_time).total_seconds()
        }

    async def execute_queries(
            self,
            sql_list: List[str],
            params_list: Optional[List[Dict[str, Any]]] = None,
            source_lines: Optional[List[int]] = None  # Excel row numbers, parallel to sql_list
    ) -> List[Dict]:
        """Run the given queries concurrently (with row-number information).

        Concurrency is capped at MAX_CONCURRENT via a semaphore. Results are
        placed by query_id, i.e. returned in submission order.
        """
        if not self.pool:
            self.initialize_pool()

        semaphore = asyncio.Semaphore(MAX_CONCURRENT)
        params_list = params_list or [None] * len(sql_list)
        source_lines = source_lines or [None] * len(sql_list)  # default: no row numbers

        async def worker(sql: str, params: Dict[str, Any], idx: int, line_no: int):
            async with semaphore:
                return await self.execute_sql(sql, params, idx, line_no)

        tasks = [asyncio.create_task(worker(sql, params, idx, line_no))
                 for idx, (sql, params, line_no) in enumerate(zip(sql_list, params_list, source_lines))]

        results = [None] * len(sql_list)
        for task in asyncio.as_completed(tasks):
            result = await task
            results[result["query_id"] - 1] = result  # query_id starts at 1
        return results

    @staticmethod
    def _truncate_sql(sql: str, max_len: int = 10000) -> str:
        """Return sql unchanged, or its first max_len chars plus '...' if longer."""
        return (sql[:max_len] + "...") if len(sql) > max_len else sql

    async def safe_close(self):
        """Close the pool, waiting up to 30s for active connections to drain."""
        if not self.pool:
            return

        max_wait = 30  # seconds to wait before forcing the close
        start_time = datetime.now()

        while self.active_connections > 0:
            elapsed = (datetime.now() - start_time).total_seconds()
            if elapsed > max_wait:
                logging.warning(f"强制关闭连接池，仍有{self.active_connections}个活跃连接")
                break
            await asyncio.sleep(1)

        self.pool.close()
        logging.info("连接池已关闭")


def deal_sql(sql):
    """Normalize a raw SQL cell value read from Excel.

    Strips surrounding whitespace, removes semicolons, turns non-breaking
    spaces into ordinary spaces, and substitutes the ${BU_ID}/${BU_NAME}
    placeholders with the ICIBE business-unit literals.

    Args:
        sql: Raw cell value (any type; may be None).

    Returns:
        The cleaned SQL string ('' when sql is None).
    """
    if sql is None:
        return ''
    # str.replace never raises UnicodeError on a str, so the original
    # try/except around the '\xa0' replacement was dead code — removed.
    safe_sql = str(sql).strip().replace(';', '').replace('\xa0', ' ')

    return (safe_sql
            .replace('${BU_ID}', "'1-7V7X'")
            .replace('${BU_NAME}', "'ICIBE'"))


def excel_to_dict_alt(excel_path, sheet_name, row_num) -> dict:
    """Read the sheet's first column into a {row_number: cleaned_sql} dict.

    Rows are numbered from 1; rows 1-2 are always skipped. When row_num is
    non-empty, only those row numbers are kept.
    """
    # Header-less read: the first column holds one SQL statement per row.
    frame = pd.read_excel(excel_path, sheet_name=sheet_name, header=None).fillna('')

    wanted = set(row_num) if row_num else set()
    selected = {}
    for position, cell in enumerate(frame.iloc[:, 0].astype(str), start=1):
        if position <= 2:
            continue  # rows 1-2 are header/meta rows, never SQL
        if wanted and position not in wanted:
            continue
        selected[position] = deal_sql(cell)
    return selected


async def main(excel_path, sheet_name, row_num):
    """Load SQLs from Excel, run them concurrently, and log a summary report.

    Args:
        excel_path: Path to the workbook with one SQL per row in column A.
        sheet_name: Worksheet name to read.
        row_num: 1-based Excel row numbers to run; an empty list runs all.
    """
    logging.info("开始批量执行Oracle查询")
    executor = OracleQueryExecutor()
    try:
        # SQL text and its Excel row number, kept as parallel lists.
        index_sql_dict = excel_to_dict_alt(excel_path, sheet_name, row_num)
        sql_list = list(index_sql_dict.values())
        line_numbers = list(index_sql_dict.keys())

        start_time = datetime.now()
        results = await executor.execute_queries(sql_list, source_lines=line_numbers)
        elapsed = (datetime.now() - start_time).total_seconds()

        # Performance report (with row-number statistics).
        success = sum(1 for r in results if r["status"] == "success")
        # Guard against an empty workbook to avoid ZeroDivisionError.
        avg_time = (sum(r["elapsed"] for r in results) / len(results)) if results else 0.0
        logging.info("[执行报告]".center(100, '-'))
        logging.info(f"成功查询: {success}/{len(sql_list)}")
        logging.info(f"总耗时: {elapsed:.2f}s | 平均耗时: {avg_time:.2f}s")
        if len(sql_list) > 0:
            slowest = max(results, key=lambda x: x['elapsed'])
            logging.info(
                f"最慢查询: {slowest['elapsed']:.2f}s (ID: {slowest['query_id']}, 行号: {slowest['source_line']})")
            logging.info(f"最慢SQL:\n {slowest['sql']}")
        # Only successful queries carry data; failed ones have data=None and
        # previously crashed this summary with a TypeError.
        res = {r['source_line']: r['data'][0][0] for r in results if r.get('data')}
        sorted_dict = dict(sorted(res.items()))
        logging.info(f"{sorted_dict}")
        logging.info("[执行报告]".center(100, '-'))
    except Exception as e:
        logging.error(f"执行出错: {e}")
    finally:
        await executor.safe_close()


async def async_batch_run():
    """Kick off the ICIBE data-validation run against the fixed input workbook."""
    logging.info("开始ICIBE的数据校验".center(100, '-'))
    # Workbook holding one SQL statement per row in the first column.
    workbook = r'F:\workspace\pythonProject\hello\com\cn\for_cdc\input_data\20250606.xlsx'
    # An empty selection runs every SQL row; list row numbers to restrict,
    # e.g. [42, 46, 48, 49, 50, 51, 57].
    selected_rows = []
    await main(workbook, 'Sheet1', selected_rows)
    logging.info("程序执行完毕".center(100, '-'))


# Script entry point: run the full async batch end-to-end.
if __name__ == "__main__":
    asyncio.run(async_batch_run())
