# Fixed version of insert_daily_result_util.py
import pandas as pd
from decimal import Decimal, InvalidOperation
import logging
from typing import Dict, List, Tuple, Optional, Any

from config.config import BASE_DIR
from util.db_util import DBUtil
from util.initial_log import initial_log_config


class DataConverter:
    """Convert raw CSV cell values into database-ready Python types.

    The CSVs are loaded with ``dtype=str`` and ``keep_default_na=False``, so
    every cell arrives as a string; the literal ``'NULL'``, empty/whitespace
    strings, and pandas NA markers are all mapped to ``None`` (SQL NULL).
    """

    @staticmethod
    def _is_missing(value: Any) -> bool:
        """Return True when *value* should be stored as SQL NULL."""
        if pd.isnull(value):
            return True
        text = str(value).strip()
        return text == '' or text.upper() == 'NULL'

    @staticmethod
    def to_decimal(value: Any) -> Optional[Decimal]:
        """Safely convert *value* to Decimal; None for missing or invalid input."""
        if DataConverter._is_missing(value):
            return None
        try:
            return Decimal(str(value))
        except (InvalidOperation, ValueError):
            return None

    @staticmethod
    def to_string(value: Any) -> Optional[str]:
        """Safely convert *value* to str; None for missing input."""
        if DataConverter._is_missing(value):
            return None
        return str(value)

    @staticmethod
    def to_integer(value: Any) -> Optional[int]:
        """Safely convert *value* to int; None for missing or invalid input.

        Note: keeps the original strictness — a string like '5.0' is not a
        valid integer literal and yields None.
        """
        if DataConverter._is_missing(value):
            return None
        try:
            return int(value)
        except (TypeError, ValueError):
            return None


class DatabaseManager:
    """Database write operations for the PPA daily-result import."""

    @staticmethod
    def insert_audit_record(audit_data: Dict[str, Any]) -> int:
        """Insert one audit record and return its newly generated PROCESS_ID.

        The INSERT and ``SELECT SCOPE_IDENTITY()`` must run in the SAME batch:
        SCOPE_IDENTITY() is scoped to the current batch, so issuing it from a
        separate execute() call always returns NULL (which previously forced a
        silent fallback to @@IDENTITY). ``SET NOCOUNT ON`` suppresses the
        INSERT's row-count result set so the identity value can be fetched
        directly from the combined batch.

        Args:
            audit_data: mapping of TB_PPA_PROCESS_AUDIT column names to values.

        Returns:
            The integer PROCESS_ID assigned by the database.

        Raises:
            Exception: if no identity value can be retrieved; the transaction
            is rolled back and the original error re-raised on any failure.
        """
        insert_sql = """
        SET NOCOUNT ON;
        INSERT INTO ppa.TB_PPA_PROCESS_AUDIT (
            FILE_ID, START_DATE, END_DATE, PROCESS_TYPE, PROCESS, ACTIVE, STATUS,
            ERROR_DETAILS, START_DATETIME, END_DATETIME, SOURCE_PROCESS_ID, OUTPUT_FILE_ID,
            CREATED_BY, CREATED_DATE, MODIFIED_BY, MODIFIED_DATE, WF_ID
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
        SELECT SCOPE_IDENTITY();
        """

        params = (
            DataConverter.to_decimal(audit_data.get('FILE_ID')),
            audit_data.get('START_DATE'),
            audit_data.get('END_DATE'),
            audit_data.get('PROCESS_TYPE'),
            audit_data.get('PROCESS'),
            audit_data.get('ACTIVE'),
            audit_data.get('STATUS'),
            DataConverter.to_string(audit_data.get('ERROR_DETAILS')),
            audit_data.get('START_DATETIME'),
            audit_data.get('END_DATETIME'),
            DataConverter.to_integer(audit_data.get('SOURCE_PROCESS_ID')),
            DataConverter.to_integer(audit_data.get('OUTPUT_FILE_ID')),
            audit_data.get('CREATED_BY'),
            audit_data.get('CREATED_DATE'),
            audit_data.get('MODIFIED_BY'),
            audit_data.get('MODIFIED_DATE'),
            DataConverter.to_decimal(audit_data.get('WF_ID'))
        )

        # Log the statement and parameters for traceability.
        logging.info(f"Executing audit insert SQL: {insert_sql}")
        logging.info(f"Audit insert params: {params}")

        # Explicit connection/cursor handling so the transaction can be
        # committed or rolled back deliberately.
        conn = DBUtil.get_conn()
        cursor = conn.cursor()

        try:
            # INSERT + identity fetch in one batch (see docstring).
            cursor.execute(insert_sql, params)
            result = cursor.fetchone()
            logging.info(f"SCOPE_IDENTITY() result: {result}")

            if result is None or result[0] is None:
                # Defensive fallback. @@IDENTITY is session-scoped so it
                # survives batch boundaries, but it can be wrong when a
                # trigger inserts into another identity table.
                cursor.execute("SELECT @@IDENTITY;")
                result = cursor.fetchone()
                logging.info(f"@@IDENTITY result: {result}")

                if result is None or result[0] is None:
                    raise Exception("Failed to get PROCESS_ID from both SCOPE_IDENTITY() and @@IDENTITY")

            process_id = int(result[0])
            logging.info(f"Inserted audit record with PROCESS_ID: {process_id}")

            conn.commit()
            return process_id

        except Exception as e:
            conn.rollback()
            logging.error(f"Error inserting audit record: {e}")
            raise
        finally:
            cursor.close()
            conn.close()

    @staticmethod
    def batch_insert(table: str, columns: List[str], rows: List[Tuple]) -> None:
        """Insert *rows* into *table* within a single explicit transaction.

        All rows are committed atomically; on any failure the whole batch is
        rolled back, the offending row's column values are logged, and the
        original exception is re-raised.

        Args:
            table: fully qualified target table name.
            columns: column names matching each tuple's element order.
            rows: data tuples to insert; an empty list is a no-op.
        """
        if not rows:
            logging.info(f"No data to insert into {table}")
            return

        placeholders = ','.join(['?'] * len(columns))
        sql = f"INSERT INTO {table} ({','.join(columns)}) VALUES ({placeholders})"

        logging.info(f"Starting to insert {len(rows)} rows into {table}")
        logging.info(f"Batch insert SQL: {sql}")

        # Explicit connection/cursor handling for deliberate transaction control.
        conn = DBUtil.get_conn()
        cursor = conn.cursor()
        current_row = None  # retained for error reporting if an execute fails

        try:
            total_affected_rows = 0
            for idx, current_row in enumerate(rows, 1):
                # Echo only the first few rows so large batches don't flood the log.
                if idx <= 5:
                    logging.info(f"Inserting row {idx} params: {current_row}")
                elif idx == 6:
                    logging.info("... (showing only first 5 rows)")

                cursor.execute(sql, current_row)
                total_affected_rows += cursor.rowcount

            conn.commit()
            logging.info(
                f"Successfully inserted {len(rows)} rows into {table}, total affected rows: {total_affected_rows}")

        except Exception as e:
            conn.rollback()
            logging.error(f"Error inserting data into {table}: {e}")
            if current_row is not None:
                for col_name, col_value in zip(columns, current_row):
                    logging.error(f"  {col_name}: {col_value} ({type(col_value)})")
            raise
        finally:
            cursor.close()
            conn.close()


class DataProcessor:
    """Loads the result CSV files and prepares rows for database insertion."""

    # Column layout of the audit CSV — adjust if the file structure changes.
    AUDIT_COLUMNS = [
        'PROCESS_ID', 'FILE_ID', 'START_DATE', 'END_DATE', 'PROCESS_TYPE', 'PROCESS',
        'ACTIVE', 'STATUS', 'ERROR_DETAILS', 'START_DATETIME', 'END_DATETIME',
        'SOURCE_PROCESS_ID', 'OUTPUT_FILE_ID', 'CREATED_BY', 'CREATED_DATE',
        'MODIFIED_BY', 'MODIFIED_DATE', 'WF_ID'
    ]

    # Column layout shared by the TWR and aggregate result tables.
    RESULT_COLUMNS = [
        'PROCESS_ID', 'DATA_ID', 'TYPE', 'START_DATE', 'END_DATE',
        'TWR', 'OPEN_NAV', 'CLOSE_NAV', 'OPEN_ADJUSTMENT', 'CLOSE_ADJUSTMENT',
        'STATUS', 'ERROR_DETAILS'
    ]

    # Per-column target types used by convert_row_data.
    DECIMAL_COLUMNS = ['TWR', 'OPEN_NAV', 'CLOSE_NAV', 'OPEN_ADJUSTMENT', 'CLOSE_ADJUSTMENT']
    INTEGER_COLUMNS = ['PROCESS_ID']
    STRING_COLUMNS = ['DATA_ID', 'TYPE', 'START_DATE', 'END_DATE', 'STATUS', 'ERROR_DETAILS']

    def __init__(self):
        # Maps (PROCESS, START_DATE, END_DATE) -> newly generated PROCESS_ID.
        self.process_id_map: Dict[Tuple[str, str, str], int] = {}
        self.converter = DataConverter()
        self.db_manager = DatabaseManager()

    def load_csv_data(self, file_path: str) -> pd.DataFrame:
        """Load a pipe-delimited CSV; every cell is kept as a raw string."""
        logging.info(f"Loading data from {file_path}")
        return pd.read_csv(file_path, sep="|", dtype=str, keep_default_na=False)

    def process_audit_data(self, audit_csv: str) -> None:
        """Insert audit rows and build the PROCESS_ID mapping.

        Each audit row is normalized ('NULL'/blank -> None), inserted via
        DatabaseManager, and the database-assigned PROCESS_ID is recorded
        under the key (PROCESS, START_DATE, END_DATE).
        """
        df_audit = self.load_csv_data(audit_csv)

        logging.info("Processing audit data and building PROCESS_ID mapping")
        logging.info(f"Audit data shape: {df_audit.shape}")

        for idx, (_, row) in enumerate(df_audit.iterrows()):
            # Normalize values: the literal 'NULL' and blanks become None.
            processed_row = {}
            for k in self.AUDIT_COLUMNS:
                if k in row:
                    value = row[k]
                    if pd.isna(value) or str(value).strip().upper() == 'NULL' or str(value).strip() == '':
                        processed_row[k] = None
                    else:
                        processed_row[k] = value
                else:
                    processed_row[k] = None

            logging.info(
                f"Processing audit row {idx + 1}: PROCESS={row.get('PROCESS')}, START_DATE={row.get('START_DATE')}, END_DATE={row.get('END_DATE')}")
            new_process_id = self.db_manager.insert_audit_record(processed_row)
            logging.info(f"Created new PROCESS_ID {new_process_id} for audit row {idx + 1}")

            # (PROCESS, START_DATE, END_DATE) is treated as the unique key.
            key = (row['PROCESS'], row['START_DATE'], row['END_DATE'])
            self.process_id_map[key] = new_process_id

        logging.info(f"Successfully processed {len(df_audit)} audit records")
        logging.info(f"Process ID mapping contains {len(self.process_id_map)} entries")

    def update_process_ids(self, df: pd.DataFrame, process_type: str) -> pd.DataFrame:
        """Stamp each row of *df* with its new PROCESS_ID from the mapping.

        Rows without a matching mapping entry end up with a missing value.
        NOTE: when any key is missing, pandas stores the Nones as NaN in a
        float column — downstream code must test presence with pd.notna/isna.
        """

        def get_new_process_id(row):
            key = (process_type, row['START_DATE'], row['END_DATE'])
            return self.process_id_map.get(key)

        df['PROCESS_ID'] = df.apply(get_new_process_id, axis=1)
        missing_process_id_count = df['PROCESS_ID'].isna().sum()
        logging.info(
            f"Updated PROCESS_ID for {len(df)} {process_type} records, {missing_process_id_count} records missing PROCESS_ID")
        return df

    def convert_row_data(self, row: pd.Series) -> Tuple:
        """Convert one result row into a tuple of DB-ready values,
        ordered per RESULT_COLUMNS."""
        record = []
        for col in self.RESULT_COLUMNS:
            if col in self.DECIMAL_COLUMNS:
                record.append(self.converter.to_decimal(row.get(col)))
            elif col in self.INTEGER_COLUMNS:
                record.append(self.converter.to_integer(row.get(col)))
            else:
                # STRING_COLUMNS and any unlisted column default to string.
                record.append(self.converter.to_string(row.get(col)))
        return tuple(record)

    def process_result_data(self, df: pd.DataFrame) -> List[Tuple]:
        """Convert result rows to DB tuples, skipping rows without a PROCESS_ID.

        Fix: update_process_ids stores missing ids as NaN (not None), and
        ``NaN is not None`` is True — so the old identity check let unmatched
        rows through with a NULL PROCESS_ID. Use pd.notna() instead.
        """
        rows = []
        none_process_id_count = 0
        for _, row in df.iterrows():
            if pd.notna(row['PROCESS_ID']):
                rows.append(self.convert_row_data(row))
            else:
                none_process_id_count += 1

        logging.info(
            f"Processed result data: {len(rows)} rows with valid PROCESS_ID, {none_process_id_count} rows with None PROCESS_ID")
        return rows


def insert_daily_calculation_result(audit_csv: str, twr_csv: str, agg_csv: str) -> None:
    """Import one batch of daily calculation results into the database.

    Args:
        audit_csv: path to the audit-data CSV file
        twr_csv: path to the TWR-data CSV file
        agg_csv: path to the aggregate-data CSV file
    """
    logging.info("Starting daily calculation result import process")

    try:
        processor = DataProcessor()

        # Step 1: audit rows go in first — this builds the PROCESS_ID mapping
        # that every result row depends on.
        processor.process_audit_data(audit_csv)

        # Step 2: load both result files.
        twr_frame = processor.load_csv_data(twr_csv)
        logging.info(f"Loaded TWR data with {len(twr_frame)} rows")

        agg_frame = processor.load_csv_data(agg_csv)
        logging.info(f"Loaded AGG data with {len(agg_frame)} rows")

        # Step 3: stamp every row with its freshly created PROCESS_ID.
        twr_frame = processor.update_process_ids(twr_frame, 'Daily_TWR')
        agg_frame = processor.update_process_ids(agg_frame, 'Daily_TWR_Aggregate')

        # Step 4: convert to DB-ready tuples.
        twr_records = processor.process_result_data(twr_frame)
        agg_records = processor.process_result_data(agg_frame)

        # Step 5: bulk insert into the two target tables.
        processor.db_manager.batch_insert(
            "ppa.TB_PPA_RESULT_DAILY_TWR", processor.RESULT_COLUMNS, twr_records)
        processor.db_manager.batch_insert(
            "ppa.TB_PPA_RESULT_DAILY_TWR_AGGREGATE", processor.RESULT_COLUMNS, agg_records)

        logging.info("Daily calculation result import completed successfully")

    except Exception as e:
        logging.error(f"Error during daily calculation result import: {e}")
        raise


if __name__ == '__main__':
    initial_log_config()
    # All three fixture files live under the same result directory.
    result_dir = f"{BASE_DIR}/data/calculation/happy_case/low_case/daily_calculation_result(202412)"
    insert_daily_calculation_result(
        f"{result_dir}/audit.csv",
        f"{result_dir}/daily_twr(202412).csv",
        f"{result_dir}/daily_twr_aggregate(202412).csv",
    )
