import configparser  # config-file parsing
import gc
import logging
import os
import re
import sys
import time
import traceback
from pathlib import Path  # filesystem path handling

import dmPython
import geopandas as gpd
import numpy as np
import pandas as pd
from shapely import wkt


# ========================
# Configuration management
# ========================
class ConfigManager:
    """Reads and validates the INI configuration file used by the loader."""

    def __init__(self, config_path='config.ini'):
        self.config_path = config_path
        self.config = configparser.ConfigParser()
        self.logger = logging.getLogger('ConfigManager')

    def load_config(self):
        """Load the config file and verify required sections and path keys.

        Returns:
            True when the file parsed and all required entries are present.

        Raises:
            FileNotFoundError: when the file does not exist.
            ValueError: when a required section or path entry is missing.
        """
        self.logger.info(f"加载配置文件: {self.config_path}")

        # Fail fast when the file is absent.
        if not Path(self.config_path).exists():
            self.logger.error(f"配置文件不存在: {self.config_path}")
            raise FileNotFoundError(f"配置文件 {self.config_path} 不存在")

        try:
            self.config.read(self.config_path, encoding='utf-8')

            # All three sections must be present.
            for section in ('PATHS', 'DATABASE', 'SETTINGS'):
                if self.config.has_section(section):
                    continue
                self.logger.error(f"配置文件缺少必要部分: [{section}]")
                raise ValueError(f"配置文件缺少部分: [{section}]")

            # Both path entries must exist and be non-empty.
            for path in ('shapefile_path', 'excel_path'):
                if self.config.get('PATHS', path, fallback=None):
                    continue
                self.logger.error(f"缺少必要路径配置: {path}")
                raise ValueError(f"缺少路径配置: {path}")

            self.logger.info("配置文件验证通过")
            return True
        except Exception:
            self.logger.error(f"配置文件加载失败: {traceback.format_exc()}")
            raise

    def get_path(self, key):
        """Return a path entry from the [PATHS] section."""
        return self.config.get('PATHS', key)

    def get_db_config(self):
        """Return the [DATABASE] section as a connection-parameter dict."""
        cfg = self.config
        return {
            'user': cfg.get('DATABASE', 'user'),
            'password': cfg.get('DATABASE', 'password'),
            'host': cfg.get('DATABASE', 'host'),
            'port': cfg.getint('DATABASE', 'port'),
            'schema': cfg.get('DATABASE', 'schema', fallback=None),
        }

    def get_setting(self, key, default=None):
        """Return an entry from [SETTINGS], or *default* when absent."""
        return self.config.get('SETTINGS', key, fallback=default)


# ========================
# Enhanced logging setup (with SQL statement logging)
# ========================
def setup_logger():
    """Build the 'ShapefileLoader' logger: DEBUG level, one stdout handler.

    Safe to call repeatedly — existing handlers are removed first, so
    repeated setup never duplicates output.
    """
    log = logging.getLogger('ShapefileLoader')
    log.setLevel(logging.DEBUG)

    # Drop any handlers left over from a previous call (idempotent setup).
    while log.handlers:
        log.removeHandler(log.handlers[0])

    # Console handler only emits INFO and above.
    stream = logging.StreamHandler(sys.stdout)
    stream.setLevel(logging.INFO)
    stream.setFormatter(logging.Formatter(
        '%(asctime)s | %(levelname)-8s | %(module)s:%(lineno)d | %(message)s'
    ))

    log.addHandler(stream)
    return log


# Initialize the module-level logger used by every function below.
logger = setup_logger()


# ========================
# File helpers (incl. Excel file deletion)
# ========================
def safe_delete_excel(excel_path: str) -> bool:
    """Delete an existing Excel file, logging the outcome; never raises.

    Returns:
        True when the file is gone afterwards (or was never there),
        False when deletion failed.
    """
    try:
        # Nothing to do when the file is absent.
        if not os.path.exists(excel_path):
            logger.info("无旧Excel文件需要删除")
            return True

        logger.info(f"检测到已存在的Excel文件: {excel_path}")
        logger.info(f"开始删除旧Excel文件...")

        # Capture the size (MB) before removal for the audit log.
        file_size = os.path.getsize(excel_path) / (1024 * 1024)  # MB

        os.remove(excel_path)
        logger.info(f"旧Excel文件已删除 | 大小: {file_size:.2f}MB")

        # Double-check the file really disappeared.
        if os.path.exists(excel_path):
            logger.error("文件删除失败: 文件仍存在")
            return False
        logger.info("文件删除确认: 成功")
        return True
    except Exception:
        logger.error(f"Excel文件删除失败: {traceback.format_exc()}")
        return False


# ========================
# Core functions (encoding issues & ROWID conflicts)
# ========================
def safe_read_shapefile(path: str) -> gpd.GeoDataFrame:
    """Read a Shapefile with automatic encoding detection.

    Tries a prioritised list of encodings (UTF-8 first, then common
    Chinese encodings, then Latin fallbacks) and ensures the returned
    GeoDataFrame has an active geometry column.

    Raises:
        UnicodeError: when no known encoding can decode the file.
        KeyError: when no geometry column can be identified.
    """
    try:
        logger.info(f"开始读取Shapefile: {path}")

        # Candidate encodings, most likely first.
        encodings = ['utf-8', 'gbk', 'gb18030', 'big5', 'latin1', 'iso-8859-1']

        for encoding in encodings:
            try:
                logger.debug(f"尝试使用编码: {encoding}")
                gdf = gpd.read_file(path, encoding=encoding)

                # Ensure there is an active geometry column.
                if 'geometry' not in gdf.columns:
                    # First look for a conventionally-named geometry column.
                    geo_candidates = ['geom', 'geometries', 'shape', 'wkt', 'geography']
                    found_geom = next((col for col in gdf.columns if col.lower() in geo_candidates), None)

                    if found_geom:
                        logger.warning(f"检测到备用几何列: {found_geom}, 自动设置为活动几何列")
                        gdf = gdf.set_geometry(found_geom)
                    else:
                        # Fall back to dtype-based detection.
                        geom_cols = [col for col in gdf.columns if str(gdf[col].dtype) == 'geometry']
                        if geom_cols:
                            logger.warning(f"通过数据类型识别几何列: {geom_cols[0]}")
                            gdf = gdf.set_geometry(geom_cols[0])
                        else:
                            raise KeyError("无法识别任何几何列")

                logger.info(f"成功使用 {encoding} 编码读取 {len(gdf)} 条记录")
                return gdf
            except UnicodeDecodeError:
                logger.warning(f"编码 {encoding} 失败，尝试下一种编码")
                continue
            except Exception:
                logger.error(f"读取时发生意外错误: {traceback.format_exc()}")
                raise

        # Bug fix: UnicodeDecodeError requires 5 constructor arguments
        # (encoding, object, start, end, reason), so raising it with a
        # single string used to raise TypeError here instead of the
        # intended error. UnicodeError (its base class) accepts a message.
        raise UnicodeError("无法使用任何已知编码解码文件")

    except Exception:
        logger.error(f"Shapefile读取失败: {traceback.format_exc()}")
        raise


def optimize_dataframe(gdf: gpd.GeoDataFrame) -> pd.DataFrame:
    """Normalise a GeoDataFrame for loading into Dameng.

    - Serialises geometries into a GEOMETRY_WKT text column.
    - Upper-cases/sanitises field names into valid 30-char identifiers,
      renaming ROWID and other reserved words.
    - Re-encodes string values defensively and maps NaN/'' to None.

    Bug fix: works on a copy, so the caller's GeoDataFrame is no longer
    mutated in place; also disambiguates column names that collide after
    sanitising/truncation (duplicates would break CREATE TABLE later).
    """
    try:
        logger.info("开始优化数据框结构...")
        start_time = time.time()

        # Do not mutate the caller's frame.
        gdf = gdf.copy()

        active_geom_col = gdf.geometry.name

        # Keep the original geometry column; add a WKT text rendering of it.
        gdf['GEOMETRY_WKT'] = gdf.geometry.apply(
            lambda geom: wkt.dumps(geom) if geom else None
        )

        # Dameng reserved words that must not be used as column names.
        reserved_keywords = {'ADMIN', 'USER', 'TABLE', 'SELECT', 'INDEX',
                             'INSERT', 'UPDATE', 'DELETE', 'GROUP', 'ORDER',
                             'ROWID', 'ROW', 'ID'}

        new_columns = []
        seen = set()
        for col in gdf.columns:
            # The geometry column keeps its original name.
            if col == active_geom_col:
                new_columns.append(col)
                seen.add(col)
                continue

            # Strip punctuation, upper-case, truncate to 30 chars, then
            # replace anything left that is not [A-Z0-9_].
            clean_col = re.sub(r'[^\w\s]', '', col)
            new_col = clean_col.upper()[:30]
            new_col = re.sub(r'[^A-Z0-9_]', '_', new_col)

            # ROWID is a Dameng pseudo-column — rename it outright.
            if new_col == 'ROWID':
                new_col = 'CUSTOM_ID'
                logger.warning(f"检测到冲突列名ROWID，已重命名为: {new_col}")

            # Suffix other reserved words.
            if new_col in reserved_keywords:
                logger.warning(f"检测到保留字: {new_col}, 重命名为 {new_col}_R")
                new_col = f"{new_col}_R"

            # Robustness fix: two source columns can sanitise/truncate to
            # the same identifier; disambiguate with a numeric suffix so
            # the generated CREATE TABLE stays valid.
            if new_col in seen:
                suffix = 2
                while f"{new_col[:28]}_{suffix}" in seen:
                    suffix += 1
                new_col = f"{new_col[:28]}_{suffix}"
                logger.warning(f"检测到重复列名，已重命名为: {new_col}")

            seen.add(new_col)
            new_columns.append(new_col)

        gdf.columns = new_columns

        # Replace characters that cannot be encoded in UTF-8.
        for col in gdf.select_dtypes(include='object').columns:
            if col == active_geom_col:
                continue
            gdf[col] = gdf[col].apply(
                lambda x: x.encode('utf-8', 'replace').decode('utf-8')
                if isinstance(x, str) else x
            )

        # NaN and empty strings become SQL NULLs.
        df = gdf.replace({np.nan: None, '': None})

        elapsed = time.time() - start_time
        logger.info(f"数据优化完成 | 字段数: {len(df.columns)} | 耗时: {elapsed:.2f}s")
        logger.debug(f"优化后字段列表: {list(df.columns)}")
        return df
    except Exception:
        logger.error(f"数据优化失败: {traceback.format_exc()}")
        raise


def safe_export_to_excel(df: pd.DataFrame, excel_path: str) -> bool:
    """Write *df* to an Excel workbook (sheet 'SpatialData') and log stats.

    Returns True on success; re-raises the underlying error on failure.
    """
    try:
        logger.info(f"开始导出Excel: {excel_path}")
        start_time = time.time()

        # The context manager guarantees the workbook is flushed and closed.
        with pd.ExcelWriter(excel_path, engine='openpyxl') as writer:
            df.to_excel(writer, index=False, sheet_name='SpatialData')

        # Report size and duration for the audit log.
        elapsed = time.time() - start_time
        file_size = os.path.getsize(excel_path) / (1024 * 1024)  # MB
        logger.info(f"Excel导出成功 | 大小: {file_size:.2f}MB | 耗时: {elapsed:.2f}s")
        return True
    except Exception:
        logger.error(f"Excel导出失败: {traceback.format_exc()}")
        raise


# ========================
# Database helpers (SQL logging & encoding handling)
# ========================
def connect_dameng(db_config: dict):
    """Open a dmPython connection, retrying up to 3 times with backoff.

    Raises:
        ConnectionError: after the third failed attempt (original
        dmPython error chained as the cause).
    """
    logger.info(f"尝试连接达梦数据库: {db_config['host']}:{db_config['port']}")
    start_time = time.time()

    attempt = 0
    while True:
        attempt += 1
        try:
            # local_code=1 selects a UTF-8 client encoding (instead of
            # passing an `encoding` keyword).
            conn = dmPython.connect(
                user=db_config['user'],
                password=db_config['password'],
                server=db_config['host'],
                port=db_config['port'],
                schema=db_config['schema'],
                autoCommit=False,
                local_code=1
            )
            elapsed = time.time() - start_time
            logger.info(f"数据库连接成功 | 编码: UTF-8 | 耗时: {elapsed:.2f}s")
            return conn
        except dmPython.Error as e:
            logger.warning(f"连接失败(尝试 {attempt}/3): {str(e)}")
            if attempt == 3:
                logger.error("数据库连接失败，达到最大重试次数")
                raise ConnectionError("达梦数据库连接失败") from e
            # Exponential backoff: 2s, 4s.
            time.sleep(2 ** attempt)


def generate_dm_table_sql(df: pd.DataFrame, table_name: str) -> tuple:
    """Build a Dameng CREATE TABLE statement for *df*.

    The table gets an auto-increment BIGINT identity primary key plus one
    column per DataFrame column (the raw 'geometry' column is skipped;
    columns containing GEOMETRY_WKT become CLOB).

    Returns:
        (create_table_sql, safe_table_name)
    """
    logger.info("开始生成建表SQL...")
    start_time = time.time()

    # pandas dtype -> Dameng column type; anything else falls back to VARCHAR.
    type_mapping = {
        'object': 'VARCHAR(1000)',
        'int64': 'BIGINT',
        'float64': 'DOUBLE',
        'datetime64[ns]': 'TIMESTAMP',
        'bool': 'BOOLEAN'
    }

    # Sanitise the table name to a 30-char [A-Z0-9_] identifier.
    safe_table_name = re.sub(r'[^A-Z0-9_]', '_', table_name.upper())[:30]
    logger.debug(f"安全表名: {safe_table_name}")

    # Bug fix: optimize_dataframe renames a source ROWID column to
    # CUSTOM_ID. If such a column exists in *df*, reusing CUSTOM_ID for
    # the identity primary key would create a duplicate column and break
    # CREATE TABLE — choose a non-colliding identity name instead.
    id_col = 'CUSTOM_ID'
    while id_col in df.columns:
        id_col = f"{id_col}_PK"

    sql = f"""CREATE TABLE {safe_table_name} (
    {id_col} BIGINT IDENTITY(1,1) PRIMARY KEY,"""

    for col in df.columns:
        # The raw geometry column is not persisted; its WKT twin is.
        if col == 'geometry':
            continue

        dtype = str(df[col].dtype)
        # WKT columns can exceed VARCHAR limits, so store them as CLOB.
        sql_type = 'CLOB' if 'GEOMETRY_WKT' in col else type_mapping.get(dtype, 'VARCHAR(4000)')
        sql += f"\n    {col} {sql_type},"

    sql = sql.rstrip(',') + "\n);"

    elapsed = time.time() - start_time
    logger.info(f"建表SQL生成完成 | 表名: {safe_table_name} | 耗时: {elapsed:.2f}s")
    logger.info(f"完整建表语句:\n{sql}")
    return sql, safe_table_name


def execute_sql_with_logging(cursor, sql: str, operation: str = "Execute"):
    """Run *sql* on *cursor*, logging the statement, row count and timing.

    Returns:
        True on success, False when dmPython raises an error (the error
        is logged, not propagated).
    """
    try:
        logger.info(f"执行SQL操作: {operation}")
        logger.info(f"SQL语句: {sql}")

        started = time.time()
        cursor.execute(sql)
        elapsed = time.time() - started

        logger.info(f"SQL执行成功 | 影响行数: {cursor.rowcount} | 耗时: {elapsed:.4f}s")
        return True
    except dmPython.Error as e:
        logger.error(f"SQL执行失败: {str(e)}")
        return False


def convert_numpy_to_python(value):
    """Coerce NumPy scalars/arrays to native Python types; pass others through."""
    if isinstance(value, np.ndarray):
        # Arrays become plain (possibly nested) Python lists.
        return value.tolist()
    if isinstance(value, np.generic):
        # NumPy scalars become the corresponding Python scalar.
        return value.item()
    return value


def batch_insert_data(conn, df: pd.DataFrame, table_name: str, batch_size=500):
    """Insert *df* into *table_name* in adaptive batches.

    Streams rows through a generator to bound memory, grows the batch
    size on success (cap 1000) and shrinks it on memory errors, and
    retries type-converted rows after dmPython.DataError.

    Returns:
        True only when every row was inserted.
    """
    logger.info(f"开始批量插入数据到表 {table_name}...")
    start_time = time.time()
    cursor = conn.cursor()

    # Parameterised INSERT covering every column except the raw geometry.
    insert_columns = [col for col in df.columns if col != 'geometry']
    columns = ', '.join(insert_columns)
    placeholders = ', '.join(['?' for _ in insert_columns])
    insert_sql = f'INSERT INTO {table_name} ({columns}) VALUES ({placeholders})'

    logger.info(f"插入语句模板: {insert_sql}")

    try:
        total_rows = len(df)
        inserted_rows = 0
        batch_count = 0
        current_batch_size = batch_size  # adjusted dynamically below

        def row_generator():
            """Yield one pre-processed row tuple at a time (memory friendly)."""
            for idx in range(total_rows):
                row = df.iloc[idx]
                try:
                    # Re-encode strings defensively and strip NumPy types.
                    safe_row = tuple(
                        convert_numpy_to_python(
                            item.encode('utf-8', 'replace').decode('utf-8')
                            if isinstance(item, str) else item
                        )
                        for col_name, item in row.items()
                        if col_name != 'geometry'  # exclude the geometry column
                    )
                    yield safe_row
                except Exception as e:
                    # Log the bad row but keep streaming the rest.
                    logger.warning(f"行 {idx} 数据预处理失败: {e}")
                    continue

        row_gen = row_generator()

        while inserted_rows < total_rows:
            # Pull up to current_batch_size rows for this batch.
            batch_data = []
            current_batch = 0
            while current_batch < current_batch_size and inserted_rows + current_batch < total_rows:
                try:
                    batch_data.append(next(row_gen))
                    current_batch += 1
                except StopIteration:
                    break

            # Nothing left to insert (e.g. all remaining rows failed preprocessing).
            if not batch_data:
                logger.info("批次数据为空，跳过插入操作")
                break

            try:
                cursor.executemany(insert_sql, batch_data)
                conn.commit()
                inserted_rows += len(batch_data)
                batch_count += 1

                # Progress / throughput metrics.
                elapsed_batch = time.time() - start_time
                progress = inserted_rows / total_rows * 100
                speed = inserted_rows / elapsed_batch if elapsed_batch > 0 else 0

                logger.info(
                    f"批次 {batch_count} | 进度: {progress:.1f}% | "
                    f"已插入: {inserted_rows}/{total_rows} | "
                    f"批次大小: {len(batch_data)} | "
                    f"速度: {speed:.2f}行/秒"
                )

                # Grow the batch (cap 1000) after a full successful batch.
                if current_batch_size < 1000 and len(batch_data) == current_batch_size:
                    current_batch_size = min(1000, int(current_batch_size * 1.2))
                    logger.debug(f"增加批次大小至: {current_batch_size}")

                # Release the batch promptly to keep peak memory low.
                del batch_data
                gc.collect()

            except dmPython.DataError as e:
                # Retry the batch once with values coerced to numeric types.
                logger.warning(f"数据格式错误: {str(e)}，尝试类型转换...")
                try:
                    converted_batch = []
                    for row in batch_data:
                        converted_row = []
                        for item in row:
                            if isinstance(item, (int, float, str)):
                                # Bug fix: pd.to_numeric(errors='ignore') is
                                # deprecated — emulate "ignore" with
                                # try/except, leaving the value unchanged
                                # when it is not numeric.
                                try:
                                    item = pd.to_numeric(item)
                                except (ValueError, TypeError):
                                    pass
                            # Ensure everything is a native Python type.
                            converted_row.append(convert_numpy_to_python(item))
                        converted_batch.append(tuple(converted_row))

                    cursor.executemany(insert_sql, converted_batch)
                    conn.commit()
                    inserted_rows += len(converted_batch)
                    logger.info(f"类型转换后成功插入 {len(converted_batch)} 行")

                except Exception as e_conv:
                    conn.rollback()
                    logger.error(f"类型转换后插入失败: {str(e_conv)}")

            except (dmPython.Error, MemoryError) as e:
                conn.rollback()
                if "memory" in str(e).lower() or isinstance(e, MemoryError):
                    # Bug fix: halve with integer division — the old
                    # float result from `/ 2` broke range() and slicing
                    # below with a TypeError.
                    new_batch_size = max(10, current_batch_size // 2)
                    logger.warning(f"内存不足，减小批次大小: {current_batch_size} -> {new_batch_size}")
                    current_batch_size = new_batch_size

                    try:
                        # Replay the failed batch in smaller slices.
                        for i in range(0, len(batch_data), new_batch_size):
                            small_batch = batch_data[i:i + new_batch_size]
                            converted_small_batch = [
                                tuple(convert_numpy_to_python(item) for item in row)
                                for row in small_batch
                            ]
                            cursor.executemany(insert_sql, converted_small_batch)
                            conn.commit()
                            inserted_rows += len(converted_small_batch)
                            logger.info(f"小批次插入成功: {len(converted_small_batch)} 行")

                    except Exception as e_small:
                        logger.error(f"小批次插入失败: {str(e_small)}")
                else:
                    logger.error(f"批次插入失败: {str(e)}")

        elapsed_total = time.time() - start_time
        logger.info(
            f"数据插入完成 | 总行数: {inserted_rows}/{total_rows} | "
            f"总耗时: {elapsed_total:.2f}s | 平均速度: {inserted_rows / elapsed_total:.2f}行/秒"
        )
        return inserted_rows == total_rows
    except Exception:
        conn.rollback()
        logger.error(f"批处理插入失败: {traceback.format_exc()}")
        return False
    finally:
        cursor.close()


# ========================
# Main pipeline (end-to-end, hardened)
# ========================
def main():
    """End-to-end pipeline: config -> Shapefile -> Excel -> Dameng table."""
    # Bug fix: define conn before the try block — previously a failure
    # before connect_dameng() left `conn` unbound and the finally clause
    # raised NameError, masking the real error.
    conn = None
    try:
        logger.info("=" * 80)
        logger.info("空间数据处理流程启动")
        logger.info("=" * 80)

        # 1. Load and validate the configuration file.
        config_manager = ConfigManager()
        config_manager.load_config()

        # 2. Pull the parameters the pipeline needs.
        shapefile_path = config_manager.get_path('shapefile_path')
        excel_path = config_manager.get_path('excel_path')
        table_name = config_manager.get_setting('table_name', 'DEFAULT_GEO_TABLE')
        db_config = config_manager.get_db_config()

        logger.info(f"Shapefile路径: {shapefile_path}")
        logger.info(f"Excel路径: {excel_path}")
        logger.info(f"目标表名: {table_name}")
        logger.info(f"数据库配置: {db_config}")

        # 3. Read the Shapefile with encoding fallback.
        gdf = safe_read_shapefile(shapefile_path)

        # 4. Normalise columns/values for Dameng.
        df = optimize_dataframe(gdf)

        # 5. Remove any stale Excel export (best effort).
        if not safe_delete_excel(excel_path):
            logger.warning("旧Excel文件删除失败，但流程将继续执行")

        # 6. Export the normalised data to Excel.
        safe_export_to_excel(df, excel_path)

        # 7. Build the CREATE TABLE statement.
        create_table_sql, safe_table_name = generate_dm_table_sql(df, table_name)

        # 8. Connect and (re)create the target table.
        conn = connect_dameng(db_config)
        cursor = conn.cursor()

        drop_sql = f"DROP TABLE IF EXISTS {safe_table_name}"
        logger.info(f"准备删除旧表: {safe_table_name}")
        logger.info(f"删除表SQL: {drop_sql}")

        try:
            if execute_sql_with_logging(cursor, drop_sql, operation="DROP TABLE"):
                logger.info("旧表删除成功")
        except dmPython.ProgrammingError as e:
            # -2007 is Dameng's "table does not exist" — safe to ignore.
            if "-2007" not in str(e):
                raise
            logger.warning("忽略表不存在错误")

        logger.info(f"开始创建新表: {safe_table_name}")
        if execute_sql_with_logging(cursor, create_table_sql, operation="CREATE TABLE"):
            conn.commit()
            logger.info(f"表 {safe_table_name} 创建成功")

        # 9. Bulk-load the rows.
        if batch_insert_data(conn, df, safe_table_name):
            logger.info("数据同步到达梦数据库完成")
        else:
            logger.error("数据同步失败，请检查错误日志")

    except Exception:
        logger.critical(f"流程执行失败: {traceback.format_exc()}")
    finally:
        if conn:
            conn.close()
            logger.info("数据库连接已关闭")

        logger.info("=" * 80)
        logger.info("流程执行结束")
        logger.info("=" * 80)

if __name__ == "__main__":
    main()
