import csv
import psycopg2
from psycopg2 import sql
from psycopg2.extras import execute_batch
import time
from pathlib import Path
import traceback
'''
def csv_to_postgresql(csv_file_path, db_config, table_name, batch_size=10000):
    """
    高效导入百万级CSV数据到PostgreSQL

    参数:
    - csv_file_path: CSV文件路径
    - db_config: 数据库连接配置
    - table_name: 目标表名
    - batch_size: 每批导入的行数(默认10000)
    """
    # 定义CSV列名到数据库列名和类型的映射
    column_mapping = {
        "资源标识（小区）": "cell_id",
        "mcc": "mcc",
        "mnc": "mnc",
        "基站ID（10进制）": "base_station_id",
        "本地小区识别ID": "local_cell_id",
        "网元ip": "network_element_ip",
        "plmn": "plmn",
        "tac寻呼识别码": "tac",
        "pci": "pci",
        "省份编号": "province_code",
        "地市编号": "city_code",
        "区县编号": "district_code",
        "基站名称（中文）": "base_station_name",
        "小区名称（中文）": "cell_name",
        "覆盖半径": "coverage_radius",
        "roam_type": "roam_type",
        "生命周期状态": "life_cycle_status",
        "共建共享类型": "sharing_type",
        "id": "id"
    }

    csv_path = Path(csv_file_path)
    if not csv_path.exists():
        raise FileNotFoundError(f"CSV文件不存在: {csv_file_path}")

    total_rows = 0
    # 快速统计CSV总行数
    with open(csv_file_path, 'r', encoding='utf-8') as f:
        total_rows = sum(1 for _ in f) - 1  # 减去表头

    print(f"开始导入 {total_rows} 行数据到表 {table_name}")
    start_time = time.time()

    try:
        with psycopg2.connect(**db_config) as conn:
            conn.autocommit = False  # 使用事务提高性能
            with conn.cursor() as cursor:
                # 创建表


                # 准备插入语句
                insert_sql = sql.SQL("INSERT INTO {} ({}) VALUES ({})").format(
                    sql.Identifier(table_name),
                    sql.SQL(', ').join(map(sql.Identifier, column_mapping.values())),
                    sql.SQL(', ').join([sql.Placeholder()] * len(column_mapping))
                )

                # 分批处理数据
                processed_rows = 0
                with open(csv_file_path, 'r', encoding='utf-8') as file:
                    reader = csv.DictReader(file)
                    batch = []

                    for row in reader:
                        # 转换数据类型
                        values = []
                        for col in column_mapping.keys():
                            value = row.get(col, '')
                            if value.strip() == '' and col != 'id':
                                values.append(None)  # 空值处理
                            else:
                                try:
                                    # 根据目标类型转换值
                                    if col in['基站ID（10进制）','本地小区识别ID','tac寻呼识别码']:
                                        values.append(int(value))
                                    else:
                                        values.append(value)
                                except ValueError:
                                    # 类型转换失败时记录错误
                                    print(
                                        f"行 {processed_rows + 1}: 字段 '{col}' 的值 '{value}' 无法转换为 {column_mapping[col]}")
                                    values.append(None)
                        if values[3] is not None and values[4] is not None:
                            id_value = f"{values[3]:x}{values[4]:x}"
                            values[3] = f"{values[3]:x}"
                            values[4] = f"{values[4]:x}"
                            values[7] = f"{values[7]:x}"
                            values[18] = id_value
                        batch.append(tuple(values))
                        processed_rows += 1

                        if len(batch) >= batch_size:
                            execute_batch(cursor, insert_sql, batch)
                            batch = []

                            # 显示进度
                            elapsed = time.time() - start_time
                            progress = processed_rows / total_rows * 100
                            eta = elapsed / processed_rows * (total_rows - processed_rows) if processed_rows > 0 else 0
                            print(
                                f"已处理: {processed_rows}/{total_rows} ({progress:.2f}%), 耗时: {elapsed:.2f}s, ETA: {eta:.2f}s")

                    # 处理剩余批次
                    if batch:
                        execute_batch(cursor, insert_sql, batch)

                    # 重新启用索引和约束
                    cursor.execute(f"ALTER TABLE {table_name} ENABLE TRIGGER ALL")
                    cursor.execute(f"ANALYZE {table_name}")

                conn.commit()
                elapsed_time = time.time() - start_time
                print(f"导入完成! 共处理 {processed_rows} 行数据，耗时: {elapsed_time:.2f}s")

    except Exception as e:
        print(f"导入过程中发生错误: {e}")
        # 自动回滚事务
'''
def csv_to_postgresql(csv_file_path, db_config, table_name, batch_size=10000):
    """
    Bulk-load a large (million-row scale) CSV file into a PostgreSQL table.

    Parameters:
    - csv_file_path: path to the source CSV file (UTF-8, with a header row)
    - db_config: keyword arguments forwarded to psycopg2.connect()
    - table_name: target table name
    - batch_size: rows per execute_batch() round-trip (default 10000)

    Raises:
    - FileNotFoundError: if csv_file_path does not exist.

    Database errors are caught, reported, and swallowed (best-effort import);
    the open transaction is rolled back and the connection is always closed.
    """
    # Mapping of CSV header names to database column names.
    # NOTE: dict insertion order defines the order of the VALUES tuple below.
    column_mapping = {
        "资源标识（小区）": "cell_id",
        "mcc": "mcc",
        "mnc": "mnc",
        "基站ID（10进制）": "base_station_id",
        "本地小区识别ID": "local_cell_id",
        "网元ip": "network_element_ip",
        "plmn": "plmn",
        "tac寻呼识别码": "tac",
        "pci": "pci",
        "省份编号": "province_code",
        "地市编号": "city_code",
        "区县编号": "district_code",
        "基站名称（中文）": "base_station_name",
        "小区名称（中文）": "cell_name",
        "覆盖半径": "coverage_radius",
        "roam_type": "roam_type",
        "生命周期状态": "life_cycle_status",
        "共建共享类型": "sharing_type",
        "id": "id"
    }

    csv_path = Path(csv_file_path)
    if not csv_path.exists():
        raise FileNotFoundError(f"CSV文件不存在: {csv_file_path}")

    # Quick pre-pass to count data rows (minus header) for progress/ETA display.
    # NOTE: counts physical lines, so quoted fields containing newlines skew
    # the estimate slightly — acceptable for a progress bar.
    with open(csv_file_path, 'r', encoding='utf-8') as f:
        total_rows = sum(1 for _ in f) - 1

    print(f"开始导入 {total_rows} 行数据到表 {table_name}")
    start_time = time.time()

    # CSV columns whose values must be parsed as integers (they are later
    # re-rendered as lowercase hex strings before insertion).
    int_columns = ('基站ID（10进制）', '本地小区识别ID', 'tac寻呼识别码')

    # Positions (in column_mapping order) used by the hex/id post-processing,
    # computed once here instead of relying on magic indices 3/4/7/18.
    ordered_cols = list(column_mapping)
    bs_idx = ordered_cols.index("基站ID（10进制）")   # base_station_id
    lc_idx = ordered_cols.index("本地小区识别ID")     # local_cell_id
    tac_idx = ordered_cols.index("tac寻呼识别码")     # tac
    id_idx = ordered_cols.index("id")                 # synthesized id column

    conn = None
    try:
        conn = psycopg2.connect(**db_config)
        conn.autocommit = False  # one explicit transaction, committed at the end
        with conn.cursor() as cursor:
            # Build the INSERT with sql.Identifier so table/column names are
            # safely quoted (no string interpolation of identifiers).
            insert_sql = sql.SQL("INSERT INTO {} ({}) VALUES ({})").format(
                sql.Identifier(table_name),
                sql.SQL(', ').join(map(sql.Identifier, column_mapping.values())),
                sql.SQL(', ').join([sql.Placeholder()] * len(column_mapping))
            )

            processed_rows = 0
            with open(csv_file_path, 'r', encoding='utf-8') as file:
                reader = csv.DictReader(file)
                batch = []

                for row in reader:
                    # Convert each CSV cell according to its target type.
                    values = []
                    for col in column_mapping:
                        value = row.get(col, '')
                        if value.strip() == '' and col != 'id':
                            values.append(None)  # empty cell -> SQL NULL
                        else:
                            try:
                                if col in int_columns:
                                    values.append(int(value))
                                else:
                                    values.append(value)
                            except ValueError:
                                # Report the conversion failure, insert NULL.
                                print(
                                    f"行 {processed_rows + 1}: 字段 '{col}' 的值 '{value}' 无法转换为 {column_mapping[col]}")
                                values.append(None)

                    # Derive the synthetic id as hex(base_station_id) +
                    # hex(local_cell_id); store the integer fields themselves
                    # as lowercase hex strings as well.
                    if values[bs_idx] is not None and values[lc_idx] is not None:
                        id_value = f"{values[bs_idx]:x}{values[lc_idx]:x}"
                        values[bs_idx] = f"{values[bs_idx]:x}"
                        values[lc_idx] = f"{values[lc_idx]:x}"
                        if values[tac_idx] is not None:
                            values[tac_idx] = f"{values[tac_idx]:x}"
                        values[id_idx] = id_value

                    batch.append(tuple(values))
                    processed_rows += 1

                    if len(batch) >= batch_size:
                        execute_batch(cursor, insert_sql, batch)
                        batch = []

                        # Progress / ETA report, once per flushed batch.
                        elapsed = time.time() - start_time
                        progress = processed_rows / total_rows * 100
                        eta = elapsed / processed_rows * (total_rows - processed_rows) if processed_rows > 0 else 0
                        print(
                            f"已处理: {processed_rows}/{total_rows} ({progress:.2f}%), 耗时: {elapsed:.2f}s, ETA: {eta:.2f}s")

                # Flush the final partial batch.
                if batch:
                    execute_batch(cursor, insert_sql, batch)

                # Re-enable triggers and refresh planner statistics; use
                # sql.Identifier instead of raw f-string interpolation so the
                # table name is properly quoted.
                cursor.execute(
                    sql.SQL("ALTER TABLE {} ENABLE TRIGGER ALL").format(sql.Identifier(table_name)))
                cursor.execute(
                    sql.SQL("ANALYZE {}").format(sql.Identifier(table_name)))

            conn.commit()
            elapsed_time = time.time() - start_time
            print(f"导入完成! 共处理 {processed_rows} 行数据，耗时: {elapsed_time:.2f}s")

    except Exception as e:
        # Best-effort import: report the failure and roll back the transaction.
        print(f"导入过程中发生错误: {e}")
        if conn is not None:
            conn.rollback()
        traceback.print_exc()
    finally:
        # psycopg2's context manager only ends the transaction; the physical
        # connection must be closed explicitly to avoid leaking it.
        if conn is not None:
            conn.close()

# Usage example
if __name__ == "__main__":
    import os

    # SECURITY NOTE: connection credentials were previously hard-coded here.
    # They can now be overridden via the standard PG* environment variables;
    # the literals below remain only as fallbacks and should be removed once
    # the environment is configured (never commit real passwords).
    db_config = {
        "host": os.environ.get("PGHOST", "172.18.0.84"),
        "port": int(os.environ.get("PGPORT", "5432")),
        "dbname": os.environ.get("PGDATABASE", "guangdianrisk"),
        "user": os.environ.get("PGUSER", "gdrisk"),
        "password": os.environ.get("PGPASSWORD", "RQ1s2di^n6qWpy3"),
        "connect_timeout": 10,  # connection timeout (seconds)
        "options": "-c statement_timeout=300000",  # statement timeout (ms; 5 minutes)
        "keepalives": 1,  # enable TCP keepalive
        "keepalives_idle": 30,  # idle seconds before the first keepalive probe
        "keepalives_interval": 10,  # seconds between keepalive probes
        "keepalives_count": 5  # failed probes before the connection is dropped
    }

    csv_to_postgresql(
        csv_file_path="C:/Users/55577/Documents/t4.csv",
        db_config=db_config,
        table_name="hunan_jizhan"
    )