import psycopg2
from psycopg2 import sql
import logging
import argparse
import os
import json
import time
import sys
from tqdm import tqdm
import io

# Logging configuration: mirror every message to a UTF-8 log file and stdout.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(
            r"C:\Users\17610\Desktop\python脚本\log\csv_import.log",
            encoding='utf-8'
        ),
        logging.StreamHandler(sys.stdout)
    ]
)

# Database connection settings. The "options" key (client_encoding /
# search_path) is appended dynamically in main() before connecting.
# NOTE(review): credentials are hard-coded in plaintext — consider moving
# them to environment variables or a config file.
DB_CONFIG = {
    "host": "172.31.169.55",
    "port": "5432",
    "dbname": "postgres",
    "user": "postgres",
    "password": "123456",
    "sslmode": "disable"
}

# Path of the JSON file that records resumable import progress.
STATE_FILE = r"C:\Users\17610\Desktop\python脚本\log\import_state.json"
# Maximum number of attempts per chunk before aborting the import.
MAX_RETRIES = 3
# Delay between retry attempts (seconds).
RETRY_DELAY = 5
# CSV field delimiter (tab; must match the DELIMITER in the COPY command).
CSV_DELIMITER = '\t'
# Buffer size (10 MB).
BUFFER_SIZE = 10 * 1024 * 1024
# Rows processed per chunk (100k rows).
CHUNK_ROWS = 100000


def load_state(state_file=None):
    """Load the persisted import state from disk.

    Args:
        state_file: Optional path to the state JSON file; defaults to the
            module-level STATE_FILE (backward compatible with existing
            zero-argument callers).

    Returns:
        dict: The saved state, or a fresh default state when the file is
        missing or unreadable.
    """
    path = STATE_FILE if state_file is None else state_file
    if os.path.exists(path):
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            # A corrupt/unreadable state file is non-fatal: log it and
            # fall through to a clean default state.
            logging.error("加载状态文件失败: %s", e)
    return {
        "processed_rows": 0,
        "csv_path": "",
        "table_name": "",
        "total_rows": 0,
        "start_time": 0,
        "schema": "public"  # target schema recorded for resumed runs
    }


def save_state(state, state_file=None):
    """Persist the import state to disk as pretty-printed JSON.

    Args:
        state: State dict to serialize.
        state_file: Optional target path; defaults to the module-level
            STATE_FILE (backward compatible with existing callers).
    """
    path = STATE_FILE if state_file is None else state_file
    try:
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(state, f, indent=2, ensure_ascii=False)
    except Exception as e:
        # Losing the state file only costs resumability, so log and continue.
        logging.error("保存状态文件失败: %s", e)

def clear_state(state_file=None):
    """Delete the state file if it exists (no-op when absent).

    Args:
        state_file: Optional path to delete; defaults to the module-level
            STATE_FILE (backward compatible with existing callers).
    """
    path = STATE_FILE if state_file is None else state_file
    if os.path.exists(path):
        try:
            os.remove(path)
        except Exception as e:
            # Deletion failure is non-fatal; the next run will re-prompt.
            logging.error("清除状态文件失败: %s", e)

def calculate_total_rows(csv_path):
    """Count the data rows in a CSV file (header line excluded).

    Args:
        csv_path: Path to the CSV file.

    Returns:
        int: Number of rows after the header; 0 for an empty file or on
        read failure.
    """
    count = 0
    try:
        with open(csv_path, 'r', encoding='utf-8', errors='ignore') as f:
            # Fix: the original used next(f), which raises StopIteration
            # on an empty file and got logged as a spurious error.
            if next(f, None) is not None:
                count = sum(1 for _ in f)
    except Exception as e:
        logging.error("统计行数失败: %s", e)
    return count


def fast_import(conn, csv_path, table_name, schema, start_row, end_row, total_rows):
    """Import one chunk of CSV rows [start_row, end_row) via COPY FROM STDIN.

    Args:
        conn: Open psycopg2 connection; the chunk is committed on success
            and rolled back on any failure.
        csv_path: Source CSV path (tab-delimited, first line is the header).
        table_name: Target table, either "table" or "schema.table"; an
            embedded schema prefix overrides the `schema` argument.
        schema: Default schema used when table_name carries no prefix.
        start_row: 0-based index of the first data row of this chunk.
        end_row: Exclusive end index of the chunk.
        total_rows: Total data rows in the file (unused here; kept for
            interface compatibility with the caller).

    Returns:
        bool: True when the chunk was committed, False otherwise.
    """
    try:
        with conn.cursor() as cursor:
            # Build a safely-quoted, schema-qualified identifier.
            if '.' in table_name:
                schema, table = table_name.split('.', 1)
            else:
                table = table_name
            table_identifier = sql.Identifier(schema, table)

            with open(csv_path, 'r', encoding='utf-8', errors='ignore') as f:
                next(f, None)  # skip header
                # Skip rows imported in previous chunks; guard against EOF
                # (the original bare next(f) raised StopIteration here).
                for _ in range(start_row):
                    if next(f, None) is None:
                        break

                # Collect the chunk into an in-memory text buffer.
                # io.StringIO replaces the original BytesIO+TextIOWrapper
                # pair: copy_expert accepts a readable text stream directly,
                # avoiding the wrapper's ownership/close pitfalls.
                buffer = io.StringIO()
                for _ in range(end_row - start_row):
                    line = next(f, None)
                    if line is None:
                        break
                    buffer.write(line)
                buffer.seek(0)

                # COPY into the schema-qualified table; the E'\t' literal
                # matches CSV_DELIMITER and '\N' marks NULL values.
                cursor.copy_expert(
                    sql.SQL("""
                    COPY {} FROM STDIN 
                    WITH (FORMAT CSV, DELIMITER E'\t', NULL '\\N')
                    """).format(table_identifier),
                    buffer
                )

            conn.commit()
            return True
    except Exception as e:
        logging.error("导入失败: %s", e)
        conn.rollback()
        return False


def main(csv_path, table_name, schema="public"):
    """Resumable bulk CSV import into PostgreSQL.

    Args:
        csv_path: Source CSV path (tab-delimited, with a header row).
        table_name: Target table ("table" or "schema.table").
        schema: Target schema when table_name has no prefix (default "public").
    """
    state = load_state()

    # If the saved state belongs to a different CSV, ask before discarding it.
    if state.get("csv_path") and state["csv_path"] != csv_path:
        logging.warning("检测到不同CSV任务(%s)", state["csv_path"])
        if input("是否清除状态并开始新任务? (y/n): ").lower() == 'y':
            clear_state()
            state = load_state()
        else:
            return

    # First run for this CSV: record totals and the target schema.
    if not state.get("total_rows"):
        state["total_rows"] = calculate_total_rows(csv_path)
        state["csv_path"] = csv_path
        state["table_name"] = table_name
        state["schema"] = schema
        state["processed_rows"] = 0
        state["start_time"] = time.time()
        save_state(state)

    processed_rows = state["processed_rows"]
    total_rows = state["total_rows"]
    start_time = state["start_time"]
    current_schema = state["schema"]

    if processed_rows >= total_rows:
        logging.info("导入已完成")
        clear_state()
        return

    # Fix: set connection options on EVERY run. The original only did this
    # on the first run, so resumed imports silently lost the search_path.
    DB_CONFIG["options"] = f"-c client_encoding=utf8 -c search_path={current_schema}"

    # Connect to the database.
    try:
        conn = psycopg2.connect(**DB_CONFIG)
        conn.autocommit = False
        logging.info("数据库连接成功（Schema: %s）", current_schema)
    except Exception as e:
        logging.error("数据库连接失败: %s", e)
        return

    # Progress bar resumes from the already-processed row count.
    progress_bar = tqdm(
        total=total_rows,
        initial=processed_rows,
        desc="导入进度",
        unit="行",
        bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]"
    )

    try:
        # Import in chunks of CHUNK_ROWS, persisting progress after each.
        while processed_rows < total_rows:
            end_row = min(processed_rows + CHUNK_ROWS, total_rows)
            chunk_start_time = time.time()

            # Attempt the chunk, retrying up to MAX_RETRIES times.
            success = False
            for attempt in range(1, MAX_RETRIES + 1):
                if fast_import(conn, csv_path, table_name, current_schema,
                               processed_rows, end_row, total_rows):
                    success = True
                    break
                logging.warning("导入失败，重试 %d/%d", attempt, MAX_RETRIES)
                time.sleep(RETRY_DELAY)

            if not success:
                logging.error("块 %d-%d 多次重试后仍失败", processed_rows, end_row)
                break

            # Persist progress so an interrupted run can resume here.
            rows_imported = end_row - processed_rows
            processed_rows = end_row
            state["processed_rows"] = processed_rows
            save_state(state)

            progress_bar.update(rows_imported)

            # Throughput / ETA reporting for this chunk.
            chunk_time = time.time() - chunk_start_time
            rows_per_sec = rows_imported / chunk_time if chunk_time > 0 else 0
            remaining_rows = total_rows - processed_rows
            time_str = time.strftime("%H:%M:%S", time.gmtime(remaining_rows / rows_per_sec)) if rows_per_sec > 0 else "未知"

            logging.info("已导入: %d-%d行 (%.1f 行/秒), 剩余: %d行, 预计: %s",
                         end_row - rows_imported, end_row, rows_per_sec, remaining_rows, time_str)
    finally:
        # Fix: release the progress bar and connection even when the loop
        # raises (the original leaked both on an unexpected exception).
        progress_bar.close()
        conn.close()

    if processed_rows >= total_rows:
        total_time = time.time() - start_time
        mins, secs = divmod(total_time, 60)
        logging.info("导入完成! 共导入 %d 行, 耗时: %d分%d秒 (%.1f 行/秒)",
                     total_rows, mins, secs, total_rows / total_time)
        clear_state()
    else:
        logging.warning("导入中断! 成功导入 %d/%d 行", processed_rows, total_rows)


if __name__ == "__main__":
    # CLI entry point: two positional arguments plus an optional --schema
    # flag that defaults to "public".
    arg_parser = argparse.ArgumentParser(description='高效CSV导入工具 - 支持指定Schema')
    arg_parser.add_argument('csv_path', help='CSV文件路径')
    arg_parser.add_argument('table_name', help='目标表名（格式：table_name 或 schema.table_name）')
    arg_parser.add_argument('--schema', default='public', help='目标Schema（默认：public）')
    cli_args = arg_parser.parse_args()

    try:
        main(cli_args.csv_path, cli_args.table_name, cli_args.schema)
    except Exception as e:
        # Top-level boundary: log with traceback instead of crashing silently.
        logging.error("程序异常终止: %s", str(e), exc_info=True)