import pymysql
import datetime
import traceback
from apscheduler.schedulers.blocking import BlockingScheduler

# Database connection settings.
# NOTE(review): credentials are hard-coded in source; consider loading them
# from the environment or a secrets store before this ships further.
source_db_config = {
    "host": "10.172.100.18",
    "user": "dataphin",
    "password": "dataphin123",
    "database": "sys",
    "port": 3306
}
# Target database that receives the synced tables and the sync_log table.
target_db_config = {
    "host": "10.172.100.17",
    "user": "root",
    "password": "rKgH_2441EA",
    "database": "namcs_ods_report",
    "port": 3301
}

# Connect to the source and target databases at import time.
source_conn = pymysql.connect(**source_db_config)
target_conn = pymysql.connect(**target_db_config)

# NOTE(review): these module-level cursors are never used — every function
# below opens its own cursor. They look like leftovers; confirm and remove.
source_cursor = source_conn.cursor()
target_cursor = target_conn.cursor()

def get_table_names(conn):
    """Return the names of all tables in the database behind *conn*.

    The cursor is now closed in a ``finally`` block so it is not leaked
    when ``execute``/``fetchall`` raises (the original only closed it on
    the success path).
    """
    cursor = conn.cursor()
    try:
        cursor.execute("SHOW TABLES")
        # SHOW TABLES yields one-column rows; unwrap to plain strings.
        return [row[0] for row in cursor.fetchall()]
    finally:
        cursor.close()

def create_log_table(conn):
    """Create the ``sync_log`` error-log table on *conn* if it is missing.

    Each row records which table failed to sync, the full traceback, and
    the snapshot date of the run. The cursor is closed in a ``finally``
    block so it is not leaked if ``execute``/``commit`` raises.
    """
    create_sql = """
    CREATE TABLE IF NOT EXISTS sync_log (
        id INT AUTO_INCREMENT PRIMARY KEY,
        table_name VARCHAR(255),
        error_message TEXT,
        snapshot_date DATE
    )
    """
    cursor = conn.cursor()
    try:
        cursor.execute(create_sql)
        conn.commit()
    finally:
        cursor.close()

def batch_insert(table_name, source_cursor, target_cursor, batch_size=1000):
    """Copy every row of *table_name* from source to target in batches.

    The caller is responsible for committing on the target connection.
    Table names are backtick-quoted so tables whose names are reserved
    words don't break the generated SQL, and a short (partial) batch ends
    the loop immediately instead of issuing one extra empty-page query.

    NOTE(review): LIMIT/OFFSET paging is only stable if the table is not
    being written to concurrently — confirm the source is quiesced.
    """
    offset = 0
    while True:
        source_cursor.execute(
            f"SELECT * FROM `{table_name}` LIMIT {batch_size} OFFSET {offset}"
        )
        rows = source_cursor.fetchall()
        if not rows:
            break
        placeholders = ",".join(["%s"] * len(rows[0]))
        target_cursor.executemany(
            f"INSERT INTO `{table_name}` VALUES ({placeholders})", rows
        )
        if len(rows) < batch_size:
            # Partial page — this was the last batch; no need to query again.
            break
        offset += batch_size

def sync_table_structure(table_name, source_cursor, target_cursor):
    """Replay the source table's DDL on the target connection.

    Fetches the source's ``SHOW CREATE TABLE`` output and executes the
    returned CREATE statement on the target cursor.
    """
    source_cursor.execute(f"SHOW CREATE TABLE {table_name}")
    # Result row is (table_name, create_statement); we want the statement.
    row = source_cursor.fetchone()
    ddl = row[1]
    target_cursor.execute(ddl)

def sync_table(table_name, source_conn, target_conn, snapshot_date, is_first_sync):
    """Sync one table's structure and rows from source to target.

    On the first sync the target table is dropped and recreated from the
    source DDL before rows are copied in batches. Any failure is recorded
    in the target's ``sync_log`` table and deliberately swallowed so one
    bad table does not abort the whole run.

    The per-call cursors are now closed in a ``finally`` block — the
    original leaked both cursors on every invocation.
    """
    source_cursor = source_conn.cursor()
    target_cursor = target_conn.cursor()
    try:
        if is_first_sync:
            target_cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
            sync_table_structure(table_name, source_cursor, target_cursor)
            target_conn.commit()

        batch_insert(table_name, source_cursor, target_cursor)
        target_conn.commit()

    except Exception as e:
        # Best-effort error logging; the error is intentionally not re-raised.
        error_message = str(e) + "\n" + traceback.format_exc()
        log_cursor = target_conn.cursor()
        try:
            log_sql = "INSERT INTO sync_log (table_name, error_message, snapshot_date) VALUES (%s, %s, %s)"
            log_cursor.execute(log_sql, (table_name, error_message, snapshot_date))
            target_conn.commit()
        finally:
            log_cursor.close()
    finally:
        source_cursor.close()
        target_cursor.close()

def main(is_first_sync):
    """Run one full sync of every source table into the target database.

    Opens fresh connections for each run and closes them when done. The
    original reused the module-level connections and closed them at the
    end, so the scheduled daily re-run operated on already-closed
    connections and failed.

    :param is_first_sync: when True, target tables are dropped and
        recreated from the source DDL before rows are copied.
    """
    snapshot_date = datetime.date.today()
    src_conn = pymysql.connect(**source_db_config)
    tgt_conn = pymysql.connect(**target_db_config)
    try:
        # Ensure the error-log table exists before any sync can fail.
        create_log_table(tgt_conn)
        for table in get_table_names(src_conn):
            sync_table(table, src_conn, tgt_conn, snapshot_date, is_first_sync)
    finally:
        src_conn.close()
        tgt_conn.close()
def scheduled_task():
    """Block forever, re-running the sync every day at midnight.

    Schedules an incremental run (``is_first_sync=False``) via a cron
    trigger and then hands control to the blocking scheduler.
    """
    daily = BlockingScheduler()
    # Runs every day at 00:00.
    daily.add_job(lambda: main(is_first_sync=False), 'cron', hour=0, minute=0)
    daily.start()

if __name__ == "__main__":
    # Initial full sync first (drops and recreates target tables), then hand
    # control to the blocking scheduler for daily incremental runs.
    main(is_first_sync=True)
    scheduled_task()
