import os
from urllib.parse import quote_plus

import pandas as pd
from sqlalchemy import create_engine, text

try:
    # Aliyun RDS MySQL connection settings.  Values are taken from the
    # environment when present so credentials do not have to live in the
    # source file; the literals below are development-only fallbacks.
    # NOTE(security): remove the hard-coded fallback password before
    # committing or sharing this script.
    host = os.environ.get("DB_HOST", "rm-bp1ft7y63rger71a5go.mysql.rds.aliyuncs.com")
    port = int(os.environ.get("DB_PORT", "3306"))  # MySQL default port
    user = os.environ.get("DB_USER", "root")
    password = os.environ.get("DB_PASSWORD", "Unicdata@1qaz")
    database = os.environ.get("DB_NAME", "ai-tenant")

    # URL-encode the password: characters such as '@' (present in the
    # fallback above) would otherwise corrupt the SQLAlchemy URL, which
    # separates user:password from host at the '@' delimiter.
    connection_string = (
        f"mysql+pymysql://{user}:{quote_plus(password)}@{host}:{port}/{database}"
    )

    # create_engine() is lazy; no connection is opened until first use.
    engine = create_engine(connection_string)

    # Smoke-test the connection with a trivial query.
    with engine.connect() as connection:
        connection.execute(text("SELECT 1"))
        print("连接成功!")

except Exception as e:
    print(f"连接错误: {e}")


# Fetch the table structure (one row per column) of a database.
def get_database_columns(db_name):
    """Return a DataFrame with one row per column of every table in *db_name*.

    The schema name is passed as a bound parameter instead of being
    interpolated into the SQL string, which avoids SQL injection and
    quoting problems.  Each selected column is aliased to a lowercase
    name because MySQL 8 returns information_schema headers in uppercase,
    while the rest of this script indexes them in lowercase.

    Relies on the module-level ``engine`` created above.
    """
    query = text("""
        SELECT
            table_name     AS table_name,
            column_name    AS column_name,
            data_type      AS data_type,
            column_type    AS column_type,
            is_nullable    AS is_nullable,
            column_default AS column_default,
            extra          AS extra
        FROM information_schema.columns
        WHERE table_schema = :db_name
        ORDER BY table_name, ordinal_position
    """)
    return pd.read_sql(query, engine, params={"db_name": db_name})


# Names of the databases being compared: db1 holds the reference schema,
# db2 is the target we want to bring in line with db1.
db1_name = "database1"  # source database
db2_name = "database2"  # target database (db1's structure is synced onto it)

# Pull the full column inventory of both schemas.
db1_columns = get_database_columns(db1_name)
db2_columns = get_database_columns(db2_name)

# Tag each inventory with the database it came from.
db1_columns['source'] = db1_name
db2_columns['source'] = db2_name

# Reduce each side to its (table, column) keys and flag its origin.
db1_key = db1_columns[['table_name', 'column_name']].copy()
db1_key['in_db1'] = True
db2_key = db2_columns[['table_name', 'column_name']].copy()
db2_key['in_db2'] = True

# Full outer join: every (table, column) pair seen in either database.
all_columns = pd.merge(db1_key, db2_key, on=['table_name', 'column_name'], how='outer')

# Keys missing on one side come back as NaN; treat those as "absent".
for flag in ('in_db1', 'in_db2'):
    all_columns[flag] = all_columns[flag].fillna(False)

# Partition the key space into the three cases of interest.
is_in_db1 = all_columns['in_db1'].astype(bool)
is_in_db2 = all_columns['in_db2'].astype(bool)
only_in_db1 = all_columns[is_in_db1 & ~is_in_db2]
only_in_db2 = all_columns[~is_in_db1 & is_in_db2]
in_both = all_columns[is_in_db1 & is_in_db2]

# Generate the ALTER TABLE statements that sync db2's structure to db1's.
def _build_column_def(column_name, column_type, data_type, is_nullable,
                      column_default, extra):
    """Build the MySQL column-definition fragment used by ALTER TABLE.

    Mirrors how information_schema describes the column: full column_type,
    NULL/NOT NULL, DEFAULT (quoted only for string/temporal literals, and
    never for expression defaults such as CURRENT_TIMESTAMP, which MySQL
    would reject if quoted), plus any extra attribute (e.g. AUTO_INCREMENT).
    """
    parts = [f"`{column_name}` {column_type}"]
    parts.append("NULL" if is_nullable == 'YES' else "NOT NULL")

    if pd.notna(column_default):
        default = str(column_default)
        if default.upper().startswith('CURRENT_TIMESTAMP'):
            # Expression default: emit verbatim, unquoted.
            parts.append(f"DEFAULT {default}")
        elif data_type in ('char', 'varchar', 'text', 'date', 'datetime', 'timestamp'):
            parts.append(f"DEFAULT '{default}'")
        else:
            parts.append(f"DEFAULT {default}")

    # Extra attributes such as AUTO_INCREMENT / on update CURRENT_TIMESTAMP.
    if pd.notna(extra) and extra:
        parts.append(str(extra))

    return " ".join(parts)


# Collected ALTER TABLE statements (ADD COLUMN first, then MODIFY COLUMN).
sql_scripts = []

# 1. ADD COLUMN statements for columns present in db1 but missing in db2.
for _, row in only_in_db1.iterrows():
    table_name = row['table_name']
    column_name = row['column_name']

    # Look up the full definition of this column in db1's inventory.
    col_info = db1_columns[(db1_columns['table_name'] == table_name) &
                           (db1_columns['column_name'] == column_name)].iloc[0]

    column_def = _build_column_def(
        column_name,
        col_info['column_type'],
        col_info['data_type'],
        col_info['is_nullable'],
        col_info['column_default'],
        col_info['extra'],
    )
    sql_scripts.append(
        f"ALTER TABLE `{db2_name}`.`{table_name}` ADD COLUMN {column_def};")

# 2. MODIFY COLUMN statements for shared columns whose definitions differ.
if not in_both.empty:
    detail_cols = ['table_name', 'column_name', 'data_type', 'column_type',
                   'is_nullable', 'column_default', 'extra']
    common_columns = pd.merge(db1_columns[detail_cols], db2_columns[detail_cols],
                              on=['table_name', 'column_name'],
                              suffixes=('_db1', '_db2'))

    def _differs(a, b):
        """Elementwise inequality that treats NaN-vs-NaN as equal."""
        return (a != b) & ~(a.isna() & b.isna())

    # A column is out of sync when any definition facet differs.  Every
    # comparison must be NaN-aware: NaN != NaN evaluates True in pandas,
    # so e.g. two missing defaults previously compared as "different" and
    # produced a spurious MODIFY for every default-less column.
    type_diff = common_columns[
        _differs(common_columns['data_type_db1'], common_columns['data_type_db2']) |
        _differs(common_columns['column_type_db1'], common_columns['column_type_db2']) |
        _differs(common_columns['is_nullable_db1'], common_columns['is_nullable_db2']) |
        _differs(common_columns['column_default_db1'], common_columns['column_default_db2']) |
        _differs(common_columns['extra_db1'], common_columns['extra_db2'])
        ]

    for _, row in type_diff.iterrows():
        column_def = _build_column_def(
            row['column_name'],
            row['column_type_db1'],
            row['data_type_db1'],
            row['is_nullable_db1'],
            row['column_default_db1'],
            row['extra_db1'],
        )
        sql_scripts.append(
            f"ALTER TABLE `{db2_name}`.`{row['table_name']}` MODIFY COLUMN {column_def};")

# 3. Generate SQL to drop columns that exist only in db2 (optional; uncomment to enable)
'''
if not only_in_db2.empty:
    for _, row in only_in_db2.iterrows():
        table_name = row['table_name']
        column_name = row['column_name']

        # Generate the DROP COLUMN statement
        drop_sql = f"ALTER TABLE `{db2_name}`.`{table_name}` DROP COLUMN `{column_name}`;"
        sql_scripts.append(drop_sql)
'''

# Emit the generated SQL to stdout, grouped by statement kind.
print("-- SQL脚本用于将 {} 的结构同步到 {}".format(db1_name, db2_name))
print("-- 生成时间: {}".format(pd.Timestamp.now()))

# ADD COLUMN section (also reused below when writing the .sql file).
print("\n-- 添加缺失的列")
add_column_scripts = [s for s in sql_scripts if "ADD COLUMN" in s]
print("\n".join(add_column_scripts) if add_column_scripts else "-- 没有需要添加的列")

# MODIFY COLUMN section.
print("\n-- 修改数据类型不匹配的列")
modify_column_scripts = [s for s in sql_scripts if "MODIFY COLUMN" in s]
print("\n".join(modify_column_scripts) if modify_column_scripts else "-- 没有需要修改的列")

# Persist the SQL script to disk.  The file contains Chinese comments, so
# write it as UTF-8 explicitly instead of relying on the platform default
# encoding (which can raise UnicodeEncodeError or produce mojibake, e.g.
# on Windows with a cp1252 locale).
with open('sync_database_structure.sql', 'w', encoding='utf-8') as f:
    f.write("-- SQL脚本用于将 {} 的结构同步到 {}\n".format(db1_name, db2_name))
    f.write("-- 生成时间: {}\n\n".format(pd.Timestamp.now()))

    f.write("-- 添加缺失的列\n")
    if add_column_scripts:
        f.write("\n".join(add_column_scripts))
    else:
        f.write("-- 没有需要添加的列\n")

    f.write("\n\n-- 修改数据类型不匹配的列\n")
    if modify_column_scripts:
        f.write("\n".join(modify_column_scripts))
    else:
        f.write("-- 没有需要修改的列\n")

print("\nSQL脚本已保存到 sync_database_structure.sql")

# Save the raw comparison results to an Excel workbook for review.
def _sheet(name):
    """Clamp a sheet name to Excel's 31-character limit."""
    return name[:31]

with pd.ExcelWriter('database_comparison.xlsx') as writer:
    wrote_any = False  # an Excel workbook cannot be saved with zero sheets

    if not only_in_db1.empty:
        db1_details = pd.merge(only_in_db1, db1_columns, on=['table_name', 'column_name'])
        db1_details.to_excel(writer, sheet_name=_sheet(f'Only in {db1_name}'), index=False)
        wrote_any = True

    if not only_in_db2.empty:
        db2_details = pd.merge(only_in_db2, db2_columns, on=['table_name', 'column_name'])
        db2_details.to_excel(writer, sheet_name=_sheet(f'Only in {db2_name}'), index=False)
        wrote_any = True

    # type_diff only exists when the databases share at least one column.
    if 'type_diff' in locals() and not type_diff.empty:
        type_diff.to_excel(writer, sheet_name='Type Differences', index=False)
        wrote_any = True

    if not wrote_any:
        # Schemas match: previously the writer raised on close because no
        # sheet was ever written; record the result instead.
        pd.DataFrame({'result': ['no differences found']}).to_excel(
            writer, sheet_name='Summary', index=False)

print("比较结果已保存到 database_comparison.xlsx")