# Data migration: split a wide user table into normalized target tables.
import logging
import os

import pandas as pd
from sqlalchemy import create_engine

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class UserDataMigrator:
    """Migrate user rows from a single wide source table into normalized tables.

    Reads ``user_wide_table`` from the source database, splits each row into
    a core user record and an extended detail record, and appends both to the
    ``users`` and ``user_details`` tables in the target database.
    """

    # Columns copied into the target `users` table (core account record).
    _USER_COLUMNS = [
        'id', 'user_id', 'real_name', 'phone', 'email',
        'register_time', 'status', 'create_time',
    ]
    # Columns copied into the target `user_details` table (extended profile).
    _DETAIL_COLUMNS = [
        'user_id', 'gender', 'birthday', 'address',
        'balance', 'last_login_time', 'create_time',
    ]

    def __init__(self, source_db_url, target_db_url):
        """
        Initialize the migrator.

        Args:
            source_db_url: SQLAlchemy connection string for the source database.
            target_db_url: SQLAlchemy connection string for the target database.
        """
        self.source_engine = create_engine(source_db_url)
        self.target_engine = create_engine(target_db_url)

    def close(self):
        """Dispose both engines' connection pools (safe to call repeatedly)."""
        self.source_engine.dispose()
        self.target_engine.dispose()

    def read_wide_table(self):
        """
        Read the entire wide table from the source database.

        Returns:
            pandas.DataFrame: all rows of ``user_wide_table``.

        Raises:
            Exception: re-raised after logging if the read fails.
        """
        query = "SELECT * FROM user_wide_table"
        try:
            df = pd.read_sql(query, self.source_engine)
        except Exception:
            # logger.exception records the full traceback, which
            # logger.error(f"...{e}") would discard.
            logger.exception("读取宽表数据失败")
            raise
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logger.info("成功读取 %d 条用户数据", len(df))
        return df

    def split_and_migrate_data(self, batch_size=1000):
        """
        Split the wide table into users / user_details and migrate both.

        Args:
            batch_size: number of rows per INSERT batch.

        Raises:
            Exception: re-raised after logging if any step fails.
        """
        try:
            wide_data = self.read_wide_table()

            # .copy() detaches each slice from the wide frame so later
            # mutations cannot trigger chained-assignment surprises.
            users_df = wide_data[self._USER_COLUMNS].copy()
            details_df = wide_data[self._DETAIL_COLUMNS].copy()

            # Batched inserts keep individual statements bounded in size.
            self._batch_insert(users_df, 'users', batch_size)
            self._batch_insert(details_df, 'user_details', batch_size)

            logger.info("数据迁移完成")
        except Exception:
            logger.exception("数据迁移过程中发生错误")
            raise

    def _batch_insert(self, df, table_name, batch_size):
        """
        Append *df* to *table_name* in batches of *batch_size* rows.

        Args:
            df: DataFrame whose columns match the target table.
            table_name: target table name.
            batch_size: positive number of rows per batch.

        Raises:
            ValueError: if *batch_size* is not positive (a zero step would
                raise an opaque error; a negative one would silently skip
                every row).
            Exception: re-raised after logging if an insert fails.
        """
        if batch_size <= 0:
            raise ValueError("batch_size must be a positive integer")
        total_rows = len(df)
        for start in range(0, total_rows, batch_size):
            batch = df.iloc[start:start + batch_size]
            try:
                batch.to_sql(
                    name=table_name,
                    con=self.target_engine,
                    if_exists='append',
                    index=False,
                    # 'multi' packs many rows into one INSERT statement.
                    method='multi',
                )
            except Exception:
                logger.exception("插入数据到 %s 表时出错", table_name)
                raise
            logger.info(
                "已插入 %d/%d 条记录到 %s 表",
                min(start + batch_size, total_rows), total_rows, table_name,
            )


def main():
    """Entry point: build a migrator from configuration and run the migration.

    Connection strings are taken from the SOURCE_DB_URL / TARGET_DB_URL
    environment variables; the hard-coded literals remain only as local
    development fallbacks. Credentials should never live in source code.
    """
    source_db_url = os.environ.get(
        "SOURCE_DB_URL",
        "mysql+pymysql://username:password@localhost:3306/source_db",
    )
    target_db_url = os.environ.get(
        "TARGET_DB_URL",
        "mysql+pymysql://username:password@localhost:3306/target_db",
    )

    # Create the migrator and run the full wide-table split/migration.
    migrator = UserDataMigrator(source_db_url, target_db_url)
    migrator.split_and_migrate_data(batch_size=1000)


if __name__ == "__main__":
    main()
