import os
from datetime import datetime
from utils.common import CommonUtils

class DataMigrator:
    """Migrate table data from a MySQL source into a KingbaseES target.

    Data is copied in batches via a server-side iterator, row counts are
    validated after each table, and an interrupted run can be resumed from
    a previously recorded row offset (``resume_info``).
    """

    def __init__(self, mysql_db, kingbase_db, migration_config, logger, task_name):
        """
        :param mysql_db: source DB wrapper; must provide ``get_table_count``,
            ``get_table_structure`` and ``get_table_data_iterator``.
        :param kingbase_db: target DB wrapper; must provide
            ``get_table_count(schema, table)`` and ``execute_batch``.
        :param migration_config: dict with optional keys ``target_schema``,
            ``batch_size`` (default 1000) and ``skip_existing_tables``.
        :param logger: logging.Logger-like object (info/debug/error).
        :param task_name: human-readable identifier of this migration task.
        """
        self.mysql_db = mysql_db
        self.kingbase_db = kingbase_db
        self.migration_config = migration_config
        self.logger = logger
        self.task_name = task_name
        self.target_schema = migration_config.get('target_schema', '')
        self.batch_size = migration_config.get('batch_size', 1000)

    def migrate_data(self, tables, resume_info=None):
        """Migrate data for each table in ``tables``.

        :param tables: iterable of source table names.
        :param resume_info: optional dict mapping table name ->
            ``{'last_row': int}``; when present, migration of that table
            resumes from the recorded row offset.
        :returns: dict with keys ``success_tables``, ``failed_tables``,
            ``total_tables`` and ``total_records_migrated``.
        :raises Exception: re-raised from the first table whose migration or
            validation fails — the whole run aborts at that point, so
            ``failed_tables`` holds at most one entry when an exception
            propagates.
        """
        self.logger.info(f"开始迁移数据，批量大小: {self.batch_size}")

        # Aggregate result statistics for the whole run.
        migration_results = {
            'success_tables': [],
            'failed_tables': [],
            'total_tables': len(tables),
            'total_records_migrated': 0
        }

        # Optionally skip tables that already contain data in the target.
        skip_existing_tables = self.migration_config.get('skip_existing_tables', False)

        for table_index, table_name in enumerate(tables):
            self.logger.info(f"开始迁移表数据 [{table_index + 1}/{len(tables)}]: {table_name}")

            try:
                # Resume support: pick up from the last recorded row offset.
                start_row = 0
                if resume_info and table_name in resume_info:
                    start_row = resume_info[table_name].get('last_row', 0)
                    if start_row > 0:
                        self.logger.info(f"从上次中断的位置继续迁移: {table_name}, 行号: {start_row}")

                if skip_existing_tables:
                    # Lowercase schema name to stay consistent with kingbase_db.
                    target_schema_lower = self.target_schema.lower()
                    target_count = self.kingbase_db.get_table_count(target_schema_lower, table_name)
                    if target_count > 0:
                        self.logger.info(f"表 {table_name} 在目标数据库中已存在数据，跳过迁移")
                        migration_results['success_tables'].append(table_name)
                        continue

                # Empty source tables count as successfully migrated.
                source_count = self.mysql_db.get_table_count(table_name)

                if source_count == 0:
                    self.logger.info(f"表 {table_name} 没有数据，跳过")
                    migration_results['success_tables'].append(table_name)
                    continue

                start_time = datetime.now()

                # Copy the rows in batches.
                migrated_count = self._migrate_table_data(table_name, start_row)

                end_time = datetime.now()
                time_cost = (end_time - start_time).total_seconds()

                # Row-count validation; raises on mismatch.
                self._validate_data(table_name, source_count)

                self.logger.info(f"表数据迁移成功: {table_name}, 迁移记录数: {migrated_count}, 耗时: {time_cost:.2f}秒")

                migration_results['success_tables'].append(table_name)
                migration_results['total_records_migrated'] += migrated_count

            except Exception as e:
                self.logger.error(f"表数据迁移失败: {table_name}\n{e}")
                migration_results['failed_tables'].append(table_name)
                # Deliberate: abort the whole run on the first failure so the
                # caller can decide how to recover (e.g. via resume_info).
                raise

        self.logger.info(f"数据迁移完成，成功迁移 {len(migration_results['success_tables'])}/{len(tables)} 个表，共迁移 {migration_results['total_records_migrated']} 条记录")

        return migration_results

    def _migrate_table_data(self, table_name, start_row=0):
        """Copy one table's rows in batches; return the number migrated.

        :param table_name: name of the source table.
        :param start_row: row offset to start from (for resumed runs).
        :returns: total number of rows inserted into the target.
        :raises Exception: if the table structure cannot be read or a batch
            insert fails.
        """
        total_migrated = 0

        # Column metadata is needed to build the INSERT statement.
        table_structure = self.mysql_db.get_table_structure(table_name)
        if not table_structure:
            raise Exception(f"获取表结构失败: {table_name}")

        column_names = [column['COLUMN_NAME'] for column in table_structure]

        # Double-quote identifiers so the target preserves their case;
        # the schema name is lowercased to stay consistent with kingbase_db.
        quoted_column_names = [f'"{col}"' for col in column_names]
        columns_str = ', '.join(quoted_column_names)
        placeholders = ', '.join(['%s'] * len(column_names))

        target_schema_lower = self.target_schema.lower()
        insert_sql = f'INSERT INTO {target_schema_lower}."{table_name}" ({columns_str}) VALUES ({placeholders})'

        data_iterator = self.mysql_db.get_table_data_iterator(
            table_name,
            batch_size=self.batch_size,
            start_row=start_row
        )

        for batch_index, batch_data in enumerate(data_iterator):
            try:
                # Build one parameter list per row, in declared column order.
                batch_params = []
                for row in batch_data:
                    row_params = []
                    for column_name in column_names:
                        # Row keys may not match the declared case; fall back
                        # to the lowercase column name, then to NULL.
                        try:
                            value = row[column_name]
                        except (KeyError, TypeError):
                            try:
                                value = row[column_name.lower()]
                            except (KeyError, TypeError):
                                value = None

                        # MySQL zero dates ('0000-00-00[ 00:00:00]') are not
                        # valid in PostgreSQL/KingbaseES; map them to the
                        # minimum supported date(/time). startswith() is used
                        # instead of a substring test so ordinary text that
                        # merely contains '0000-' is left untouched.
                        if isinstance(value, str) and value.startswith('0000-'):
                            if ' ' in value and ':' in value:
                                value = '1000-01-01 00:00:00'
                            else:
                                value = '1000-01-01'

                        row_params.append(value)
                    batch_params.append(row_params)

                self.kingbase_db.execute_batch(insert_sql, batch_params)

                total_migrated += len(batch_data)

                # Progress is start_row-relative so resumed runs log the
                # absolute row position.
                current_row = start_row + total_migrated
                self.logger.debug(f"表 {table_name} 批次 {batch_index + 1} 迁移成功，当前进度: {current_row}")

                # NOTE: progress could be persisted here to support
                # checkpoint/resume at batch granularity.

            except Exception as e:
                self.logger.error(f"表 {table_name} 批次 {batch_index + 1} 迁移失败\n{e}")
                raise

        return total_migrated

    def _validate_data(self, table_name, source_count):
        """Validate the migration by comparing source and target row counts.

        :param table_name: name of the migrated table.
        :param source_count: row count of the source table.
        :raises Exception: if the target row count differs from the source.
        """
        try:
            # Lowercase schema name to stay consistent with kingbase_db.
            target_schema_lower = self.target_schema.lower()
            target_count = self.kingbase_db.get_table_count(target_schema_lower, table_name)

            if source_count == target_count:
                self.logger.info(f"数据校验成功: {table_name}, 源表记录数: {source_count}, 目标表记录数: {target_count}")
            else:
                error_msg = f"数据校验失败: {table_name}, 源表记录数: {source_count}, 目标表记录数: {target_count}"
                self.logger.error(error_msg)
                raise Exception(error_msg)
        except Exception as e:
            self.logger.error(f"数据校验异常: {table_name}\n{e}")
            raise

    def get_resume_info(self, task_tracker):
        """Fetch resume information recorded by a previous (interrupted) run.

        :param task_tracker: object exposing ``get_task_status()``.
        :returns: the stored ``resume_info`` dict, or None if unavailable
            (missing, or the lookup itself failed — best-effort by design).
        """
        try:
            task_status = task_tracker.get_task_status()
            if task_status and 'resume_info' in task_status:
                return task_status['resume_info']
            return None
        except Exception as e:
            self.logger.error(f"获取续传信息失败\n{e}")
            return None