import logging
import os
import time
from datetime import datetime, timedelta

import yaml

from sync_handler import get_db_conn

logger = logging.getLogger(__name__)


def data_cleanup_task(days_to_keep=30, batch_size=1000, dry_run=False, **kwargs):
    """
    Data cleanup task: delete rows older than a retention cutoff.

    Args:
        days_to_keep: number of days of data to retain
        batch_size: rows removed per DELETE statement, keeping each
            transaction small
        dry_run: if True, only count the rows that would be deleted

    Returns:
        dict: result with keys ``deleted_count``, ``error_count``,
        ``error_details``, ``duration``, ``dry_run`` and ``cutoff_date``
    """
    start_time = time.time()
    total_deleted = 0
    error_count = 0
    error_details = []

    # Load DB connection settings from <project root>/config/config.yaml.
    config_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'config', 'config.yaml'
    )
    with open(config_path, encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Rows whose time field is strictly before this cutoff are eligible.
    cutoff_date = datetime.now() - timedelta(days=days_to_keep)
    cutoff_str = cutoff_date.strftime('%Y-%m-%d %H:%M:%S')

    logger.info(f"[CLEANUP] 开始数据清理任务, 删除 {cutoff_str} 之前的数据, dry_run={dry_run}")

    tgt_conn = None
    cursor = None
    try:
        # Connect to the target database.
        tgt_conn = get_db_conn(config['target_db'])
        cursor = tgt_conn.cursor()

        # Tables to clean and the timestamp column driving retention.
        # Names come from this fixed list, never from user input, so
        # interpolating them into the SQL below is safe; the cutoff value
        # is always passed as a bound parameter.
        cleanup_tables = [
            {'table': 'sync_main_data', 'time_field': 'created_time'},
            {'table': 'sync_detail_data', 'time_field': 'updated_time'},
            # add more tables here as needed
        ]

        for table_info in cleanup_tables:
            table_name = table_info['table']
            time_field = table_info['time_field']

            try:
                # Count first so progress can be reported and a dry run
                # can produce an estimate without deleting anything.
                count_sql = f"""
                    SELECT COUNT(*) FROM {table_name} 
                    WHERE {time_field} < %s
                """
                cursor.execute(count_sql, (cutoff_str,))
                count_to_delete = cursor.fetchone()[0]

                if count_to_delete == 0:
                    logger.info(f"[CLEANUP] 表 {table_name} 无需清理数据")
                    continue

                logger.info(f"[CLEANUP] 表 {table_name} 待清理记录数: {count_to_delete}")

                if dry_run:
                    logger.info(f"[CLEANUP] 试运行模式，表 {table_name} 将删除 {count_to_delete} 条记录")
                    total_deleted += count_to_delete
                    continue

                # Delete in batches (MySQL-style DELETE ... LIMIT) so locks
                # are held briefly. The statement is loop-invariant, so
                # build it once outside the loop.
                delete_sql = f"""
                    DELETE FROM {table_name} 
                    WHERE {time_field} < %s 
                    LIMIT %s
                """
                deleted_in_table = 0
                while True:
                    cursor.execute(delete_sql, (cutoff_str, batch_size))
                    affected_rows = cursor.rowcount

                    if affected_rows == 0:
                        break

                    deleted_in_table += affected_rows
                    total_deleted += affected_rows

                    logger.info(f"[CLEANUP] 表 {table_name} 已删除 {deleted_in_table}/{count_to_delete} 条记录")

                    # Commit each batch so completed work survives a
                    # later failure.
                    tgt_conn.commit()

                    # Brief pause to avoid hammering the database.
                    time.sleep(0.1)

                    if deleted_in_table >= count_to_delete:
                        break

                logger.info(f"[CLEANUP] 表 {table_name} 清理完成，实际删除 {deleted_in_table} 条记录")

            except Exception as table_error:
                error_count += 1
                error_msg = f"清理表 {table_name} 失败: {str(table_error)}"
                error_details.append(error_msg)
                logger.error(error_msg)
                # Roll back the failed batch. Guard the rollback itself so
                # a dead connection does not abort the remaining tables.
                try:
                    tgt_conn.rollback()
                except Exception:
                    logger.exception(f"[CLEANUP] rollback failed for table {table_name}")

        duration = time.time() - start_time

        if dry_run:
            logger.info(f"[CLEANUP] 试运行完成，预计删除 {total_deleted} 条记录，耗时 {duration:.2f}s")
        else:
            logger.info(f"[CLEANUP] 清理完成，实际删除 {total_deleted} 条记录，耗时 {duration:.2f}s")

        return {
            'deleted_count': total_deleted,
            'error_count': error_count,
            'error_details': error_details,
            'duration': duration,
            'dry_run': dry_run,
            'cutoff_date': cutoff_str
        }

    except Exception as e:
        logger.exception(f"[CLEANUP] 数据清理任务执行失败: {e}")
        return {
            'deleted_count': total_deleted,
            'error_count': error_count + 1,
            'error_details': error_details + [f"任务执行异常: {str(e)}"],
            'duration': time.time() - start_time,
            'dry_run': dry_run,
            # keep the error-path result shape consistent with success
            'cutoff_date': cutoff_str
        }
    finally:
        # Release DB resources even when the task failed part-way.
        if cursor is not None:
            try:
                cursor.close()
            except Exception:
                pass
        if tgt_conn is not None:
            tgt_conn.close()


def archive_old_data_task(days_to_archive=90, archive_table_suffix='_archive', **kwargs):
    """
    Data archive task: move rows older than a cutoff into archive tables.

    For each configured table, rows older than the cutoff are copied into
    ``<table><archive_table_suffix>`` (created with ``CREATE TABLE ...
    LIKE`` if missing) and then deleted from the source table.

    Args:
        days_to_archive: rows older than this many days are archived
        archive_table_suffix: suffix appended to form the archive table name

    Returns:
        dict: result with keys ``archived_count``, ``error_count``,
        ``error_details``, ``duration`` and ``cutoff_date``
    """
    start_time = time.time()
    total_archived = 0
    error_count = 0
    error_details = []

    # Load DB connection settings from <project root>/config/config.yaml.
    config_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'config', 'config.yaml'
    )
    with open(config_path, encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Rows whose time field is strictly before this cutoff are archived.
    cutoff_date = datetime.now() - timedelta(days=days_to_archive)
    cutoff_str = cutoff_date.strftime('%Y-%m-%d %H:%M:%S')

    logger.info(f"[ARCHIVE] 开始数据归档任务, 归档 {cutoff_str} 之前的数据")

    tgt_conn = None
    cursor = None
    try:
        tgt_conn = get_db_conn(config['target_db'])
        cursor = tgt_conn.cursor()

        # Tables to archive and the timestamp column driving selection.
        # Names come from this fixed list, never from user input, so
        # interpolating them into the SQL below is safe; the cutoff value
        # is always passed as a bound parameter.
        archive_tables = [
            {'table': 'sync_main_data', 'time_field': 'created_time'},
            {'table': 'sync_detail_data', 'time_field': 'updated_time'},
        ]

        for table_info in archive_tables:
            table_name = table_info['table']
            archive_table = table_name + archive_table_suffix
            time_field = table_info['time_field']

            try:
                # Create the archive table with the same schema if needed.
                # NOTE(review): on MySQL this DDL causes an implicit commit,
                # so the rollback below cannot undo it — confirm acceptable.
                create_archive_sql = f"""
                    CREATE TABLE IF NOT EXISTS {archive_table} 
                    LIKE {table_name}
                """
                cursor.execute(create_archive_sql)

                # Copy eligible rows into the archive table.
                insert_sql = f"""
                    INSERT INTO {archive_table} 
                    SELECT * FROM {table_name} 
                    WHERE {time_field} < %s
                """
                cursor.execute(insert_sql, (cutoff_str,))
                archived_count = cursor.rowcount

                if archived_count > 0:
                    # Remove the archived rows from the source table.
                    # Archived/deleted counts may differ if matching rows
                    # appear between the two statements; both are logged
                    # so a mismatch is visible.
                    delete_sql = f"""
                        DELETE FROM {table_name} 
                        WHERE {time_field} < %s
                    """
                    cursor.execute(delete_sql, (cutoff_str,))
                    deleted_count = cursor.rowcount

                    total_archived += archived_count
                    logger.info(f"[ARCHIVE] 表 {table_name} 归档 {archived_count} 条记录，删除 {deleted_count} 条记录")
                else:
                    logger.info(f"[ARCHIVE] 表 {table_name} 无需归档数据")

                # Commit the copy+delete pair for this table together.
                tgt_conn.commit()

            except Exception as table_error:
                error_count += 1
                error_msg = f"归档表 {table_name} 失败: {str(table_error)}"
                error_details.append(error_msg)
                logger.error(error_msg)
                # Guard the rollback so a dead connection does not abort
                # archiving of the remaining tables.
                try:
                    tgt_conn.rollback()
                except Exception:
                    logger.exception(f"[ARCHIVE] rollback failed for table {table_name}")

        duration = time.time() - start_time
        logger.info(f"[ARCHIVE] 归档完成，总计归档 {total_archived} 条记录，耗时 {duration:.2f}s")

        return {
            'archived_count': total_archived,
            'error_count': error_count,
            'error_details': error_details,
            'duration': duration,
            'cutoff_date': cutoff_str
        }

    except Exception as e:
        logger.exception(f"[ARCHIVE] 数据归档任务执行失败: {e}")
        return {
            'archived_count': total_archived,
            'error_count': error_count + 1,
            'error_details': error_details + [f"任务执行异常: {str(e)}"],
            'duration': time.time() - start_time,
            # keep the error-path result shape consistent with success
            'cutoff_date': cutoff_str
        }
    finally:
        # Release DB resources even when the task failed part-way.
        if cursor is not None:
            try:
                cursor.close()
            except Exception:
                pass
        if tgt_conn is not None:
            tgt_conn.close()