#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
RedFire 数据库备份恢复管理器
支持PostgreSQL、Redis和MongoDB的备份和恢复
"""

import os
import asyncio
import logging
import shutil
import subprocess
import gzip
import tarfile
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass
import json

# Logging setup.
# NOTE(review): calling basicConfig() at import time configures the root
# logger for the whole process; applications embedding this module may
# prefer to own logging configuration themselves.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


@dataclass
class BackupConfig:
    """Per-database backup policy: what kind of backup, where it is stored,
    and how long it is retained."""
    backup_type: str  # 'full', 'incremental' or 'differential'
    retention_days: int  # backups older than this many days are eligible for cleanup
    compression: bool  # gzip the backup artifact when True
    encryption: bool  # NOTE(review): flag is recorded but no encryption logic is visible in this file — confirm
    storage_path: str  # local directory that receives the backup artifacts
    remote_storage: Optional[Dict[str, Any]] = None  # optional off-site storage settings (unused in visible code)


@dataclass
class BackupInfo:
    """Descriptor of one completed backup; serialized to backup_metadata.json
    next to the backup artifact."""
    backup_id: str  # unique id, e.g. "pg_<db>_<type>_<YYYYMMDD_HHMMSS>"
    database_type: str  # 'postgresql', 'redis' or 'mongodb'
    database_name: str  # logical database / instance name
    backup_type: str  # 'full', 'incremental' or 'differential'
    file_path: str  # absolute path of the final (possibly compressed) artifact
    file_size: int  # size of the artifact in bytes
    created_at: datetime  # local timestamp at backup completion
    is_compressed: bool  # True when the artifact is gzip-compressed
    is_encrypted: bool  # mirrors BackupConfig.encryption
    checksum: str  # MD5 hex digest of the artifact as stored on disk
    metadata: Dict[str, Any]  # engine-specific extras (versions, counts, command line)


class DatabaseBackupManager:
    """Backup/restore manager for PostgreSQL, Redis and MongoDB.

    Backups are produced by shelling out to the native CLI tools
    (pg_dump/pg_restore, redis-cli, mongodump/mongorestore). Each backup is
    optionally gzip-compressed, checksummed (MD5), and described by a
    ``backup_metadata.json`` file written next to the backup artifact.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Args:
            config: connection settings keyed by database type
                ('postgresql' / 'redis' / 'mongodb'); each value is a dict
                with host/port/username/password entries.
        """
        self.config = config
        self.backup_configs = self._load_backup_configs()
        self.backup_history: List[BackupInfo] = []

    def _load_backup_configs(self) -> Dict[str, BackupConfig]:
        """Build the per-database backup policies (retention, paths, flags)."""
        # Redis keeps a shorter history; its RDB dumps are full snapshots.
        return {
            'postgresql': BackupConfig(
                backup_type='full',
                retention_days=30,
                compression=True,
                encryption=False,
                storage_path='/backup/postgresql'
            ),
            'redis': BackupConfig(
                backup_type='full',
                retention_days=7,
                compression=True,
                encryption=False,
                storage_path='/backup/redis'
            ),
            'mongodb': BackupConfig(
                backup_type='full',
                retention_days=30,
                compression=True,
                encryption=False,
                storage_path='/backup/mongodb'
            ),
        }

    async def _finalize_backup(self, raw_file: Path, config: BackupConfig,
                               backup_id: str, database_type: str,
                               database_name: str, backup_type: str,
                               metadata: Dict[str, Any]) -> BackupInfo:
        """Compress (if configured), checksum and register a finished backup.

        Shared tail of the three backup_* methods; the checksum is computed
        on the final artifact exactly as it is stored on disk.
        """
        if config.compression:
            final_file = await self._compress_file(raw_file)
            os.remove(raw_file)
        else:
            final_file = raw_file

        checksum = await self._calculate_checksum(final_file)

        backup_info = BackupInfo(
            backup_id=backup_id,
            database_type=database_type,
            database_name=database_name,
            backup_type=backup_type,
            file_path=str(final_file),
            file_size=final_file.stat().st_size,
            created_at=datetime.now(),
            is_compressed=config.compression,
            is_encrypted=config.encryption,
            checksum=checksum,
            metadata=metadata
        )
        await self._save_backup_metadata(backup_info)
        return backup_info

    async def backup_postgresql(self, database_name: str,
                                backup_type: str = 'full') -> BackupInfo:
        """Back up a PostgreSQL database with pg_dump (custom format).

        Args:
            database_name: database to dump.
            backup_type: label recorded in the metadata (the dump itself is
                always a full custom-format archive).

        Returns:
            BackupInfo describing the created backup.

        Raises:
            subprocess.CalledProcessError: if pg_dump exits non-zero.
        """
        logger.info(f"开始备份PostgreSQL数据库: {database_name}")

        config = self.backup_configs['postgresql']
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        backup_id = f"pg_{database_name}_{backup_type}_{timestamp}"

        backup_dir = Path(config.storage_path) / backup_id
        backup_dir.mkdir(parents=True, exist_ok=True)

        pg_config = self.config.get('postgresql', {})
        dump_file = backup_dir / f"{database_name}.sql"

        cmd = [
            'pg_dump',
            '-h', pg_config.get('host', 'localhost'),
            '-p', str(pg_config.get('port', 5432)),
            '-U', pg_config.get('username', 'postgres'),
            '-d', database_name,
            '--no-password',    # never prompt; auth comes from PGPASSWORD below
            '--verbose',
            '--format=custom',  # pg_restore-compatible archive format
            '--file', str(dump_file)
        ]

        # pg_dump reads the password from the environment, not from argv.
        env = os.environ.copy()
        env['PGPASSWORD'] = pg_config.get('password', '')

        try:
            subprocess.run(cmd, capture_output=True, text=True, env=env, check=True)
            logger.info(f"PostgreSQL备份完成: {dump_file}")

            backup_info = await self._finalize_backup(
                dump_file, config, backup_id, 'postgresql',
                database_name, backup_type,
                metadata={
                    'pg_version': await self._get_pg_version(),
                    'backup_command': ' '.join(cmd),
                    'tables_count': await self._get_tables_count(database_name)
                }
            )

            logger.info(f"PostgreSQL备份成功: {backup_id}")
            return backup_info

        except subprocess.CalledProcessError as e:
            logger.error(f"PostgreSQL备份失败: {e.stderr}")
            raise
        except Exception as e:
            logger.error(f"PostgreSQL备份过程出错: {e}")
            raise

    async def backup_redis(self, redis_name: str = 'default') -> BackupInfo:
        """Back up a Redis instance to an RDB snapshot.

        Tries BGSAVE plus copying the server-side dump.rdb first, and falls
        back to ``redis-cli --rdb`` (which streams the RDB to this host)
        when the local data directory is not accessible.
        """
        logger.info(f"开始备份Redis: {redis_name}")

        config = self.backup_configs['redis']
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        backup_id = f"redis_{redis_name}_{timestamp}"

        backup_dir = Path(config.storage_path) / backup_id
        backup_dir.mkdir(parents=True, exist_ok=True)

        redis_config = self.config.get('redis', {})

        # Connection options shared by both strategies. Options MUST precede
        # the command word: the original code appended '-a <password>' after
        # 'BGSAVE', which redis-cli treats as command arguments, breaking
        # authenticated backups.
        conn_opts = [
            '-h', redis_config.get('host', 'localhost'),
            '-p', str(redis_config.get('port', 6379)),
        ]
        if redis_config.get('password'):
            # NOTE(review): '-a' exposes the password in the process list;
            # consider the REDISCLI_AUTH environment variable instead.
            conn_opts.extend(['-a', redis_config['password']])

        try:
            # Strategy 1: ask the server to dump, then copy its dump.rdb.
            subprocess.run(['redis-cli', *conn_opts, 'BGSAVE'],
                           capture_output=True, text=True, check=True)

            # BGSAVE is asynchronous; give the background save time to finish.
            # TODO(review): poll LASTSAVE instead of a fixed sleep.
            await asyncio.sleep(2)

            redis_data_dir = '/var/lib/redis'  # assumes default local install
            dump_file = Path(redis_data_dir) / 'dump.rdb'
            backup_file = backup_dir / 'dump.rdb'

            if dump_file.exists():
                shutil.copy2(dump_file, backup_file)
            else:
                # Strategy 2: stream the RDB over the connection.
                backup_file = backup_dir / f"{redis_name}.rdb"
                subprocess.run(
                    ['redis-cli', *conn_opts, '--rdb', str(backup_file)],
                    check=True
                )

            logger.info(f"Redis备份完成: {backup_file}")

            backup_info = await self._finalize_backup(
                backup_file, config, backup_id, 'redis',
                redis_name, 'full',
                metadata={
                    'redis_version': await self._get_redis_version(),
                    'db_size': await self._get_redis_db_size()
                }
            )

            logger.info(f"Redis备份成功: {backup_id}")
            return backup_info

        except Exception as e:
            logger.error(f"Redis备份失败: {e}")
            raise

    async def backup_mongodb(self, database_name: str) -> BackupInfo:
        """Back up a MongoDB database with mongodump and tar the output tree."""
        logger.info(f"开始备份MongoDB数据库: {database_name}")

        config = self.backup_configs['mongodb']
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        backup_id = f"mongo_{database_name}_{timestamp}"

        backup_dir = Path(config.storage_path) / backup_id
        backup_dir.mkdir(parents=True, exist_ok=True)

        mongo_config = self.config.get('mongodb', {})

        cmd = [
            'mongodump',
            '--host', f"{mongo_config.get('host', 'localhost')}:{mongo_config.get('port', 27017)}",
            '--db', database_name,
            '--out', str(backup_dir)
        ]
        if mongo_config.get('username'):
            cmd.extend(['--username', mongo_config['username']])
        if mongo_config.get('password'):
            cmd.extend(['--password', mongo_config['password']])
        if mongo_config.get('auth_database'):
            cmd.extend(['--authenticationDatabase', mongo_config['auth_database']])

        try:
            subprocess.run(cmd, capture_output=True, text=True, check=True)
            logger.info(f"MongoDB备份完成: {backup_dir}")

            # mongodump writes a directory tree; pack it into one tar so the
            # shared compression/checksum pipeline sees a single file.
            archive_file = backup_dir.parent / f"{backup_id}.tar"
            with tarfile.open(archive_file, 'w') as tar:
                tar.add(backup_dir, arcname=backup_id)
            shutil.rmtree(backup_dir)

            backup_info = await self._finalize_backup(
                archive_file, config, backup_id, 'mongodb',
                database_name, 'full',
                metadata={
                    'mongo_version': await self._get_mongo_version(),
                    'collections_count': await self._get_mongo_collections_count(database_name)
                }
            )

            logger.info(f"MongoDB备份成功: {backup_id}")
            return backup_info

        except Exception as e:
            logger.error(f"MongoDB备份失败: {e}")
            raise

    async def restore_postgresql(self, backup_info: BackupInfo,
                                 target_database: Optional[str] = None) -> bool:
        """Restore a PostgreSQL backup with pg_restore.

        Args:
            backup_info: metadata of the backup to restore.
            target_database: restore into this database instead of the
                original one (defaults to the backed-up database).

        Returns:
            True on success, False on failure (errors are logged).
        """
        target_db = target_database or backup_info.database_name
        logger.info(f"开始恢复PostgreSQL数据库: {target_db}")

        restore_file = backup_info.file_path
        try:
            # Verify the stored artifact FIRST: the checksum was computed on
            # the (possibly compressed) file as written to disk. The original
            # code decompressed first and verified the decompressed copy, so
            # every compressed backup failed verification.
            if not await self._verify_checksum(backup_info.file_path,
                                               backup_info.checksum):
                raise ValueError("备份文件校验和验证失败")

            if backup_info.is_compressed:
                restore_file = await self._decompress_file(backup_info.file_path)

            pg_config = self.config.get('postgresql', {})
            cmd = [
                'pg_restore',
                '-h', pg_config.get('host', 'localhost'),
                '-p', str(pg_config.get('port', 5432)),
                '-U', pg_config.get('username', 'postgres'),
                '-d', target_db,
                '--verbose',
                '--clean',      # drop objects before recreating them...
                '--if-exists',  # ...without erroring when they are absent
                restore_file
            ]

            env = os.environ.copy()
            env['PGPASSWORD'] = pg_config.get('password', '')

            subprocess.run(cmd, capture_output=True, text=True, env=env, check=True)

            logger.info(f"PostgreSQL恢复成功: {target_db}")
            return True

        except Exception as e:
            logger.error(f"PostgreSQL恢复失败: {e}")
            return False
        finally:
            # Remove the temporary decompressed copy even on failure (the
            # original leaked it on the error path).
            if restore_file != backup_info.file_path and os.path.exists(restore_file):
                os.remove(restore_file)

    async def restore_redis(self, backup_info: BackupInfo) -> bool:
        """Restore a Redis RDB snapshot by replacing dump.rdb and restarting.

        WARNING: overwrites the server's current dataset; the previous
        dump.rdb is kept alongside as a .rdb.backup safety copy.

        Returns:
            True on success, False on failure (errors are logged).
        """
        logger.info(f"开始恢复Redis数据库: {backup_info.database_name}")

        restore_file = backup_info.file_path
        try:
            # Verify the stored artifact before decompressing (the checksum
            # was computed on the file as stored on disk).
            if not await self._verify_checksum(backup_info.file_path,
                                               backup_info.checksum):
                raise ValueError("备份文件校验和验证失败")

            if backup_info.is_compressed:
                restore_file = await self._decompress_file(backup_info.file_path)

            # Replace the server's dump.rdb directly.
            # NOTE(review): assumes a local Redis with the default data dir.
            redis_data_dir = '/var/lib/redis'
            target_file = Path(redis_data_dir) / 'dump.rdb'

            # Keep a safety copy of the current dataset.
            if target_file.exists():
                shutil.copy2(target_file, target_file.with_suffix('.rdb.backup'))

            shutil.copy2(restore_file, target_file)

            # Redis loads dump.rdb on startup; adjust for non-systemd setups.
            subprocess.run(['systemctl', 'restart', 'redis'], check=True)

            logger.info(f"Redis恢复成功: {backup_info.database_name}")
            return True

        except Exception as e:
            logger.error(f"Redis恢复失败: {e}")
            return False
        finally:
            if restore_file != backup_info.file_path and os.path.exists(restore_file):
                os.remove(restore_file)

    async def restore_mongodb(self, backup_info: BackupInfo,
                              target_database: Optional[str] = None) -> bool:
        """Restore a MongoDB backup with mongorestore (drops existing data).

        Returns:
            True on success, False on failure (errors are logged).
        """
        target_db = target_database or backup_info.database_name
        logger.info(f"开始恢复MongoDB数据库: {target_db}")

        restore_file = backup_info.file_path
        restore_dir: Optional[Path] = None
        try:
            # Verify the stored artifact before decompressing (see
            # restore_postgresql for rationale).
            if not await self._verify_checksum(backup_info.file_path,
                                               backup_info.checksum):
                raise ValueError("备份文件校验和验证失败")

            if backup_info.is_compressed:
                restore_file = await self._decompress_file(backup_info.file_path)

            restore_dir = Path(restore_file).parent / 'restore_temp'
            with tarfile.open(restore_file, 'r') as tar:
                # Refuse archive members that would escape restore_dir
                # ("tar slip" path traversal from a tampered backup).
                base = restore_dir.resolve()
                for member in tar.getmembers():
                    member_target = (restore_dir / member.name).resolve()
                    if member_target != base and base not in member_target.parents:
                        raise ValueError(f"Unsafe path in archive: {member.name}")
                tar.extractall(restore_dir)

            # mongodump laid files out as <backup_id>/<database_name>/...
            backup_data_dir = restore_dir / backup_info.backup_id / backup_info.database_name

            mongo_config = self.config.get('mongodb', {})
            cmd = [
                'mongorestore',
                '--host', f"{mongo_config.get('host', 'localhost')}:{mongo_config.get('port', 27017)}",
                '--db', target_db,
                '--drop',  # replace existing collections
                str(backup_data_dir)
            ]
            if mongo_config.get('username'):
                cmd.extend(['--username', mongo_config['username']])
            if mongo_config.get('password'):
                cmd.extend(['--password', mongo_config['password']])
            if mongo_config.get('auth_database'):
                cmd.extend(['--authenticationDatabase', mongo_config['auth_database']])

            subprocess.run(cmd, capture_output=True, text=True, check=True)

            logger.info(f"MongoDB恢复成功: {target_db}")
            return True

        except Exception as e:
            logger.error(f"MongoDB恢复失败: {e}")
            return False
        finally:
            # Clean up temporaries on both success and failure paths.
            if restore_dir is not None and restore_dir.exists():
                shutil.rmtree(restore_dir, ignore_errors=True)
            if restore_file != backup_info.file_path and os.path.exists(restore_file):
                os.remove(restore_file)

    async def cleanup_old_backups(self):
        """Delete backups older than each database's retention window."""
        logger.info("开始清理过期备份")

        for db_type, config in self.backup_configs.items():
            backup_dir = Path(config.storage_path)
            if not backup_dir.exists():
                continue

            cutoff_date = datetime.now() - timedelta(days=config.retention_days)

            for backup_path in backup_dir.iterdir():
                try:
                    backup_date = self._parse_backup_timestamp(backup_path.name)
                    if backup_date is None:
                        continue  # not a recognizable backup name; leave it alone
                    if backup_date < cutoff_date:
                        if backup_path.is_file():
                            backup_path.unlink()
                        else:
                            shutil.rmtree(backup_path)
                        logger.info(f"删除过期备份: {backup_path}")
                except Exception as e:
                    logger.warning(f"处理备份文件失败: {backup_path}, 错误: {e}")

    @staticmethod
    def _parse_backup_timestamp(name: str) -> Optional[datetime]:
        """Parse the trailing YYYYMMDD_HHMMSS timestamp of a backup name.

        Backup names end in two underscore-separated fields (date and time).
        The original cleanup used only the last field ('HHMMSS') against
        format '%Y%m%d_%H%M%S', so parsing always failed and no backup was
        ever purged.

        Returns:
            The parsed timestamp, or None when the name doesn't match.
        """
        stem = name.split('.')[0]  # strip .sql / .rdb / .tar / .gz suffixes
        parts = stem.split('_')
        if len(parts) < 2:
            return None
        try:
            return datetime.strptime('_'.join(parts[-2:]), '%Y%m%d_%H%M%S')
        except ValueError:
            return None

    async def list_backups(self, database_type: Optional[str] = None,
                           database_name: Optional[str] = None) -> List[BackupInfo]:
        """List known backups, newest first, optionally filtered.

        Args:
            database_type: restrict to 'postgresql' / 'redis' / 'mongodb'.
            database_name: restrict to a single database name.
        """
        backups: List[BackupInfo] = []

        for db_type, config in self.backup_configs.items():
            if database_type and db_type != database_type:
                continue

            backup_dir = Path(config.storage_path)
            if not backup_dir.exists():
                continue

            for metadata_file in backup_dir.glob('**/backup_metadata.json'):
                try:
                    with open(metadata_file, 'r', encoding='utf-8') as f:
                        data = json.load(f)

                    backup_info = BackupInfo(
                        backup_id=data['backup_id'],
                        database_type=data['database_type'],
                        database_name=data['database_name'],
                        backup_type=data['backup_type'],
                        file_path=data['file_path'],
                        file_size=data['file_size'],
                        created_at=datetime.fromisoformat(data['created_at']),
                        is_compressed=data['is_compressed'],
                        is_encrypted=data['is_encrypted'],
                        checksum=data['checksum'],
                        metadata=data['metadata']
                    )

                    if database_name and backup_info.database_name != database_name:
                        continue

                    backups.append(backup_info)

                except Exception as e:
                    logger.warning(f"读取备份元数据失败: {metadata_file}, 错误: {e}")

        # Newest first.
        backups.sort(key=lambda b: b.created_at, reverse=True)
        return backups

    async def _compress_file(self, file_path: Path) -> Path:
        """Gzip *file_path* to ``<name>.gz`` and return the new path."""
        compressed_path = file_path.with_suffix(file_path.suffix + '.gz')
        with open(file_path, 'rb') as f_in, gzip.open(compressed_path, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        return compressed_path

    async def _decompress_file(self, file_path: str) -> str:
        """Gunzip a ``.gz`` file next to itself; pass other files through."""
        path = Path(file_path)
        if path.suffix == '.gz':
            decompressed_path = path.with_suffix('')
            with gzip.open(path, 'rb') as f_in, open(decompressed_path, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            return str(decompressed_path)
        return str(path)

    async def _calculate_checksum(self, file_path: Path) -> str:
        """MD5 hex digest of the file contents, read in chunks.

        MD5 is used for integrity only, not security; changing the algorithm
        would invalidate checksums in existing backup metadata.
        """
        import hashlib

        digest = hashlib.md5()
        with open(file_path, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b""):
                digest.update(chunk)
        return digest.hexdigest()

    async def _verify_checksum(self, file_path: str, expected_checksum: str) -> bool:
        """Compare a file's MD5 against the recorded value."""
        actual_checksum = await self._calculate_checksum(Path(file_path))
        return actual_checksum == expected_checksum

    async def _save_backup_metadata(self, backup_info: BackupInfo):
        """Write backup_metadata.json next to the backup artifact."""
        metadata_dir = Path(backup_info.file_path).parent
        metadata_file = metadata_dir / 'backup_metadata.json'

        metadata = {
            'backup_id': backup_info.backup_id,
            'database_type': backup_info.database_type,
            'database_name': backup_info.database_name,
            'backup_type': backup_info.backup_type,
            'file_path': backup_info.file_path,
            'file_size': backup_info.file_size,
            'created_at': backup_info.created_at.isoformat(),
            'is_compressed': backup_info.is_compressed,
            'is_encrypted': backup_info.is_encrypted,
            'checksum': backup_info.checksum,
            'metadata': backup_info.metadata
        }

        with open(metadata_file, 'w', encoding='utf-8') as f:
            json.dump(metadata, f, indent=2, ensure_ascii=False)

    async def _get_pg_version(self) -> str:
        """Version string of the local psql client, or 'unknown'."""
        try:
            result = subprocess.run(['psql', '--version'],
                                    capture_output=True, text=True)
            return result.stdout.strip()
        except Exception:
            return 'unknown'

    async def _get_tables_count(self, database_name: str) -> int:
        """Table count of the database.

        TODO: query the database; currently a stub that returns 0.
        """
        return 0

    async def _get_redis_version(self) -> str:
        """Version string of the local redis-server binary, or 'unknown'."""
        try:
            result = subprocess.run(['redis-server', '--version'],
                                    capture_output=True, text=True)
            return result.stdout.strip()
        except Exception:
            return 'unknown'

    async def _get_redis_db_size(self) -> int:
        """Key count / size of the Redis database.

        TODO: query Redis (DBSIZE); currently a stub that returns 0.
        """
        return 0

    async def _get_mongo_version(self) -> str:
        """Version string of the local mongod binary, or 'unknown'."""
        try:
            result = subprocess.run(['mongod', '--version'],
                                    capture_output=True, text=True)
            return result.stdout.strip()
        except Exception:
            return 'unknown'

    async def _get_mongo_collections_count(self, database_name: str) -> int:
        """Collection count of the MongoDB database.

        TODO: query MongoDB; currently a stub that returns 0.
        """
        return 0


# 使用示例
async def main():
    """备份管理器使用示例"""
    # 配置
    config = {
        'postgresql': {
            'host': 'localhost',
            'port': 5432,
            'username': 'redfire_prod',
            'password': 'your_password'
        },
        'redis': {
            'host': 'localhost',
            'port': 6379,
            'password': 'your_password'
        },
        'mongodb': {
            'host': 'localhost',
            'port': 27017,
            'username': 'admin',
            'password': 'your_password'
        }
    }
    
    # 创建备份管理器
    backup_manager = DatabaseBackupManager(config)
    
    # 备份PostgreSQL
    pg_backup = await backup_manager.backup_postgresql('redfire_stage2')
    print(f"PostgreSQL备份完成: {pg_backup.backup_id}")
    
    # 备份Redis
    redis_backup = await backup_manager.backup_redis()
    print(f"Redis备份完成: {redis_backup.backup_id}")
    
    # 列出所有备份
    backups = await backup_manager.list_backups()
    print(f"共有 {len(backups)} 个备份文件")
    
    # 清理过期备份
    await backup_manager.cleanup_old_backups()


if __name__ == "__main__":
    asyncio.run(main())
