"""
PostgreSQL增强备份模块
包含完整的压缩、重删、加密处理流程
"""

import hashlib
import json
import logging
import os
from concurrent.futures import ThreadPoolExecutor

from modules import CompressionModule, DeduplicationModule, EncryptionModule

LOGGER = logging.getLogger(__name__)

class BackupManager:
    """Orchestrate a compress -> deduplicate -> encrypt backup pipeline.

    Each backed-up file produces encrypted content blocks under
    ``backup_dir/blocks`` (sharded into two levels by hash prefix) and a
    JSON manifest under ``backup_dir/manifests`` describing how to
    reassemble the original file.
    """

    def __init__(self, config):
        """
        Initialize the backup manager.

        :param config: configuration dict containing:
            - backup_dir: backup storage directory
            - compression_alg: compression algorithm (gzip/lzma/zlib)
            - dedup_db_path: deduplication database path
            - encryption_key: encryption key (bytes)
        """
        # Processing pipeline modules (project-provided implementations).
        self.compression = CompressionModule(config['compression_alg'])
        self.deduplication = DeduplicationModule(
            config['dedup_db_path'],
            block_size=4 * 1024 * 1024  # 4 MB chunks
        )
        self.encryption = EncryptionModule(config['encryption_key'])
        self.backup_dir = config['backup_dir']

        # Create the required directory layout up front.
        os.makedirs(os.path.join(self.backup_dir, 'blocks'), exist_ok=True)
        os.makedirs(os.path.join(self.backup_dir, 'manifests'), exist_ok=True)

        LOGGER.debug("BackupManager initialized")

    def _store_block(self, block):
        """Encrypt *block* and store it content-addressed; return its hash.

        The SHA-256 of the *plaintext* block is the address. Storage path
        is backup_dir/blocks/ab/cd/abcdef... — the two-level fan-out keeps
        directory entry counts manageable.

        :param block: plaintext block bytes
        :return: hex SHA-256 digest of the plaintext block
        """
        block_hash = hashlib.sha256(block).hexdigest()
        storage_path = os.path.join(
            self.backup_dir,
            'blocks',
            block_hash[:2],
            block_hash[2:4],
            block_hash
        )
        os.makedirs(os.path.dirname(storage_path), exist_ok=True)
        with open(storage_path, 'wb') as f:
            f.write(self.encryption.encrypt(block))
        return block_hash

    def _write_manifest(self, file_path, original_size, block_hashes, dedup_rate):
        """Serialize the restore manifest for *file_path* as JSON.

        NOTE(review): the manifest name is the source basename, so two
        source files sharing a basename overwrite each other's manifest —
        confirm whether callers guarantee unique basenames.

        :param file_path: original source path (recorded in the manifest)
        :param original_size: size in bytes of the uncompressed source
        :param block_hashes: ordered hashes of the file's unique blocks
        :param dedup_rate: deduplication rate reported by the dedup module
        """
        manifest = {
            'original_path': file_path,
            'compression': self.compression.algorithm,
            # presumably matches EncryptionModule's cipher — verify
            'encryption': 'AES-256-CBC',
            'block_hashes': block_hashes,
            'original_size': original_size,
            'dedup_rate': dedup_rate,
        }
        manifest_path = os.path.join(
            self.backup_dir,
            'manifests',
            os.path.basename(file_path) + '.json'
        )
        with open(manifest_path, 'w') as f:
            json.dump(manifest, f)

    def _process_single_file(self, file_path):
        """
        Run the full backup pipeline for one file (internal method).

        Reads the file, compresses it, splits and deduplicates the
        compressed stream into blocks, encrypts and stores each unique
        block, then writes a JSON manifest.

        :param file_path: path of the file to back up
        :return: True on success, False on any failure. Errors are logged
                 rather than raised so one bad file does not abort the batch.
        """
        try:
            # 1. Read raw contents. Whole file is held in memory —
            #    assumes inputs fit in RAM; TODO confirm for large DBs.
            with open(file_path, 'rb') as f:
                raw_data = f.read()

            # 2. Compress before deduplication.
            compressed = self.compression.compress(raw_data)

            # 3. Split into blocks and deduplicate.
            blocks = self.deduplication.split_into_blocks(compressed)
            unique_blocks, dedup_rate = self.deduplication.process_blocks(blocks)

            # 4. Encrypt and store each unique block, content-addressed.
            block_hashes = [self._store_block(block) for block in unique_blocks]

            # 5. Write the manifest describing how to restore the file.
            #    (Requires the module-level `import json`; previously the
            #    missing import made every backup fail with NameError.)
            self._write_manifest(file_path, len(raw_data), block_hashes, dedup_rate)

            LOGGER.info(
                "Successfully backed up %s | Compression: %s | Dedup rate: %.2f%%",
                file_path, self.compression.algorithm, dedup_rate)
            return True
        except Exception:
            # Boundary handler: log with traceback and report failure so
            # execute_backup can continue with the remaining files.
            LOGGER.exception("Failed to backup %s", file_path)
            return False

    def execute_backup(self, source_paths):
        """
        Back up every path in *source_paths*, in parallel.

        :param source_paths: list of file paths to back up
        :return: number of files backed up successfully
        """
        # Thread pool is appropriate: the work is dominated by file I/O.
        with ThreadPoolExecutor(max_workers=4) as executor:
            futures = [executor.submit(self._process_single_file, path)
                       for path in source_paths]
            success_count = sum(1 for future in futures if future.result())

        LOGGER.info("Backup completed | Total: %d | Success: %d",
                    len(source_paths), success_count)
        return success_count