# -*- coding: utf-8 -*-
"""
数据备份服务

按照模块文档2.3.1节要求实现数据备份功能：
- 自动备份配置和执行
- 手动备份管理
- 备份文件管理和验证
"""

import asyncio
import gzip
import hashlib
import json
import logging
import os
import shlex
import shutil
import subprocess
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional

from sqlalchemy import text
from sqlalchemy.orm import Session

from ..core.database import get_db, engine
from ..core.exceptions import BusinessError, ValidationError
from ..models.system_config import SystemConfig, ConfigCategory

logger = logging.getLogger(__name__)


class BackupService:
    """数据备份服务类"""

    def __init__(self, db: Session):
        """Bind the service to a database session and prepare storage.

        Resolves the backup directory from system configuration and creates
        it eagerly, so construction may raise BusinessError when the
        directory cannot be created.
        """
        self.db = db
        self.backup_base_path = self._get_backup_path()
        self._ensure_backup_directory()

    def _get_backup_path(self) -> Path:
        """Resolve the backup storage directory as an absolute path.

        Reads the "backup.storage_path" system config entry; falls back to
        ``data/backups`` when the entry is missing or empty.
        """
        row = (
            self.db.query(SystemConfig)
            .filter(SystemConfig.config_key == "backup.storage_path")
            .first()
        )
        configured = row.config_value if row else None
        return Path(configured or "data/backups").resolve()

    def _ensure_backup_directory(self):
        """确保备份目录存在"""
        try:
            self.backup_base_path.mkdir(parents=True, exist_ok=True)
            logger.info(f"备份目录已准备: {self.backup_base_path}")
        except Exception as e:
            logger.error(f"创建备份目录失败: {e}")
            raise BusinessError(f"无法创建备份目录: {str(e)}")

    async def create_backup(
        self,
        backup_type: str = "full",
        description: str = "",
        operator: str = "system",
        tables: Optional[List[str]] = None
    ) -> Dict[str, Any]:
        """
        Create a database backup and persist its metadata record.

        Args:
            backup_type: "full" or "incremental".
            description: free-text note stored with the record.
            operator: user (or "system") who triggered the backup.
            tables: optional subset of tables; None means all tables.

        Returns:
            Dict describing the created backup (id, file, size, hash, ...).

        Raises:
            BusinessError: when the backup cannot be created.
        """
        backup_file_path: Optional[Path] = None
        try:
            # Generate backup ID and target file name
            backup_id = self._generate_backup_id()
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            backup_filename = f"backup_{backup_id}_{timestamp}.sql"

            if self._get_config_bool("backup.compression", True):
                backup_filename += ".gz"

            backup_file_path = self.backup_base_path / backup_filename

            # Run the dump
            if backup_type == "full":
                success = await self._create_full_backup(backup_file_path, tables)
            else:
                success = await self._create_incremental_backup(backup_file_path, tables)

            if not success:
                raise BusinessError("备份执行失败")

            # Capture size + hash so the file can be verified later
            file_size = backup_file_path.stat().st_size
            file_hash = self._calculate_file_hash(backup_file_path)

            backup_info = {
                "backup_id": backup_id,
                "filename": backup_filename,
                "file_path": str(backup_file_path),
                "backup_type": backup_type,
                "file_size": file_size,
                "file_hash": file_hash,
                "description": description,
                "tables": tables or "all",
                "created_at": datetime.now().isoformat(),
                "created_by": operator,
                "status": "completed"
            }

            await self._save_backup_record(backup_info)

            logger.info(f"备份创建成功: {backup_id}, 文件: {backup_filename}")
            return backup_info

        except Exception as e:
            # Don't leave a partially written dump behind on failure
            self._discard_partial_backup(backup_file_path)
            logger.error(f"创建备份失败: {e}", exc_info=True)
            if isinstance(e, BusinessError):
                # keep the original error instead of double-wrapping it
                raise
            raise BusinessError(f"创建备份失败: {str(e)}")

    def _discard_partial_backup(self, backup_file_path: Optional[Path]) -> None:
        """Best-effort removal of a partially written backup file."""
        if backup_file_path is None:
            return
        try:
            if backup_file_path.exists():
                backup_file_path.unlink()
        except OSError:
            # cleanup failure must not mask the original error
            pass

    async def _create_full_backup(
        self, 
        backup_file_path: Path, 
        tables: Optional[List[str]] = None
    ) -> bool:
        """创建全量备份"""
        try:
            # 获取数据库连接信息
            db_url = str(engine.url)
            
            # 构建mysqldump命令
            cmd = self._build_mysqldump_command(db_url, tables)
            
            # 执行备份
            with open(backup_file_path, 'wb') as f:
                if self._get_config_bool("backup.compression", True):
                    # 压缩备份
                    process = await asyncio.create_subprocess_shell(
                        cmd,
                        stdout=asyncio.subprocess.PIPE,
                        stderr=asyncio.subprocess.PIPE
                    )
                    
                    stdout, stderr = await process.communicate()
                    
                    if process.returncode != 0:
                        logger.error(f"mysqldump失败: {stderr.decode()}")
                        return False
                    
                    # 压缩数据
                    compressed_data = gzip.compress(stdout)
                    f.write(compressed_data)
                else:
                    # 不压缩
                    process = await asyncio.create_subprocess_shell(
                        cmd,
                        stdout=f,
                        stderr=asyncio.subprocess.PIPE
                    )
                    
                    await process.communicate()
                    
                    if process.returncode != 0:
                        return False
            
            return True
            
        except Exception as e:
            logger.error(f"全量备份执行失败: {e}")
            return False

    async def _create_incremental_backup(
        self, 
        backup_file_path: Path, 
        tables: Optional[List[str]] = None
    ) -> bool:
        """创建增量备份"""
        try:
            # 获取上次备份时间
            last_backup_time = await self._get_last_backup_time()
            
            if not last_backup_time:
                # 如果没有上次备份，执行全量备份
                return await self._create_full_backup(backup_file_path, tables)
            
            # TODO: 实现增量备份逻辑
            # 这里简化为全量备份
            logger.warning("增量备份功能暂未实现，执行全量备份")
            return await self._create_full_backup(backup_file_path, tables)
            
        except Exception as e:
            logger.error(f"增量备份执行失败: {e}")
            return False

    def _build_mysqldump_command(self, db_url: str, tables: Optional[List[str]] = None) -> str:
        """Build the mysqldump shell command string.

        mysqldump argument order matters: options first, then the database
        name, then (optionally) the table names — the original put the
        tables before the connection options and database, which made
        mysqldump misinterpret them.

        Args:
            db_url: SQLAlchemy engine URL (TODO: parse host/user/password/db
                from it instead of the hard-coded values below).
            tables: optional subset of tables to dump.

        Returns:
            Command as a single string (executed via a shell).
        """
        cmd_parts = [
            "mysqldump",
            "--single-transaction",
            "--routines",
            "--triggers",
            "--default-character-set=utf8mb4",
            # TODO: derive connection parameters from db_url
            "-h", "localhost",
            "-u", "root",
            # mysqldump requires the password attached to -p (no space);
            # "-p password" would prompt and treat "password" as the db name
            "-ppassword",
            "crm_db",
        ]

        # Table names come after the database name; quote them because the
        # joined string is executed through the shell.
        if tables:
            cmd_parts.extend(shlex.quote(t) for t in tables)

        return " ".join(cmd_parts)

    def get_backup_list(
        self, 
        page: int = 1, 
        size: int = 20,
        backup_type: Optional[str] = None
    ) -> Dict[str, Any]:
        """获取备份列表"""
        try:
            backup_records = self._load_backup_records()
            
            # 筛选备份类型
            if backup_type:
                backup_records = [
                    record for record in backup_records 
                    if record.get("backup_type") == backup_type
                ]
            
            # 分页
            total = len(backup_records)
            start = (page - 1) * size
            end = start + size
            page_records = backup_records[start:end]
            
            return {
                "items": page_records,
                "total": total,
                "page": page,
                "size": size
            }
            
        except Exception as e:
            logger.error(f"获取备份列表失败: {e}")
            raise BusinessError(f"获取备份列表失败: {str(e)}")

    async def delete_backup(self, backup_id: str, operator: str) -> bool:
        """删除备份"""
        try:
            backup_records = self._load_backup_records()
            backup_record = None
            
            # 查找备份记录
            for record in backup_records:
                if record.get("backup_id") == backup_id:
                    backup_record = record
                    break
            
            if not backup_record:
                raise ValidationError(f"备份记录不存在: {backup_id}")
            
            # 删除备份文件
            backup_file_path = Path(backup_record["file_path"])
            if backup_file_path.exists():
                backup_file_path.unlink()
            
            # 从记录中移除
            backup_records.remove(backup_record)
            self._save_backup_records(backup_records)
            
            logger.info(f"备份删除成功: {backup_id}, 操作人: {operator}")
            return True
            
        except Exception as e:
            logger.error(f"删除备份失败: {e}")
            raise BusinessError(f"删除备份失败: {str(e)}")

    async def cleanup_old_backups(self) -> Dict[str, Any]:
        """清理过期备份"""
        try:
            retention_days = self._get_config_int("backup.retention_days", 30)
            cutoff_date = datetime.now() - timedelta(days=retention_days)
            
            backup_records = self._load_backup_records()
            deleted_count = 0
            
            # 查找过期备份
            remaining_records = []
            for record in backup_records:
                created_at = datetime.fromisoformat(record["created_at"])
                if created_at < cutoff_date:
                    # 删除过期备份
                    backup_file_path = Path(record["file_path"])
                    if backup_file_path.exists():
                        backup_file_path.unlink()
                    deleted_count += 1
                else:
                    remaining_records.append(record)
            
            # 保存更新后的记录
            self._save_backup_records(remaining_records)
            
            logger.info(f"清理过期备份完成，删除了 {deleted_count} 个备份")
            return {
                "deleted_count": deleted_count,
                "retention_days": retention_days
            }
            
        except Exception as e:
            logger.error(f"清理过期备份失败: {e}")
            raise BusinessError(f"清理过期备份失败: {str(e)}")

    def _generate_backup_id(self) -> str:
        """Generate a backup ID ("BK" + timestamp).

        Includes microseconds so two backups created within the same
        second do not collide (the original used second resolution, which
        produced duplicate IDs under rapid calls).
        """
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f")
        return f"BK{timestamp}"

    def _calculate_file_hash(self, file_path: Path) -> str:
        """计算文件哈希值"""
        hash_md5 = hashlib.md5()
        with open(file_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    async def _save_backup_record(self, backup_info: Dict[str, Any]):
        """保存备份记录"""
        try:
            records = self._load_backup_records()
            records.append(backup_info)
            self._save_backup_records(records)
        except Exception as e:
            logger.error(f"保存备份记录失败: {e}")

    def _load_backup_records(self) -> List[Dict[str, Any]]:
        """加载备份记录"""
        records_file = self.backup_base_path / "backup_records.json"
        try:
            if records_file.exists():
                with open(records_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            return []
        except Exception as e:
            logger.error(f"加载备份记录失败: {e}")
            return []

    def _save_backup_records(self, records: List[Dict[str, Any]]):
        """保存备份记录"""
        records_file = self.backup_base_path / "backup_records.json"
        try:
            with open(records_file, 'w', encoding='utf-8') as f:
                json.dump(records, f, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"保存备份记录失败: {e}")

    async def _get_last_backup_time(self) -> Optional[datetime]:
        """获取上次备份时间"""
        try:
            records = self._load_backup_records()
            if not records:
                return None
            
            # 按时间排序，获取最新备份
            records.sort(key=lambda x: x["created_at"], reverse=True)
            last_record = records[0]
            return datetime.fromisoformat(last_record["created_at"])
        except Exception:
            return None

    def _get_config_bool(self, key: str, default: bool = False) -> bool:
        """Read a boolean system config value, falling back to *default*.

        Truthy values are the strings "true", "1" and "yes" (lowercased);
        any lookup failure yields the default.
        """
        try:
            row = (
                self.db.query(SystemConfig)
                .filter(SystemConfig.config_key == key)
                .first()
            )
            if not row:
                return default
            return row.config_value.lower() in ("true", "1", "yes")
        except Exception:
            return default

    def _get_config_int(self, key: str, default: int = 0) -> int:
        """Read an integer system config value, falling back to *default*.

        Any lookup or parse failure (missing row, non-numeric value,
        broken session) yields the default.
        """
        try:
            row = (
                self.db.query(SystemConfig)
                .filter(SystemConfig.config_key == key)
                .first()
            )
            return int(row.config_value) if row else default
        except Exception:
            return default

    # ==================== 数据恢复功能 ====================

    async def restore_backup(
        self,
        backup_id: str,
        operator: str,
        restore_options: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Restore the database from a previously created backup.

        Args:
            backup_id: ID of the backup to restore.
            operator: user performing the restore.
            restore_options: optional settings (e.g. "restore_type").

        Returns:
            The restore record describing the attempt and its outcome.

        Raises:
            ValidationError: when the backup record/file is missing or the
                file fails integrity validation (the original masked these
                as BusinessError).
            BusinessError: when pre-checks or the restore itself fail.
        """
        options = restore_options or {}
        try:
            # locate and validate the backup before touching the database
            backup_record = self._find_backup_record(backup_id)
            if not backup_record:
                raise ValidationError(f"备份记录不存在: {backup_id}")

            backup_file_path = Path(backup_record["file_path"])
            if not backup_file_path.exists():
                raise ValidationError(f"备份文件不存在: {backup_file_path}")

            if not await self._validate_backup_file(backup_record):
                raise ValidationError("备份文件验证失败，文件可能已损坏")

            restore_checks = await self._pre_restore_checks(backup_record, restore_options)
            if not restore_checks["success"]:
                raise BusinessError(f"恢复前检查失败: {restore_checks['message']}")

            restore_record = {
                "restore_id": self._generate_restore_id(),
                "backup_id": backup_id,
                "backup_filename": backup_record["filename"],
                "restore_type": options.get("restore_type", "full"),
                "started_at": datetime.now().isoformat(),
                "status": "in_progress",
                "operator": operator
            }

            restore_success = await self._execute_restore(backup_record, restore_options)

            restore_record.update({
                "completed_at": datetime.now().isoformat(),
                "status": "completed" if restore_success else "failed",
                "success": restore_success
            })
            await self._save_restore_record(restore_record)

            if restore_success:
                logger.info(f"数据恢复成功: {backup_id}, 操作人: {operator}")
            else:
                logger.error(f"数据恢复失败: {backup_id}")

            return restore_record

        except (ValidationError, BusinessError) as e:
            # keep the original error type so callers can distinguish a
            # bad request from an internal failure
            logger.error(f"恢复备份失败: {e}", exc_info=True)
            raise
        except Exception as e:
            logger.error(f"恢复备份失败: {e}", exc_info=True)
            raise BusinessError(f"恢复备份失败: {str(e)}")

    async def validate_backup(self, backup_id: str) -> Dict[str, Any]:
        """验证备份文件完整性"""
        try:
            backup_record = self._find_backup_record(backup_id)
            if not backup_record:
                raise ValidationError(f"备份记录不存在: {backup_id}")
            
            result = await self._validate_backup_file(backup_record)
            
            return {
                "backup_id": backup_id,
                "valid": result,
                "message": "备份文件验证成功" if result else "备份文件验证失败",
                "validated_at": datetime.now().isoformat()
            }
            
        except Exception as e:
            logger.error(f"验证备份失败: {e}")
            raise BusinessError(f"验证备份失败: {str(e)}")

    def get_restore_history(
        self, 
        page: int = 1, 
        size: int = 20
    ) -> Dict[str, Any]:
        """获取恢复历史记录"""
        try:
            restore_records = self._load_restore_records()
            
            # 分页
            total = len(restore_records)
            start = (page - 1) * size
            end = start + size
            page_records = restore_records[start:end]
            
            return {
                "items": page_records,
                "total": total,
                "page": page,
                "size": size
            }
            
        except Exception as e:
            logger.error(f"获取恢复历史失败: {e}")
            raise BusinessError(f"获取恢复历史失败: {str(e)}")

    async def _validate_backup_file(self, backup_record: Dict[str, Any]) -> bool:
        """验证备份文件完整性"""
        try:
            backup_file_path = Path(backup_record["file_path"])
            
            # 检查文件是否存在
            if not backup_file_path.exists():
                return False
            
            # 验证文件大小
            current_size = backup_file_path.stat().st_size
            expected_size = backup_record.get("file_size", 0)
            if current_size != expected_size:
                logger.warning(f"备份文件大小不匹配: 期望{expected_size}, 实际{current_size}")
                return False
            
            # 验证文件哈希
            current_hash = self._calculate_file_hash(backup_file_path)
            expected_hash = backup_record.get("file_hash", "")
            if current_hash != expected_hash:
                logger.warning(f"备份文件哈希不匹配: 期望{expected_hash}, 实际{current_hash}")
                return False
            
            return True
            
        except Exception as e:
            logger.error(f"验证备份文件失败: {e}")
            return False

    async def _pre_restore_checks(
        self, 
        backup_record: Dict[str, Any], 
        restore_options: Optional[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """恢复前检查"""
        try:
            checks = {
                "storage_space": True,
                "database_connection": True,
                "permission": True,
                "version_compatibility": True
            }
            
            # 检查存储空间
            backup_size = backup_record.get("file_size", 0)
            # TODO: 实际检查磁盘空间
            
            # 检查数据库连接
            try:
                self.db.execute(text("SELECT 1"))
            except Exception:
                checks["database_connection"] = False
            
            # 检查版本兼容性
            # TODO: 实现版本兼容性检查
            
            success = all(checks.values())
            message = "所有检查通过" if success else f"检查失败: {[k for k, v in checks.items() if not v]}"
            
            return {
                "success": success,
                "message": message,
                "checks": checks
            }
            
        except Exception as e:
            return {
                "success": False,
                "message": f"检查过程异常: {str(e)}",
                "checks": {}
            }

    async def _execute_restore(
        self,
        backup_record: Dict[str, Any],
        restore_options: Optional[Dict[str, Any]]
    ) -> bool:
        """Replay a SQL backup file into the database via the mysql client.

        Args:
            backup_record: stored metadata of the backup to restore
                ("file_path" and "filename" are used here).
            restore_options: accepted but currently unused.

        Returns:
            True when the mysql process exits with code 0, else False
            (failures are logged, never raised).
        """
        try:
            backup_file_path = Path(backup_record["file_path"])
            
            # Build the mysql restore command (executed through the shell)
            cmd = self._build_mysql_restore_command()
            
            # Read the backup file content; a ".gz" filename suffix marks a
            # gzip-compressed dump that must be decompressed first.
            # NOTE(review): the whole dump is loaded into memory and then
            # re-encoded below — very large dumps may need streaming instead.
            if backup_record["filename"].endswith(".gz"):
                # decompress transparently while reading as text
                with gzip.open(backup_file_path, 'rt', encoding='utf-8') as f:
                    sql_content = f.read()
            else:
                with open(backup_file_path, 'r', encoding='utf-8') as f:
                    sql_content = f.read()
            
            # Feed the SQL to the mysql client through stdin
            process = await asyncio.create_subprocess_shell(
                cmd,
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )
            
            stdout, stderr = await process.communicate(input=sql_content.encode())
            
            if process.returncode != 0:
                logger.error(f"数据恢复执行失败: {stderr.decode()}")
                return False
            
            return True
            
        except Exception as e:
            logger.error(f"执行数据恢复失败: {e}")
            return False

    def _build_mysql_restore_command(self) -> str:
        """Build the mysql client command used to replay a SQL dump.

        Returns:
            Command as a single string (executed via
            ``asyncio.create_subprocess_shell``).
        """
        cmd_parts = [
            "mysql",
            "--default-character-set=utf8mb4",
            # TODO: derive connection parameters from the SQLAlchemy engine URL
            "-h", "localhost",
            "-u", "root",
            # mysql requires the password attached to -p (no space);
            # "-p password" would prompt and treat "password" as the db name
            "-ppassword",
            "crm_db",
        ]

        return " ".join(cmd_parts)

    def _find_backup_record(self, backup_id: str) -> Optional[Dict[str, Any]]:
        """查找备份记录"""
        try:
            records = self._load_backup_records()
            for record in records:
                if record.get("backup_id") == backup_id:
                    return record
            return None
        except Exception:
            return None

    def _generate_restore_id(self) -> str:
        """Generate a restore ID ("RT" + timestamp).

        Includes microseconds so two restores started within the same
        second do not collide (the original used second resolution).
        """
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f")
        return f"RT{timestamp}"

    async def _save_restore_record(self, restore_record: Dict[str, Any]):
        """保存恢复记录"""
        try:
            records = self._load_restore_records()
            records.append(restore_record)
            self._save_restore_records(records)
        except Exception as e:
            logger.error(f"保存恢复记录失败: {e}")

    def _load_restore_records(self) -> List[Dict[str, Any]]:
        """加载恢复记录"""
        records_file = self.backup_base_path / "restore_records.json"
        try:
            if records_file.exists():
                with open(records_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            return []
        except Exception as e:
            logger.error(f"加载恢复记录失败: {e}")
            return []

    def _save_restore_records(self, records: List[Dict[str, Any]]):
        """保存恢复记录"""
        records_file = self.backup_base_path / "restore_records.json"
        try:
            with open(records_file, 'w', encoding='utf-8') as f:
                json.dump(records, f, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"保存恢复记录失败: {e}")