#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
文本分析结果管理器模块
负责管理和存储文本分析结果
"""

import os
import json
import logging
import sqlite3
import threading
import uuid
from typing import Dict, Any, List, Optional, Union
from datetime import datetime
from pathlib import Path

# Logging setup (import-time side effect: configures the root logger).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)


class TextAnalysisResultManager:
    """
    文本分析结果管理器
    提供文本分析结果的存储、检索、更新和删除功能
    """
    
    def __init__(self, 
                 storage_path: str = None,
                 storage_type: str = "file",  # "file" or "sqlite"
                 config_path: str = None):
        """
        Initialize the text-analysis result manager.

        Args:
            storage_path: directory where results are stored
            storage_type: storage backend kind ("file" or "sqlite")
            config_path: optional JSON configuration file path
        """
        self._config = self._load_config(config_path)

        # Explicit argument wins over the configured backend kind.
        self._storage_type = storage_type or self._config.get("storage_type", "file")

        # Resolve the storage directory: explicit argument > config > default.
        if storage_path:
            self._storage_path = storage_path
        else:
            project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            fallback = os.path.join(project_root, "data", "text_analysis")
            self._storage_path = self._config.get("storage_path", fallback)

        os.makedirs(self._storage_path, exist_ok=True)

        # Reentrant lock guarding storage access and cache mutation.
        self._lock = threading.RLock()

        self._init_storage()

        # Bounded in-memory cache of recently used full records.
        self._cache = {}
        self._cache_size = self._config.get("cache_size", 100)

        logger.info(f"文本分析结果管理器初始化完成，存储类型: {self._storage_type}")
        logger.info(f"存储路径: {self._storage_path}")
    
    def _load_config(self, config_path: str = None) -> Dict[str, Any]:
        """
        Build the effective configuration: built-in defaults, optionally
        overridden by a JSON config file.

        Args:
            config_path: path to a JSON configuration file (may be None)

        Returns:
            Dict[str, Any]: effective configuration dictionary
        """
        project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
        config: Dict[str, Any] = {
            "storage_type": "file",
            "storage_path": os.path.join(project_root, "data", "text_analysis"),
            "cache_size": 100,
            "file_extension": ".json",
            "db_name": "text_analysis.db",
            "backup_enabled": True,
            "backup_interval": 24,  # hours
            "max_history": 1000,
        }

        # No file (or no path): defaults stand as-is.
        if not (config_path and os.path.exists(config_path)):
            return config

        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                config.update(json.load(f))
            logger.info(f"配置文件加载成功: {config_path}")
        except Exception as e:
            logger.error(f"加载配置文件失败: {str(e)}")

        return config
    
    def _init_storage(self):
        """
        Initialize the storage backend: SQLite schema + indexes, or the
        file-backend directory layout.

        Fix: the backups directory is now created for BOTH backends.
        Previously it only existed for file storage, so create_backup()
        raised AttributeError (missing `_backups_dir`) under SQLite storage.
        Also closes the schema-setup connection in a finally block.
        """
        # create_backup() needs this regardless of the backend kind.
        self._backups_dir = os.path.join(self._storage_path, "backups")
        os.makedirs(self._backups_dir, exist_ok=True)

        if self._storage_type == "sqlite":
            db_path = os.path.join(self._storage_path, self._config.get("db_name", "text_analysis.db"))
            self._db_path = db_path

            with self._lock:
                conn = sqlite3.connect(db_path)
                try:
                    cursor = conn.cursor()

                    # One row per saved analysis result; JSON payloads are
                    # stored as TEXT columns.
                    cursor.execute('''
                    CREATE TABLE IF NOT EXISTS text_analysis_results (
                        id TEXT PRIMARY KEY,
                        video_id TEXT,
                        analysis_type TEXT,
                        analysis_time TEXT,
                        analysis_data TEXT,
                        confidence REAL,
                        source_type TEXT,
                        source_text TEXT,
                        metadata TEXT,
                        created_at TEXT,
                        updated_at TEXT
                    )
                    ''')

                    # Indexes for the common lookup/ordering patterns.
                    cursor.execute('CREATE INDEX IF NOT EXISTS idx_video_id ON text_analysis_results(video_id)')
                    cursor.execute('CREATE INDEX IF NOT EXISTS idx_analysis_type ON text_analysis_results(analysis_type)')
                    cursor.execute('CREATE INDEX IF NOT EXISTS idx_created_at ON text_analysis_results(created_at)')

                    conn.commit()
                finally:
                    conn.close()

                logger.info(f"SQLite数据库初始化完成: {db_path}")
        else:
            # File storage: per-video result directories live under "results".
            self._results_dir = os.path.join(self._storage_path, "results")
            os.makedirs(self._results_dir, exist_ok=True)

            logger.info(f"文件存储初始化完成: {self._results_dir}")
    
    def save_result(self, 
                   result: Dict[str, Any],
                   video_id: str = None,
                   analysis_type: str = "general",
                   metadata: Dict[str, Any] = None) -> str:
        """
        Save an analysis result and return its generated id.

        Args:
            result: analysis payload dictionary
            video_id: owning video id (None groups it under "unknown")
            analysis_type: kind of analysis (e.g. "sentiment", "general")
            metadata: optional extras; "source_type" and "source_text"
                are extracted into dedicated record fields

        Returns:
            str: the generated result id (UUID4 string)

        Fix: the source_text extraction no longer raises TypeError when
        metadata contains ``source_text=None`` (the old one-line ternary
        called len() on the raw value).
        """
        with self._lock:
            result_id = str(uuid.uuid4())
            timestamp = datetime.now().isoformat()

            # Confidence only applies to sentiment results.
            confidence = 0.0
            if analysis_type == "sentiment":
                confidence = result.get("sentiment", {}).get("confidence", 0.0)

            # Extract and truncate the source text defensively; a missing or
            # None value becomes the empty string.
            source_text = (metadata or {}).get("source_text") or ""
            if len(source_text) > 100:
                source_text = source_text[:100] + "..."

            stored_data = {
                "id": result_id,
                "video_id": video_id,
                "analysis_type": analysis_type,
                "analysis_time": timestamp,
                "analysis_data": result,
                "confidence": confidence,
                "source_type": metadata.get("source_type") if metadata else "unknown",
                "source_text": source_text,
                "metadata": metadata or {},
                "created_at": timestamp,
                "updated_at": timestamp
            }

            # Persist to the active backend.
            if self._storage_type == "sqlite":
                self._save_to_sqlite(stored_data)
            else:
                self._save_to_file(stored_data)

            # Keep the cache warm with the freshly stored record.
            self._update_cache(result_id, stored_data)

            logger.info(f"分析结果保存成功，ID: {result_id}, 视频ID: {video_id}, 类型: {analysis_type}")

            return result_id
    
    def _save_to_sqlite(self, data: Dict[str, Any]):
        """
        Insert one fully-populated record into the SQLite table.

        Args:
            data: record produced by save_result() (all keys present)

        Raises:
            Exception: re-raised after logging when the insert fails.

        Fix: the connection is now closed in a finally block, so a failed
        INSERT no longer leaks it.
        """
        conn = None
        try:
            conn = sqlite3.connect(self._db_path)
            cursor = conn.cursor()

            cursor.execute('''
            INSERT INTO text_analysis_results 
            (id, video_id, analysis_type, analysis_time, analysis_data, confidence, source_type, source_text, metadata, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                data["id"],
                data["video_id"],
                data["analysis_type"],
                data["analysis_time"],
                # Nested dicts are serialized to JSON text columns.
                json.dumps(data["analysis_data"], ensure_ascii=False),
                data["confidence"],
                data["source_type"],
                data["source_text"],
                json.dumps(data["metadata"], ensure_ascii=False),
                data["created_at"],
                data["updated_at"]
            ))

            conn.commit()

        except Exception as e:
            logger.error(f"保存到SQLite失败: {str(e)}")
            raise
        finally:
            if conn is not None:
                conn.close()
    
    def _save_to_file(self, data: Dict[str, Any]):
        """
        Write one record as a pretty-printed JSON file under its video's
        directory.

        Args:
            data: record produced by save_result()

        Raises:
            Exception: re-raised after logging when the write fails.
        """
        try:
            # Records are grouped by video id; anonymous ones go to "unknown".
            bucket = data["video_id"] if data["video_id"] else "unknown"
            target_dir = os.path.join(self._results_dir, bucket)
            os.makedirs(target_dir, exist_ok=True)

            ext = self._config.get('file_extension', '.json')
            target_path = os.path.join(target_dir, f"{data['id']}{ext}")

            with open(target_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)

        except Exception as e:
            logger.error(f"保存到文件失败: {str(e)}")
            raise
    
    def get_result(self, result_id: str) -> Optional[Dict[str, Any]]:
        """
        Fetch the analysis payload ("analysis_data") of a stored result.

        Args:
            result_id: id returned by save_result()

        Returns:
            Optional[Dict[str, Any]]: the payload, or None if not found.

        Fix: the cache lookup now happens under the lock; previously it read
        self._cache unsynchronized while other threads could be evicting
        entries via _update_cache().
        """
        with self._lock:
            # Cache hit avoids touching the storage backend entirely.
            cached = self._cache.get(result_id)
            if cached is not None:
                logger.debug(f"从缓存获取结果: {result_id}")
                return cached["analysis_data"]

            if self._storage_type == "sqlite":
                record = self._get_from_sqlite(result_id)
            else:
                record = self._get_from_file(result_id)

            # Only successful lookups are cached.
            if record:
                self._update_cache(result_id, record)

            return record["analysis_data"] if record else None
    
    def _get_from_sqlite(self, result_id: str) -> Optional[Dict[str, Any]]:
        """
        Load one full record from SQLite by primary key.

        Args:
            result_id: record id

        Returns:
            Optional[Dict[str, Any]]: record with the JSON columns decoded,
            or None when absent (errors are logged and also yield None).

        Fix: the connection is closed in a finally block so a failing query
        no longer leaks it.
        """
        conn = None
        try:
            conn = sqlite3.connect(self._db_path)
            conn.row_factory = sqlite3.Row  # rows become dict-convertible
            cursor = conn.cursor()

            cursor.execute('''
            SELECT * FROM text_analysis_results WHERE id = ?
            ''', (result_id,))

            row = cursor.fetchone()
            if row is None:
                return None

            data = dict(row)
            # Re-hydrate the JSON-encoded columns.
            data["analysis_data"] = json.loads(data["analysis_data"])
            data["metadata"] = json.loads(data["metadata"])
            return data

        except Exception as e:
            logger.error(f"从SQLite获取失败: {str(e)}")
            return None
        finally:
            if conn is not None:
                conn.close()
    
    def _get_from_file(self, result_id: str) -> Optional[Dict[str, Any]]:
        """
        Locate a result file by id, scanning every per-video directory.

        Args:
            result_id: record id (used as the file stem)

        Returns:
            Optional[Dict[str, Any]]: decoded record, or None when absent
            (errors are logged and also yield None).

        Fix: the separate "unknown"-directory fallback was removed — the
        directory scan already includes the "unknown" bucket (it is just
        another subdirectory of the results dir), so that branch was dead
        code.
        """
        try:
            file_name = f"{result_id}{self._config.get('file_extension', '.json')}"

            for video_id in os.listdir(self._results_dir):
                video_dir = os.path.join(self._results_dir, video_id)
                if not os.path.isdir(video_dir):
                    continue

                file_path = os.path.join(video_dir, file_name)
                if os.path.exists(file_path):
                    with open(file_path, 'r', encoding='utf-8') as f:
                        return json.load(f)

            return None

        except Exception as e:
            logger.error(f"从文件获取失败: {str(e)}")
            return None
    
    def get_results_by_video_id(self, video_id: str) -> List[Dict[str, Any]]:
        """
        Return every analysis payload stored for one video, newest first.

        Args:
            video_id: video identifier

        Returns:
            List[Dict[str, Any]]: "analysis_data" payloads ([] when none).
        """
        with self._lock:
            fetch = (self._get_from_sqlite_by_video_id
                     if self._storage_type == "sqlite"
                     else self._get_from_file_by_video_id)
            return fetch(video_id)
    
    def _get_from_sqlite_by_video_id(self, video_id: str) -> List[Dict[str, Any]]:
        """
        Query SQLite for every payload belonging to one video, newest first.

        Args:
            video_id: video identifier

        Returns:
            List[Dict[str, Any]]: "analysis_data" payloads ([] on error).
        """
        try:
            conn = sqlite3.connect(self._db_path)
            conn.row_factory = sqlite3.Row
            cursor = conn.cursor()

            cursor.execute('''
            SELECT * FROM text_analysis_results WHERE video_id = ? ORDER BY created_at DESC
            ''', (video_id,))

            rows = cursor.fetchall()
            conn.close()

            payloads = []
            for row in rows:
                record = dict(row)
                record["analysis_data"] = json.loads(record["analysis_data"])
                # Decoded for parity with the single-record path even though
                # only the payload is returned.
                record["metadata"] = json.loads(record["metadata"])
                payloads.append(record["analysis_data"])

            return payloads

        except Exception as e:
            logger.error(f"从SQLite获取视频结果失败: {str(e)}")
            return []
    
    def _get_from_file_by_video_id(self, video_id: str) -> List[Dict[str, Any]]:
        """
        Load every payload for one video from its directory, newest first.

        Args:
            video_id: video identifier

        Returns:
            List[Dict[str, Any]]: "analysis_data" payloads, ordered by the
            record's created_at descending ([] when the video has no
            directory or on error).

        Fix: results are now ordered by the record-level "created_at"
        (matching the SQLite backend's ORDER BY created_at DESC). The old
        code sorted by a "timestamp" field inside the payload, which records
        do not reliably carry, so the ordering was effectively arbitrary.
        """
        try:
            video_dir = os.path.join(self._results_dir, video_id)
            if not os.path.exists(video_dir):
                return []

            records = []
            ext = self._config.get('file_extension', '.json')
            for file_name in os.listdir(video_dir):
                if file_name.endswith(ext):
                    file_path = os.path.join(video_dir, file_name)
                    with open(file_path, 'r', encoding='utf-8') as f:
                        records.append(json.load(f))

            # Newest first; ISO-8601 timestamps sort chronologically as text.
            records.sort(key=lambda r: r.get("created_at", ""), reverse=True)
            return [r["analysis_data"] for r in records]

        except Exception as e:
            logger.error(f"从文件获取视频结果失败: {str(e)}")
            return []
    
    def update_result(self, 
                     result_id: str,
                     updated_data: Dict[str, Any]) -> bool:
        """
        Update an existing analysis result's payload.

        The update is a shallow merge: keys in ``updated_data`` overwrite the
        stored "analysis_data" keys, and "updated_at" is refreshed; all other
        record fields are preserved.

        Args:
            result_id: id of the result to update
            updated_data: keys/values merged into the stored payload

        Returns:
            bool: True if the record existed and was rewritten, False on a
            missing record or backend write failure.
        """
        with self._lock:
            # Load the existing full record from the active backend.
            if self._storage_type == "sqlite":
                existing = self._get_from_sqlite(result_id)
            else:
                existing = self._get_from_file(result_id)
            
            if not existing:
                logger.warning(f"更新失败，结果不存在: {result_id}")
                return False
            
            # Merge the new payload keys and refresh the update timestamp.
            existing["analysis_data"].update(updated_data)
            existing["updated_at"] = datetime.now().isoformat()
            
            # Persist the merged record back to storage.
            if self._storage_type == "sqlite":
                try:
                    conn = sqlite3.connect(self._db_path)
                    cursor = conn.cursor()
                    
                    cursor.execute('''
                    UPDATE text_analysis_results 
                    SET analysis_data = ?, updated_at = ?
                    WHERE id = ?
                    ''', (
                        json.dumps(existing["analysis_data"], ensure_ascii=False),
                        existing["updated_at"],
                        result_id
                    ))
                    
                    conn.commit()
                    conn.close()
                    
                except Exception as e:
                    logger.error(f"更新SQLite失败: {str(e)}")
                    return False
            else:
                try:
                    # Rewrite the JSON file in the record's per-video directory.
                    video_dir = os.path.join(self._results_dir, existing["video_id"] if existing["video_id"] else "unknown")
                    file_name = f"{result_id}{self._config.get('file_extension', '.json')}"
                    file_path = os.path.join(video_dir, file_name)
                    
                    with open(file_path, 'w', encoding='utf-8') as f:
                        json.dump(existing, f, ensure_ascii=False, indent=2)
                        
                except Exception as e:
                    logger.error(f"更新文件失败: {str(e)}")
                    return False
            
            # Keep the cache in sync with the rewritten record.
            self._update_cache(result_id, existing)
            
            logger.info(f"分析结果更新成功: {result_id}")
            return True
    
    def delete_result(self, result_id: str) -> bool:
        """
        Delete a stored result from the backend and drop it from the cache.

        Args:
            result_id: id of the result to delete

        Returns:
            bool: True if a record/file was actually removed.

        Fix: the DELETE's rowcount is now captured BEFORE the connection is
        closed (and the connection is closed in a finally block). Previously
        rowcount was read from the cursor after close, which relies on
        undocumented cursor state.
        """
        with self._lock:
            if self._storage_type == "sqlite":
                try:
                    conn = sqlite3.connect(self._db_path)
                    try:
                        cursor = conn.cursor()
                        cursor.execute('DELETE FROM text_analysis_results WHERE id = ?', (result_id,))
                        # Capture before commit/close; rowcount reflects the
                        # number of rows the DELETE touched.
                        success = cursor.rowcount > 0
                        conn.commit()
                    finally:
                        conn.close()

                except Exception as e:
                    logger.error(f"删除SQLite记录失败: {str(e)}")
                    return False
            else:
                # Scan every per-video directory for the file and remove it.
                success = False
                file_name = f"{result_id}{self._config.get('file_extension', '.json')}"
                for video_id in os.listdir(self._results_dir):
                    video_dir = os.path.join(self._results_dir, video_id)
                    if os.path.isdir(video_dir):
                        file_path = os.path.join(video_dir, file_name)
                        if os.path.exists(file_path):
                            os.remove(file_path)
                            success = True
                            break

            # Invalidate the cached entry regardless of backend outcome.
            if result_id in self._cache:
                del self._cache[result_id]

            if success:
                logger.info(f"分析结果删除成功: {result_id}")
            else:
                logger.warning(f"删除失败，结果不存在: {result_id}")

            return success
    
    def list_results(self, 
                    analysis_type: str = None,
                    limit: int = 100,
                    offset: int = 0) -> List[Dict[str, Any]]:
        """
        List stored records, optionally filtered by analysis type.

        Args:
            analysis_type: only return records of this type (None = all)
            limit: maximum number of records to return
            offset: number of records to skip (pagination)

        Returns:
            List[Dict[str, Any]]: full records, newest first.
        """
        with self._lock:
            if self._storage_type == "sqlite":
                lister = self._list_from_sqlite
            else:
                lister = self._list_from_file
            return lister(analysis_type, limit, offset)
    
    def _list_from_sqlite(self, 
                         analysis_type: str = None,
                         limit: int = 100,
                         offset: int = 0) -> List[Dict[str, Any]]:
        """
        Page through SQLite records, newest first.

        Args:
            analysis_type: optional type filter
            limit: page size
            offset: page start

        Returns:
            List[Dict[str, Any]]: full records with JSON columns decoded
            ([] on error).
        """
        try:
            conn = sqlite3.connect(self._db_path)
            conn.row_factory = sqlite3.Row
            cursor = conn.cursor()

            if analysis_type:
                cursor.execute('''
                SELECT * FROM text_analysis_results 
                WHERE analysis_type = ? 
                ORDER BY created_at DESC 
                LIMIT ? OFFSET ?
                ''', (analysis_type, limit, offset))
            else:
                cursor.execute('''
                SELECT * FROM text_analysis_results 
                ORDER BY created_at DESC 
                LIMIT ? OFFSET ?
                ''', (limit, offset))

            rows = cursor.fetchall()
            conn.close()

            records = []
            for row in rows:
                record = dict(row)
                # Re-hydrate the two JSON-encoded columns.
                for field in ("analysis_data", "metadata"):
                    record[field] = json.loads(record[field])
                records.append(record)

            return records

        except Exception as e:
            logger.error(f"从SQLite列出结果失败: {str(e)}")
            return []
    
    def _list_from_file(self, 
                       analysis_type: str = None,
                       limit: int = 100,
                       offset: int = 0) -> List[Dict[str, Any]]:
        """
        Page through file-backed records, newest first.

        Args:
            analysis_type: optional type filter
            limit: page size
            offset: page start

        Returns:
            List[Dict[str, Any]]: full records ([] on error).
        """
        try:
            ext = self._config.get('file_extension', '.json')
            matched = []

            # Walk every per-video bucket (including "unknown").
            for bucket in os.listdir(self._results_dir):
                bucket_dir = os.path.join(self._results_dir, bucket)
                if not os.path.isdir(bucket_dir):
                    continue
                for name in os.listdir(bucket_dir):
                    if not name.endswith(ext):
                        continue
                    with open(os.path.join(bucket_dir, name), 'r', encoding='utf-8') as f:
                        record = json.load(f)
                    if analysis_type and record["analysis_type"] != analysis_type:
                        continue
                    matched.append(record)

            # Newest first, then apply pagination.
            matched.sort(key=lambda r: r.get("created_at", ""), reverse=True)
            return matched[offset:offset + limit]

        except Exception as e:
            logger.error(f"从文件列出结果失败: {str(e)}")
            return []
    
    def export_results(self, 
                      output_path: str,
                      format: str = "json",
                      video_id: str = None,
                      analysis_type: str = None) -> bool:
        """
        Export analysis payloads to a JSON or CSV file.

        Args:
            output_path: destination file path
            format: "json" (default) or "csv"
            video_id: restrict the export to one video
            analysis_type: restrict by type (only used when video_id is None)

        Returns:
            bool: True on success, False on any error (logged).
        """
        try:
            # Collect the payloads to export.
            if video_id:
                results = self.get_results_by_video_id(video_id)
            else:
                # Page through the full store in batches of 1000.
                collected = []
                offset = 0
                batch = self.list_results(analysis_type, limit=1000, offset=offset)
                while batch:
                    collected.extend(batch)
                    offset += 1000
                    batch = self.list_results(analysis_type, limit=1000, offset=offset)
                results = [r["analysis_data"] for r in collected]

            # Write in the requested format (JSON unless "csv").
            if format == "csv":
                self._export_to_csv(results, output_path)
            else:
                with open(output_path, 'w', encoding='utf-8') as f:
                    json.dump(results, f, ensure_ascii=False, indent=2)

            logger.info(f"导出成功，共 {len(results)} 条结果: {output_path}")
            return True

        except Exception as e:
            logger.error(f"导出失败: {str(e)}")
            return False
    
    def _export_to_csv(self, results: List[Dict[str, Any]], output_path: str):
        """
        Write analysis payloads to CSV with a fixed column set.

        Args:
            results: analysis payloads (missing fields become blanks/defaults)
            output_path: destination .csv file path
        """
        import csv

        columns = ['timestamp', 'success', 'keywords', 'sentiment_label', 'sentiment_confidence', 'topics']

        # utf-8-sig writes a BOM so spreadsheet apps detect the encoding.
        with open(output_path, 'w', newline='', encoding='utf-8-sig') as f:
            writer = csv.DictWriter(f, fieldnames=columns)
            writer.writeheader()

            for payload in results:
                sentiment = payload.get('sentiment', {})
                writer.writerow({
                    'timestamp': payload.get('timestamp', ''),
                    'success': payload.get('success', False),
                    'keywords': ', '.join(payload.get('keywords', [])),
                    'sentiment_label': sentiment.get('label', ''),
                    'sentiment_confidence': sentiment.get('confidence', 0),
                    'topics': ', '.join(t['topic'] for t in payload.get('topics', [])),
                })
    
    def _update_cache(self, result_id: str, data: Dict[str, Any]):
        """
        Insert or refresh a cache entry, evicting the stalest record when the
        cache is full.

        Args:
            result_id: cache key
            data: full record; its "updated_at" drives eviction order

        Fix: refreshing a key that is already cached no longer evicts an
        unrelated entry — the size check now applies only to true inserts
        (a refresh cannot grow the cache).
        """
        if result_id not in self._cache and len(self._cache) >= self._cache_size:
            # Evict the entry with the oldest "updated_at" timestamp.
            oldest_key = min(self._cache.keys(), key=lambda k: self._cache[k].get("updated_at", ""))
            del self._cache[oldest_key]

        self._cache[result_id] = data
    
    def clear_cache(self):
        """
        Empty the in-memory result cache (thread-safe).
        """
        with self._lock:
            self._cache.clear()
            logger.info("缓存已清除")
    
    def get_statistics(self) -> Dict[str, Any]:
        """
        获取统计信息
        
        Returns:
            Dict[str, Any]: 统计信息
        """
        with self._lock:
            if self._storage_type == "sqlite":
                return self._get_sqlite_statistics()
            else:
                return self._get_file_statistics()
    
    def _get_sqlite_statistics(self) -> Dict[str, Any]:
        """
        Aggregate record counts from the SQLite backend.

        Returns:
            Dict[str, Any]: total_count, type_counts, video_count,
            latest_time, and current cache_size ({} on error).
        """
        try:
            conn = sqlite3.connect(self._db_path)
            cursor = conn.cursor()

            stats: Dict[str, Any] = {}

            # Total number of stored records.
            cursor.execute('SELECT COUNT(*) FROM text_analysis_results')
            stats["total_count"] = cursor.fetchone()[0]

            # Record count grouped by analysis type.
            cursor.execute('SELECT analysis_type, COUNT(*) FROM text_analysis_results GROUP BY analysis_type')
            stats["type_counts"] = dict(cursor.fetchall())

            # Number of distinct videos that have results.
            cursor.execute('SELECT COUNT(DISTINCT video_id) FROM text_analysis_results WHERE video_id IS NOT NULL')
            stats["video_count"] = cursor.fetchone()[0]

            # Timestamp of the most recent record.
            cursor.execute('SELECT MAX(created_at) FROM text_analysis_results')
            stats["latest_time"] = cursor.fetchone()[0]

            conn.close()

            stats["cache_size"] = len(self._cache)
            return stats

        except Exception as e:
            logger.error(f"获取SQLite统计失败: {str(e)}")
            return {}
    
    def _get_file_statistics(self) -> Dict[str, Any]:
        """
        Aggregate record counts by walking the per-video result directories.

        Returns:
            Dict[str, Any]: total_count, type_counts, video_count,
            latest_time, and current cache_size ({} on error).

        Fix: the original referenced `defaultdict` without importing it, so
        this method always raised NameError internally and returned {}.
        A plain dict with .get() is used instead, keeping the method
        self-contained.
        """
        try:
            total_count = 0
            type_counts: Dict[str, int] = {}
            video_count = 0
            latest_time = ""
            ext = self._config.get('file_extension', '.json')

            for video_id in os.listdir(self._results_dir):
                video_dir = os.path.join(self._results_dir, video_id)
                # "unknown" holds records without a video id — not a real video.
                if os.path.isdir(video_dir) and video_id != "unknown":
                    video_count += 1

                    for file_name in os.listdir(video_dir):
                        if file_name.endswith(ext):
                            file_path = os.path.join(video_dir, file_name)
                            with open(file_path, 'r', encoding='utf-8') as f:
                                data = json.load(f)
                            total_count += 1
                            a_type = data["analysis_type"]
                            type_counts[a_type] = type_counts.get(a_type, 0) + 1

                            # ISO-8601 strings compare chronologically.
                            if data["created_at"] > latest_time:
                                latest_time = data["created_at"]

            return {
                "total_count": total_count,
                "type_counts": type_counts,
                "video_count": video_count,
                "latest_time": latest_time,
                "cache_size": len(self._cache)
            }

        except Exception as e:
            logger.error(f"获取文件统计失败: {str(e)}")
            return {}
    
    def create_backup(self, backup_path: str = None) -> str:
        """
        Create a backup of the current storage: a raw copy of the SQLite
        database, or a zip archive of the JSON result tree.

        Args:
            backup_path: explicit destination; autogenerated under the
                backups directory when omitted

        Returns:
            str: path to the backup file, or "" when disabled or on error.

        Fix: under SQLite storage _init_storage() historically never set
        `_backups_dir`, so autogenerating the destination raised
        AttributeError. The backups directory is now derived (and created)
        on demand when the attribute is missing.
        """
        try:
            if not self._config.get("backup_enabled", True):
                logger.warning("备份功能未启用")
                return ""

            if not backup_path:
                # Derive the backups directory even if _init_storage() did
                # not set it for this backend.
                backups_dir = getattr(self, "_backups_dir", None)
                if not backups_dir:
                    backups_dir = os.path.join(self._storage_path, "backups")
                os.makedirs(backups_dir, exist_ok=True)

                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                backup_path = os.path.join(backups_dir, f"backup_{timestamp}.zip")

            if self._storage_type == "sqlite":
                # Raw copy of the database file (name keeps the .zip default
                # for backward compatibility with existing tooling).
                import shutil
                shutil.copy2(self._db_path, backup_path)
            else:
                # Zip every result JSON, preserving the per-video layout.
                import zipfile
                with zipfile.ZipFile(backup_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
                    for root, _, files in os.walk(self._results_dir):
                        for file in files:
                            if file.endswith(self._config.get('file_extension', '.json')):
                                file_path = os.path.join(root, file)
                                arcname = os.path.relpath(file_path, self._results_dir)
                                zipf.write(file_path, arcname)

            logger.info(f"备份创建成功: {backup_path}")
            return backup_path

        except Exception as e:
            logger.error(f"创建备份失败: {str(e)}")
            return ""


# Usage example / smoke test: construct a file-backed manager, list the
# public API, and print current storage statistics.
if __name__ == '__main__':
    try:
        print("文本分析结果管理器初始化中...")
        
        # Create a manager instance using the file backend.
        manager = TextAnalysisResultManager(storage_type="file")
        
        print("\n文本分析结果管理器初始化成功!")
        print("\n可用功能:")
        print("- save_result(): 保存分析结果")
        print("- get_result(): 获取分析结果")
        print("- update_result(): 更新分析结果")
        print("- delete_result(): 删除分析结果")
        print("- list_results(): 列出分析结果")
        print("- export_results(): 导出分析结果")
        print("- get_statistics(): 获取统计信息")
        print("- create_backup(): 创建备份")
        
        # Print storage statistics as a quick sanity check.
        stats = manager.get_statistics()
        print(f"\n当前存储统计:")
        print(f"  总记录数: {stats.get('total_count', 0)}")
        print(f"  视频数量: {stats.get('video_count', 0)}")
        print(f"  缓存大小: {stats.get('cache_size', 0)}")
        
    except Exception as e:
        print(f"初始化失败: {str(e)}")