#!/usr/bin/env python3
"""
日志查询性能优化工具
针对/web/logs页面的查询特点进行性能优化
"""

import hashlib
import json
import sqlite3
import time
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional, Dict, Any

from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

from core.config import get_settings
from core.database import SessionLocal
from models.database import APILog

class LogPerformanceOptimizer:
    """Performance optimizer for the /web/logs query paths.

    Responsibilities:
    - create the indexes the log queries rely on,
    - serve cached aggregate statistics and first-page log listings,
    - purge old log rows and invalidate the related cache entries,
    - report basic table/index statistics.
    """

    def __init__(self):
        self.settings = get_settings()
        self.engine = create_engine(self.settings.database_url)
        self.SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=self.engine)
        # Bug fix: the cache helpers read these attributes but they were
        # never initialized, so every cache call raised AttributeError.
        # Caching stays a no-op until a cache manager is injected.
        self.cache_manager = None
        self.cache_stats = {"hits": 0, "misses": 0}

    def _generate_cache_key(self, prefix: str, **kwargs) -> str:
        """Build a deterministic cache key "<prefix>:<md5>" from query params.

        None-valued parameters are dropped so an omitted filter and an
        explicit None produce the same key.
        """
        filtered_kwargs = {k: v for k, v in kwargs.items() if v is not None}
        # Sort the items so the key is independent of keyword-argument order.
        param_str = json.dumps(sorted(filtered_kwargs.items()), sort_keys=True)
        # md5 is used only as a compact, non-security fingerprint here.
        param_hash = hashlib.md5(param_str.encode()).hexdigest()
        return f"{prefix}:{param_hash}"

    def _get_cache(self, key: str) -> Optional[Dict[str, Any]]:
        """Return the cached payload for *key*, updating hit/miss counters.

        Returns None when caching is disabled or the key is absent.
        """
        if not self.cache_manager:
            return None

        cached_data = self.cache_manager.get(key)
        if cached_data:
            self.cache_stats["hits"] += 1
            return cached_data
        else:
            self.cache_stats["misses"] += 1
            return None

    def _set_cache(self, key: str, data: Dict[str, Any], ttl: int = None) -> bool:
        """Store *data* under *key*; fall back to the configured default TTL.

        Returns False when caching is disabled.
        """
        if not self.cache_manager:
            return False

        cache_ttl = ttl or self.settings.cache_ttl
        return self.cache_manager.set(key, data, ttl=cache_ttl)

    def _invalidate_cache(self, pattern: str = None) -> bool:
        """Invalidate entries matching *pattern*, or all entries when omitted."""
        if not self.cache_manager:
            return False

        if pattern:
            return self.cache_manager.invalidate_pattern(pattern)
        else:
            return self.cache_manager.invalidate_all()

    def get_cache_stats(self) -> Dict[str, Any]:
        """Return hit/miss counters and the derived hit rate (percent)."""
        total_requests = self.cache_stats["hits"] + self.cache_stats["misses"]
        hit_rate = (self.cache_stats["hits"] / total_requests * 100) if total_requests > 0 else 0

        return {
            "cache_enabled": self.settings.cache_enabled,
            "hits": self.cache_stats["hits"],
            "misses": self.cache_stats["misses"],
            "hit_rate": round(hit_rate, 2),
            "total_requests": total_requests
        }

    def create_indexes(self):
        """Create the database indexes the log queries depend on (idempotent)."""
        indexes = [
            # Plain single-column index on the timestamp.
            "CREATE INDEX IF NOT EXISTS idx_api_logs_created_at ON api_logs(created_at)",
            # Descending index matching the ORDER BY of the list query.
            "CREATE INDEX IF NOT EXISTS idx_api_logs_date_range ON api_logs(created_at DESC)",
        ]

        # NOTE(review): this uses the module-level SessionLocal rather than
        # self.SessionLocal — presumably both point at the same database;
        # confirm against core.database configuration.
        db = SessionLocal()
        try:
            for index_sql in indexes:
                # Failure of one index should not prevent the others.
                try:
                    db.execute(text(index_sql))
                    print(f"✓ 创建索引: {index_sql}")
                except Exception as e:
                    print(f"✗ 创建索引失败: {index_sql}")
                    print(f"  错误: {e}")

            db.commit()
            print("数据库索引创建完成")
        except Exception as e:
            db.rollback()
            print(f"创建索引时发生错误: {e}")
        finally:
            db.close()

    def optimize_log_stats_query(self, start_date: Optional[str] = None, end_date: Optional[str] = None) -> Dict[str, Any]:
        """Compute request/success/error counts and avg response time in one query.

        *start_date*/*end_date* are "YYYY-MM-DD" strings; unparsable values are
        silently ignored (filter not applied). Results are cached for 60s,
        error placeholders for 10s.
        """
        cache_key = self._generate_cache_key("log_stats", start_date=start_date, end_date=end_date)

        cached_result = self._get_cache(cache_key)
        if cached_result:
            return cached_result

        db = SessionLocal()
        try:
            # Single aggregate query instead of one round-trip per metric.
            base_query = "SELECT "
            base_query += "COUNT(*) as total_requests, "
            base_query += "SUM(CASE WHEN status_code < 400 THEN 1 ELSE 0 END) as success_count, "
            base_query += "AVG(response_time) as avg_response_time, "
            base_query += "SUM(CASE WHEN status_code >= 400 THEN 1 ELSE 0 END) as error_count "
            base_query += "FROM api_logs WHERE 1=1 "

            params = {}

            if start_date:
                try:
                    start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
                    base_query += "AND created_at >= :start_date "
                    params['start_date'] = start_datetime
                except ValueError:
                    pass

            if end_date:
                try:
                    end_datetime = datetime.strptime(end_date, "%Y-%m-%d")
                    # Make the end date inclusive of the whole day.
                    end_datetime = end_datetime.replace(hour=23, minute=59, second=59, microsecond=999999)
                    base_query += "AND created_at <= :end_date "
                    params['end_date'] = end_datetime
                except ValueError:
                    pass

            result = db.execute(text(base_query), params).fetchone()

            # Aggregates are NULL on an empty table; coalesce to zero.
            total_requests = result.total_requests or 0
            success_count = result.success_count or 0
            avg_response_time = result.avg_response_time or 0
            error_count = result.error_count or 0

            success_rate = (success_count / total_requests * 100) if total_requests > 0 else 0

            result_data = {
                "total_requests": total_requests,
                "success_rate": round(success_rate, 2),
                "avg_response_time": round(avg_response_time, 3),
                "error_count": error_count
            }

            # Short TTL: the underlying data changes continuously.
            self._set_cache(cache_key, result_data, ttl=60)

            return result_data
        except Exception as e:
            print(f"统计查询错误: {e}")
            error_data = {
                "total_requests": 0,
                "success_rate": 0,
                "avg_response_time": 0,
                "error_count": 0
            }
            # Cache the error placeholder briefly to avoid hammering a failing DB.
            self._set_cache(cache_key, error_data, ttl=10)
            return error_data
        finally:
            db.close()

    def optimize_log_list_query(self, page: int = 1, limit: int = 10, user_id: Optional[int] = None,
                               endpoint: Optional[str] = None, status_code: Optional[int] = None,
                               start_date: Optional[str] = None, end_date: Optional[str] = None) -> Dict[str, Any]:
        """Return a filtered, paginated page of api_logs rows plus paging metadata.

        Only page 1 is cached (5 min; 30s for error placeholders), since users
        usually look at the first page. Date strings are "YYYY-MM-DD";
        unparsable values are ignored.
        """
        cache_key = None
        if page == 1:
            cache_key = self._generate_cache_key(
                "log_list_page1",
                limit=limit, user_id=user_id, endpoint=endpoint,
                status_code=status_code, start_date=start_date, end_date=end_date
            )

            cached_result = self._get_cache(cache_key)
            if cached_result:
                return cached_result

        db = SessionLocal()
        try:
            query = "SELECT * FROM api_logs WHERE 1=1 "
            count_query = "SELECT COUNT(*) FROM api_logs WHERE 1=1 "

            params = {}
            conditions = []

            if user_id:
                conditions.append("user_id = :user_id")
                params['user_id'] = user_id

            if endpoint:
                conditions.append("endpoint LIKE :endpoint")
                params['endpoint'] = f"%{endpoint}%"

            if status_code:
                conditions.append("status_code = :status_code")
                params['status_code'] = status_code

            if start_date:
                try:
                    start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
                    conditions.append("created_at >= :start_date")
                    params['start_date'] = start_datetime
                except ValueError:
                    pass

            if end_date:
                try:
                    end_datetime = datetime.strptime(end_date, "%Y-%m-%d")
                    # Inclusive end-of-day bound.
                    end_datetime = end_datetime.replace(hour=23, minute=59, second=59, microsecond=999999)
                    conditions.append("created_at <= :end_date")
                    params['end_date'] = end_datetime
                except ValueError:
                    pass

            if conditions:
                where_clause = " AND ".join(conditions)
                query += f" AND {where_clause} "
                count_query += f" AND {where_clause} "

            # Bug fix: run the COUNT before the pagination params are added —
            # the count statement has no :limit/:offset placeholders, and
            # passing extra bind params is driver-dependent.
            count_result = db.execute(text(count_query), dict(params)).fetchone()

            query += " ORDER BY created_at DESC LIMIT :limit OFFSET :offset"
            params['limit'] = limit
            params['offset'] = (page - 1) * limit

            logs_result = db.execute(text(query), params).fetchall()

            total = count_result[0] if count_result else 0
            pages = (total + limit - 1) // limit if total > 0 else 1

            # Serialize rows to plain dicts so the payload is cache/JSON safe.
            logs = []
            for row in logs_result:
                log_dict = {
                    'id': row.id,
                    'user_id': row.user_id,
                    'endpoint': row.endpoint,
                    'method': row.method,
                    'status_code': row.status_code,
                    'response_time': row.response_time,
                    'ip_address': row.ip_address,
                    'user_agent': row.user_agent,
                    'created_at': row.created_at.isoformat() if row.created_at else None
                }
                logs.append(log_dict)

            result_data = {
                "items": logs,
                "total": total,
                "page": page,
                "limit": limit,
                "pages": pages
            }

            if cache_key and page == 1:
                self._set_cache(cache_key, result_data, ttl=300)

            return result_data
        except Exception as e:
            print(f"日志列表查询错误: {e}")
            error_data = {
                "items": [],
                "total": 0,
                "page": page,
                "limit": limit,
                "pages": 1
            }
            # Brief cache of the error placeholder to avoid repeated failures.
            if cache_key and page == 1:
                self._set_cache(cache_key, error_data, ttl=30)
            return error_data
        finally:
            db.close()

    def cleanup_old_logs(self, days: int = 90) -> Dict[str, Any]:
        """Delete log rows older than *days* days and invalidate related caches.

        Returns a dict with the number of deleted rows and a status message.
        """
        db = SessionLocal()
        try:
            cutoff_date = datetime.now() - timedelta(days=days)

            delete_query = "DELETE FROM api_logs WHERE created_at < :cutoff_date"
            result = db.execute(text(delete_query), {'cutoff_date': cutoff_date})
            deleted_count = result.rowcount

            db.commit()
            print(f"已删除 {deleted_count} 条旧日志记录（保留最近 {days} 天的数据）")

            # Bug fix: SQLite refuses VACUUM inside a transaction and the
            # session auto-begins one, so a VACUUM failure previously sent the
            # whole call to the outer except and reported 0 deletions even
            # though the DELETE was already committed. Run it best-effort.
            try:
                db.execute(text("VACUUM"))
                db.commit()
                print("数据库优化完成")
            except Exception as vacuum_error:
                db.rollback()
                print(f"数据库优化失败: {vacuum_error}")

            # Cached stats and first-page listings are now stale.
            self._invalidate_cache("log_stats:")
            self._invalidate_cache("log_list_page1:")

            return {
                "deleted_count": deleted_count,
                "message": f"成功清理了 {deleted_count} 条旧日志记录，并清理了相关缓存"
            }
        except Exception as e:
            db.rollback()
            print(f"清理旧日志时发生错误: {e}")
            return {
                "deleted_count": 0,
                "message": f"清理失败: {str(e)}"
            }
        finally:
            db.close()

    def get_query_performance_stats(self):
        """Return row count, timestamp range, and index names for api_logs.

        Returns an empty dict on failure. SQLite-specific: reads
        sqlite_master for the index list.
        """
        db = SessionLocal()
        try:
            stats_query = """
            SELECT 
                COUNT(*) as total_records,
                MIN(created_at) as earliest_record,
                MAX(created_at) as latest_record
            FROM api_logs
            """

            result = db.execute(text(stats_query)).fetchone()

            index_query = """
            SELECT name, tbl_name FROM sqlite_master 
            WHERE type='index' AND tbl_name='api_logs'
            """

            index_result = db.execute(text(index_query)).fetchall()
            indexes = [row[0] for row in index_result]

            return {
                "total_records": result.total_records,
                "earliest_record": result.earliest_record.isoformat() if result.earliest_record else None,
                "latest_record": result.latest_record.isoformat() if result.latest_record else None,
                "indexes": indexes
            }
        except Exception as e:
            print(f"获取性能统计时发生错误: {e}")
            return {}
        finally:
            db.close()

def main():
    """Script entry point: build indexes, report table stats, and time the optimized queries."""
    optimizer = LogPerformanceOptimizer()

    print("开始日志查询性能优化...")

    # Step 1: make sure the supporting indexes exist.
    print("\n1. 创建数据库索引...")
    optimizer.create_indexes()

    # Step 2: summarize the current state of the api_logs table.
    print("\n2. 获取性能统计信息...")
    stats = optimizer.get_query_performance_stats()
    for report_line in (
        f"总记录数: {stats.get('total_records', 0)}",
        f"最早记录: {stats.get('earliest_record', 'N/A')}",
        f"最新记录: {stats.get('latest_record', 'N/A')}",
        f"现有索引: {', '.join(stats.get('indexes', []))}",
    ):
        print(report_line)

    # Step 3: run each optimized query once and report wall-clock time.
    print("\n3. 测试优化后的查询性能...")
    t0 = time.time()
    stats_result = optimizer.optimize_log_stats_query()
    elapsed = time.time() - t0
    print(f"统计查询耗时: {elapsed:.3f}秒")
    print(f"统计结果: {stats_result}")

    t0 = time.time()
    list_result = optimizer.optimize_log_list_query(page=1, limit=10)
    elapsed = time.time() - t0
    print(f"列表查询耗时: {elapsed:.3f}秒")
    print(f"列表结果: {list_result['total']} 条记录")

    print("\n性能优化完成！")

if __name__ == "__main__":
    main()