"""数据库优化管理命令

提供数据库性能优化和维护功能。
"""

from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
from django.conf import settings
from django.core.cache import cache
from django.utils import timezone
from datetime import timedelta
import logging

logger = logging.getLogger('linkbook')


class Command(BaseCommand):
    """Database optimization management command.

    Provides PostgreSQL-specific maintenance operations: planner
    statistics refresh (ANALYZE), space reclamation (VACUUM), index
    rebuilds (REINDEX), old-data cleanup, and a statistics report.
    All destructive actions honor ``--dry-run``.
    """

    help = '数据库优化和维护工具'

    def add_arguments(self, parser):
        """Register command-line arguments."""
        parser.add_argument(
            '--action',
            type=str,
            choices=['analyze', 'vacuum', 'reindex', 'cleanup', 'stats', 'all'],
            default='stats',
            help='执行的优化操作'
        )

        parser.add_argument(
            '--table',
            type=str,
            help='指定要优化的表名'
        )

        parser.add_argument(
            '--days',
            type=int,
            default=30,
            help='清理多少天前的数据'
        )

        parser.add_argument(
            '--dry-run',
            action='store_true',
            help='只显示将要执行的操作，不实际执行'
        )

    def handle(self, *args, **options):
        """Dispatch the requested optimization action(s).

        Raises:
            CommandError: if any optimization step fails.
        """
        action = options['action']
        table = options.get('table')
        days = options['days']
        dry_run = options['dry_run']

        self.stdout.write(
            self.style.SUCCESS(f'开始执行数据库优化: {action}')
        )

        try:
            if action in ('analyze', 'all'):
                self._analyze_tables(table, dry_run)

            if action in ('vacuum', 'all'):
                self._vacuum_tables(table, dry_run)

            if action in ('reindex', 'all'):
                self._reindex_tables(table, dry_run)

            if action in ('cleanup', 'all'):
                self._cleanup_old_data(days, dry_run)

            # NOTE: stats are intentionally shown only for the explicit
            # 'stats' action, not for 'all' (original behavior preserved).
            if action == 'stats':
                self._show_database_stats()

            self.stdout.write(
                self.style.SUCCESS('数据库优化完成')
            )

        except Exception as e:
            # logger.exception keeps the traceback; lazy %-formatting
            # avoids building the message when the level is disabled.
            logger.exception('数据库优化失败: %s', e)
            raise CommandError(f'优化失败: {str(e)}') from e

    def _resolve_tables(self, cursor, table=None):
        """Return the list of table names to operate on.

        Always fetches the tables of the ``public`` schema; when *table*
        is given it is validated against that list, so user-supplied
        input is never interpolated into SQL unchecked.

        Raises:
            CommandError: if *table* does not exist in ``public``.
        """
        cursor.execute("""
            SELECT tablename FROM pg_tables 
            WHERE schemaname = 'public'
        """)
        known = [row[0] for row in cursor.fetchall()]
        if table is None:
            return known
        if table not in known:
            raise CommandError(f'表不存在: {table}')
        return [table]

    def _analyze_tables(self, table=None, dry_run=False):
        """Refresh planner statistics (ANALYZE) for the selected tables."""
        self.stdout.write('正在分析表统计信息...')

        quote = connection.ops.quote_name
        with connection.cursor() as cursor:
            for table_name in self._resolve_tables(cursor, table):
                # Quote the identifier — ANALYZE takes no bind parameters.
                sql = f'ANALYZE {quote(table_name)};'

                if dry_run:
                    self.stdout.write(f'  [DRY RUN] {sql}')
                    continue
                try:
                    cursor.execute(sql)
                    self.stdout.write(
                        self.style.SUCCESS(f'  ✓ 已分析表: {table_name}')
                    )
                except Exception as e:
                    # Best-effort: report and continue with the next table.
                    self.stdout.write(
                        self.style.ERROR(f'  ✗ 分析表失败 {table_name}: {str(e)}')
                    )

    def _vacuum_tables(self, table=None, dry_run=False):
        """Reclaim dead-tuple space (VACUUM ANALYZE) for the selected tables.

        VACUUM cannot run inside a transaction block; this relies on
        Django's default autocommit mode.
        """
        self.stdout.write('正在清理表空间...')

        quote = connection.ops.quote_name
        with connection.cursor() as cursor:
            for table_name in self._resolve_tables(cursor, table):
                sql = f'VACUUM ANALYZE {quote(table_name)};'

                if dry_run:
                    self.stdout.write(f'  [DRY RUN] {sql}')
                    continue
                try:
                    cursor.execute(sql)
                    self.stdout.write(
                        self.style.SUCCESS(f'  ✓ 已清理表: {table_name}')
                    )
                except Exception as e:
                    self.stdout.write(
                        self.style.ERROR(f'  ✗ 清理表失败 {table_name}: {str(e)}')
                    )

    def _reindex_tables(self, table=None, dry_run=False):
        """Rebuild indexes (REINDEX) for one table or the whole schema."""
        self.stdout.write('正在重建索引...')

        quote = connection.ops.quote_name
        with connection.cursor() as cursor:
            if table:
                # Indexes of the requested table only; parameterized query.
                cursor.execute("""
                    SELECT indexname FROM pg_indexes 
                    WHERE tablename = %s AND schemaname = 'public'
                """, [table])
            else:
                # Indexes of every table in the public schema.
                cursor.execute("""
                    SELECT indexname FROM pg_indexes 
                    WHERE schemaname = 'public'
                """)

            indexes = [row[0] for row in cursor.fetchall()]

            for index_name in indexes:
                sql = f'REINDEX INDEX {quote(index_name)};'

                if dry_run:
                    self.stdout.write(f'  [DRY RUN] {sql}')
                    continue
                try:
                    cursor.execute(sql)
                    self.stdout.write(
                        self.style.SUCCESS(f'  ✓ 已重建索引: {index_name}')
                    )
                except Exception as e:
                    self.stdout.write(
                        self.style.ERROR(f'  ✗ 重建索引失败 {index_name}: {str(e)}')
                    )

    def _purge(self, queryset, label, dry_run):
        """Delete *queryset* (or just report its size under dry-run)."""
        if dry_run:
            count = queryset.count()
            self.stdout.write(f'  [DRY RUN] 将删除 {count} 条{label}')
        else:
            # delete() returns (total_deleted, per-model dict).
            deleted_count = queryset.delete()[0]
            self.stdout.write(
                self.style.SUCCESS(f'  ✓ 已删除 {deleted_count} 条{label}')
            )

    def _cleanup_old_data(self, days, dry_run=False):
        """Purge activity logs / login history older than *days* days,
        then clear the cache."""
        self.stdout.write(f'正在清理 {days} 天前的数据...')

        cutoff_date = timezone.now() - timedelta(days=days)

        # Imported lazily so the command module loads even when these
        # apps are not installed.
        from apps.common.models import ActivityLog
        from apps.authentication.models import LoginHistory

        self._purge(
            ActivityLog.objects.filter(created_at__lt=cutoff_date),
            '活动日志',
            dry_run,
        )
        self._purge(
            LoginHistory.objects.filter(login_time__lt=cutoff_date),
            '登录历史',
            dry_run,
        )

        # Clear the whole cache so stale aggregates disappear with the data.
        if not dry_run:
            cache.clear()
            self.stdout.write(
                self.style.SUCCESS('  ✓ 已清理缓存')
            )
        else:
            self.stdout.write('  [DRY RUN] 将清理所有缓存')

    def _show_database_stats(self):
        """Print database size, biggest tables/indexes and connection counts."""
        self.stdout.write('数据库统计信息:')

        with connection.cursor() as cursor:
            # Total database size.
            cursor.execute("""
                SELECT pg_size_pretty(pg_database_size(current_database()))
            """)
            db_size = cursor.fetchone()[0]
            self.stdout.write(f'  数据库大小: {db_size}')

            # Top tables by total size. The pg_namespace join pins pg_class
            # to the table's own schema (relname alone is not unique across
            # schemas); quote_ident keeps mixed-case names resolvable.
            cursor.execute("""
                SELECT 
                    pt.schemaname,
                    pt.tablename,
                    pg_size_pretty(pg_total_relation_size(
                        quote_ident(pt.schemaname)||'.'||quote_ident(pt.tablename))) as size,
                    pg_stat_get_tuples_returned(c.oid) as tuples_read,
                    pg_stat_get_tuples_fetched(c.oid) as tuples_fetched
                FROM pg_tables pt
                JOIN pg_class c ON c.relname = pt.tablename
                JOIN pg_namespace n
                    ON n.oid = c.relnamespace AND n.nspname = pt.schemaname
                WHERE pt.schemaname = 'public'
                ORDER BY pg_total_relation_size(
                    quote_ident(pt.schemaname)||'.'||quote_ident(pt.tablename)) DESC
                LIMIT 10
            """)

            self.stdout.write('\n  前10个最大的表:')
            self.stdout.write('  表名\t\t\t大小\t\t读取次数\t获取次数')
            self.stdout.write('  ' + '-' * 60)

            for row in cursor.fetchall():
                schema, table, size, reads, fetches = row
                self.stdout.write(
                    f'  {table:<20}\t{size:<10}\t{reads or 0:<10}\t{fetches or 0}'
                )

            # Top indexes by size; quote_ident before the regclass cast so
            # mixed-case index names do not raise.
            cursor.execute("""
                SELECT 
                    schemaname,
                    tablename,
                    indexname,
                    pg_size_pretty(pg_relation_size(
                        (quote_ident(schemaname)||'.'||quote_ident(indexname))::regclass)) as size
                FROM pg_indexes 
                WHERE schemaname = 'public'
                ORDER BY pg_relation_size(
                    (quote_ident(schemaname)||'.'||quote_ident(indexname))::regclass) DESC
                LIMIT 5
            """)

            self.stdout.write('\n  前5个最大的索引:')
            self.stdout.write('  索引名\t\t\t\t大小')
            self.stdout.write('  ' + '-' * 40)

            for row in cursor.fetchall():
                schema, table, index, size = row
                self.stdout.write(f'  {index:<30}\t{size}')

            # Connection counts for the current database.
            cursor.execute("""
                SELECT 
                    count(*) as total_connections,
                    count(*) FILTER (WHERE state = 'active') as active_connections,
                    count(*) FILTER (WHERE state = 'idle') as idle_connections
                FROM pg_stat_activity
                WHERE datname = current_database()
            """)

            total, active, idle = cursor.fetchone()
            self.stdout.write(f'\n  连接统计:')
            self.stdout.write(f'    总连接数: {total}')
            self.stdout.write(f'    活跃连接: {active}')
            self.stdout.write(f'    空闲连接: {idle}')