#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
RedFire PostgreSQL 分区自动化管理器
==================================

功能特性:
- 自动创建时间分区表
- 智能分区维护和清理
- 分区性能监控
- 分区统计和优化建议
- 支持多种分区策略

Author: RedFire Team
Date: 2025-09-16
Version: v1.0.0
"""

import asyncio
import logging
from datetime import datetime, timedelta, date
from typing import Dict, List, Optional, Tuple, Any
from dataclasses import dataclass, asdict
from pathlib import Path
import json
import yaml
import asyncpg
from calendar import monthrange
import re

# Configure module-wide logging: INFO level, timestamped single-line records.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


@dataclass
class PartitionConfig:
    """Partitioning policy for one managed table.

    Describes how partitions are sized and named, how long data is
    retained, and which indexes are created on each new partition.
    """
    table_name: str
    partition_column: str = "datetime"   # column used for RANGE partitioning
    partition_type: str = "monthly"      # daily, weekly, monthly, yearly
    retention_months: int = 24           # months of history to keep
    auto_create_future: int = 3          # number of future partitions to pre-create
    compression_enabled: bool = True
    compression_after_days: int = 7      # compress partitions older than this many days

    # Index configuration
    create_indexes: bool = True
    # None means "use the default patterns below" (filled by __post_init__).
    # Fixed: annotated Optional — the field genuinely defaults to None.
    index_patterns: Optional[List[str]] = None

    # Performance configuration
    parallel_workers: int = 4
    maintenance_window: str = "02:00-04:00"  # preferred maintenance window (HH:MM-HH:MM)

    def __post_init__(self):
        if self.index_patterns is None:
            # Default index-name templates; "{column_name}" is left as a
            # literal placeholder to be filled in by whoever renders them.
            self.index_patterns = [
                f"idx_{self.table_name}_{{column_name}}",
                f"idx_{self.table_name}_{self.partition_column}",
                f"idx_{self.table_name}_symbol_{self.partition_column}"
            ]


@dataclass
class PartitionInfo:
    """Snapshot of one physical partition's statistics, as produced by
    PostgreSQLPartitionManager.get_partition_info()."""
    partition_name: str      # physical table name, e.g. "vnpy_tick_history_2024_01"
    table_name: str          # logical parent table name
    partition_type: str      # daily / weekly / monthly / yearly (or "unknown")
    start_date: date         # first day covered by the partition
    end_date: date           # last day covered (inclusive, unlike the exclusive DDL bound)
    size_bytes: int          # pg_total_relation_size of the partition
    row_count: int           # COUNT(*) at collection time (0 if the count failed)
    last_modified: datetime  # timestamp of collection, not an actual table mtime
    is_compressed: bool = False  # compression flag; not currently populated by the manager


class PostgreSQLPartitionManager:
    """Automated manager for PostgreSQL RANGE partitions.

    Tables are registered with a :class:`PartitionConfig`; the manager can
    then convert empty tables into partitioned parents, pre-create future
    partitions, drop partitions past their retention window, and collect
    size/row statistics plus maintenance recommendations.

    NOTE(review): table and partition names are interpolated directly into
    SQL text. They must come from trusted configuration only (here, via
    ``register_table``) — never from user input.
    """

    def __init__(self, connection_config: Dict[str, Any]):
        # Keyword arguments forwarded verbatim to asyncpg.create_pool().
        self.connection_config = connection_config
        self.connection_pool = None
        # Registered tables: table name -> its partitioning policy.
        self.partition_configs: Dict[str, PartitionConfig] = {}

    async def initialize(self):
        """Create the asyncpg connection pool.

        Must be awaited before any other coroutine on this instance.
        """
        self.connection_pool = await asyncpg.create_pool(**self.connection_config)
        logger.info("PostgreSQL partition manager initialized")

    async def register_table(self, config: PartitionConfig):
        """Register (or re-register) a table for partition management."""
        self.partition_configs[config.table_name] = config
        logger.info(f"Registered table {config.table_name} for partition management")

    async def ensure_parent_table_partitioned(self, table_name: str, partition_column: str) -> bool:
        """Ensure ``table_name`` is a RANGE-partitioned parent table.

        Returns True when the table is already partitioned or was converted
        in place. Returns False when the table does not exist, or when it
        already contains rows (conversion would require a data migration,
        which is left to the operator).
        """
        async with self.connection_pool.acquire() as conn:
            # Already a partitioned parent?
            is_partitioned = await conn.fetchval("""
                SELECT EXISTS (
                    SELECT 1 FROM pg_partitioned_table pt
                    JOIN pg_class c ON pt.partrelid = c.oid
                    WHERE c.relname = $1
                )
            """, table_name)

            if is_partitioned:
                logger.info(f"Table {table_name} is already partitioned")
                return True

            # Does the table exist at all?
            table_exists = await conn.fetchval("""
                SELECT EXISTS (
                    SELECT FROM information_schema.tables 
                    WHERE table_schema = 'public' 
                    AND table_name = $1
                )
            """, table_name)

            if not table_exists:
                logger.warning(f"Table {table_name} does not exist")
                return False

            # Refuse to convert a table that already holds data.
            row_count = await conn.fetchval(f"SELECT COUNT(*) FROM {table_name}")

            if row_count > 0:
                logger.warning(f"Table {table_name} has {row_count} rows. Manual migration required.")
                return False

            await self._convert_to_partitioned_table(conn, table_name, partition_column)
            return True

    async def _convert_to_partitioned_table(self, conn, table_name: str, partition_column: str):
        """Recreate an (empty) table as a RANGE-partitioned parent.

        The original table is renamed to a timestamped backup, the parent is
        recreated under the original name with the same column layout, and
        secondary indexes are carried over. On failure a best-effort rollback
        restores the backup under the original name, then the error is
        re-raised.
        """
        logger.info(f"Converting table {table_name} to partitioned table")

        # Timestamped backup name so repeated conversions never collide.
        backup_table = f"{table_name}_backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}"

        try:
            # 1. Capture the column definitions of the current table.
            table_schema = await self._get_table_schema(conn, table_name)

            # 2. Move the original out of the way.
            await conn.execute(f"ALTER TABLE {table_name} RENAME TO {backup_table}")

            # 3. Recreate under the original name as a partitioned parent.
            create_sql = f"""
                CREATE TABLE {table_name} (
                    {table_schema}
                ) PARTITION BY RANGE ({partition_column})
            """
            await conn.execute(create_sql)

            # 4. Carry over the secondary indexes (primary key excluded).
            await self._recreate_indexes_and_constraints(conn, table_name, backup_table)

            logger.info(f"Successfully converted {table_name} to partitioned table")
            logger.info(f"Backup table created: {backup_table}")

        except Exception as e:
            logger.error(f"Failed to convert table {table_name}: {e}")
            # Best-effort rollback; log (don't mask) secondary failures.
            try:
                await conn.execute(f"DROP TABLE IF EXISTS {table_name}")
                await conn.execute(f"ALTER TABLE {backup_table} RENAME TO {table_name}")
            except Exception as restore_error:  # fixed: was a bare except that hid errors
                logger.error(f"Rollback failed for {table_name}: {restore_error}")
            raise

    async def _get_table_schema(self, conn, table_name: str) -> str:
        """Return the table's column definitions as a DDL fragment.

        Only column name, type, length, NOT NULL and DEFAULT are reproduced;
        constraints are handled separately.
        """
        columns = await conn.fetch("""
            SELECT column_name, data_type, character_maximum_length,
                   is_nullable, column_default
            FROM information_schema.columns
            WHERE table_name = $1 AND table_schema = 'public'
            ORDER BY ordinal_position
        """, table_name)

        column_defs = []
        for col in columns:
            col_def = f"{col['column_name']} {col['data_type']}"

            if col['character_maximum_length']:
                col_def += f"({col['character_maximum_length']})"

            if col['is_nullable'] == 'NO':
                col_def += " NOT NULL"

            if col['column_default']:
                col_def += f" DEFAULT {col['column_default']}"

            column_defs.append(col_def)

        return ",\n    ".join(column_defs)

    async def _recreate_indexes_and_constraints(self, conn, new_table: str, backup_table: str):
        """Recreate the backup table's secondary indexes on ``new_table``.

        Index DDL is looked up under the original (pre-backup) table name;
        the primary key is skipped because PostgreSQL recreates it with the
        table. Individual index failures are logged, not raised.
        """
        indexes = await conn.fetch("""
            SELECT indexname, indexdef 
            FROM pg_indexes 
            WHERE tablename = $1 AND schemaname = 'public'
        """, backup_table.split('_backup_')[0])  # original table name

        for index in indexes:
            if index['indexname'].endswith('_pkey'):
                continue  # skip the primary key; created with the table

            new_indexdef = index['indexdef'].replace(backup_table, new_table)
            try:
                await conn.execute(new_indexdef)
            except Exception as e:
                logger.warning(f"Failed to recreate index {index['indexname']}: {e}")

    async def create_partition(self, table_name: str, start_date: date, end_date: date) -> str:
        """Create one partition of ``table_name`` covering [start_date, end_date).

        The partition name encodes the start date according to the table's
        configured partition type. Returns the partition name; creation is
        idempotent (an existing partition is returned unchanged).

        Raises ValueError if the table was never registered.
        """
        config = self.partition_configs.get(table_name)
        if not config:
            raise ValueError(f"Table {table_name} not registered for partition management")

        # Derive the partition name from the range start.
        if config.partition_type == "monthly":
            partition_name = f"{table_name}_{start_date.strftime('%Y_%m')}"
        elif config.partition_type == "daily":
            partition_name = f"{table_name}_{start_date.strftime('%Y_%m_%d')}"
        elif config.partition_type == "weekly":
            # %U = week-of-year number (Sunday-first).
            partition_name = f"{table_name}_{start_date.strftime('%Y_w%U')}"
        else:
            partition_name = f"{table_name}_{start_date.strftime('%Y')}"

        async with self.connection_pool.acquire() as conn:
            # Idempotency: skip if the partition already exists.
            exists = await conn.fetchval("""
                SELECT EXISTS (
                    SELECT FROM information_schema.tables 
                    WHERE table_schema = 'public' 
                    AND table_name = $1
                )
            """, partition_name)

            if exists:
                logger.debug(f"Partition {partition_name} already exists")
                return partition_name

            # RANGE bounds: FROM is inclusive, TO is exclusive.
            create_sql = f"""
                CREATE TABLE {partition_name} PARTITION OF {table_name}
                FOR VALUES FROM ('{start_date}') TO ('{end_date}')
            """

            await conn.execute(create_sql)

            if config.create_indexes:
                await self._create_partition_indexes(conn, partition_name, config)

            # Compression is only effective when TimescaleDB is installed.
            if config.compression_enabled:
                await self._setup_compression(conn, partition_name, config)

            logger.info(f"Created partition: {partition_name} ({start_date} to {end_date})")
            return partition_name

    async def _create_partition_indexes(self, conn, partition_name: str, config: PartitionConfig):
        """Create the standard index set on a freshly created partition.

        Index failures are logged and skipped so one bad definition does not
        abort partition creation.
        """
        try:
            # Baseline indexes useful for any time-series table.
            basic_indexes = [
                f"CREATE INDEX idx_{partition_name}_{config.partition_column} ON {partition_name} ({config.partition_column} DESC)",
                f"CREATE INDEX idx_{partition_name}_symbol ON {partition_name} (symbol) WHERE symbol IS NOT NULL",
                f"CREATE INDEX idx_{partition_name}_symbol_datetime ON {partition_name} (symbol, {config.partition_column} DESC)"
            ]

            # Extra indexes keyed off the VnPy table naming convention.
            if 'tick' in partition_name:
                basic_indexes.extend([
                    f"CREATE INDEX idx_{partition_name}_last_price ON {partition_name} (last_price) WHERE last_price > 0",
                    f"CREATE INDEX idx_{partition_name}_volume ON {partition_name} (volume) WHERE volume > 0"
                ])
            elif 'bar' in partition_name:
                basic_indexes.extend([
                    f"CREATE INDEX idx_{partition_name}_close_price ON {partition_name} (close_price) WHERE close_price > 0",
                    # NOTE(review): "interval" is a reserved word in SQL; if this
                    # index fails it is logged below — confirm column quoting.
                    f"CREATE INDEX idx_{partition_name}_interval ON {partition_name} (interval)"
                ])

            for index_sql in basic_indexes:
                try:
                    await conn.execute(index_sql)
                except Exception as e:
                    logger.warning(f"Failed to create index: {e}")

        except Exception as e:
            logger.error(f"Error creating indexes for {partition_name}: {e}")

    async def _setup_compression(self, conn, partition_name: str, config: PartitionConfig):
        """Attach a TimescaleDB compression policy to the partition.

        Silently a no-op when TimescaleDB is absent; failures are logged at
        DEBUG because compression is best-effort.
        """
        try:
            has_timescaledb = await conn.fetchval("""
                SELECT EXISTS (
                    SELECT FROM pg_extension 
                    WHERE extname = 'timescaledb'
                )
            """)

            if has_timescaledb:
                # NOTE(review): add_compression_policy targets hypertables;
                # confirm it applies to native partitions in this deployment.
                await conn.execute(f"""
                    SELECT add_compression_policy('{partition_name}', 
                        INTERVAL '{config.compression_after_days} days')
                """)
                logger.debug(f"Compression policy set for {partition_name}")

        except Exception as e:
            logger.debug(f"Compression setup failed for {partition_name}: {e}")

    async def create_future_partitions(self, table_name: str, months: int = 3) -> List[str]:
        """Create ``months`` consecutive partitions starting from today.

        Despite the historical parameter name, ``months`` is the *number of
        partitions* to create; each one spans the table's configured
        granularity. Fixed: previously only "monthly" and "daily" types were
        handled, so weekly/yearly tables silently got no future partitions.

        Raises ValueError for unregistered tables or unknown partition types.
        """
        config = self.partition_configs.get(table_name)
        if not config:
            raise ValueError(f"Table {table_name} not registered")

        created_partitions = []
        current_date = date.today()

        for _ in range(months):
            if config.partition_type == "monthly":
                start_date = date(current_date.year, current_date.month, 1)
                # Day 28 + 4 days always lands in the next month regardless
                # of month length; its 1st is the exclusive upper bound.
                next_month = start_date.replace(day=28) + timedelta(days=4)
                end_date = next_month.replace(day=1)
            elif config.partition_type == "daily":
                start_date = current_date
                end_date = start_date + timedelta(days=1)
            elif config.partition_type == "weekly":
                start_date = current_date
                end_date = start_date + timedelta(days=7)
            elif config.partition_type == "yearly":
                start_date = date(current_date.year, 1, 1)
                end_date = date(current_date.year + 1, 1, 1)
            else:
                raise ValueError(f"Unsupported partition type: {config.partition_type}")

            partition_name = await self.create_partition(table_name, start_date, end_date)
            created_partitions.append(partition_name)

            # Advance to the next range.
            current_date = end_date

        logger.info(f"Created {len(created_partitions)} future partitions for {table_name}")
        return created_partitions

    async def cleanup_old_partitions(self, table_name: str) -> List[str]:
        """Drop partitions older than the table's retention window.

        The cutoff approximates months as 30 days. Partition names that do
        not parse (e.g. "<table>_backup_*" tables) are skipped. Returns the
        names of the partitions dropped.
        """
        config = self.partition_configs.get(table_name)
        if not config:
            raise ValueError(f"Table {table_name} not registered")

        cutoff_date = date.today() - timedelta(days=config.retention_months * 30)
        dropped_partitions = []

        async with self.connection_pool.acquire() as conn:
            # List candidate children by name prefix. NOTE: "_" is a LIKE
            # wildcard, so this may also match similarly named tables; the
            # date parse below filters out anything unexpected.
            partitions = await conn.fetch("""
                SELECT schemaname, tablename 
                FROM pg_tables 
                WHERE tablename LIKE $1 || '_%'
                AND schemaname = 'public'
            """, table_name)

            for partition in partitions:
                partition_name = partition['tablename']

                partition_date = self._parse_partition_date(partition_name, config.partition_type)

                if partition_date and partition_date < cutoff_date:
                    try:
                        await conn.execute(f"DROP TABLE {partition_name}")
                        dropped_partitions.append(partition_name)
                        logger.info(f"Dropped old partition: {partition_name}")
                    except Exception as e:
                        logger.error(f"Failed to drop partition {partition_name}: {e}")

        return dropped_partitions

    def _parse_partition_date(self, partition_name: str, partition_type: str) -> Optional[date]:
        """Extract a partition's start date from its name suffix.

        Returns None for names that do not match the expected suffix (such
        as "<table>_backup_<timestamp>" tables), which callers treat as
        "skip". Fixed: weekly and yearly suffixes are now parsed too, so
        retention cleanup works for those partition types.
        """
        try:
            if partition_type == "monthly":
                # Format: table_name_2024_01
                match = re.search(r'(\d{4})_(\d{2})$', partition_name)
                if match:
                    return date(int(match.group(1)), int(match.group(2)), 1)

            elif partition_type == "daily":
                # Format: table_name_2024_01_15
                match = re.search(r'(\d{4})_(\d{2})_(\d{2})$', partition_name)
                if match:
                    return date(int(match.group(1)), int(match.group(2)), int(match.group(3)))

            elif partition_type == "weekly":
                # Format: table_name_2024_w05 (strftime %U week number)
                match = re.search(r'(\d{4})_w(\d{2})$', partition_name)
                if match:
                    # %U/%w round-trip yields the Sunday starting that week.
                    return datetime.strptime(
                        f"{match.group(1)}_{match.group(2)}_0", "%Y_%U_%w"
                    ).date()

            elif partition_type == "yearly":
                # Format: table_name_2024
                match = re.search(r'_(\d{4})$', partition_name)
                if match:
                    return date(int(match.group(1)), 1, 1)

            return None
        except Exception:
            # Malformed names / impossible dates are treated as unparseable.
            return None

    async def get_partition_info(self, table_name: str) -> List[PartitionInfo]:
        """Collect size, row-count and date-range stats for each partition.

        end_date is the last day covered (inclusive) — note this differs
        from the exclusive upper bound used when creating partitions.
        """
        partition_info = []

        async with self.connection_pool.acquire() as conn:
            partitions = await conn.fetch("""
                SELECT 
                    schemaname,
                    tablename,
                    pg_size_pretty(pg_total_relation_size(quote_ident(schemaname)||'.'||quote_ident(tablename))) as size,
                    pg_total_relation_size(quote_ident(schemaname)||'.'||quote_ident(tablename)) as size_bytes
                FROM pg_tables 
                WHERE tablename LIKE $1 || '_%'
                AND schemaname = 'public'
                ORDER BY tablename
            """, table_name)

            for partition in partitions:
                partition_name = partition['tablename']

                # COUNT(*) can be slow/failing on huge partitions; default to 0.
                try:
                    row_count = await conn.fetchval(f"SELECT COUNT(*) FROM {partition_name}")
                except Exception:
                    row_count = 0

                # Derive the covered date range from the partition name.
                config = self.partition_configs.get(table_name)
                if config:
                    start_date = self._parse_partition_date(partition_name, config.partition_type)
                    if start_date:
                        if config.partition_type == "monthly":
                            _, last_day = monthrange(start_date.year, start_date.month)
                            end_date = date(start_date.year, start_date.month, last_day)
                        elif config.partition_type == "daily":
                            end_date = start_date
                        else:
                            end_date = start_date
                    else:
                        start_date = end_date = date.today()
                else:
                    start_date = end_date = date.today()

                partition_info.append(PartitionInfo(
                    partition_name=partition_name,
                    table_name=table_name,
                    partition_type=config.partition_type if config else "unknown",
                    start_date=start_date,
                    end_date=end_date,
                    size_bytes=partition['size_bytes'],
                    row_count=row_count,
                    last_modified=datetime.now()  # collection time; real mtime not tracked
                ))

        return partition_info

    async def optimize_partitions(self, table_name: str) -> Dict[str, Any]:
        """ANALYZE each partition and gather index/size recommendations.

        Returns a dict with 'actions_taken' (statistics refreshed) and
        'recommendations' (unused indexes, oversized partitions).
        """
        optimization_results = {
            'table_name': table_name,
            'actions_taken': [],
            'recommendations': []
        }

        # Fixed: collect stats BEFORE acquiring a connection — previously
        # get_partition_info acquired a second pool connection while one was
        # already held, risking pool exhaustion with small pools.
        partitions_info = await self.get_partition_info(table_name)

        async with self.connection_pool.acquire() as conn:
            for partition_info in partitions_info:
                partition_name = partition_info.partition_name

                # 1. Refresh planner statistics.
                try:
                    await conn.execute(f"ANALYZE {partition_name}")
                    optimization_results['actions_taken'].append(f"Updated statistics for {partition_name}")
                except Exception as e:
                    logger.warning(f"Failed to analyze {partition_name}: {e}")

                # 2. Flag indexes that have never been scanned.
                index_usage = await conn.fetch("""
                    SELECT indexrelname, idx_scan, idx_tup_read, idx_tup_fetch
                    FROM pg_stat_user_indexes 
                    WHERE relname = $1
                """, partition_name)

                unused_indexes = [idx for idx in index_usage if idx['idx_scan'] == 0]
                if unused_indexes:
                    optimization_results['recommendations'].append(
                        f"Consider dropping unused indexes on {partition_name}: {[idx['indexrelname'] for idx in unused_indexes]}"
                    )

                # 3. Flag partitions above a fixed 10 GB threshold.
                if partition_info.size_bytes > 10 * 1024 * 1024 * 1024:  # 10GB
                    optimization_results['recommendations'].append(
                        f"Partition {partition_name} is large ({partition_info.size_bytes // (1024**3)}GB), consider sub-partitioning"
                    )

        return optimization_results

    async def get_maintenance_recommendations(self, table_name: str) -> Dict[str, Any]:
        """Analyze partition coverage and sizes; return actionable advice.

        Buckets: 'urgent_actions' (coverage gaps), 'optimization_suggestions'
        (insufficient future coverage) and 'performance_insights' (partitions
        far larger than average).
        """
        recommendations = {
            'table_name': table_name,
            'urgent_actions': [],
            'optimization_suggestions': [],
            'performance_insights': []
        }

        partitions_info = await self.get_partition_info(table_name)
        config = self.partition_configs.get(table_name)

        if not partitions_info:
            recommendations['urgent_actions'].append("No partitions found - check table configuration")
            return recommendations

        # Positive only when the newest partition already ended in the past.
        latest_partition_date = max(p.end_date for p in partitions_info)
        days_to_cover = (date.today() - latest_partition_date).days

        if days_to_cover > 7:
            recommendations['urgent_actions'].append(
                f"Missing recent partitions - latest partition ends {days_to_cover} days ago"
            )

        # How far into the future existing partitions reach.
        future_coverage = (latest_partition_date - date.today()).days
        if future_coverage < 30:
            recommendations['optimization_suggestions'].append(
                "Create more future partitions for better performance"
            )

        # Size-skew analysis: flag partitions more than 3x the average size.
        total_size = sum(p.size_bytes for p in partitions_info)
        avg_size = total_size / len(partitions_info)

        large_partitions = [p for p in partitions_info if p.size_bytes > avg_size * 3]
        if large_partitions:
            recommendations['performance_insights'].append(
                f"Found {len(large_partitions)} partitions significantly larger than average"
            )

        return recommendations

    async def auto_maintain_all_tables(self) -> Dict[str, Any]:
        """Run the full maintenance cycle for every registered table.

        Per table: ensure partitioned parent, create future partitions,
        drop expired partitions, then optimize. Failures are isolated per
        table and reported in the returned result dict.
        """
        maintenance_results = {}

        for table_name, config in self.partition_configs.items():
            try:
                await self.ensure_parent_table_partitioned(table_name, config.partition_column)

                future_partitions = await self.create_future_partitions(
                    table_name, config.auto_create_future
                )

                dropped_partitions = await self.cleanup_old_partitions(table_name)

                optimization_results = await self.optimize_partitions(table_name)

                maintenance_results[table_name] = {
                    'status': 'success',
                    'future_partitions_created': len(future_partitions),
                    'old_partitions_dropped': len(dropped_partitions),
                    'optimization_results': optimization_results
                }

            except Exception as e:
                logger.error(f"Maintenance failed for table {table_name}: {e}")
                maintenance_results[table_name] = {
                    'status': 'error',
                    'error': str(e)
                }

        return maintenance_results

    async def cleanup(self):
        """Close the connection pool (safe to call when never initialized)."""
        if self.connection_pool:
            await self.connection_pool.close()


# Predefined partition policies for the standard VnPy tables.
# Spec tuples: (table, partition column, granularity, retention months,
# number of future partitions to pre-create).
VNPY_PARTITION_CONFIGS = [
    PartitionConfig(
        table_name=name,
        partition_column=column,
        partition_type=granularity,
        retention_months=retention,
        auto_create_future=lookahead,
    )
    for name, column, granularity, retention, lookahead in (
        ("vnpy_tick_history", "datetime", "monthly", 24, 3),
        ("vnpy_bar_history", "datetime", "monthly", 36, 3),
        ("vnpy_order_history", "datetime", "monthly", 12, 2),
        ("vnpy_trade_history", "datetime", "monthly", 12, 2),
        ("strategy_backtests", "start_date", "yearly", 60, 2),
    )
]


async def setup_vnpy_partitions(connection_config: Dict[str, Any]) -> PostgreSQLPartitionManager:
    """Build a partition manager for the VnPy tables and run one
    maintenance pass.

    Initializes the connection pool, registers every predefined VnPy table
    config, then performs the initial maintenance cycle. Returns the ready
    manager; the caller owns its lifetime (call ``cleanup()`` when done).
    """
    mgr = PostgreSQLPartitionManager(connection_config)
    await mgr.initialize()

    # Register every predefined VnPy table.
    for table_config in VNPY_PARTITION_CONFIGS:
        await mgr.register_table(table_config)

    # Initial maintenance: partitioning, future partitions, cleanup, optimize.
    maintenance_results = await mgr.auto_maintain_all_tables()

    logger.info("VnPy partition setup completed")
    logger.info(f"Maintenance results: {maintenance_results}")

    return mgr


if __name__ == "__main__":
    async def main():
        """Example entry point: set up VnPy partitions and print a summary."""
        connection_config = {
            'host': 'localhost',
            'port': 5432,
            'user': 'redfire_analytics',
            'password': 'your_password',
            'database': 'redfire_analytics'
        }

        manager = await setup_vnpy_partitions(connection_config)

        try:
            # Summarize partition state for the two market-data tables.
            for table_name in ("vnpy_tick_history", "vnpy_bar_history"):
                partition_list = await manager.get_partition_info(table_name)
                print(f"\n{table_name} partitions:")
                for info in partition_list:
                    print(f"  {info.partition_name}: {info.row_count} rows, {info.size_bytes // (1024**2)}MB")

                # Surface anything that needs operator attention.
                advice = await manager.get_maintenance_recommendations(table_name)
                if advice['urgent_actions']:
                    print(f"  Urgent actions: {advice['urgent_actions']}")

        finally:
            await manager.cleanup()

    asyncio.run(main())
