"""
Topic管理API
提供Topic的创建、删除、配置管理等功能
"""

from fastapi import APIRouter, HTTPException, Depends
from typing import List, Dict, Any, Optional
from pydantic import BaseModel, Field
import asyncio
from datetime import datetime

from app.core.kafka_client import get_kafka_client
from app.utils.logger import logger

# Router for all topic-management endpoints, mounted under /api/topic-manager.
router = APIRouter(prefix="/api/topic-manager", tags=["topic-manager"])

# Pydantic model definitions (request/response schemas)
class TopicConfig(BaseModel):
    """Per-topic Kafka configuration overrides.

    Field names are snake_case versions of Kafka's dotted topic-config
    keys (e.g. ``cleanup_policy`` maps to ``cleanup.policy``); the route
    handlers perform that mapping before calling the admin client.
    Defaults are intended to mirror common Kafka broker defaults.
    """
    cleanup_policy: str = Field(default="delete", description="清理策略: delete, compact, compact,delete")
    retention_ms: int = Field(default=604800000, description="消息保留时间(毫秒), 默认7天")
    retention_bytes: int = Field(default=-1, description="消息保留大小(字节), -1表示无限制")
    segment_ms: int = Field(default=604800000, description="段文件滚动时间(毫秒)")
    segment_bytes: int = Field(default=1073741824, description="段文件大小(字节), 默认1GB")
    max_message_bytes: int = Field(default=1000012, description="最大消息大小(字节)")
    min_insync_replicas: int = Field(default=1, description="最小同步副本数")
    compression_type: str = Field(default="producer", description="压缩类型")
    delete_retention_ms: int = Field(default=86400000, description="删除保留时间(毫秒), 默认1天")
    file_delete_delay_ms: int = Field(default=60000, description="文件删除延迟(毫秒)")
    # 9223372036854775807 is Long.MAX_VALUE — Kafka's sentinel for "no limit".
    flush_messages: int = Field(default=9223372036854775807, description="刷新消息数")
    flush_ms: int = Field(default=9223372036854775807, description="刷新时间间隔(毫秒)")
    index_interval_bytes: int = Field(default=4096, description="索引间隔字节数")
    max_compaction_lag_ms: int = Field(default=9223372036854775807, description="最大压缩延迟(毫秒)")
    message_downconversion_enable: bool = Field(default=True, description="启用消息降级转换")
    message_format_version: str = Field(default="3.0-IV1", description="消息格式版本")
    message_timestamp_type: str = Field(default="CreateTime", description="消息时间戳类型")
    preallocate: bool = Field(default=False, description="预分配文件")
    unclean_leader_election_enable: bool = Field(default=False, description="允许不洁净的Leader选举")

class CreateTopicRequest(BaseModel):
    """Request body for POST /topics (create a new topic)."""
    name: str = Field(..., description="Topic名称")
    partitions: int = Field(default=1, description="分区数", ge=1)
    replication_factor: int = Field(default=1, description="副本数", ge=1)
    # Optional overrides; broker defaults apply when omitted.
    config: Optional[TopicConfig] = Field(default=None, description="Topic配置")

class UpdateTopicRequest(BaseModel):
    """Request body for PUT /topics/{name}; both fields are optional."""
    # Kafka only supports increasing the partition count, never shrinking.
    partitions: Optional[int] = Field(default=None, description="分区数(只能增加)", ge=1)
    config: Optional[TopicConfig] = Field(default=None, description="Topic配置")

class TopicInfo(BaseModel):
    """Basic per-topic summary returned by GET /topics."""
    name: str
    partitions: int  # partition count; 0 signals a failed lookup (stub entry)
    replication_factor: int  # replicas per partition; 0 signals a failed lookup
    config: Dict[str, Any]  # raw topic config as returned by the Kafka client
    created_time: Optional[str] = None  # never populated in this module

class TopicPartitionInfo(BaseModel):
    """Per-partition metadata (leader, replicas, offset range)."""
    partition: int  # partition id
    leader: int  # broker id of the current leader
    replicas: List[int]  # broker ids hosting replicas
    isr: List[int]  # In-Sync Replicas
    earliest_offset: int  # lowest available offset
    latest_offset: int  # next offset to be written (high watermark)

class TopicDetailInfo(BaseModel):
    """Detailed topic view returned by GET /topics/{name}."""
    name: str
    partitions: int
    replication_factor: int
    config: Dict[str, Any]
    # NOTE(review): currently always returned empty — per-partition detail
    # retrieval is not implemented yet in this module.
    partition_details: List[TopicPartitionInfo]
    created_time: Optional[str] = None  # never populated in this module

@router.get("/topics", response_model=List[TopicInfo])
async def list_topics():
    """List all user-visible topics with basic metadata.

    Internal topics (names starting with ``__``, e.g. ``__consumer_offsets``)
    are filtered out. Info for all topics is fetched concurrently; a failure
    for one topic degrades to a stub entry instead of failing the request.

    Returns:
        List[TopicInfo]: one entry per user topic (possibly empty).

    Raises:
        HTTPException: 500 when the topic list itself cannot be fetched.
    """
    try:
        kafka_client = get_kafka_client()

        # Full topic list from the broker.
        topics = await kafka_client.list_topics()

        # Hide Kafka-internal topics (double-underscore prefix).
        user_topics = [topic for topic in topics if not topic.startswith('__')]

        if not user_topics:
            logger.info("没有用户Topic")
            return []

        async def get_topic_info_safe(topic_name: str) -> TopicInfo:
            """Fetch one topic's info; degrade to a zeroed stub on failure
            so a single bad topic cannot break the whole listing."""
            try:
                return await _get_topic_info(kafka_client, topic_name)
            except Exception as e:
                logger.warning(f"获取Topic {topic_name} 信息失败: {e}")
                return TopicInfo(
                    name=topic_name,
                    partitions=0,
                    replication_factor=0,
                    config={}
                )

        # Fetch all topic infos concurrently for performance. Uses the
        # module-level asyncio import; the redundant function-local
        # `import asyncio` that shadowed it has been removed.
        topic_infos = await asyncio.gather(
            *[get_topic_info_safe(topic) for topic in user_topics],
            return_exceptions=False
        )

        logger.info(f"获取到 {len(topic_infos)} 个Topic")
        return topic_infos

    except Exception as e:
        logger.error(f"获取Topic列表失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取Topic列表失败: {str(e)}")

@router.get("/topics/{topic_name}", response_model=TopicDetailInfo)
async def get_topic_detail(topic_name: str):
    """Return detailed information for a single topic.

    Raises:
        HTTPException: 404 when the topic does not exist; 500 for any
            other failure while querying Kafka.
    """
    try:
        client = get_kafka_client()

        # Existence check first, so callers get a clean 404 rather than
        # a generic lookup failure from the detail query below.
        existing = await client.list_topics()
        if topic_name not in existing:
            raise HTTPException(status_code=404, detail=f"Topic '{topic_name}' 不存在")

        detail = await _get_topic_detail_info(client, topic_name)
        logger.info(f"获取Topic {topic_name} 详细信息成功")
        return detail

    except HTTPException:
        # Re-raise deliberate HTTP errors (e.g. the 404 above) untouched.
        raise
    except Exception as e:
        logger.error(f"获取Topic {topic_name} 详细信息失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取Topic详细信息失败: {str(e)}")

@router.post("/topics", response_model=dict)
async def create_topic(request: CreateTopicRequest):
    """Create a new Kafka topic.

    Duplicate names are rejected with 400. Replication-factor problems
    are surfaced as a structured 400 (including a suggested value); any
    other broker failure becomes a 500.
    """
    try:
        client = get_kafka_client()

        # A duplicate name is a client error, not a server failure.
        existing = await client.list_topics()
        if request.name in existing:
            raise HTTPException(status_code=400, detail=f"Topic '{request.name}' 已存在")

        # Translate the snake_case pydantic model into Kafka's dotted
        # config keys: booleans become "true"/"false", numbers become
        # strings, plain strings pass through unchanged.
        topic_config = {}
        if request.config:
            for field_name in (
                "cleanup_policy", "retention_ms", "retention_bytes",
                "segment_ms", "segment_bytes", "max_message_bytes",
                "min_insync_replicas", "compression_type",
                "delete_retention_ms", "file_delete_delay_ms",
                "flush_messages", "flush_ms", "index_interval_bytes",
                "max_compaction_lag_ms", "message_downconversion_enable",
                "message_format_version", "message_timestamp_type",
                "preallocate", "unclean_leader_election_enable",
            ):
                raw = getattr(request.config, field_name)
                if isinstance(raw, bool):
                    rendered = str(raw).lower()
                elif isinstance(raw, str):
                    rendered = raw
                else:
                    rendered = str(raw)
                topic_config[field_name.replace("_", ".")] = rendered

        result = await client.create_topic(
            topic_name=request.name,
            num_partitions=request.partitions,
            replication_factor=request.replication_factor,
            config=topic_config
        )

        if not result["success"]:
            error_msg = result.get("error", "未知错误")
            logger.error(f"Topic {request.name} 创建失败: {error_msg}")

            # Replication-factor errors get a structured 400 so the UI
            # can suggest a workable value; everything else is a 500.
            if "副本数" in error_msg and "broker数量" in error_msg:
                raise HTTPException(
                    status_code=400,
                    detail={
                        "error": error_msg,
                        "broker_count": result.get("broker_count", 1),
                        "suggested_replication_factor": result.get("suggested_replication_factor", 1)
                    }
                )
            raise HTTPException(status_code=500, detail=error_msg)

        # Success path: attach any verification warnings to the message.
        verification = result.get("verification", {})
        warnings = verification.get("warnings", [])

        message = f"Topic '{request.name}' 创建成功"
        if warnings:
            message += f"，但有以下警告: {'; '.join(warnings)}"

        logger.info(f"Topic {request.name} 创建成功: {request.partitions}分区, {request.replication_factor}副本")

        return {
            "success": True,
            "message": message,
            "topic_name": request.name,
            "partitions": request.partitions,
            "replication_factor": request.replication_factor,
            "broker_count": result.get("broker_count", 1),
            "verification": verification,
            "warnings": warnings
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"创建Topic {request.name} 失败: {e}")
        raise HTTPException(status_code=500, detail=f"创建Topic失败: {str(e)}")

@router.put("/topics/{topic_name}", response_model=dict)
async def update_topic(topic_name: str, request: UpdateTopicRequest):
    """Update a topic's partition count and/or configuration.

    Partition counts can only grow (Kafka does not support shrinking).
    Config changes go through the admin client; when that fails, a
    structured 500 is returned containing an equivalent
    ``kafka-configs.sh`` command the operator can run manually.

    Raises:
        HTTPException: 404 for unknown topics, 400 for a partition
            decrease, 500 for any failed update.
    """
    try:
        kafka_client = get_kafka_client()
        
        # Ensure the topic exists before attempting any change.
        topics = await kafka_client.list_topics()
        if topic_name not in topics:
            raise HTTPException(status_code=404, detail=f"Topic '{topic_name}' 不存在")
        
        updated_items = []
        
        # Partition-count update (Kafka only allows increases).
        if request.partitions:
            # Compare the request against the live partition count.
            current_partitions = await _get_topic_partition_count(kafka_client, topic_name)
            if request.partitions < current_partitions:
                raise HTTPException(
                    status_code=400, 
                    detail=f"分区数只能增加，当前: {current_partitions}, 请求: {request.partitions}"
                )
            elif request.partitions > current_partitions:
                # Grow the partition count; an equal count is a silent no-op.
                success = await kafka_client.alter_topic_partitions(topic_name, request.partitions)
                if success:
                    updated_items.append(f"分区数: {current_partitions} -> {request.partitions}")
                else:
                    raise HTTPException(status_code=500, detail="更新分区数失败")
        
        # Config update: map snake_case model fields to Kafka's dotted
        # config keys; booleans are rendered as "true"/"false" strings.
        if request.config:
            config_updates = {
                "cleanup.policy": request.config.cleanup_policy,
                "retention.ms": str(request.config.retention_ms),
                "retention.bytes": str(request.config.retention_bytes),
                "segment.ms": str(request.config.segment_ms),
                "segment.bytes": str(request.config.segment_bytes),
                "max.message.bytes": str(request.config.max_message_bytes),
                "min.insync.replicas": str(request.config.min_insync_replicas),
                "compression.type": request.config.compression_type,
                "delete.retention.ms": str(request.config.delete_retention_ms),
                "file.delete.delay.ms": str(request.config.file_delete_delay_ms),
                "flush.messages": str(request.config.flush_messages),
                "flush.ms": str(request.config.flush_ms),
                "index.interval.bytes": str(request.config.index_interval_bytes),
                "max.compaction.lag.ms": str(request.config.max_compaction_lag_ms),
                "message.downconversion.enable": str(request.config.message_downconversion_enable).lower(),
                "message.format.version": request.config.message_format_version,
                "message.timestamp.type": request.config.message_timestamp_type,
                "preallocate": str(request.config.preallocate).lower(),
                "unclean.leader.election.enable": str(request.config.unclean_leader_election_enable).lower(),
            }
            
            success = await kafka_client.alter_topic_config(topic_name, config_updates)
            if success:
                updated_items.append("配置已更新")
            else:
                # Build a ready-to-run kafka-configs.sh command as a
                # manual fallback for the operator.
                config_args = []
                for key, value in config_updates.items():
                    config_args.append(f'{key}={value}')

                # NOTE(review): the bootstrap-server address is hard-coded
                # here — presumably this deployment's broker; confirm
                # before reusing this module elsewhere.
                command_suggestion = f"kafka-configs.sh --bootstrap-server 192.168.37.11:9092 --entity-type topics --entity-name {topic_name} --alter --add-config {','.join(config_args)}"

                # Structured payload so the UI can render the manual steps.
                error_detail = {
                    "error": "配置更新失败",
                    "message": f"Topic '{topic_name}' 配置更新失败。当前环境不支持直接配置更新。",
                    "reason": "当前环境缺少必要的Kafka管理权限或工具",
                    "solution": "请使用命令行工具手动更新配置",
                    "command": command_suggestion,
                    "steps": [
                        "1. 确保已安装Kafka命令行工具",
                        "2. 复制下方命令到终端执行",
                        "3. 验证配置是否更新成功"
                    ]
                }

                raise HTTPException(
                    status_code=500,
                    detail=error_detail
                )
        
        if not updated_items:
            return {"success": True, "message": "没有需要更新的项目"}
        
        logger.info(f"Topic {topic_name} 更新成功: {', '.join(updated_items)}")
        return {
            "success": True,
            "message": f"Topic '{topic_name}' 更新成功",
            "updated_items": updated_items
        }
        
    except HTTPException:
        # Propagate deliberate HTTP errors unchanged.
        raise
    except Exception as e:
        logger.error(f"更新Topic {topic_name} 失败: {e}")
        raise HTTPException(status_code=500, detail=f"更新Topic失败: {str(e)}")

@router.delete("/topics/{topic_name}", response_model=dict)
async def delete_topic(topic_name: str):
    """Delete an existing topic.

    Raises:
        HTTPException: 404 for unknown topics, 500 when deletion fails.
    """
    try:
        client = get_kafka_client()

        # Verify existence up front so the caller gets a precise 404.
        if topic_name not in await client.list_topics():
            raise HTTPException(status_code=404, detail=f"Topic '{topic_name}' 不存在")

        # Ask the broker to drop the topic; a falsy result means failure.
        if not await client.delete_topic(topic_name):
            raise HTTPException(status_code=500, detail="Topic删除失败")

        logger.info(f"Topic {topic_name} 删除成功")
        return {
            "success": True,
            "message": f"Topic '{topic_name}' 删除成功"
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"删除Topic {topic_name} 失败: {e}")
        raise HTTPException(status_code=500, detail=f"删除Topic失败: {str(e)}")

# Helper functions
async def _get_topic_info(kafka_client, topic_name: str) -> TopicInfo:
    """Fetch basic info (config, partition count, replication factor) for one topic.

    Best-effort: each lookup step has its own fallback so a partial
    failure still yields a usable TopicInfo (defaults of 1 partition /
    1 replica). On total failure a stub with zeroed counts and an empty
    config is returned instead of raising.
    """
    try:
        # Live topic configuration straight from the Kafka client.
        real_config = await kafka_client.get_topic_config(topic_name)

        # Optimistic defaults used when the metadata lookups below fail.
        partitions = 1
        replication_factor = 1

        try:
            # Same partition-lookup path as the producer API, for consistency.
            partition_list = await kafka_client.get_topic_partitions(topic_name)
            if partition_list:
                partitions = len(partition_list)
                logger.info(f"成功获取Topic {topic_name} 分区数: {partitions}")

            # Replication factor needs the admin client; fetched in its own
            # try-block so its failure does not discard the partition count.
            try:
                admin = await kafka_client.get_admin_client()
                topic_metadata = await admin.describe_topics([topic_name])

                if topic_name in topic_metadata:
                    topic_info = topic_metadata[topic_name]
                    if hasattr(topic_info, 'partitions') and topic_info.partitions:
                        # Uses the first partition's replica list — assumes all
                        # partitions share one replication factor; TODO confirm.
                        first_partition = topic_info.partitions[0]
                        if hasattr(first_partition, 'replicas'):
                            replication_factor = len(first_partition.replicas)
                            logger.info(f"成功获取Topic {topic_name} 副本数: {replication_factor}")
            except Exception as replica_error:
                logger.warning(f"获取Topic {topic_name} 副本数失败，使用默认值: {replica_error}")

        except Exception as partition_error:
            logger.warning(f"获取Topic {topic_name} 分区信息失败，使用默认值: {partition_error}")

        return TopicInfo(
            name=topic_name,
            partitions=partitions,
            replication_factor=replication_factor,
            config=real_config
        )

    except Exception as e:
        # Total failure: return a recognizable stub rather than raising.
        logger.warning(f"获取Topic {topic_name} 信息失败: {e}")
        return TopicInfo(
            name=topic_name,
            partitions=0,
            replication_factor=0,
            config={}
        )

async def _get_topic_detail_info(kafka_client, topic_name: str) -> TopicDetailInfo:
    """Assemble a TopicDetailInfo from the basic-info helper.

    Per-partition details are not implemented yet, so
    ``partition_details`` is always returned empty for now.
    """
    try:
        summary = await _get_topic_info(kafka_client, topic_name)
        return TopicDetailInfo(
            name=topic_name,
            partitions=summary.partitions,
            replication_factor=summary.replication_factor,
            config=summary.config,
            partition_details=[]  # TODO: populate real per-partition data
        )
    except Exception as e:
        logger.error(f"获取Topic {topic_name} 详细信息失败: {e}")
        raise

async def _get_topic_partition_count(kafka_client, topic_name: str) -> int:
    """Return the topic's current partition count, defaulting to 1.

    Any lookup failure (or an empty result) is treated as a single
    partition so callers always receive a usable number; failures are
    logged at warning level rather than raised.
    """
    try:
        found = await kafka_client.get_topic_partitions(topic_name)
        return len(found) if found else 1
    except Exception as e:
        logger.warning(f"获取Topic {topic_name} 分区数失败: {e}")
        return 1
