"""Hot topic content generation API routes."""

import logging
from typing import Dict, Any, List, Optional
from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks
from pydantic import BaseModel, Field
from datetime import datetime

from ..chains.hot_content_chain import HotContentGenerationChain
from ..core.dependencies import get_mongodb_client, get_redis_client
from typing import Optional

# Temporary user-retrieval function, used for testing.
async def get_current_user_optional():
    """Temporary stub: return a default test user (or None when unauthenticated)."""
    default_user = {"user_id": "test_user", "username": "test"}
    return default_user
from ..models.base import ModelResponse

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/hot-content", tags=["hot-content"])


class HotContentGenerationRequest(BaseModel):
    """Request model for hot topic content generation.

    Identifies a hot-topic item, carries the generation options, and may
    include per-request model credentials (provider / api_key / api_base)
    that the endpoints use to build a dynamic LangChain model.
    """
    
    hot_item_id: str = Field(..., description="Hot topic item ID")
    platform: str = Field(..., description="Platform name (weibo, douyin, baidu, etc.)")
    writing_angle: str = Field(default="news_report", description="Writing angle")
    user_prompt: Optional[str] = Field(None, description="Additional user prompt")
    model_name: str = Field(default="gpt-3.5-turbo", description="Model to use")
    temperature: float = Field(default=0.7, description="Generation temperature")
    max_tokens: int = Field(default=2000, description="Maximum tokens to generate")
    stream: bool = Field(default=False, description="Enable streaming response")
    # Optional per-request model configuration; only used by the endpoints
    # when BOTH api_key and api_base are provided.
    provider: Optional[str] = Field(None, description="Model provider")
    api_key: Optional[str] = Field(None, description="API key for the model")
    api_base: Optional[str] = Field(None, description="API base URL for the model")


class HotContentGenerationResponse(BaseModel):
    """Response model for hot topic content generation.

    Bundles the generated text with the hot-topic summary, metadata about
    how it was generated, and an (estimated) token-usage breakdown.
    """
    
    content: str = Field(..., description="Generated content")
    hot_item_info: Dict[str, Any] = Field(..., description="Hot topic information")
    generation_metadata: Dict[str, Any] = Field(..., description="Generation metadata")
    # NOTE: usage values are rough character-based estimates, not provider counts.
    usage: Dict[str, Any] = Field(..., description="Token usage information")


class TrendAnalysisRequest(BaseModel):
    """Request model for hot topic trend analysis.

    Identifies the hot-topic item to analyze and the model to run with.
    """
    
    hot_item_id: str = Field(..., description="Hot topic item ID")
    platform: str = Field(..., description="Platform name")
    model_name: str = Field(default="gpt-3.5-turbo", description="Model to use")


class TrendAnalysisResponse(BaseModel):
    """Response model for trend analysis.

    Carries the generated analysis text, the underlying hot-topic data
    (with trend information), and the timestamp of the analysis.
    """
    
    analysis: str = Field(..., description="Trend analysis content")
    hot_data: Dict[str, Any] = Field(..., description="Hot topic data with trends")
    timestamp: str = Field(..., description="Analysis timestamp")


@router.post("/generate", response_model=HotContentGenerationResponse)
async def generate_hot_content(
    request: HotContentGenerationRequest,
    current_user: dict = Depends(get_current_user_optional),
    mongodb_client = Depends(get_mongodb_client),
    redis_client = Depends(get_redis_client)
):
    """Generate content based on hot topic with LangChain.

    Loads the hot-topic item, optionally wires a caller-supplied model
    (Claude adapter or any OpenAI-compatible endpoint) into the chain,
    collects the full generation output, and returns it together with
    the hot-item summary, generation metadata, and an estimated usage.

    Raises:
        HTTPException 400: if ``stream=True`` (use /generate-stream).
        HTTPException 404: if the hot topic item cannot be found.
        HTTPException 500: on any other generation failure.
    """
    # Reject streaming requests up front, before any chain/model setup.
    # (Previously this was raised inside the try block and the blanket
    # `except Exception` converted the intended 400 into a 500.)
    if request.stream:
        raise HTTPException(
            status_code=400, 
            detail="Streaming not supported in this endpoint. Use /generate-stream instead."
        )
    
    try:
        logger.info(f"Generating hot content for item {request.hot_item_id} on {request.platform}")
        
        # Initialize hot content generation chain and attach the Redis
        # client used by its hot-data loader.
        chain = HotContentGenerationChain(mongodb_client)
        chain.hot_data_loader.redis_client = redis_client
        
        # If API credentials are provided, create a dynamic model for this request.
        if request.api_key and request.api_base:
            # Claude/Anthropic endpoints use a different wire format than
            # OpenAI-compatible ones, so they need a dedicated adapter.
            is_claude = (request.provider and request.provider.lower() in ['claude', 'anthropic']) or \
                       (request.model_name and 'claude' in request.model_name.lower())
            
            if is_claude:
                logger.info(f"Detected Claude model: {request.model_name}")
                
                # Imported lazily so the adapter is only required when used.
                from ..models.claude_langchain_adapter import ClaudeLangChainAdapter
                
                custom_model = ClaudeLangChainAdapter(
                    model_name=request.model_name,
                    api_key=request.api_key,
                    api_base=request.api_base,
                    temperature=request.temperature,
                    max_tokens=request.max_tokens
                )
                
                # Register the per-request model under its name so the chain
                # resolves it instead of a server-default model.
                chain.models[request.model_name] = custom_model
                logger.info(f"Using Claude adapter for: {request.model_name} at {request.api_base}")
            else:
                # OpenAI-compatible models go through LangChain's ChatOpenAI.
                from langchain_openai import ChatOpenAI
                
                logger.info(f"Original API base: {request.api_base}")
                logger.info(f"Provider: {request.provider}")
                logger.info(f"Model name: {request.model_name}")
                
                # Use the API base URL essentially as provided by the frontend.
                api_base = request.api_base.rstrip('/')
                
                # Only append /v1 when it's clearly missing and this is not a
                # Doubao/Volces endpoint (those do not use the /v1 suffix).
                if "volces.com" not in api_base and "endpoints" not in api_base:
                    if not api_base.endswith('/v1') and '/v1' not in api_base:
                        api_base = api_base + '/v1'
                
                logger.info(f"Final API base URL: {api_base}")
                
                custom_model = ChatOpenAI(
                    model=request.model_name,
                    temperature=request.temperature,
                    max_tokens=request.max_tokens,
                    streaming=True,
                    api_key=request.api_key,
                    base_url=api_base
                )
                
                chain.models[request.model_name] = custom_model
                logger.info(f"Using OpenAI-compatible model: {request.model_name} at {api_base}")
        
        # Load the hot item BEFORE generating so a missing item fails fast
        # with a 404 instead of wasting a full model call first.
        hot_item_info = await chain.hot_data_loader.load_hot_item(
            request.hot_item_id, 
            request.platform
        )
        
        if not hot_item_info:
            raise HTTPException(
                status_code=404, 
                detail=f"Hot topic {request.hot_item_id} not found on {request.platform}"
            )
        
        # Drain the streaming generator into a single string.
        content_chunks = []
        async for chunk in chain.generate_with_hot_context(
            hot_item_id=request.hot_item_id,
            platform=request.platform,
            writing_angle=request.writing_angle,
            user_prompt=request.user_prompt,
            model_name=request.model_name,
            temperature=request.temperature,
            max_tokens=request.max_tokens
        ):
            content_chunks.append(chunk)
        
        generated_content = "".join(content_chunks)
        
        # Rough token estimate (~4 chars/token) — not an exact provider count.
        prompt_tokens = len(request.user_prompt or "") // 4 + 200  # Estimated context tokens
        completion_tokens = len(generated_content) // 4
        total_tokens = prompt_tokens + completion_tokens
        
        return HotContentGenerationResponse(
            content=generated_content,
            hot_item_info={
                "title": hot_item_info.get("title", ""),
                "platform": hot_item_info.get("platform", ""),
                "heat_score": hot_item_info.get("heat_score", 0),
                "publish_time": hot_item_info.get("publish_time", ""),
                "url": hot_item_info.get("url", "")
            },
            generation_metadata={
                "writing_angle": request.writing_angle,
                "model_name": request.model_name,
                "temperature": request.temperature,
                "max_tokens": request.max_tokens,
                "generated_at": datetime.utcnow().isoformat(),
                "user_id": current_user.get("user_id")
            },
            usage={
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": total_tokens
            }
        )
        
    except HTTPException:
        # Re-raise intentional HTTP errors (e.g. the 404 above) untouched
        # instead of letting the handler below rewrap them as 500s.
        raise
    except Exception as e:
        logger.error(f"Hot content generation failed: {e}")
        raise HTTPException(status_code=500, detail=f"Content generation failed: {str(e)}")


@router.post("/generate-stream")
async def generate_hot_content_stream(
    request: HotContentGenerationRequest,
    current_user: dict = Depends(get_current_user_optional),
    mongodb_client = Depends(get_mongodb_client),
    redis_client = Depends(get_redis_client)
):
    """Generate hot topic content with streaming response.

    Emits Server-Sent Events (SSE): first a ``hot_item_info`` event, then
    one ``content`` event per generated chunk, then a ``completion`` event
    followed by a ``[DONE]`` sentinel. Because HTTP headers are sent before
    generation starts, errors are reported as an in-stream payload with
    ``finished: true`` rather than as an HTTP error status.
    """
    
    from fastapi.responses import StreamingResponse
    import json
    
    async def generate_stream():
        # Async generator producing the SSE stream; any exception becomes a
        # final in-stream error event (see except block at the bottom).
        try:
            logger.info(f"Streaming hot content for item {request.hot_item_id} on {request.platform}")
            
            # Initialize hot content generation chain and attach the Redis
            # client used by its hot-data loader.
            chain = HotContentGenerationChain(mongodb_client)
            chain.hot_data_loader.redis_client = redis_client
            
            # If API credentials are provided, create a dynamic model for this request.
            if request.api_key and request.api_base:
                # Claude/Anthropic endpoints use a different wire format than
                # OpenAI-compatible ones, so they need a dedicated adapter.
                is_claude = (request.provider and request.provider.lower() in ['claude', 'anthropic']) or \
                           (request.model_name and 'claude' in request.model_name.lower())
                
                if is_claude:
                    # For Claude, use custom adapter
                    logger.info(f"[Stream] Detected Claude model: {request.model_name}")
                    
                    from ..models.claude_langchain_adapter import ClaudeLangChainAdapter
                    
                    custom_model = ClaudeLangChainAdapter(
                        model_name=request.model_name,
                        api_key=request.api_key,
                        api_base=request.api_base,
                        temperature=request.temperature,
                        max_tokens=request.max_tokens
                    )
                    
                    # Register the per-request model under its name so the
                    # chain resolves it instead of a server-default model.
                    chain.models[request.model_name] = custom_model
                    logger.info(f"[Stream] Using Claude adapter for: {request.model_name} at {request.api_base}")
                else:
                    # For OpenAI-compatible models
                    from langchain_openai import ChatOpenAI
                    
                    logger.info(f"[Stream] Original API base: {request.api_base}")
                    logger.info(f"[Stream] Provider: {request.provider}")
                    logger.info(f"[Stream] Model name: {request.model_name}")
                    
                    api_base = request.api_base.rstrip('/')
                    
                    # Only add /v1 if it's clearly missing and not a Doubao/Volces endpoint
                    if "volces.com" not in api_base and "endpoints" not in api_base:
                        if not api_base.endswith('/v1') and '/v1' not in api_base:
                            api_base = api_base + '/v1'
                    
                    logger.info(f"[Stream] Final API base URL: {api_base}")
                    
                    custom_model = ChatOpenAI(
                        model=request.model_name,
                        temperature=request.temperature,
                        max_tokens=request.max_tokens,
                        streaming=True,
                        api_key=request.api_key,
                        base_url=api_base
                    )
                    
                    chain.models[request.model_name] = custom_model
                    logger.info(f"[Stream] Using OpenAI-compatible model: {request.model_name} at {api_base}")
            
            # Load hot item info first so a missing item ends the stream
            # immediately with an error event.
            hot_item_info = await chain.hot_data_loader.load_hot_item(
                request.hot_item_id, 
                request.platform
            )
            
            if not hot_item_info:
                error_data = {
                    "error": f"Hot topic {request.hot_item_id} not found on {request.platform}",
                    "finished": True
                }
                yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
                return
            
            # Send hot item info as the first event so the client can render
            # the topic header before content starts arriving.
            info_data = {
                "type": "hot_item_info",
                "data": {
                    "title": hot_item_info.get("title", ""),
                    "platform": hot_item_info.get("platform", ""),
                    "heat_score": hot_item_info.get("heat_score", 0),
                    "publish_time": hot_item_info.get("publish_time", "")
                },
                "finished": False
            }
            yield f"data: {json.dumps(info_data, ensure_ascii=False)}\n\n"
            
            # Stream content generation, one SSE event per chunk.
            async for chunk in chain.generate_with_hot_context(
                hot_item_id=request.hot_item_id,
                platform=request.platform,
                writing_angle=request.writing_angle,
                user_prompt=request.user_prompt,
                model_name=request.model_name,
                temperature=request.temperature,
                max_tokens=request.max_tokens
            ):
                data = {
                    "type": "content",
                    "content": chunk,
                    "finished": False
                }
                yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
            
            # Send completion signal with generation metadata, then the
            # OpenAI-style [DONE] sentinel that clients use to stop reading.
            completion_data = {
                "type": "completion",
                "finished": True,
                "metadata": {
                    "writing_angle": request.writing_angle,
                    "model_name": request.model_name,
                    "generated_at": datetime.utcnow().isoformat()
                }
            }
            yield f"data: {json.dumps(completion_data, ensure_ascii=False)}\n\n"
            yield "data: [DONE]\n\n"
            
        except Exception as e:
            # Headers are already sent; report the failure in-stream.
            logger.error(f"Streaming generation failed: {e}")
            error_data = {
                "error": str(e),
                "finished": True
            }
            yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
    
    return StreamingResponse(
        generate_stream(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Access-Control-Allow-Origin": "*",
        }
    )


@router.post("/analyze-trends", response_model=TrendAnalysisResponse)
async def analyze_hot_topic_trends(
    request: TrendAnalysisRequest,
    current_user: dict = Depends(get_current_user_optional),
    mongodb_client = Depends(get_mongodb_client),
    redis_client = Depends(get_redis_client)
):
    """Analyze hot topic trends and return the model's insights.

    Delegates the analysis to HotContentGenerationChain and maps its
    result dict onto the TrendAnalysisResponse schema.
    """
    try:
        logger.info(f"Analyzing trends for item {request.hot_item_id} on {request.platform}")

        # Build the chain and attach the Redis-backed hot-data loader.
        analysis_chain = HotContentGenerationChain(mongodb_client)
        analysis_chain.hot_data_loader.redis_client = redis_client

        result = await analysis_chain.analyze_hot_topic_trends(
            hot_item_id=request.hot_item_id,
            platform=request.platform,
            model_name=request.model_name,
        )

        return TrendAnalysisResponse(
            analysis=result["analysis"],
            hot_data=result["hot_data"],
            timestamp=result["timestamp"],
        )

    except Exception as e:
        logger.error(f"Trend analysis failed: {e}")
        raise HTTPException(status_code=500, detail=f"Trend analysis failed: {str(e)}")


@router.get("/writing-angles")
async def get_writing_angles(
    current_user: dict = Depends(get_current_user_optional)
):
    """Return the supported writing angles plus display labels for the UI."""
    # Human-readable (Chinese) labels for each supported writing angle.
    angle_descriptions = {
        "news_report": "新闻报道 - 客观陈述事实，提供全面信息",
        "deep_analysis": "深度分析 - 深入探讨背景、原因和影响",
        "opinion_piece": "观点文章 - 表达明确立场和个人见解",
        "feature_story": "专题报道 - 多角度展现事件全貌",
        "commentary": "时事评论 - 针对热点进行评论和解读",
    }

    try:
        # The chain itself is the source of truth for the supported angles.
        supported_angles = HotContentGenerationChain().get_supported_writing_angles()
        return {
            "writing_angles": supported_angles,
            "descriptions": angle_descriptions,
        }

    except Exception as e:
        logger.error(f"Failed to get writing angles: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/topic-types")
async def get_topic_types(
    current_user: dict = Depends(get_current_user_optional)
):
    """Return the supported hot topic types plus display labels for the UI."""
    # Human-readable (Chinese) labels for each supported topic type.
    type_descriptions = {
        "event": "事件类 - 突发事件、新闻事件",
        "topic": "话题类 - 社会话题、讨论话题",
        "person": "人物类 - 名人动态、人物故事",
        "trend": "趋势类 - 行业趋势、社会趋势",
        "controversy": "争议类 - 争议话题、辩论话题",
    }

    try:
        # The chain itself is the source of truth for the supported types.
        supported_types = HotContentGenerationChain().get_hot_topic_types()
        return {
            "topic_types": supported_types,
            "descriptions": type_descriptions,
        }

    except Exception as e:
        logger.error(f"Failed to get topic types: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/related-items/{item_id}")
async def get_related_hot_items(
    item_id: str,
    platform: str,
    limit: int = 5,
    current_user: dict = Depends(get_current_user_optional),
    redis_client = Depends(get_redis_client)
):
    """Return up to ``limit`` hot items related to ``item_id`` for inspiration."""
    try:
        from ..loaders.hot_data import HotDataLoader

        data_loader = HotDataLoader(redis_client)
        items = await data_loader.get_related_hot_items(item_id, platform, limit)

        return {
            "related_items": items,
            "total": len(items),
        }

    except Exception as e:
        logger.error(f"Failed to get related items: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/batch-generate")
async def batch_generate_hot_content(
    requests: List[HotContentGenerationRequest],
    background_tasks: BackgroundTasks,
    current_user: dict = Depends(get_current_user_optional),
    mongodb_client = Depends(get_mongodb_client),
    redis_client = Depends(get_redis_client)
):
    """Generate content for up to 10 hot topics in one call.

    Items are processed sequentially; per-item failures are captured in the
    result list (``success: false``) without aborting the whole batch.
    """
    # Cap batch size to protect the generation backend.
    if len(requests) > 10:
        raise HTTPException(
            status_code=400, 
            detail="Batch size cannot exceed 10 items"
        )
    
    try:
        # One shared chain instance for the whole batch.
        chain = HotContentGenerationChain(mongodb_client)
        chain.hot_data_loader.redis_client = redis_client
        
        results = []
        for i, request in enumerate(requests):
            try:
                # Drain the streaming generator into one string per item.
                pieces = []
                async for piece in chain.generate_with_hot_context(
                    hot_item_id=request.hot_item_id,
                    platform=request.platform,
                    writing_angle=request.writing_angle,
                    user_prompt=request.user_prompt,
                    model_name=request.model_name,
                    temperature=request.temperature,
                    max_tokens=request.max_tokens
                ):
                    pieces.append(piece)
                
                results.append({
                    "index": i,
                    "success": True,
                    "content": "".join(pieces),
                    "hot_item_id": request.hot_item_id,
                    "platform": request.platform
                })
            except Exception as e:
                # Record the failure and keep processing remaining items.
                logger.error(f"Batch item {i} failed: {e}")
                results.append({
                    "index": i,
                    "success": False,
                    "error": str(e),
                    "hot_item_id": request.hot_item_id,
                    "platform": request.platform
                })
        
        successful = sum(1 for r in results if r["success"])
        return {
            "results": results,
            "total_requested": len(requests),
            "successful": successful,
            "failed": len(results) - successful
        }
        
    except Exception as e:
        logger.error(f"Batch generation failed: {e}")
        raise HTTPException(status_code=500, detail=f"Batch generation failed: {str(e)}")
