"""
API routes for hot topic based creation features
"""
import logging
from typing import Any, Dict, List, Optional, Tuple

import redis.asyncio as redis
from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException
from pydantic import BaseModel, Field

from ..analyzers.perspective_analyzer import PerspectiveAnalyzer
from ..loaders.hot_data import HotTopicLoader, HotTopicContextInjector, PlatformHotTopicAggregator
from ..processors.comment_processor import CommentProcessor
from ..prompts.hot_topic_generator import HotTopicPromptGenerator, WritingAngle, HotTopicType
from ..utils.sanitizer import InputSanitizer
from ..validators.fact_checker import FactChecker

logger = logging.getLogger(__name__)

# Create router; all endpoints below are mounted under /api/v1/hot-topic
router = APIRouter(prefix="/api/v1/hot-topic", tags=["hot-topic"])

# Module-level singletons shared by every endpoint in this router
prompt_generator = HotTopicPromptGenerator()
comment_processor = CommentProcessor()
perspective_analyzer = PerspectiveAnalyzer()
fact_checker = FactChecker()

# Redis client (would be injected via dependency in production).
# NOTE(review): stays None unless assigned externally; inject_context passes it
# to HotTopicLoader as-is — confirm the loader tolerates a None client.
redis_client: Optional[redis.Redis] = None

# Request/Response models
class GeneratePromptRequest(BaseModel):
    """Request model for prompt generation"""
    title: str = Field(..., description="Hot topic title")
    excerpt: Optional[str] = Field(None, description="Topic excerpt")
    platform: Optional[str] = Field(None, description="Source platform")
    # Case-insensitive HotTopicType name; invalid values are logged and ignored
    topic_type: Optional[str] = Field(None, description="Override topic type")
    # Case-insensitive WritingAngle name; invalid values are logged and ignored
    writing_angle: Optional[str] = Field(None, description="Preferred writing angle")
    # Sanitized via InputSanitizer.sanitize_dict before use
    custom_params: Optional[Dict[str, Any]] = Field(None, description="Custom parameters")

class GeneratePromptResponse(BaseModel):
    """Response model for prompt generation"""
    prompt: str
    topic_type: str
    writing_angle: str
    suggested_angles: List[str]
    parameters: Dict[str, Any]
    # Timestamp string produced by the generator — exact format not visible here
    generated_at: str

class AnalyzeCommentsRequest(BaseModel):
    """Request model for comment analysis"""
    # Each dict may carry "content", "author", "likes"; other keys are dropped
    comments: List[Dict[str, Any]] = Field(..., description="Comments to analyze")
    # Forwarded to the comment processor for relevance computation
    topic_keywords: Optional[List[str]] = Field(None, description="Topic keywords")
    max_comments: int = Field(100, description="Maximum comments to process")
    # Thresholds applied when filtering for high-quality comments
    min_quality_score: float = Field(0.6, description="Minimum quality score")
    min_relevance_score: float = Field(0.5, description="Minimum relevance score")

class AnalyzeCommentsResponse(BaseModel):
    """Response model for comment analysis"""
    # Number of comments that survived processing
    processed_count: int
    # Subset of processed comments meeting the quality/relevance thresholds
    high_quality_count: int
    # Sentiment label -> fraction, from get_sentiment_distribution
    sentiment_distribution: Dict[str, float]
    # (theme, weight) pairs from extract_key_themes.
    # Fix: `Tuple` was referenced without being imported, raising NameError at
    # module import time; it is now imported from typing.
    key_themes: List[Tuple[str, float]]
    # Up to 10 top comments with scores and a formatted citation
    top_comments: List[Dict[str, Any]]

class ExtractPerspectivesRequest(BaseModel):
    """Request model for perspective extraction"""
    # Passed straight to PerspectiveAnalyzer.analyze_perspectives
    content_sources: List[Dict[str, Any]] = Field(..., description="Content sources")
    topic: str = Field(..., description="Main topic")

class ExtractPerspectivesResponse(BaseModel):
    """Response model for perspective extraction"""
    topic: str
    viewpoint_count: int
    # Pulled from perspective_matrix["stance_distribution"]; {} when absent
    stance_distribution: Dict[str, float]
    consensus_points: List[str]
    controversy_points: List[str]
    perspective_matrix: Dict[str, Any]

class InjectContextRequest(BaseModel):
    """Request model for context injection"""
    conversation_id: str = Field(..., description="Conversation ID")
    platform: str = Field(..., description="Platform to fetch hot topics from")
    # Upper bound forwarded to the injector; defaults to 3
    max_topics: int = Field(3, description="Maximum topics to inject")

class InjectContextResponse(BaseModel):
    """Response model for context injection"""
    success: bool
    # Count of topics actually attached under context.hot_topics.topics
    topics_injected: int
    platform: str
    message: str

class CheckFactsRequest(BaseModel):
    """Request model for fact checking"""
    # Sanitized via InputSanitizer.sanitize_content before checking
    text: str = Field(..., description="Text to check")
    context: Optional[Dict[str, Any]] = Field(None, description="Additional context")

class CheckFactsResponse(BaseModel):
    """Response model for fact checking"""
    original_text: str
    annotated_text: str
    fact_count: int
    credibility_score: float
    verification_suggestions: List[str]
    # Human-readable summary built by FactChecker.generate_fact_report
    report: str

# API Endpoints
@router.post("/generate-prompt", response_model=GeneratePromptResponse)
async def generate_prompt(request: GeneratePromptRequest):
    """
    Generate smart prompt based on hot topic.

    Sanitizes all user-supplied fields, resolves the optional topic-type and
    writing-angle enum overrides (invalid names are logged and ignored), then
    delegates to the shared HotTopicPromptGenerator.

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        # Sanitize free-text inputs before they reach the generator
        title = InputSanitizer.sanitize_title(request.title)
        excerpt = InputSanitizer.sanitize_excerpt(request.excerpt) if request.excerpt else None

        # Resolve optional topic-type override; unknown names fall back to None
        topic_type = None
        if request.topic_type:
            try:
                topic_type = HotTopicType[request.topic_type.upper()]
            except KeyError:
                logger.warning(f"Invalid topic type: {request.topic_type}")

        # Resolve optional writing-angle override; unknown names fall back to None
        writing_angle = None
        if request.writing_angle:
            try:
                writing_angle = WritingAngle[request.writing_angle.upper()]
            except KeyError:
                logger.warning(f"Invalid writing angle: {request.writing_angle}")

        # Sanitize the caller-supplied parameter dict, if any
        custom_params = request.custom_params
        if custom_params:
            custom_params = InputSanitizer.sanitize_dict(custom_params)

        result = prompt_generator.generate_prompt(
            title=title,
            excerpt=excerpt,
            topic_type=topic_type,
            writing_angle=writing_angle,
            custom_params=custom_params,
        )

        return GeneratePromptResponse(**result)

    except Exception as e:
        # logger.exception records the traceback (logger.error did not)
        logger.exception("Error generating prompt: %s", e)
        # NOTE(review): detail=str(e) can leak internals; kept for API compatibility
        raise HTTPException(status_code=500, detail=str(e)) from e

@router.post("/analyze-comments", response_model=AnalyzeCommentsResponse)
async def analyze_comments(request: AnalyzeCommentsRequest):
    """
    Analyze and process comments.

    Sanitizes every incoming comment, runs the processing pipeline, filters by
    the requested quality/relevance thresholds, and returns aggregate
    statistics plus at most 10 formatted top comments.

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        # Sanitize each comment's free-text fields before processing
        sanitized_comments = [
            {
                "content": InputSanitizer.sanitize_comment(comment.get("content", "")),
                "author": InputSanitizer.sanitize_text(comment.get("author", ""), max_length=100),
                "likes": comment.get("likes", 0),
            }
            for comment in request.comments
        ]

        processed = comment_processor.process_comments(
            comments=sanitized_comments,
            topic_keywords=request.topic_keywords,
            max_comments=request.max_comments,
        )

        # Keep only comments meeting the caller's thresholds
        high_quality = comment_processor.filter_high_quality(
            comments=processed,
            min_quality_score=request.min_quality_score,
            min_relevance_score=request.min_relevance_score,
        )

        sentiment_dist = comment_processor.get_sentiment_distribution(processed)
        key_themes = comment_processor.extract_key_themes(processed, top_n=5)

        # Surface at most 10 high-quality comments with their scores
        top_comments = [
            {
                "content": comment.content,
                "author": comment.author,
                "likes": comment.likes,
                "sentiment": comment.sentiment.value,
                "quality_score": comment.quality_score,
                "relevance_score": comment.relevance_score,
                "formatted_citation": comment_processor.format_for_citation(comment),
            }
            for comment in high_quality[:10]
        ]

        return AnalyzeCommentsResponse(
            processed_count=len(processed),
            high_quality_count=len(high_quality),
            sentiment_distribution=sentiment_dist,
            key_themes=key_themes,
            top_comments=top_comments,
        )

    except Exception as e:
        # logger.exception records the traceback (logger.error did not)
        logger.exception("Error analyzing comments: %s", e)
        raise HTTPException(status_code=500, detail=str(e)) from e

@router.post("/extract-perspectives", response_model=ExtractPerspectivesResponse)
async def extract_perspectives(request: ExtractPerspectivesRequest):
    """
    Extract multiple perspectives from content.

    Runs the shared PerspectiveAnalyzer over the supplied sources and returns
    viewpoint counts, stance distribution, consensus and controversy points.

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        # NOTE(review): content_sources is not passed through InputSanitizer,
        # unlike the other endpoints — confirm the analyzer sanitizes input.
        analysis = perspective_analyzer.analyze_perspectives(
            content_sources=request.content_sources,
            topic=request.topic,
        )

        # Stance distribution lives inside the matrix; default to {} if absent
        stance_dist = analysis.perspective_matrix.get("stance_distribution", {})

        return ExtractPerspectivesResponse(
            topic=analysis.topic,
            viewpoint_count=len(analysis.viewpoints),
            stance_distribution=stance_dist,
            consensus_points=analysis.consensus_points,
            controversy_points=analysis.controversy_points,
            perspective_matrix=analysis.perspective_matrix,
        )

    except Exception as e:
        # logger.exception records the traceback (logger.error did not)
        logger.exception("Error extracting perspectives: %s", e)
        raise HTTPException(status_code=500, detail=str(e)) from e

@router.post("/inject-context", response_model=InjectContextResponse)
async def inject_context(request: InjectContextRequest):
    """
    Inject hot topic context into conversation.

    Loads hot topics for the requested platform and merges them into the
    conversation's context. If the conversation cannot be fetched, an empty
    context is used so injection still proceeds (deliberate best-effort).

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        loader = HotTopicLoader(
            platform=request.platform,
            redis_client=redis_client,
        )
        injector = HotTopicContextInjector(loader)

        # Local import — presumably to avoid a module-level import cycle; confirm
        from ..models.conversation import ConversationRepository

        conv_repo = ConversationRepository()
        try:
            conversation_context = await conv_repo.get_conversation_context(request.conversation_id)
        except Exception as e:
            # Best-effort fallback: proceed with an empty conversation shell
            logger.warning(f"Failed to fetch conversation context: {e}, using empty context")
            conversation_context = {
                "conversation_id": request.conversation_id,
                "messages": [],
            }

        updated_context = await injector.inject_context(
            conversation_context=conversation_context,
            max_topics=request.max_topics,
        )

        # Count how many topics the injector actually attached
        topics_injected = len(
            updated_context.get("context", {})
            .get("hot_topics", {})
            .get("topics", [])
        )

        return InjectContextResponse(
            success=True,
            topics_injected=topics_injected,
            platform=request.platform,
            message=f"Successfully injected {topics_injected} hot topics",
        )

    except Exception as e:
        # logger.exception records the traceback (logger.error did not)
        logger.exception("Error injecting context: %s", e)
        raise HTTPException(status_code=500, detail=str(e)) from e

@router.post("/check-facts", response_model=CheckFactsResponse)
async def check_facts(request: CheckFactsRequest):
    """
    Check facts in text and provide verification suggestions.

    Sanitizes the text (and optional context dict), runs the shared
    FactChecker, and returns the annotated text plus a generated report.

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        # Sanitize user-supplied text before analysis
        sanitized_text = InputSanitizer.sanitize_content(request.text)

        sanitized_context = None
        if request.context:
            sanitized_context = InputSanitizer.sanitize_dict(request.context)

        result = fact_checker.check_facts(
            text=sanitized_text,
            context=sanitized_context,
        )
        report = fact_checker.generate_fact_report(result)

        return CheckFactsResponse(
            original_text=result.original_text,
            annotated_text=result.annotated_text,
            fact_count=len(result.facts),
            credibility_score=result.credibility_score,
            verification_suggestions=result.verification_suggestions,
            report=report,
        )

    except Exception as e:
        # logger.exception records the traceback (logger.error did not)
        logger.exception("Error checking facts: %s", e)
        raise HTTPException(status_code=500, detail=str(e)) from e

@router.get("/platforms")
async def get_supported_platforms():
    """Return the list of platform identifiers supported for hot-topic data."""
    supported_platforms = [
        "weibo",
        "zhihu",
        "toutiao",
        "baidu",
        "xiaohongshu",
        "douyin",
        "twitter",
        "reddit",
        "youtube",
    ]
    return {"platforms": supported_platforms}

@router.get("/writing-angles")
async def get_writing_angles():
    """Return each WritingAngle as an id/name/description record."""
    # Descriptions keyed by enum member name; unknown members get ""
    descriptions = {
        "NEWS_REPORT": "新闻报道 - 客观陈述事实",
        "DEEP_ANALYSIS": "深度分析 - 全面深入剖析",
        "COMMENTARY": "评论文章 - 表达观点和见解",
        "FEATURE_STORY": "专题报道 - 详细展开报道",
        "OPINION_PIECE": "观点文章 - 阐述个人观点",
    }
    angles = []
    for member in WritingAngle:
        angles.append({
            "id": member.name.lower(),
            "name": member.value,
            "description": descriptions.get(member.name, ""),
        })
    return {"angles": angles}

@router.get("/topic-types")
async def get_topic_types():
    """Return each HotTopicType as an id/name/description record."""
    # Descriptions keyed by enum member name; unknown members get ""
    descriptions = {
        "EVENT": "事件类 - 新闻事件、突发事件",
        "TOPIC": "话题类 - 社会话题、热点讨论",
        "PERSON": "人物类 - 人物报道、人物故事",
        "TREND": "趋势类 - 行业趋势、社会潮流",
        "CONTROVERSY": "争议类 - 争议话题、辩论焦点",
    }
    types = []
    for member in HotTopicType:
        types.append({
            "id": member.name.lower(),
            "name": member.value,
            "description": descriptions.get(member.name, ""),
        })
    return {"types": types}

@router.post("/batch-generate")
async def batch_generate_prompts(
    hot_topics: List[Dict[str, Any]],
    default_angle: Optional[str] = None
):
    """
    Generate prompts for multiple hot topics.

    Args:
        hot_topics: Topic dicts forwarded to the prompt generator.
        default_angle: Optional case-insensitive WritingAngle name; invalid
            names are logged and ignored.

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        # Resolve the optional default angle; unknown names fall back to None
        writing_angle = None
        if default_angle:
            try:
                writing_angle = WritingAngle[default_angle.upper()]
            except KeyError:
                logger.warning(f"Invalid writing angle: {default_angle}")

        prompts = prompt_generator.generate_batch_prompts(
            hot_topics=hot_topics,
            default_angle=writing_angle,
        )

        return {
            "count": len(prompts),
            "prompts": prompts,
        }

    except Exception as e:
        # logger.exception records the traceback (logger.error did not)
        logger.exception("Error in batch generation: %s", e)
        raise HTTPException(status_code=500, detail=str(e)) from e

@router.post("/combined-prompt")
async def generate_combined_prompt(
    hot_topics: List[Dict[str, Any]],
    max_topics: int = 3
):
    """
    Generate a combined prompt for multiple related topics.

    Args:
        hot_topics: Topic dicts forwarded to the prompt generator.
        max_topics: Upper bound on how many topics the generator may combine.

    Raises:
        HTTPException: 500 with the underlying error message on failure.
    """
    try:
        combined_prompt = prompt_generator.suggest_combined_prompt(
            hot_topics=hot_topics,
            max_topics=max_topics,
        )

        return {
            "prompt": combined_prompt,
            # Generator caps usage at max_topics, hence the min()
            "topics_used": min(len(hot_topics), max_topics),
        }

    except Exception as e:
        # logger.exception records the traceback (logger.error did not)
        logger.exception("Error generating combined prompt: %s", e)
        raise HTTPException(status_code=500, detail=str(e)) from e

# Health check
@router.get("/health")
async def health_check():
    """
    Health check endpoint
    """
    return {
        "status": "healthy",
        "service": "hot-topic",
        "components": {
            "prompt_generator": "ready",
            "comment_processor": "ready",
            "perspective_analyzer": "ready",
            "fact_checker": "ready"
        }
    }