"""Hot topic content generation chain implementation."""

from typing import AsyncGenerator, Dict, Any, Optional, List
import logging
import re
from langchain.prompts import ChatPromptTemplate
from langchain.schema import Document
from langchain_openai import ChatOpenAI
from motor.motor_asyncio import AsyncIOMotorClient

from .generation import ContentGenerationChain
from ..loaders.hot_data import HotDataLoader
from ..prompts.hot_topic_generator import HotTopicPromptGenerator, HotTopicType, WritingAngle
from ..processors.comment_processor import CommentProcessor

logger = logging.getLogger(__name__)


class HotContentGenerationChain(ContentGenerationChain):
    """Content generation chain specialized for hot topics.

    Extends the base generation chain with hot-topic data loading, smart
    prompt synthesis, and comment aggregation so generated content can be
    grounded in trending-topic context.
    """

    def __init__(self, mongodb_client: Optional[AsyncIOMotorClient] = None):
        """Create a hot-content generation chain.

        Args:
            mongodb_client: Optional MongoDB client forwarded to the base
                chain for template storage.
        """
        super().__init__(mongodb_client)

        # Components dedicated to hot-topic handling: comment aggregation,
        # prompt synthesis, and raw hot-topic data loading.
        self.comment_processor = CommentProcessor()
        self.prompt_generator = HotTopicPromptGenerator()
        self.hot_data_loader = HotDataLoader()
    
    async def generate_with_hot_context(
        self,
        hot_item_id: str,
        platform: str,
        writing_angle: str = "news_report",
        user_prompt: Optional[str] = None,
        model_name: str = "gpt-3.5-turbo",
        callbacks: Optional[List] = None,
        **kwargs
    ) -> AsyncGenerator[str, None]:
        """Generate content with hot topic context injection.

        Pipeline: load the hot-topic record, aggregate its comments, build a
        "smart prompt", assemble system/user messages (with special handling
        for refinement requests and frontend-formatted prompts), then stream
        the model output chunk by chunk, normalising the chunk formats of
        several providers.

        Args:
            hot_item_id: ID of the hot topic item
            platform: Platform where the hot topic is from
            writing_angle: Writing angle (news_report, analysis, opinion_piece, etc.)
            user_prompt: Optional user-provided prompt
            model_name: Name of the model to use
            callbacks: Optional list of callback handlers
            **kwargs: Additional model parameters (set on the llm when the
                attribute exists)

        Yields:
            Generated content chunks (non-empty strings only)

        Raises:
            ValueError: If the hot item is not found, the model is not
                configured, or the stream times out without any content.
        """
        try:
            # 1. Load hot topic data
            logger.info(f"Loading hot topic data: {hot_item_id} from {platform}")
            hot_data = await self.hot_data_loader.load_hot_item(hot_item_id, platform)
            if not hot_data:
                raise ValueError(f"Hot topic {hot_item_id} not found on {platform}")
            
            # 2. Process comments for context
            logger.info("Processing comments for context")
            processed_comments = await self.comment_processor.process_comments(
                hot_data.get("comments", [])
            )
            
            # 3. Generate smart prompt based on hot topic
            logger.info(f"Generating smart prompt for writing angle: {writing_angle}")
            
            # Convert writing_angle string to enum if needed
            angle_enum = None
            if writing_angle:
                try:
                    angle_enum = WritingAngle(writing_angle.lower())
                except ValueError:
                    logger.warning(f"Unknown writing angle: {writing_angle}, using default")
                    angle_enum = WritingAngle.NEWS_REPORT
            
            smart_prompt_result = self.prompt_generator.generate_prompt(
                title=hot_data.get('title', ''),
                excerpt=hot_data.get('excerpt', ''),
                writing_angle=angle_enum,
                custom_params={
                    'processed_comments': processed_comments,
                    'user_input': user_prompt,
                    'platform': hot_data.get('platform', ''),
                    'heat_score': hot_data.get('heat_score', 0)
                }
            )
            
            # Extract the actual prompt text from the result
            smart_prompt = smart_prompt_result.get('prompt', '') if isinstance(smart_prompt_result, dict) else str(smart_prompt_result)
            
            # Debug logging
            logger.info(f"Smart prompt generated: {smart_prompt[:200]}...")

            # ========== Key optimization: detect the refinement scenario ==========
            # A refinement request is recognised by:
            # 1. user_prompt containing "请根据以下要求对内容进行微调"
            # 2. user_prompt containing "原始内容："
            # 3. user_prompt exceeding ~1000 chars (it embeds the full previously
            #    generated content)
            is_refinement = False
            if user_prompt:
                is_refinement = (
                    "微调" in user_prompt or
                    "原始内容：" in user_prompt or
                    "请根据以下要求对内容进行" in user_prompt
                )

            if is_refinement:
                logger.warning(f"⚠️ 检测到微调请求，Prompt长度: {len(user_prompt)} 字符")
                logger.warning(f"⚠️ 当前模型: {model_name}")

                # Generic handling: refinement prompts are condensed for every model,
                # because a long prompt (carrying the full previously generated text)
                # can make some models produce truncated output or time out.
                if len(user_prompt) > 1500:  # only condense prompts longer than 1500 chars
                    logger.warning("⚠️ 微调prompt过长，使用精简模式")

                    # Split out the requirement (formatting instructions) and the
                    # original content sections of the refinement prompt
                    requirement = ""
                    original_content = ""

                    if "请根据以下要求对内容进行微调：" in user_prompt:
                        parts = user_prompt.split("原始内容：")
                        if len(parts) == 2:
                            requirement_part = parts[0].replace("请根据以下要求对内容进行微调：", "").strip()
                            original_content = parts[1].replace("请直接输出微调后的内容，保持原有格式。", "").strip()
                            requirement = requirement_part

                    # Per-model truncation budget for the embedded original content
                    max_content_length = 2000  # default: 2000 chars

                    # Doubao models need an even shorter prompt
                    if "doubao" in model_name.lower() or "豆包" in model_name:
                        max_content_length = 1500
                        logger.info("⚠️ 豆包模型检测，限制内容长度为1500字符")

                    # DeepSeek-reasoner tolerates a slightly longer prompt
                    elif "deepseek" in model_name.lower() and "reasoner" in model_name.lower():
                        max_content_length = 2000
                        logger.info("⚠️ DeepSeek-reasoner模型检测，限制内容长度为2000字符")

                    if len(original_content) > max_content_length:
                        logger.warning(f"⚠️ 原始内容过长({len(original_content)}字符)，截断到{max_content_length}字符")
                        original_content = original_content[:max_content_length] + "\n\n...(内容已截断，请按照以上内容的风格和要求，输出完整的优化版本)"

                    # Build the condensed refinement prompt
                    refined_user_prompt = f"""请对以下内容进行微调优化：

【优化要求】
{requirement}

【原始内容片段】（仅供参考风格）
{original_content}

【输出要求】
请基于优化要求，输出完整优化后的内容。保持原有结构和完整性。"""

                    logger.info(f"✅ 微调prompt已精简，长度: {len(refined_user_prompt)} 字符（原长度: {len(user_prompt)}）")
                    user_prompt = refined_user_prompt

            # 4. Create context-aware prompt template
            # Check if user_prompt contains formatted prompt (from frontend)
            if user_prompt and len(user_prompt) > 1000 and "【重要：这是强制性格式要求" in user_prompt:
                # Frontend sent a formatted prompt, use it directly
                logger.info("=== 检测到前端格式化Prompt ===")
                logger.info("使用前端发送的完整格式化Prompt")
                
                # Use minimal system prompt and frontend's formatted prompt as user message
                system_prompt = "你是一个专业的内容创作助手。"
                user_message = user_prompt
                
                logger.info("=== 前端Prompt结构分析 ===")
                logger.info(f"📝 Frontend Prompt 长度: {len(user_prompt)} 字符")
                logger.info(f"📋 包含格式要求: {'✅' if '格式要求' in user_prompt else '❌'}")
                logger.info(f"🎯 包含用户需求: {'✅' if '请写' in user_prompt or '撰写' in user_prompt else '❌'}")
                logger.info(f"🔧 包含HTML标签: {'✅' if '<h1' in user_prompt or '<p' in user_prompt else '❌'}")
                
            else:
                # Use backend's original logic
                logger.info("=== 使用后端原始Prompt逻辑 ===")

                # Smart detection: does user_prompt already embed the hot-topic data?
                user_has_hot_data = self._detect_hot_data_in_user_prompt(user_prompt, hot_data)

                if user_has_hot_data:
                    # The frontend already supplied the hot-topic data, so the system
                    # prompt is reduced to role definition and basic constraints only
                    logger.info("=== 检测到User Input已包含热点数据，使用简化System Prompt ===")
                    system_prompt = self._build_minimal_system_prompt()

                    # Backfill hot-topic fields the frontend left empty (e.g. excerpt)
                    enhanced_user_prompt = self._enhance_user_prompt_with_missing_data(user_prompt, hot_data)

                    # Append comment analysis to the user input (it is valuable even
                    # when the frontend already provided the hot-topic data)
                    comment_analysis = self._build_comment_analysis(processed_comments)

                    # Check whether the user input opens with an explicit writing-task
                    # instruction. Only the first 100 chars are scanned so task-like
                    # words inside the "创作要求" section do not count as one.
                    has_explicit_task = any(keyword in user_prompt[:100] for keyword in ['撰写一篇', '写一篇', '请写', '创作一篇'])

                    # Assemble the final user message
                    parts = [enhanced_user_prompt]

                    # Append the comment analysis (if any)
                    if comment_analysis:
                        parts.append(comment_analysis)
                        logger.info("✓ 补充了评论分析")

                    # Append the smart-prompt task description when the user input
                    # carries no explicit task of its own
                    if not has_explicit_task:
                        parts.append(smart_prompt)
                        logger.info("✓ 补充了Smart Prompt任务描述")

                    user_message = "\n\n".join(parts)
                    logger.info(f"=== 最终User Message组成: {len(parts)}个部分 ===")
                else:
                    # Frontend did not provide hot-topic data: use the full system prompt
                    logger.info("=== User Input为纯个性化要求，使用完整System Prompt ===")
                    system_prompt = self._build_system_prompt(hot_data, processed_comments)
                    user_message = smart_prompt if not user_prompt else f"{smart_prompt}\n\n用户补充要求：{user_prompt}"
            
            # Log the complete final prompt (debugging aid)
            logger.info("=" * 80)
            logger.info("📋 最终输入模型的完整Prompt:")
            logger.info("=" * 80)
            logger.info(f"【System Prompt】({len(system_prompt)} 字符):")
            logger.info(system_prompt)
            logger.info("-" * 80)
            logger.info(f"【User Message】({len(user_message)} 字符):")
            logger.info(user_message)
            logger.info("=" * 80)

            # 5. Generate content using parent class stream method
            prompt_template = ChatPromptTemplate.from_messages([
                ("system", system_prompt),
                ("user", user_message)
            ])
            
            # Select model
            logger.info(f"Available models: {list(self.models.keys())}")
            logger.info(f"Requesting model: {model_name}")
            
            llm = self.models.get(model_name)
            if not llm:
                # Model should have been created dynamically in the API route
                logger.error(f"Model {model_name} not found in chain models")
                raise ValueError(f"Model {model_name} not configured. Please ensure API credentials are provided.")
            
            logger.info(f"Using model: {llm.__class__.__name__}")
            
            # Add callbacks
            # NOTE(review): this mutates the shared model instance stored in
            # self.models — concurrent generations could interfere; confirm.
            if callbacks:
                llm.callbacks = callbacks
            
            # Update model parameters (same caveat: mutates the shared instance)
            for key, value in kwargs.items():
                if hasattr(llm, key):
                    setattr(llm, key, value)
            
            # Create chain using new RunnableSequence format (prompt | llm)
            try:
                chain = prompt_template | llm
                logger.info("Chain created successfully")
            except Exception as chain_error:
                logger.error(f"Failed to create chain: {chain_error}")
                raise
            
            # Stream the response
            try:
                import time
                logger.info("开始流式输出...")
                chunk_count = 0
                empty_chunk_count = 0
                max_consecutive_empty = 500  # DeepSeek-reasoner may emit many empty chunks while reasoning; raised to 500
                content_received = False  # whether any non-empty content has arrived yet
                start_time = time.time()

                # Adjust the timeout to the model and scenario
                max_wait_time = 120  # default: 120 s (2 minutes)
                if "deepseek" in model_name.lower() and "reasoner" in model_name.lower():
                    if is_refinement:
                        max_wait_time = 180  # refinement scenario: extend to 3 minutes
                        logger.info(f"⚠️ DeepSeek-reasoner微调场景，超时时间延长至{max_wait_time}秒")
                    else:
                        max_wait_time = 150  # normal generation: 2.5 minutes
                        logger.info(f"⚠️ DeepSeek-reasoner模型，超时时间延长至{max_wait_time}秒")

                async for chunk in chain.astream({}):
                    chunk_count += 1

                    # Log detailed info for the first 10 chunks to aid debugging
                    if chunk_count <= 10:
                        logger.debug(f"[调试] 数据块 {chunk_count} 类型: {type(chunk).__name__}")
                        if hasattr(chunk, 'content'):
                            logger.debug(f"[调试] 数据块 {chunk_count} .content: '{chunk.content}'")
                        if isinstance(chunk, dict):
                            logger.debug(f"[调试] 数据块 {chunk_count} dict keys: {list(chunk.keys())[:10]}")

                    # Normalise the different response formats providers emit
                    content = None

                    # 1. Standard AIMessage format (most common)
                    if hasattr(chunk, 'content'):
                        content = chunk.content
                        if content:  # only log details when there is actual content
                            logger.debug(f"[数据块 {chunk_count}] 通过.content属性提取，长度: {len(content)}")

                    # 2. Dict format with a 'text' key (some providers)
                    elif isinstance(chunk, dict):
                        if "text" in chunk:
                            content = chunk["text"]
                            if content:
                                logger.debug(f"[数据块 {chunk_count}] 通过dict['text']提取")
                        elif "content" in chunk:
                            content = chunk["content"]
                            if content:
                                logger.debug(f"[数据块 {chunk_count}] 通过dict['content']提取")
                        # DeepSeek-R1 reasoner format
                        elif "choices" in chunk and len(chunk.get("choices", [])) > 0:
                            delta = chunk["choices"][0].get("delta", {})
                            if "content" in delta:
                                content = delta["content"]
                                if content:
                                    logger.debug(f"[数据块 {chunk_count}] 通过OpenAI delta格式提取")
                            elif "reasoning_content" in delta:
                                # DeepSeek-R1's special reasoning stream — skipped for now
                                logger.debug(f"[数据块 {chunk_count}] 跳过reasoning_content")
                                continue

                    # 3. Plain string chunks (rare)
                    elif isinstance(chunk, str):
                        content = chunk
                        if content:
                            logger.debug(f"[数据块 {chunk_count}] 直接使用字符串")

                    # 4. Anything else: log it but extract nothing, to avoid false positives
                    else:
                        # Do not str()-convert: an object's __str__ may return a
                        # non-empty value even when the actual content is empty
                        logger.debug(f"[数据块 {chunk_count}] 未识别的格式: {type(chunk).__name__}")
                        content = None

                    # Only yield non-empty content
                    if content:
                        empty_chunk_count = 0  # reset the consecutive-empty counter
                        content_received = True
                        yield content
                    else:
                        empty_chunk_count += 1

                        # Check the overall timeout
                        elapsed_time = time.time() - start_time
                        if elapsed_time > max_wait_time:
                            if content_received:
                                logger.info(f"已超过最大等待时间（{max_wait_time}秒），且已收到内容，结束流式输出")
                                break
                            else:
                                logger.error(f"已超过最大等待时间（{max_wait_time}秒）且未收到任何内容")
                                raise ValueError(f"模型在{max_wait_time}秒内未返回任何内容，请检查模型配置和API密钥")

                        # Warn every 100 consecutive empty chunks
                        if empty_chunk_count % 100 == 0:
                            logger.warning(f"已连续收到 {empty_chunk_count} 个空数据块（总块数: {chunk_count}，已用时: {elapsed_time:.1f}秒）")

                        # Too many consecutive empty chunks
                        if empty_chunk_count > max_consecutive_empty:
                            if content_received:
                                logger.info(f"连续 {empty_chunk_count} 个空块，且已收到内容，判断为流结束")
                                break
                            else:
                                # DeepSeek-reasoner can reason for a long time; keep
                                # waiting as long as the overall timeout is not hit
                                logger.warning(f"连续 {empty_chunk_count} 个空块但未收到任何内容，继续等待（已用时: {elapsed_time:.1f}秒）...")

                # NOTE(review): empty_chunk_count resets whenever content arrives, so
                # the "valid blocks" figure below only subtracts the trailing run of
                # empty chunks, not all empties seen — confirm whether that's intended.
                logger.info(f"流式输出完成。共处理 {chunk_count} 个数据块，其中有效块: {chunk_count - empty_chunk_count} 个")

                # Check whether the stream finished normally
                if not content_received and chunk_count > 0:
                    # Chunks arrived but none of them carried usable content
                    error_msg = f"⚠️ 模型未返回任何有效内容\n"
                    error_msg += f"📊 统计: 总块数{chunk_count}, 空块数{empty_chunk_count}, 用时{time.time()-start_time:.1f}秒\n"
                    error_msg += f"🤖 模型: {model_name}\n"
                    error_msg += f"💡 可能原因:\n"
                    error_msg += f"   1. Prompt过长或格式不符合模型要求\n"
                    error_msg += f"   2. API配置错误或密钥无效\n"
                    error_msg += f"   3. 模型不支持当前任务类型\n"
                    error_msg += f"   4. 建议: 尝试切换其他模型或简化prompt"
                    logger.error(error_msg)
                    raise ValueError(f"模型 {model_name} 未返回任何内容，请检查模型配置或尝试其他模型")

            except Exception as stream_error:
                logger.error(f"流式输出错误: {stream_error}")
                import traceback
                logger.error(f"错误堆栈: {traceback.format_exc()}")
                raise
                    
        except Exception as e:
            logger.error(f"Error during hot content generation: {e}")
            import traceback
            logger.error(f"Full traceback: {traceback.format_exc()}")
            raise
    
    def _build_system_prompt(self, hot_data: Dict[str, Any], processed_comments: Dict[str, Any]) -> str:
        """Build system prompt with hot topic context.
        
        Args:
            hot_data: Hot topic data
            processed_comments: Processed comments data
            
        Returns:
            System prompt string
        """
        # Format sentiment distribution safely
        sentiment_dist = processed_comments.get('sentiment_distribution', {})
        sentiment_str = f"正面：{sentiment_dist.get('positive', 0)}，负面：{sentiment_dist.get('negative', 0)}，中立：{sentiment_dist.get('neutral', 0)}"
        
        # Format key viewpoints and top comments
        key_viewpoints = processed_comments.get('key_viewpoints', [])
        viewpoints_str = '、'.join(key_viewpoints[:3]) if key_viewpoints else '暂无'
        
        top_comments = processed_comments.get('top_comments', [])
        comments_str = '；'.join([c.get('content', '')[:50] for c in top_comments[:3]]) if top_comments else '暂无'
        
        system_prompt = f"""你是一个专业的内容创作助手，专门基于热点话题进行创作。

当前热点信息：
- 标题：{hot_data.get('title', '')}
- 平台：{hot_data.get('platform', '')}
- 热度：{hot_data.get('heat_score', 0)}
- 摘要：{hot_data.get('excerpt', '')}
- 发布时间：{hot_data.get('publish_time', '')}

评论分析：
- 总评论数：{processed_comments.get('total_count', 0)}
- 情感分布：{sentiment_str}
- 关键观点：{viewpoints_str}
- 热门评论：{comments_str}

创作要求：
1. 基于提供的热点信息和评论分析进行创作
2. 保持客观性，避免偏见和不实信息
3. 结合多角度观点，提供深度分析
4. 语言流畅，结构清晰
5. 适当引用相关评论作为佐证

请根据用户的具体要求进行创作。"""
        
        return system_prompt

    def _detect_hot_data_in_user_prompt(self, user_prompt: Optional[str], hot_data: Dict[str, Any]) -> bool:
        """检测user_prompt是否已经包含热点数据.

        Args:
            user_prompt: 用户输入的prompt
            hot_data: 热点数据

        Returns:
            True if user_prompt contains hot data, False otherwise
        """
        if not user_prompt:
            return False

        # 检测关键标识词
        hot_data_indicators = [
            "热点标题：",
            "平台来源：",
            "热度排名：",
            "话题摘要：",
            "创作要求：",
            "请基于以下热点话题"
        ]

        # 如果包含任意一个标识词，认为已包含热点数据
        for indicator in hot_data_indicators:
            if indicator in user_prompt:
                logger.info(f"检测到User Input包含热点数据标识: {indicator}")
                return True

        # 额外检查：如果user_prompt包含热点标题，也认为已包含数据
        hot_title = hot_data.get('title', '')
        if hot_title and hot_title in user_prompt:
            logger.info(f"检测到User Input包含热点标题: {hot_title[:50]}")
            return True

        return False

    def _build_minimal_system_prompt(self) -> str:
        """构建简化的System Prompt（当User Input已包含热点数据时使用）.

        Returns:
            Minimal system prompt string
        """
        return """你是一个专业的内容创作助手，专门基于热点话题进行创作。

基本约束：
1. 保持客观性，避免偏见和不实信息
2. 确保内容的准确性和时效性
3. 遵循新闻伦理和社会责任
4. 语言流畅，结构清晰

请根据用户提供的具体热点信息和创作要求进行创作。"""

    def _enhance_user_prompt_with_missing_data(self, user_prompt: str, hot_data: Dict[str, Any]) -> str:
        """补充User Prompt中缺失的热点数据.

        Args:
            user_prompt: 用户输入的prompt
            hot_data: 热点数据

        Returns:
            Enhanced user prompt with missing data filled
        """
        if not user_prompt:
            return user_prompt

        enhanced = user_prompt

        # 检查并补充话题摘要（如果为空）
        # 使用正则匹配 "话题摘要：" 后面跟着空白字符或直接换行的情况
        excerpt = hot_data.get('excerpt', '')
        if excerpt and re.search(r'话题摘要：\s*\n', enhanced):
            # 替换空的摘要
            enhanced = re.sub(
                r'话题摘要：\s*\n',
                f'话题摘要：{excerpt}\n',
                enhanced
            )
            logger.info(f"补充了话题摘要: {excerpt[:50]}...")

        # 检查并补充发布时间（如果缺失）
        if "发布时间：" not in enhanced:
            publish_time = hot_data.get('publish_time', '')
            if publish_time:
                # 在热度排名后插入发布时间
                if "热度排名：" in enhanced:
                    enhanced = enhanced.replace(
                        "热度排名：",
                        f"发布时间：{publish_time}\n热度排名："
                    )
                    logger.info(f"补充了发布时间: {publish_time}")

        # 检查并补充热度值（如果缺失）
        if "热度值：" not in enhanced and "热度排名：" in enhanced:
            heat_score = hot_data.get('heat_score', 0)
            if heat_score > 0:
                # 在热度排名后插入热度值
                enhanced = re.sub(
                    r'(热度排名：#\d+)',
                    f'\\1\n热度值：{heat_score}',
                    enhanced
                )
                logger.info(f"补充了热度值: {heat_score}")

        return enhanced

    def _build_comment_analysis(self, processed_comments: Dict[str, Any]) -> str:
        """构建评论分析内容（用于补充到User Message）.

        Args:
            processed_comments: 处理后的评论数据

        Returns:
            Comment analysis string, or empty string if no comments
        """
        if not processed_comments or processed_comments.get('total_count', 0) == 0:
            return ""

        # Format sentiment distribution
        sentiment_dist = processed_comments.get('sentiment_distribution', {})
        sentiment_str = f"正面：{sentiment_dist.get('positive', 0)}，负面：{sentiment_dist.get('negative', 0)}，中立：{sentiment_dist.get('neutral', 0)}"

        # Format key viewpoints
        key_viewpoints = processed_comments.get('key_viewpoints', [])
        viewpoints_str = '、'.join(key_viewpoints[:5]) if key_viewpoints else '暂无'

        # Format top comments
        top_comments = processed_comments.get('top_comments', [])
        comments_list = []
        for i, comment in enumerate(top_comments[:3], 1):
            content = comment.get('content', '')[:80]  # Limit length
            comments_list.append(f"{i}. {content}")
        comments_str = '\n'.join(comments_list) if comments_list else '暂无'

        analysis = f"""评论分析（供参考）：
- 总评论数：{processed_comments.get('total_count', 0)}
- 情感分布：{sentiment_str}
- 关键观点：{viewpoints_str}
- 热门评论：
{comments_str}"""

        return analysis

    def _get_fallback_model(self):
        """Get fallback model when requested model is not available."""
        if not self.models:
            raise ValueError("No models available")
        
        # Try default model first
        if hasattr(self, 'settings') and hasattr(self.settings, 'default_model'):
            fallback = self.models.get(self.settings.default_model)
            if fallback:
                return fallback
        
        # Return first available model
        return list(self.models.values())[0]
    
    async def analyze_hot_topic_trends(
        self,
        hot_item_id: str,
        platform: str,
        model_name: str = "gpt-3.5-turbo"
    ) -> Dict[str, Any]:
        """Run an LLM-based trend analysis for a single hot topic.

        Args:
            hot_item_id: ID of the hot topic item.
            platform: Platform the hot topic originates from.
            model_name: Preferred model name; a fallback model is used when
                it is not configured.

        Returns:
            Dict with the generated analysis text, the loaded topic data,
            and its last-update timestamp.
        """
        try:
            # Fetch the topic together with its historical trend series.
            hot_data = await self.hot_data_loader.load_hot_item_with_trends(
                hot_item_id, platform
            )

            # Structured instruction covering heat, focus, sentiment,
            # forecast, and creation opportunities.
            trend_prompt = f"""分析以下热点话题的趋势变化：

话题：{hot_data.get('title', '')}
平台：{hot_data.get('platform', '')}
当前热度：{hot_data.get('heat_score', 0)}

趋势数据：
{hot_data.get('trends', [])}

请从以下角度进行分析：
1. 热度变化趋势
2. 讨论焦点转移
3. 公众情绪变化
4. 预测后续发展
5. 创作机会点

请提供结构化的分析结果。"""

            # Prefer the requested model; otherwise use any configured fallback.
            llm = self.models.get(model_name) or self._get_fallback_model()

            result = await llm.agenerate([trend_prompt])

            return {
                "analysis": result.generations[0][0].text,
                "hot_data": hot_data,
                "timestamp": hot_data.get("updated_at")
            }

        except Exception as e:
            logger.error(f"Error during trend analysis: {e}")
            raise
    
    def get_supported_writing_angles(self) -> List[str]:
        """Get list of supported writing angles.
        
        Returns:
            List of writing angle names
        """
        return self.prompt_generator.get_supported_angles()
    
    def get_hot_topic_types(self) -> List[str]:
        """List the supported hot-topic type names.

        Returns:
            The ``value`` of every member of ``HotTopicType``.
        """
        return [member.value for member in HotTopicType]
