import json
import requests
import os
from datetime import datetime, timedelta, date as _date
from dotenv import load_dotenv
import logging
import ast
from sqlalchemy import text  # 添加这一行
from time import sleep

# Load environment variables from the .env file (API key, tuning knobs).
load_dotenv()

# DeepSeek API configuration. The key may be None at import time; it is
# re-read and validated in SentimentAnalyzer.__init__.
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"

# Sentiment-analysis prompt template (model is asked to reply in Chinese).
# Literal braces are doubled ({{ }}) so that str.format substitutes only the
# {comments_data} placeholder and does not raise KeyError on the JSON examples
# embedded in the prompt.
PROMPT_TEMPLATE = """
请对以下评论数据进行情感分析，输出结构化分析结果。

评论内容格式：
{{
  "original_text": "原始评论",
  "translated_text": "翻译后的内容",
  "primary_label": "标签"
}}

请分析以下内容，并以标准 JSON 格式返回结果，不要包含其他内容。请使用中文回复。

返回格式必须为：
{{
  "sentiment_intensity": "情感强度分析结果",
  "sentiment_by_topic": "主题/方面情感分析结果",
  "unmet_needs": "未满足需求与痛点聚类",
  "usage_scenarios": "使用场景分析",
  "purchase_drivers": "购买驱动因素分析",
  "product_opportunities": "产品机会",
  "strategy_recommendations": "策略建议"
}}

评论数据:
{comments_data}
"""

def setup_logger():
    """Create (or reuse) the module's stream logger.

    A handler is attached only on the first call so repeated imports or
    calls do not produce duplicate log lines.
    """
    log = logging.getLogger("sentiment_analysis")
    if not log.handlers:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        )
        log.addHandler(stream_handler)
    log.setLevel(logging.INFO)
    return log

logger = setup_logger()

class SentimentAnalyzer:
    def __init__(self, db_manager):
        self.db_manager = db_manager
        self.api_key = os.getenv("DEEPSEEK_API_KEY")
        if not self.api_key:
            raise ValueError("❌ 未配置 DEEPSEEK_API_KEY")
        # 从环境变量读取配置参数
        self.max_retries = int(os.getenv("SENTIMENT_API_MAX_RETRIES", "3"))
        self.retry_delay = int(os.getenv("SENTIMENT_API_RETRY_DELAY", "5"))
        self.api_timeout = int(os.getenv("SENTIMENT_API_TIMEOUT", "120"))

    def normalize_time_range(self, start_time, end_time, fallback_days=30):
        """
        规范化时间区间：
        - 若 start_time 和 end_time 都存在，且 start_time > end_time，则交换
        - 若一端缺失，则以另一端为基准，按 fallback_days 天补齐
        - 若两端都缺失，则以今天为 end_time，start_time = end_time - fallback_days
        - 如果两端都为None，返回None, None表示不限时间范围
        返回: (norm_start, norm_end, changed: bool)
        """
        from datetime import datetime as _dt

        def to_dt(x):
            if x is None:
                return None
            if isinstance(x, _dt):
                return x
            if isinstance(x, _date):
                return _dt(x.year, x.month, x.day)
            try:
                return _dt.fromisoformat(str(x).split('.')[0])
            except Exception:
                return None

        # 特殊情况：如果开始和结束时间都为None，表示不限时间范围
        if start_time is None and end_time is None:
            return None, None, False

        now = _dt.now()
        st = to_dt(start_time)
        et = to_dt(end_time)

        changed = False
        if st and et:
            if st > et:
                st, et = et, st
                changed = True
        elif st and not et:
            et = st + timedelta(days=fallback_days)
            changed = True
        elif et and not st:
            st = et - timedelta(days=fallback_days)
            changed = True
        else:
            et = now
            st = et - timedelta(days=fallback_days)
            changed = True

        # 如果规范化后的时间仍然存在，进行日期处理
        if st and et:
            st = _dt(st.year, st.month, st.day)
            et = _dt(et.year, et.month, et.day)
        return st, et, changed

    def fetch_pending_analysis_record(self):
        """Fetch one pending record (status = 0) from d_data_analysis.

        Returns:
            dict with id / tags / start_time / end_time / c_a_types, or
            None when there is nothing to process or the query fails.
        """

        def _parse_id_list(raw, field_name, empty_note):
            """Parse a DB field that should hold a list of ints.

            Tries JSON first, then a Python literal, then a comma-separated
            digit string; anything else yields an empty list.
            """
            if not raw:
                logger.info(empty_note)
                return []
            try:
                parsed = json.loads(raw)
                logger.info(f"{field_name}字段JSON解析成功: {parsed}")
                return parsed
            except (json.JSONDecodeError, TypeError):
                pass
            try:
                parsed = ast.literal_eval(raw)
                logger.info(f"{field_name}字段字面量解析成功: {parsed}")
                return parsed
            except (ValueError, SyntaxError):
                pass
            if isinstance(raw, str):
                parsed = [int(part.strip()) for part in raw.split(',') if part.strip().isdigit()]
                logger.info(f"{field_name}字段字符串解析成功: {parsed}")
                return parsed
            logger.info(f"{field_name}字段无法解析，设置为空列表")
            return []

        session = self.db_manager.Session()
        try:
            # Diagnostic counts so operators can see queue depth in the logs.
            total = session.execute(text("SELECT COUNT(*) FROM d_data_analysis")).fetchone()
            logger.info(f"📊 d_data_analysis 表中总记录数: {total[0] if total else 0}")

            pending = session.execute(
                text("SELECT COUNT(*) FROM d_data_analysis WHERE status = 0")
            ).fetchone()
            logger.info(f"📊 状态为0的待处理记录数: {pending[0] if pending else 0}")

            # Oldest pending record first; fetchone() since LIMIT 1.
            query = """
                SELECT id, tags, start_time, end_time, c_a_types
                FROM d_data_analysis
                WHERE status = 0
                ORDER BY id ASC
                LIMIT 1
            """
            record = session.execute(text(query)).fetchone()

            if record is None:
                logger.info("ℹ️ 未找到 status 为 0 的待处理记录")
                return None

            logger.info(f"取得一条待处理记录: ID={record[0]}, tags={record[1]}, start_time={record[2]}, end_time={record[3]}, c_a_types={record[4]}")

            # tags may legitimately be empty (time-range-only fallback query).
            tags = _parse_id_list(record[1], "tags", "tags字段为空（将启用仅按时间范围兜底）")
            c_a_types = _parse_id_list(record[4], "c_a_types", "c_a_types字段为空")

            logger.info(f"解析后记录数据: ID={record[0]}, tags={tags}, start_time={record[2]}, end_time={record[3]}, c_a_types={c_a_types}")
            return {
                "id": record[0],
                "tags": tags,
                "start_time": record[2],
                "end_time": record[3],
                "c_a_types": c_a_types
            }
        except Exception as e:
            logger.error(f"❌ 查询待处理记录时发生错误: {str(e)}", exc_info=True)
            return None
        finally:
            session.close()

    def fetch_data_analysis_records(self, record_index=5):
        """Fetch the record_index-th newest row from d_data_analysis.

        Falls back to the newest row when fewer than record_index rows
        exist.

        Returns:
            list of parsed record dicts (id / tags / start_time / end_time /
            c_a_types); empty list when the table is empty.
        """

        def _parse_id_list(raw, field_name, empty_note):
            """Parse a DB field that should hold a list of ints.

            Tries JSON first, then a Python literal, then a comma-separated
            digit string; anything else yields an empty list.
            """
            if not raw:
                logger.info(empty_note)
                return []
            try:
                parsed = json.loads(raw)
                logger.info(f"{field_name}字段JSON解析成功: {parsed}")
                return parsed
            except (json.JSONDecodeError, TypeError):
                pass
            try:
                parsed = ast.literal_eval(raw)
                logger.info(f"{field_name}字段字面量解析成功: {parsed}")
                return parsed
            except (ValueError, SyntaxError):
                pass
            if isinstance(raw, str):
                parsed = [int(part.strip()) for part in raw.split(',') if part.strip().isdigit()]
                logger.info(f"{field_name}字段字符串解析成功: {parsed}")
                return parsed
            logger.info(f"{field_name}字段无法解析，设置为空列表")
            return []

        session = self.db_manager.Session()
        try:
            # int() guards the f-string interpolation against non-int input.
            offset = int(max(0, record_index - 1))

            # Take the Nth newest record directly (tags allowed to be empty).
            query = f"""
                SELECT id, tags, start_time, end_time, c_a_types
                FROM d_data_analysis
                ORDER BY id DESC
                LIMIT 1 OFFSET {offset}
            """

            result = session.execute(text(query)).fetchall()

            # Fallback: fewer than N rows -> take the newest one.
            if not result:
                logger.warning(f"未找到第{record_index}条记录（可能记录数不足），回退到最新一条记录")
                fallback_query = """
                    SELECT id, tags, start_time, end_time, c_a_types
                    FROM d_data_analysis
                    ORDER BY id DESC
                    LIMIT 1 OFFSET 0
                """
                result = session.execute(text(fallback_query)).fetchall()

            if not result:
                # Table is empty.
                logger.warning("⚠️ 未找到 d_data_analysis 表记录（表可能为空）")
                return []

            data_analysis_records = []
            for record in result:
                logger.info(f"目标第{record_index}条 -> 实际取记录: ID={record[0]}, tags={record[1]}, start_time={record[2]}, end_time={record[3]}, c_a_types={record[4]}")

                tags = _parse_id_list(record[1], "tags", "tags字段为空（将启用仅按时间范围兜底）")
                c_a_types = _parse_id_list(record[4], "c_a_types", "c_a_types字段为空")

                data_analysis_records.append({
                    "id": record[0],
                    "tags": tags,
                    "start_time": record[2],
                    "end_time": record[3],
                    "c_a_types": c_a_types
                })

                logger.info(f"解析后记录数据: ID={record[0]}, tags={tags}, start_time={record[2]}, end_time={record[3]}, c_a_types={c_a_types}")

            return data_analysis_records
        finally:
            session.close()

    def get_comments_by_tags_and_date(self, tags, start_time, end_time, max_comments=200):
        """Fetch comments filtered by tag ids and video publish-date range.

        When `tags` is empty, falls back to a date-range-only query. Time
        parameters are optional: normalize_time_range fills a default window,
        or removes the time filter entirely when both bounds are None.

        Args:
            tags: list of tag ids; may be empty.
            start_time / end_time: optional datetime-ish bounds.
            max_comments: cap on the number of comment rows returned.

        Returns:
            list of dicts with original_text / translated_text / primary_label.
        """
        # Normalize the time interval first (swap reversed bounds, fill gaps).
        norm_start, norm_end, changed = self.normalize_time_range(start_time, end_time, fallback_days=30)
        if changed:
            logger.warning(f"时间区间已自动纠正: ({start_time} ~ {end_time}) -> ({norm_start} ~ {norm_end})")
        else:
            logger.info(f"时间区间正常: ({norm_start} ~ {norm_end})")

        logger.info(f"查询条件: tags={tags}, start_time={norm_start}, end_time={norm_end}")

        session = self.db_manager.Session()
        try:
            # Build the video-source query (date-bounded only when bounds exist).
            video_source_params = {}
            if norm_start and norm_end:
                video_source_query = """
                    SELECT id
                    FROM d_video_sources
                    WHERE DATE(publish_date) >= DATE(:start_time) AND DATE(publish_date) <= DATE(:end_time)
                """
                video_source_params = {"start_time": norm_start, "end_time": norm_end}
            else:
                # No time bounds -> consider every video source.
                video_source_query = "SELECT id FROM d_video_sources"
                logger.info("时间参数为空，将查询所有视频源")

            logger.info(f"视频源查询SQL: {video_source_query}")
            result = session.execute(text(video_source_query), video_source_params)
            video_source_ids = [row[0] for row in result.fetchall()]
            logger.info(f"找到 {len(video_source_ids)} 个符合条件的视频源ID: {video_source_ids[:10]}{'...' if len(video_source_ids) > 10 else ''}")

            if not video_source_ids:
                logger.warning("未找到符合条件的视频源")
                return []

            if tags:
                # Tags present -> first collect the comment_ids matching any tag.
                tag_placeholders = ','.join([':tag_' + str(i) for i in range(len(tags))])
                tag_query = f"""
                    SELECT DISTINCT comment_id
                    FROM d_comment_tag
                    WHERE tag_id IN ({tag_placeholders})
                """
                tag_params = {f"tag_{i}": tag for i, tag in enumerate(tags)}
                logger.info(f"标签查询SQL: {tag_query}")
                result = session.execute(text(tag_query), tag_params)
                comment_ids = [row[0] for row in result.fetchall()]
                logger.info(f"找到 {len(comment_ids)} 个符合条件的 comment_id: {comment_ids[:10]}{'...' if len(comment_ids) > 10 else ''}")

                if not comment_ids:
                    logger.warning("未找到符合条件的 comment_id")
                    return []

                # Bind parameters for both IN clauses.
                # NOTE(review): with very large id lists this generates one bound
                # parameter per id, which can hit driver parameter limits — confirm
                # expected data volumes.
                comment_params = {f"comment_{i}": comment for i, comment in enumerate(comment_ids)}
                video_params = {f"video_{i}": video for i, video in enumerate(video_source_ids)}
                all_params = {**comment_params, **video_params}
                
                comment_placeholders = ','.join([f":comment_{i}" for i in range(len(comment_ids))])
                video_placeholders = ','.join([f":video_{i}" for i in range(len(video_source_ids))])
                
                comments_query = f"""
                    SELECT original_text, translated_text, primary_label
                    FROM d_analyzed_comment
                    WHERE id IN ({comment_placeholders})
                      AND video_source_id IN ({video_placeholders})
                    LIMIT {int(max_comments)}
                """
            else:
                # No tags -> fall back to a time-range-only comment query.
                logger.info("标签列表为空，启用仅按时间范围的兜底查询")
                video_placeholders = ','.join([f":video_{i}" for i in range(len(video_source_ids))])
                video_params = {f"video_{i}": video for i, video in enumerate(video_source_ids)}
                all_params = video_params
                
                comments_query = f"""
                    SELECT original_text, translated_text, primary_label
                    FROM d_analyzed_comment
                    WHERE video_source_id IN ({video_placeholders})
                    ORDER BY id DESC
                    LIMIT {int(max_comments)}
                """

            logger.info(f"评论查询SQL: {comments_query}")
            result = session.execute(text(comments_query), all_params)
            rows = result.fetchall()

            result_list = [
                {
                    "original_text": r[0],
                    "translated_text": r[1],
                    "primary_label": r[2]
                }
                for r in rows
            ]
            logger.info(f"找到 {len(result_list)} 条符合条件的评论")
            return result_list
        finally:
            session.close()

    def get_analysis_type_descriptions(self, c_a_types):
        """Look up the topic text for each analysis-type id in c_a_types."""
        if not c_a_types:
            return []

        session = self.db_manager.Session()
        try:
            bind_names = [f"type_{i}" for i in range(len(c_a_types))]
            params = dict(zip(bind_names, c_a_types))
            placeholders = ','.join(f":{name}" for name in bind_names)
            query = f"SELECT topic FROM d_comment_analysis_type WHERE id IN ({placeholders})"
            logger.info(f"分析类型查询SQL: {query}")
            rows = session.execute(text(query), params).fetchall()
            topics = [row[0] for row in rows]
            logger.info(f"分析类型描述查询结果: {topics}")
            return topics
        finally:
            session.close()

    def call_deepseek_api(self, comments):
        """Run sentiment analysis on `comments` via the DeepSeek chat API.

        Builds the prompt from PROMPT_TEMPLATE, retries transient failures
        up to self.max_retries times with self.retry_delay seconds between
        attempts, and returns the parsed JSON analysis dict (missing fields
        filled with "未提供"), or None on unrecoverable failure.
        """
        if not comments:
            logger.warning("没有评论数据用于分析")
            return None

        # Cap the comment count to keep the prompt within token limits.
        limited_comments = comments[:50]  # at most 50 comments per request
        logger.info(f"限制评论数量至 {len(limited_comments)} 条")

        comments_str = json.dumps(limited_comments, ensure_ascii=False, indent=2)
        prompt_content = PROMPT_TEMPLATE.format(comments_data=comments_str)

        prompt_length = len(prompt_content)
        logger.info(f"提示内容长度: {prompt_length} 字符")

        if prompt_length > 50000:
            # Prompt still too large -> shrink the sample further.
            # NOTE(review): the length is not re-checked after shrinking.
            logger.warning("提示内容过长，进一步减少评论数量")
            reduced_comments = limited_comments[:20]
            comments_str = json.dumps(reduced_comments, ensure_ascii=False, indent=2)
            prompt_content = PROMPT_TEMPLATE.format(comments_data=comments_str)
            logger.info(f"减少后评论数量: {len(reduced_comments)} 条")

        payload = {
            "model": "deepseek-chat",
            "messages": [
                {"role": "system", "content": "你是一个专业的评论情感分析专家，请严格遵循用户指令输出JSON格式的分析结果，并使用中文回复"},
                {"role": "user", "content": prompt_content}
            ],
            "temperature": 0.3,
            "response_format": {"type": "json_object"},
            "max_tokens": 4096
        }

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        logger.info("🧠 正在调用 DeepSeek API 进行情感分析...")

        # Retry loop: HTTP errors, empty responses, JSON-parse failures, and
        # network exceptions are all retried with the same delay.
        for attempt in range(self.max_retries):
            try:
                response = requests.post(
                    DEEPSEEK_API_URL,
                    json=payload,
                    headers=headers,
                    timeout=self.api_timeout
                )

                logger.info(f"📡 API 返回状态码: {response.status_code}")

                if response.status_code != 200:
                    logger.error(f"❌ API 返回错误: {response.text[:500]}")
                    if attempt < self.max_retries - 1:
                        logger.info(f"⏳ 等待 {self.retry_delay} 秒后重试...")
                        sleep(self.retry_delay)
                        continue
                    return None

                api_response = response.json()
                choices = api_response.get("choices", [])

                if not choices:
                    logger.error("❌ API 返回无有效内容")
                    if attempt < self.max_retries - 1:
                        logger.info(f"⏳ 等待 {self.retry_delay} 秒后重试...")
                        sleep(self.retry_delay)
                        continue
                    return None

                message = choices[0].get("message", {})
                content = message.get("content", "")

                try:
                    analysis_result = json.loads(content)
                    # Guarantee every field the report generator expects.
                    required_fields = [
                        "sentiment_intensity", "sentiment_by_topic",
                        "unmet_needs", "usage_scenarios",
                        "purchase_drivers", "product_opportunities",
                        "strategy_recommendations"
                    ]
                    for field in required_fields:
                        if field not in analysis_result:
                            logger.warning(f"⚠️ 缺少字段: {field}")
                            analysis_result[field] = "未提供"
                    return analysis_result
                except json.JSONDecodeError as e:
                    logger.error(f"❌ 解析API响应失败: {str(e)}")
                    logger.debug(f"响应内容: {content}")
                    if attempt < self.max_retries - 1:
                        logger.info(f"⏳ 等待 {self.retry_delay} 秒后重试...")
                        sleep(self.retry_delay)
                        continue
                    return None

            except requests.exceptions.RequestException as e:
                logger.error(f"❌ API 请求失败: {str(e)}")
                if attempt < self.max_retries - 1:
                    logger.info(f"⏳ 等待 {self.retry_delay} 秒后重试...")
                    sleep(self.retry_delay)
                    continue
                return None
        return None

    def count_labels(self, comments):
        """Count occurrences of each non-empty primary_label in comments."""
        tally = {}
        for entry in comments:
            key = entry.get("primary_label")
            if not key:
                continue
            tally[key] = tally.get(key, 0) + 1
        return tally

    def _format_sentiment_by_topic(self, sentiment_by_topic):
        """Render the topic/aspect sentiment payload as an HTML list.

        Accepts a JSON string, dict, or list; non-JSON strings and other
        unrenderable values are returned as plain text.
        """
        if not sentiment_by_topic:
            return "无数据"

        if isinstance(sentiment_by_topic, str):
            try:
                payload = json.loads(sentiment_by_topic)
            except json.JSONDecodeError:
                # Not JSON -> show the raw text as-is.
                return sentiment_by_topic
        else:
            payload = sentiment_by_topic

        if isinstance(payload, dict):
            items = [f"<li><strong>{k}:</strong> {v}</li>" for k, v in payload.items()]
        elif isinstance(payload, list):
            items = []
            for element in payload:
                if isinstance(element, dict):
                    # Dicts inside the list expand to one <li> per key/value.
                    items.extend(f"<li><strong>{k}:</strong> {v}</li>" for k, v in element.items())
                else:
                    items.append(f"<li>{element}</li>")
        else:
            # Scalars and anything else: fall back to the original text.
            return str(sentiment_by_topic)
        return f"<ul>{''.join(items)}</ul>"

    def generate_rich_text_analysis(self, analysis_result, label_counts, type_descriptions):
        """Build the rich-text (HTML) report payload returned by the Web API.

        Returns a dict with status/timestamp plus, on success, the raw
        analysis, label statistics, type descriptions, and rendered HTML.
        """
        if not analysis_result:
            logger.warning("⚠️ 分析结果为空")
            return {
                "status": "failed",
                "error": "分析结果为空",
                "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "label_counts": label_counts
            }

        parts = []
        emit = parts.append
        emit("<h1>情感分析报告</h1>")
        emit(f"<p><strong>生成时间:</strong> {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>")

        intensity = analysis_result.get("sentiment_intensity")
        by_topic = analysis_result.get("sentiment_by_topic")
        if intensity or by_topic:
            emit("<h2>一、情感与主题</h2>")
            if intensity:
                emit(f"<p><strong>情感强度分析:</strong> {intensity}</p>")
            if by_topic:
                emit(f"<p><strong>主题/方面情感分析:</strong> {self._format_sentiment_by_topic(by_topic)}</p>")

        needs_fields = (
            ("unmet_needs", "未满足需求与痛点聚类"),
            ("usage_scenarios", "使用场景分析"),
            ("purchase_drivers", "购买驱动因素分析"),
        )
        if any(analysis_result.get(k) for k, _ in needs_fields):
            emit("<h2>二、需求与痛点</h2>")
            for key, title in needs_fields:
                value = analysis_result.get(key)
                if value:
                    emit(f"<p><strong>{title}:</strong> {value}</p>")

        strategy_fields = (
            ("product_opportunities", "产品机会"),
            ("strategy_recommendations", "策略建议"),
        )
        if any(analysis_result.get(k) for k, _ in strategy_fields):
            emit("<h2>三、产品机会与策略建议</h2>")
            for key, title in strategy_fields:
                value = analysis_result.get(key)
                if value:
                    emit(f"<p><strong>{title}:</strong> {value}</p>")

        emit("<h2>四、标签维度统计</h2>")
        emit("<ul>" + "".join(f"<li>{label}: {count} 条</li>" for label, count in label_counts.items()) + "</ul>")

        emit("<h2>五、分析类型描述</h2>")
        emit("<ul>" + "".join(f"<li>{desc}</li>" for desc in type_descriptions) + "</ul>")

        html_report = "\n".join(parts)

        return {
            "status": "success",
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "analysis": analysis_result,
            "label_counts": label_counts,
            "type_descriptions": type_descriptions,
            "html": f"<div style='font-family: Arial; line-height: 1.6;'>{html_report}</div>"
        }

    def save_html_result_to_db(self, record_id, html_content):
        """Persist the HTML report into d_data_analysis.prediction_result.

        Also flips the record's status to 1 (done) in the same statement.
        Failures are logged and rolled back but deliberately not re-raised
        (best-effort persistence).
        """
        session = self.db_manager.Session()
        try:
            # Single UPDATE: write the report and mark the record complete.
            update_query = """
                UPDATE d_data_analysis 
                SET prediction_result = :html_content,
                    status = 1
                WHERE id = :record_id
            """
            session.execute(text(update_query), {
                "html_content": html_content,
                "record_id": record_id
            })
            session.commit()
            logger.info(f"✅ HTML分析结果已保存到d_data_analysis表中ID为{record_id}的记录的prediction_result字段，状态已更新为完成")
        except Exception as e:
            logger.error(f"❌ 保存HTML分析结果到数据库失败: {str(e)}")
            session.rollback()
        finally:
            session.close()

    def run_sentiment_analysis_for_record(self, record):
        """Run the full sentiment pipeline for one analysis record.

        Returns:
            True when the analysis succeeded and the HTML report was
            persisted; False otherwise.
        """
        rec_id = record['id']
        logger.info(f"🔄 处理记录 ID: {rec_id}")

        norm_start, norm_end, adjusted = self.normalize_time_range(
            record['start_time'], record['end_time'], fallback_days=30
        )
        if adjusted:
            logger.warning(
                f"记录 {rec_id} 的时间区间已纠正: "
                f"({record['start_time']} ~ {record['end_time']}) -> ({norm_start} ~ {norm_end})"
            )

        comments = self.get_comments_by_tags_and_date(
            record['tags'],
            norm_start,
            norm_end,
            max_comments=int(os.getenv("MAX_COMMENTS", "150"))
        )
        if not comments:
            logger.warning(f"⚠️ 记录 ID {rec_id} 未找到匹配的评论")
            return False
        logger.info(f"✅ 记录 ID {rec_id} 找到 {len(comments)} 条评论")

        type_descriptions = self.get_analysis_type_descriptions(record['c_a_types'])
        logger.info(f"📋 分析类型描述: {type_descriptions}")

        label_counts = self.count_labels(comments)
        analysis_result = self.call_deepseek_api(comments)
        report = self.generate_rich_text_analysis(analysis_result, label_counts, type_descriptions)

        if report["status"] != "success":
            logger.error(f"❌ 记录 ID {rec_id} 分析失败")
            return False

        logger.info(f"✅ 记录 ID {rec_id} 分析完成")
        html_content = report.get("html", "")
        if not html_content:
            logger.warning(f"⚠️ 记录 ID {rec_id} 没有HTML内容可保存")
            return False

        # Persist the report and flip the record's status to done.
        self.save_html_result_to_db(rec_id, html_content)
        return True

    def run_pending_analysis_task(self):
        """Scheduled entry point: process at most one pending (status=0) record.

        Returns:
            True when a record was processed successfully; False when there
            was nothing to do or processing failed.
        """
        logger.info("⏰ 开始执行定时情感分析任务")

        pending = self.fetch_pending_analysis_record()
        if pending is None:
            logger.info("📭 没有待处理的记录")
            return False

        return self.run_sentiment_analysis_for_record(pending)

    def run_sentiment_analysis(self, record_index=5):
        """Batch entry point: analyze the record_index-th newest d_data_analysis record.

        Fetches the target record(s), gathers matching comments, runs the
        DeepSeek analysis, prints/collects each rich-text result, and
        persists the HTML report back to the database.

        Returns:
            list of {record_id, result} dicts, or None when no records exist.
        """
        logger.info("📚 开始执行情感数据分析任务...")

        # 1) Fetch the target record from d_data_analysis.
        logger.info(f"🔍 正在从 d_data_analysis 表查询记录（第{record_index}条）...")
        data_analysis_records = self.fetch_data_analysis_records(record_index)

        if not data_analysis_records:
            logger.warning("⚠️ 未找到 d_data_analysis 表记录。tags 可以为空；若为空会自动按时间范围查询。")
            return

        logger.info(f"✅ 找到 {len(data_analysis_records)} 条分析记录")

        results = []
        # 2) Analyze each record in turn.
        for record in data_analysis_records:
            logger.info(f"🔄 处理记录 ID: {record['id']}")
            norm_start, norm_end, changed = self.normalize_time_range(record['start_time'], record['end_time'], fallback_days=30)
            if changed:
                logger.warning(f"记录 {record['id']} 的时间区间已纠正: ({record['start_time']} ~ {record['end_time']}) -> ({norm_start} ~ {norm_end})")

            comments = self.get_comments_by_tags_and_date(
                record['tags'],
                norm_start,
                norm_end,
                max_comments=int(os.getenv("MAX_COMMENTS", "150"))
            )

            if not comments:
                logger.warning(f"⚠️ 记录 ID {record['id']} 未找到匹配的评论")
                continue

            logger.info(f"✅ 记录 ID {record['id']} 找到 {len(comments)} 条评论")

            type_descriptions = self.get_analysis_type_descriptions(record['c_a_types'])
            logger.info(f"📋 分析类型描述: {type_descriptions}")

            label_counts = self.count_labels(comments)
            analysis_result = self.call_deepseek_api(comments)

            rich_text_result = self.generate_rich_text_analysis(analysis_result, label_counts, type_descriptions)

            if rich_text_result["status"] == "success":
                logger.info(f"✅ 记录 ID {record['id']} 分析完成")
                logger.info("📊 分析结果:")
                print(json.dumps(rich_text_result, ensure_ascii=False, indent=2))
                results.append({
                    "record_id": record['id'],
                    "result": rich_text_result
                })
                
                # Persist the HTML report into prediction_result and set status=1.
                html_content = rich_text_result.get("html", "")
                if html_content:
                    self.save_html_result_to_db(record['id'], html_content)
                else:
                    logger.warning(f"⚠️ 记录 ID {record['id']} 没有HTML内容可保存")
            else:
                logger.error(f"❌ 记录 ID {record['id']} 分析失败")

        logger.info("🎉 所有记录处理完成")
        return results