import sqlite3
import os
import requests # 保留requests导入，其他地方还在使用
import httpx # 新增httpx导入，用于解决编码问题
import urllib.request # 新增导入
import urllib.parse # 新增导入
import re # 新增导入，用于清理文件名
import pdfplumber
import json # 用于处理LLM的输入输出（如果需要）
import time # 用于模拟LLM调用延迟
import random # 用于模拟不同的LLM返回结果
from datetime import datetime # 用于生成时间戳 # 修复导入
import config

# Force UTF-8 text handling for stdio and child processes
# (guards against mojibake when the host locale is not UTF-8).
os.environ['PYTHONIOENCODING'] = 'utf-8'
os.environ['LC_ALL'] = 'C.UTF-8'
os.environ['LANG'] = 'C.UTF-8'

# --- Configuration ---
PDF_TEMP_FOLDER = 'temp_pdfs'  # local cache directory for downloaded announcement PDFs
MAX_TEXT_LENGTH_NO_SPLIT = 30000  # texts at or below this length are analyzed as a single chunk
CHUNK_SIZE = 30000  # target size (characters) of each chunk when splitting longer texts

# --- 新增: 检查公告是否已分析成功 ---
def is_announcement_analyzed(conn, announcement_id):
    """
    Return True if this announcement already has a usable analysis row.

    NOTE(review): this function is redefined later in this module; the later
    definition shadows this one at import time — confirm which is intended.

    Fix: the reason columns can be NULL in the database; calling
    .startswith() on None raised AttributeError (not caught by the
    sqlite3.Error handler). NULLs are now coalesced to '' first.

    Args:
        conn: open sqlite3 connection whose row_factory yields sqlite3.Row.
        announcement_id: unique announcement identifier.

    Returns:
        bool: True to skip re-analysis; False when the record is missing,
              looks like a failed run, or the query itself errors.
    """
    try:
        cursor = conn.cursor()
        # Fetch all fields needed to judge whether the previous run succeeded.
        cursor.execute("""SELECT insight_summary, positive_reason, negative_reason, llm_call_successful
                          FROM announcement_insights 
                          WHERE announcement_id=?""", (announcement_id,))
        row = cursor.fetchone()

        if not row:
            return False  # no record: needs analysis

        # Coalesce NULL columns so string tests below cannot crash.
        insight_summary = row['insight_summary'] or ''
        positive_reason = row['positive_reason'] or ''
        negative_reason = row['negative_reason'] or ''

        # Explicit success flag: trust it, but still require a real summary.
        if row['llm_call_successful'] is not None and row['llm_call_successful'] == 1:
            if len(insight_summary.strip()) < 10:
                print(f"[重新分析] insight_summary 为空或过短: {announcement_id}")
                return False
            return True  # successful call with a valid summary: skip

        # Sentinel texts written when a previous API call failed.
        api_failure_summary = "大模型API调用失败，未能获得分析结果。"
        api_failure_positive_reason = "大模型API调用失败，无利好信息。"
        api_failure_negative_reason = "大模型API调用失败，无利空信息。"
        # Generic failure markers (e.g. "未能提取...")
        generic_failure_reason_part = "未能提取"
        generic_failure_summary_part = "未能提取摘要"

        if insight_summary == api_failure_summary or \
           insight_summary == generic_failure_summary_part or \
           positive_reason == api_failure_positive_reason or \
           positive_reason.startswith(generic_failure_reason_part) or \
           negative_reason == api_failure_negative_reason or \
           negative_reason.startswith(generic_failure_reason_part):
            print(f"[重新分析] 检测到上次分析失败或结果不完整: {announcement_id}")
            return False  # previous run failed: re-analyze

        # An empty or too-short summary also means the result is unusable.
        if len(insight_summary.strip()) < 10:
            print(f"[重新分析] insight_summary 为空或过短: {announcement_id}")
            return False

        return True  # record exists and looks like a successful analysis
    except sqlite3.Error as e:
        print(f"检查公告是否已分析失败 ({announcement_id}): {e}")
        return False  # on query error, default to re-analysis rather than skipping

# --- 新增: 确保表结构包含 sentiment_reason 字段 ---
def ensure_insights_table(conn):
    """Ensure the announcement_insights table exists with the current schema.

    Creates the table when absent; when legacy columns
    (neutral_reason / sentiment_reason / confidence) are detected the table
    is rebuilt via a backup/copy cycle (SQLite has no DROP COLUMN); finally
    any newer columns that are still missing are added with ALTER TABLE.
    Commits once at the end.
    """
    cursor = conn.cursor()
    cursor.execute('''CREATE TABLE IF NOT EXISTS announcement_insights (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        announcement_id TEXT,
        secCode TEXT,
        secName TEXT,
        title TEXT,
        publishTime TEXT,
        positive_reason TEXT,
        negative_reason TEXT,
        positive_level TEXT, -- 利好等级
        negative_level TEXT, -- 利空等级
        insight_summary TEXT,
        announcement_interpretation TEXT, -- 新增：全面的公告解读
        created_at TEXT,
        pdf_page_count INTEGER,
        pdf_view_link TEXT,
        llm_call_successful BOOLEAN, -- LLM调用是否成功
        positive_keywords TEXT,    -- 利好关键词 (JSON string)
        negative_keywords TEXT     -- 利空关键词 (JSON string)
    )''')
    # Inspect the live schema: pre-existing tables may lack newer columns.
    cursor.execute("PRAGMA table_info(announcement_insights)")
    cols = [row[1] for row in cursor.fetchall()]
    
    # Legacy columns require a rebuild — SQLite does not support DROP COLUMN.
    if 'neutral_reason' in cols or 'sentiment_reason' in cols or 'confidence' in cols:
        print("  [DB] 检测到旧字段，正在重建表结构...")
        # Back up existing rows.
        # NOTE(review): this SELECT assumes all listed columns exist in the
        # legacy table and omits announcement_interpretation — a legacy table
        # missing any listed column would make this statement fail, and any
        # interpretation data would be dropped. Confirm against real legacy DBs.
        cursor.execute('''CREATE TABLE announcement_insights_backup AS 
                         SELECT announcement_id, secCode, secName, title, publishTime, 
                                positive_reason, negative_reason, positive_level, negative_level, 
                                insight_summary, created_at, pdf_page_count, pdf_view_link,
                                llm_call_successful, positive_keywords, negative_keywords
                         FROM announcement_insights''')
        # Drop the legacy table.
        cursor.execute("DROP TABLE announcement_insights")
        # Recreate with the current schema.
        cursor.execute('''CREATE TABLE announcement_insights (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            announcement_id TEXT,
            secCode TEXT,
            secName TEXT,
            title TEXT,
            publishTime TEXT,
            positive_reason TEXT,
            negative_reason TEXT,
            positive_level TEXT,
            negative_level TEXT,
            insight_summary TEXT,
            announcement_interpretation TEXT,
            created_at TEXT,
            pdf_page_count INTEGER,
            pdf_view_link TEXT,
            llm_call_successful BOOLEAN,
            positive_keywords TEXT,
            negative_keywords TEXT
        )''')
        # Copy the backed-up rows back in.
        cursor.execute('''INSERT INTO announcement_insights 
                         (announcement_id, secCode, secName, title, publishTime, 
                          positive_reason, negative_reason, positive_level, negative_level, 
                          insight_summary, created_at, pdf_page_count, pdf_view_link,
                          llm_call_successful, positive_keywords, negative_keywords)
                         SELECT announcement_id, secCode, secName, title, publishTime, 
                                positive_reason, negative_reason, positive_level, negative_level, 
                                insight_summary, created_at, pdf_page_count, pdf_view_link,
                                llm_call_successful, positive_keywords, negative_keywords
                         FROM announcement_insights_backup''')
        # Remove the temporary backup table.
        cursor.execute("DROP TABLE announcement_insights_backup")
        print("  [DB] 表结构重建完成")
    
    # Add any newer columns still missing (no-op on a freshly created table).
    cursor.execute("PRAGMA table_info(announcement_insights)")
    cols = [row[1] for row in cursor.fetchall()]
    if 'pdf_page_count' not in cols:
        cursor.execute("ALTER TABLE announcement_insights ADD COLUMN pdf_page_count INTEGER")
    if 'llm_call_successful' not in cols:
        cursor.execute("ALTER TABLE announcement_insights ADD COLUMN llm_call_successful BOOLEAN")
    if 'positive_keywords' not in cols:
        cursor.execute("ALTER TABLE announcement_insights ADD COLUMN positive_keywords TEXT")
    if 'negative_keywords' not in cols:
        cursor.execute("ALTER TABLE announcement_insights ADD COLUMN negative_keywords TEXT")
    if 'announcement_interpretation' not in cols:
        cursor.execute("ALTER TABLE announcement_insights ADD COLUMN announcement_interpretation TEXT")
        print("  [DB] 添加了公告解读字段")
    
    conn.commit()

# --- 1. 连接数据库并读取公告信息 ---
def get_announcements_from_db(db_path):
    """
    Connect to the SQLite database and fetch all rows from `announcements`.

    Fix: the connection was only closed on the success path, leaking it
    whenever the query raised; it is now closed in a finally block.

    Args:
        db_path (str): path to the SQLite database file.

    Returns:
        list: sqlite3.Row objects, one per announcement (accessible by
              column name). Empty list on any sqlite3 error.
    """
    announcements = []
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        conn.row_factory = sqlite3.Row  # allow access by column name
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM announcements")
        announcements = cursor.fetchall()
    except sqlite3.Error as e:
        print(f"数据库错误: {e}")
    finally:
        # Always release the connection, even when the query fails.
        if conn is not None:
            conn.close()
    return announcements

# --- 调用大模型API ---
import urllib.request
from config import SILICONFLOW_API_KEY, SILICONFLOW_API_URL, AI_MODEL_NAME

def build_prompt(text_chunk):
    """Build the Chinese analyst prompt for one announcement text chunk.

    The prompt instructs the LLM to return a strict-JSON object with eight
    fields: positive/negative level, reason and keyword tags (keywords must
    come from the fixed label lists embedded below), an investor-facing
    summary, and a 500-800 character professional interpretation.

    Args:
        text_chunk (str): announcement text to embed into the prompt.

    Returns:
        str: the fully rendered prompt, including a JSON output example.
    """
    return f"""
请你以专业金融分析师身份，阅读以下上市公司公告文本，严格以JSON格式输出如下八项：

1.  `positive_level`: 利好等级。请从以下选项中选择一个：["无利好信息", "轻微利好", "一般利好", "重大利好", "变革性利好"]。如果无明显利好信号，请选择 "无利好信息"。
2.  `positive_reason`: 对利好判断的核心理由，必须具体、简洁。如果无明显利好信号，原因为"无利好信息"。
3.  `positive_keywords`: 从公告中提取的利好相关标签关键词列表，用于分类筛选。请从以下标签中选择：["股份回购", "分红派息", "业绩增长", "重大合同", "产能扩张", "技术突破", "政策利好", "债务重组", "成本降低", "并购重组", "资产注入", "战略合作", "新产品发布", "市场拓展", "IPO上市", "增发融资", "可转债发行", "股权激励", "投资项目"]。如果没有则为空列表 []。
4.  `negative_level`: 利空等级。请从以下选项中选择一个：["无利空信息", "轻微利空", "一般利空", "严重利空", "毁灭性利空"]。如果无明显利空信号，请选择 "无利空信息"。
5.  `negative_reason`: 对利空判断的核心理由，必须具体、简洁。如果无明显利空信号，原因为"无利空信息"。
6.  `negative_keywords`: 从公告中提取的利空相关标签关键词列表，用于分类筛选。请从以下标签中选择：["高管减持", "大股东减持", "业绩下滑", "债务违约", "监管处罚", "商誉减值", "资产减值", "担保风险", "原材料涨价", "诉讼仲裁", "退市风险", "停产整顿", "关联交易风险", "汇率风险", "产能过剩", "合规整改", "特别处理", "风险提示", "澄清公告"]。如果没有则为空列表 []。
7.  `insight_summary`: 面向普通股民，全面总结公告的核心内容、主要观点、潜在影响等，字数适中。
8.  `announcement_interpretation`: 对公告的全面专业解读，包括背景分析、业务影响、财务影响、市场意义、风险提示等，为投资者提供深度分析，字数500-800字。

**重要提醒**：
- 关键词必须从提供的标签列表中选择，用于后续的分类和筛选功能
- 标签体系结合了投资价值导向和交易所分类导向，既便于投资决策又便于分类管理
- 具体的事件描述和数据请写在reason字段中
- 公告解读要专业、全面，涵盖多个维度的分析
- 请确保输出为有效的JSON格式

**公告文本**：
{text_chunk}

请严格按照JSON格式输出，示例：
{{
    "positive_level": "一般利好",
    "positive_reason": "公司中标8.5亿元重大基础设施项目，预计2024年开始确认收入",
    "positive_keywords": ["重大合同"],
    "negative_level": "无利空信息", 
    "negative_reason": "无利空信息",
    "negative_keywords": [],
    "insight_summary": "公司公告中标某重大基础设施项目，合同金额8.5亿元，预计将在2024年开始确认收入，对公司业绩有积极影响。",
    "announcement_interpretation": "本次中标体现了公司在基础设施建设领域的竞争优势和技术实力。从业务角度看，8.5亿元的合同金额占公司年收入的比重较大，将显著提升公司在手订单规模。从财务角度看，项目预计在2024年开始确认收入，有助于改善公司收入结构和现金流状况。从市场角度看，此次中标证明了公司在行业内的地位和客户认可度。投资者需关注项目执行进度、回款情况以及对公司资金占用的影响。总体而言，这是一个积极的业务进展，但仍需关注项目执行风险。"
}}
"""

def call_llm_api_openrouter(prompt):
    """Send *prompt* to the OpenRouter chat-completions endpoint.

    The JSON body is serialized with ensure_ascii=False and posted as UTF-8
    bytes so Chinese text survives the transport; the X-Title header value
    is re-encoded latin-1 because HTTP header values cannot carry raw UTF-8.

    Returns the assistant message content on success, otherwise None.
    """
    try:
        request_headers = {
            'Authorization': f'Bearer {config.OPENROUTER_API_KEY}',
            'HTTP-Referer': config.OPENROUTER_SITE_URL,
            'X-Title': config.OPENROUTER_SITE_NAME.encode('utf-8').decode('latin-1'),  # header-safe encoding for Chinese
            'Content-Type': 'application/json; charset=utf-8'
        }

        body = {
            "model": config.OPENROUTER_MODEL_NAME,
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 2000,
            "temperature": 0.3,
            "top_p": 0.9
        }

        # Serialize ourselves (UTF-8 bytes) instead of letting requests do it.
        encoded_body = json.dumps(body, ensure_ascii=False).encode('utf-8')

        response = requests.post(
            config.OPENROUTER_API_URL,
            data=encoded_body,
            headers=request_headers,
            timeout=config.API_TIMEOUT
        )

        if response.status_code != 200:
            print(f"OpenRouter API调用失败，状态码: {response.status_code}, 响应: {response.text}")
            return None

        result = response.json()
        choices = result.get('choices')
        if not choices:
            print(f"OpenRouter API返回格式异常: {result}")
            return None
        return choices[0]['message']['content']

    except Exception as e:
        print(f"OpenRouter API调用失败: {e}")
        return None

def call_llm_api_siliconflow(prompt):
    """Send *prompt* to the SiliconFlow chat-completions endpoint.

    Uses urllib.request directly with a UTF-8-encoded JSON body (avoids any
    implicit re-encoding of Chinese text). Returns the assistant message
    content on success, otherwise None.
    """
    try:
        import urllib.request
        import json

        body = {
            "model": config.AI_MODEL_NAME,
            "messages": [{"role": "user", "content": prompt}],
            "stream": False,
            "max_tokens": 1024,
            "temperature": 0.7,
            "top_p": 0.7,
        }

        # Serialize with ensure_ascii=False and encode explicitly as UTF-8.
        encoded_body = json.dumps(body, ensure_ascii=False).encode('utf-8')

        request = urllib.request.Request(
            config.SILICONFLOW_API_URL,
            data=encoded_body,
            headers={
                "Authorization": f"Bearer {config.SILICONFLOW_API_KEY}",
                "Content-Type": "application/json; charset=utf-8",
            },
        )

        with urllib.request.urlopen(request, timeout=60) as response:
            parsed = json.loads(response.read().decode('utf-8'))

        return parsed["choices"][0]["message"]["content"]

    except Exception as e:
        print(f"SiliconFlow API调用失败: {e}")
        return None

def call_llm_api(prompt):
    """Route *prompt* to the provider selected by config.LLM_PROVIDER.

    "siliconflow" and "openrouter" dispatch directly; "auto" tries
    SiliconFlow first and falls back to OpenRouter. Returns the model reply
    string, or None when the provider is unknown or every call fails.
    """
    provider = config.LLM_PROVIDER.lower()

    if provider == "auto":
        # Fallback chain: SiliconFlow first, then OpenRouter.
        print("  [AUTO] 尝试SiliconFlow API...")
        reply = call_llm_api_siliconflow(prompt)
        if reply is not None:
            print("  [AUTO] SiliconFlow API调用成功")
            return reply

        print("  [AUTO] SiliconFlow失败，尝试OpenRouter API...")
        reply = call_llm_api_openrouter(prompt)
        if reply is not None:
            print("  [AUTO] OpenRouter API调用成功")
            return reply

        print("  [AUTO] 所有API提供商都失败")
        return None

    dispatch = {
        "siliconflow": call_llm_api_siliconflow,
        "openrouter": call_llm_api_openrouter,
    }
    handler = dispatch.get(provider)
    if handler is None:
        print(f"未知的LLM提供商: {provider}")
        return None
    return handler(prompt)

# --- 2. 下载PDF文件 ---
def sanitize_filename(filename):
    """Sanitize a string for safe use as a filename.

    Path separators become underscores; remaining characters are restricted
    to word characters (Unicode-aware), dots and hyphens; the result is
    capped at 100 characters.

    Generalization: the old pattern ``[^a-zA-Z0-9_.-]`` deleted every CJK
    character, so Chinese announcement titles always sanitized to an empty
    string. ``\\w`` is Unicode-aware in Python 3, preserving them; ASCII
    behavior is unchanged.

    Args:
        filename (str): raw candidate filename (e.g. an announcement title).

    Returns:
        str: sanitized filename fragment, possibly empty, at most 100 chars.
    """
    # Path separators must never survive into a filename.
    filename = re.sub(r'[\\/]', '_', filename)
    # Keep Unicode word characters, dots and hyphens; drop everything else.
    filename = re.sub(r'[^\w.-]', '', filename)
    # Guard against overly long filenames.
    return filename[:100]

def download_pdf(pdf_url, secCode, title, temp_folder, announcement_id):
    """
    Download a PDF into the temp folder and verify it really is a PDF.

    Fixes: removed the dead `url_filename` computation (its value was never
    used) and the redundant function-local `import urllib.request` (already
    imported at module level).

    Args:
        pdf_url (str): download link for the PDF.
        secCode (str): stock code (used only in log messages).
        title (str): announcement title, sanitized into the filename.
        temp_folder (str): target directory — assumed to exist; TODO confirm
            callers create it before the first download.
        announcement_id (str): unique announcement id, used as filename prefix.

    Returns:
        str | None: local path of the (possibly pre-existing) file, or None
                    when the link is empty, the download fails, or the
                    downloaded content is not a PDF.
    """
    if not pdf_url:
        print(f"股票代码 {secCode} 的PDF链接为空，跳过下载。")
        return None

    try:
        # Name the file "<id>_<sanitized title>.pdf"; fall back to a generic
        # name when the title sanitizes to nothing.
        clean_title_part = sanitize_filename(title)
        if clean_title_part:
            filename = f"{announcement_id}_{clean_title_part}.pdf"
        else:
            filename = f"{announcement_id}_announcement.pdf"

        local_pdf_path = os.path.join(temp_folder, filename)

        # Skip the download when we already have the file cached.
        if os.path.exists(local_pdf_path):
            print(f"文件已存在: {local_pdf_path} (链接: {pdf_url})")
            return local_pdf_path

        print(f"开始下载: {pdf_url} 为 {local_pdf_path}")
        urllib.request.urlretrieve(pdf_url, local_pdf_path)

        # Validate the magic number: real PDFs start with %PDF-.
        try:
            with open(local_pdf_path, 'rb') as f:
                first_bytes = f.read(8)
                if not first_bytes.startswith(b'%PDF-'):
                    print(f"警告: 下载的文件不是PDF格式 (前8字节: {first_bytes})")
                    # Often the server returned an HTML error/redirect page.
                    f.seek(0)
                    content = f.read(1024).decode('utf-8', errors='ignore')
                    if '<html' in content.lower() or '<!doctype html' in content.lower():
                        print(f"错误: 下载的是HTML页面而不是PDF文件")
                        print(f"内容前512字符: {content[:512]}")
                    else:
                        print(f"未知文件格式，文件内容前512字符: {content[:512]}")
                    os.remove(local_pdf_path)  # discard the invalid download
                    return None
        except Exception as e:
            print(f"检查文件格式时出错: {e}")
            return None

        print(f"下载成功: {local_pdf_path}")
        return local_pdf_path

    except Exception as e:
        print(f"下载PDF失败 (链接: {pdf_url}): {e}")
    return None

# --- 3. 从PDF提取文本 ---
def extract_text_from_pdf(pdf_path):
    """
    Extract all text and the page count from a local PDF file.

    Fix: the missing-file path returned a bare None, but the caller unpacks
    the result as a 2-tuple (`text, page_count = ...`), which raised
    TypeError. It now returns (None, 0) like the other failure path.

    Args:
        pdf_path (str): path to the local PDF file.

    Returns:
        tuple[str | None, int]: (full text joined by newlines, page count),
                                or (None, 0) when the file is missing or
                                extraction fails.
    """
    if not os.path.exists(pdf_path):
        print(f"PDF文件不存在: {pdf_path}")
        return None, 0  # keep the (text, page_count) contract on failure

    full_text = []
    try:
        with pdfplumber.open(pdf_path) as pdf:
            print(f"开始从 {pdf_path} (共 {len(pdf.pages)} 页) 提取文本...")
            for page in pdf.pages:
                text = page.extract_text()
                # Pages without extractable text (e.g. scans) are skipped.
                if text:
                    full_text.append(text.strip())
            page_count = len(pdf.pages)
            print(f"文本提取完成: {pdf_path} (共 {page_count} 页)")
        return "\n".join(full_text), page_count
    except Exception as e:
        print(f"从PDF {pdf_path} 提取文本失败: {e}")
        return None, 0

# --- 4. 分批处理文本 ---
def split_text_into_chunks(text, max_len_no_split, chunk_size):
    """
    Split a long text into smaller chunks, preferring sentence boundaries.

    Texts at or below *max_len_no_split* are returned as a single chunk.
    Otherwise the text is cut into pieces of roughly *chunk_size* characters;
    within the second half of each window we look backwards for a newline or
    sentence-ending punctuation and cut just after it, falling back to a hard
    cut at *chunk_size* when none is found. Each chunk is stripped.

    Args:
        text (str): source text.
        max_len_no_split (int): length threshold below which no split happens.
        chunk_size (int): target characters per chunk.

    Returns:
        list[str]: one or more text chunks (empty list for empty input).
    """
    if not text:
        return []

    total = len(text)
    if total <= max_len_no_split:
        print(f"文本长度 {total} 未超过 {max_len_no_split}，无需分割。")
        return [text]

    print(f"文本长度 {total} 超过 {max_len_no_split}，开始分割成约 {chunk_size} 字/块...")

    break_chars = ('\n', '\r', '。', '！', '？', '.', '!', '?')
    pieces = []
    cursor = 0
    while cursor < total:
        boundary = cursor + chunk_size
        if boundary < total:
            # Scan backwards through the window's second half for a break
            # character; cut just after it so the punctuation stays attached.
            chosen = -1
            for pos in range(min(boundary, total - 1), cursor + chunk_size // 2, -1):
                if text[pos] in break_chars:
                    chosen = pos + 1
                    break
            if chosen != -1 and cursor < chosen:
                boundary = chosen
            # No break character found: hard cut at chunk_size.
        pieces.append(text[cursor:boundary].strip())
        cursor = boundary

    print(f"文本已分割成 {len(pieces)} 块。")
    for idx, piece in enumerate(pieces):
        print(f"  块 {idx+1} 长度: {len(piece)}")
    return pieces

# --- 5. 大模型分析（占位符） ---
def analyze_text_with_llm(text_chunk):
    """
    Placeholder sentiment analysis for a single text chunk.

    Real analysis goes through call_llm_api(); this stub only simulates a
    response so any remaining callers do not crash.

    Fix: the previous version referenced undefined names `sentiment` and
    `reason` (the simulation code had been removed but the return statement
    kept), raising NameError on every call. A working simulation is restored
    and the artificial delay shortened.

    Args:
        text_chunk (str): text to analyze.

    Returns:
        dict: {'sentiment': '中性'|'利好'|'利空', 'reason': str}
    """
    print(f"\n  [LLM分析占位符] 正在分析文本块 (长度: {len(text_chunk)})...")
    print("  提示: 您需要在此处实现与您本地部署的大模型的实际交互逻辑。")
    processed_chunk = text_chunk[:50].replace('\n', ' ')
    print(f"  示例文本块前50字符: {processed_chunk}...")

    # Simulate a short LLM latency (kept small so callers are not blocked).
    time.sleep(random.uniform(0.05, 0.15))

    # Simulated result — replace with a real model call when available.
    sentiment = random.choice(['中性', '利好', '利空'])
    reason = f"模拟分析结果：根据文本内容判断为{sentiment}。"

    analysis_result = {"sentiment": sentiment, "reason": reason}
    print(f"  [LLM分析占位符] 模拟返回 - 情感: {analysis_result['sentiment']}, 原因: {analysis_result['reason'][:30]}...")
    return analysis_result

# --- 6. 汇总结果 ---
def summarize_all_analyses(chunk_analyses):
    """
    Merge per-chunk LLM analyses into a single final verdict.

    The highest positive/negative level across chunks wins; reasons,
    summaries and interpretations are concatenated; keywords are
    de-duplicated, sorted and capped; overly long text is truncated; and the
    overall success flag is the AND of every chunk's llm_call_successful.

    Args:
        chunk_analyses (list): per-chunk dicts with keys 'positive_level',
            'positive_reason', 'positive_keywords', 'negative_level',
            'negative_reason', 'negative_keywords', 'insight_summary',
            'announcement_interpretation', 'llm_call_successful'.

    Returns:
        dict: aggregated fields, or a canned failure result when the input
              is empty or contains no valid (truthy) analyses.
    """
    if not chunk_analyses:
        return {
            'final_positive_level': '无利好信息',
            'final_positive_reason': '无有效分析块。',
            'final_positive_keywords': [],
            'final_negative_level': '无利空信息',
            'final_negative_reason': '无有效分析块。',
            'final_negative_keywords': [],
            'final_insight_summary': '没有提供文本块进行分析。',
            'announcement_interpretation': '没有提供文本块进行分析。',
            'final_llm_call_successful': False
        }

    valid = [a for a in chunk_analyses if a]
    if not valid:
        return {
            'final_positive_level': '无利好信息',
            'final_positive_reason': '无有效分析结果。',
            'final_positive_keywords': [],
            'final_negative_level': '无利空信息',
            'final_negative_reason': '无有效分析结果。',
            'final_negative_keywords': [],
            'final_insight_summary': 'LLM未能成功分析所有文本块或输出格式不正确。',
            'announcement_interpretation': 'LLM未能成功分析所有文本块或输出格式不正确。',
            'final_llm_call_successful': False
        }

    POSITIVE_LEVEL_ORDER = ["无利好信息", "轻微利好", "一般利好", "重大利好", "变革性利好"]
    NEGATIVE_LEVEL_ORDER = ["无利空信息", "轻微利空", "一般利空", "严重利空", "毁灭性利空"]

    def _highest(levels, order, fallback):
        # Highest-ranked level wins; unknown labels rank below everything.
        if not levels:
            return fallback
        return max(levels, key=lambda lvl: order.index(lvl) if lvl in order else -1)

    def _joined(field):
        # Concatenate non-empty per-chunk values with single spaces.
        return " ".join(filter(None, (a.get(field, '') for a in valid))).strip()

    def _clip(value, limit):
        # Truncate overly long text, marking the cut with an ellipsis.
        return value[:limit] + "..." if len(value) > limit else value

    final_positive_level = _highest(
        [a.get('positive_level', '无利好信息') for a in valid],
        POSITIVE_LEVEL_ORDER, "无利好信息")
    final_negative_level = _highest(
        [a.get('negative_level', '无利空信息') for a in valid],
        NEGATIVE_LEVEL_ORDER, "无利空信息")

    # Fall back to a default sentence when a concatenated field is empty.
    final_positive_reason = _joined('positive_reason') or "无利好信息。"
    final_negative_reason = _joined('negative_reason') or "无利空信息。"
    final_insight_summary = _joined('insight_summary') or "未能从文本块中提取有效的摘要。"
    final_interpretation = _joined('announcement_interpretation') or "未能从文本块中提取有效的公告解读。"

    # De-duplicate keywords across chunks, sort for stable output, cap at 20.
    final_positive_keywords = sorted({kw for a in valid for kw in a.get('positive_keywords', [])})[:20]
    final_negative_keywords = sorted({kw for a in valid for kw in a.get('negative_keywords', [])})[:20]

    # Overall success only when every chunk reported (or implied) success.
    all_calls_ok = all(a.get('llm_call_successful', True) for a in valid)

    return {
        'final_positive_level': final_positive_level,
        'final_positive_reason': _clip(final_positive_reason, 500),
        'final_positive_keywords': final_positive_keywords,
        'final_negative_level': final_negative_level,
        'final_negative_reason': _clip(final_negative_reason, 500),
        'final_negative_keywords': final_negative_keywords,
        'final_insight_summary': _clip(final_insight_summary, 2000),
        'announcement_interpretation': _clip(final_interpretation, 3000),
        'final_llm_call_successful': all_calls_ok
    }

# --- 7. 存入新表 ---
def create_insights_table(conn):
    """
    Create the announcement_insights table if it does not exist.

    NOTE(review): this appears to be a legacy variant — its schema
    (analysis_result / generation_time / pdf_download_link, with a UNIQUE
    constraint) conflicts with the schema created by ensure_insights_table
    and with the columns save_analysis_to_db inserts. Confirm which creator
    is actually in use before relying on this one.
    """
    try:
        cursor = conn.cursor()
        cursor.execute("""
        CREATE TABLE IF NOT EXISTS announcement_insights (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            secCode TEXT,
            title TEXT,
            publishTime TEXT,
            analysis_result TEXT, -- 中性/利好/利空
            insight_summary TEXT, -- 汇总原因
            generation_time TEXT, -- 生成时间
            pdf_download_link TEXT, -- 保留原始链接以供追溯
            pdf_page_count INTEGER,
            UNIQUE(secCode, title, publishTime) -- 避免重复记录同一公告的分析
        )
        """)
        # Inspect the live schema to detect legacy columns.
        cursor.execute("PRAGMA table_info(announcement_insights)")
        columns = [info[1] for info in cursor.fetchall()]
        
        # Legacy columns require a rebuild — SQLite has no DROP COLUMN.
        if 'neutral_reason' in columns or 'sentiment_reason' in columns or 'confidence' in columns:
            print("  [DB] 检测到旧字段，正在重建表结构...")
            # Back up existing rows.
            # NOTE(review): assumes the listed columns all exist in the old
            # table; an older schema missing any of them would make this fail.
            cursor.execute('''CREATE TABLE announcement_insights_backup AS 
                             SELECT secCode, title, publishTime, analysis_result, insight_summary, generation_time, pdf_download_link
                             FROM announcement_insights''')
            # Drop the legacy table.
            cursor.execute("DROP TABLE announcement_insights")
            # Recreate it (note: the rebuilt table loses the UNIQUE constraint).
            cursor.execute('''CREATE TABLE announcement_insights (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                secCode TEXT,
                title TEXT,
                publishTime TEXT,
                analysis_result TEXT,
                insight_summary TEXT,
                generation_time TEXT,
                pdf_download_link TEXT,
                pdf_page_count INTEGER
            )''')
            # Copy the backed-up rows back in.
            cursor.execute('''INSERT INTO announcement_insights 
                             (secCode, title, publishTime, analysis_result, insight_summary, generation_time, pdf_download_link)
                             SELECT secCode, title, publishTime, analysis_result, insight_summary, generation_time, pdf_download_link
                             FROM announcement_insights_backup''')
            # Remove the temporary backup table.
            cursor.execute("DROP TABLE announcement_insights_backup")
            print("  [DB] 表结构重建完成")
        
        # Add pdf_page_count when an older table lacks it.
        cursor.execute("PRAGMA table_info(announcement_insights)")
        columns = [info[1] for info in cursor.fetchall()]
        if 'pdf_page_count' not in columns:
            cursor.execute("ALTER TABLE announcement_insights ADD COLUMN pdf_page_count INTEGER")
        conn.commit()
        print("表 'announcement_insights' 已确保存在。")
    except sqlite3.Error as e:
        print(f"创建 'announcement_insights' 表失败: {e}")

def is_announcement_analyzed(conn, announcement_id):
    """
    Return True if this announcement already has a usable analysis row.

    Fix: the reason columns can be NULL in the database; calling
    .startswith() on None raised AttributeError (not caught by the
    sqlite3.Error handler below). NULLs are now coalesced to '' first.

    Args:
        conn: open sqlite3 connection whose row_factory yields sqlite3.Row.
        announcement_id: unique announcement identifier.

    Returns:
        bool: True to skip re-analysis; False when the record is missing,
              looks like a failed run, or the query itself errors.
    """
    try:
        cursor = conn.cursor()
        # Fetch all fields needed to judge whether the previous run succeeded.
        cursor.execute("""SELECT insight_summary, positive_reason, negative_reason, llm_call_successful
                          FROM announcement_insights 
                          WHERE announcement_id=?""", (announcement_id,))
        row = cursor.fetchone()

        if not row:
            return False  # no record: needs analysis

        # Coalesce NULL columns so string tests below cannot crash.
        insight_summary = row['insight_summary'] or ''
        positive_reason = row['positive_reason'] or ''
        negative_reason = row['negative_reason'] or ''

        # Explicit success flag: trust it, but still require a real summary.
        if row['llm_call_successful'] is not None and row['llm_call_successful'] == 1:
            if len(insight_summary.strip()) < 10:
                print(f"[重新分析] insight_summary 为空或过短: {announcement_id}")
                return False
            return True  # successful call with a valid summary: skip

        # Sentinel texts written when a previous API call failed.
        api_failure_summary = "大模型API调用失败，未能获得分析结果。"
        api_failure_positive_reason = "大模型API调用失败，无利好信息。"
        api_failure_negative_reason = "大模型API调用失败，无利空信息。"
        # Generic failure markers (e.g. "未能提取...")
        generic_failure_reason_part = "未能提取"
        generic_failure_summary_part = "未能提取摘要"

        if insight_summary == api_failure_summary or \
           insight_summary == generic_failure_summary_part or \
           positive_reason == api_failure_positive_reason or \
           positive_reason.startswith(generic_failure_reason_part) or \
           negative_reason == api_failure_negative_reason or \
           negative_reason.startswith(generic_failure_reason_part):
            print(f"[重新分析] 检测到上次分析失败或结果不完整: {announcement_id}")
            return False  # previous run failed: re-analyze

        # An empty or too-short summary also means the result is unusable.
        if len(insight_summary.strip()) < 10:
            print(f"[重新分析] insight_summary 为空或过短: {announcement_id}")
            return False

        return True  # record exists and looks like a successful analysis
    except sqlite3.Error as e:
        print(f"检查公告是否已分析失败 ({announcement_id}): {e}")
        return False  # on query error, default to re-analysis rather than skipping

def save_analysis_to_db(conn, announcement_id, secCode, secName, title, publishTime, 
                        positive_level, positive_reason, positive_keywords_json,
                        negative_level, negative_reason, negative_keywords_json,
                        insight_summary, announcement_interpretation, pdf_page_count, pdf_view_link, llm_call_successful):
    """
    Insert one analysis result row into announcement_insights and commit.

    Stores levels, reasons, keyword JSON strings, the summary and
    interpretation texts, the PDF page count/link, the LLM success flag,
    and a created_at timestamp generated at call time.
    """
    created_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    values = (announcement_id, secCode, secName, title, publishTime,
              positive_level, positive_reason, positive_keywords_json,
              negative_level, negative_reason, negative_keywords_json,
              insight_summary, announcement_interpretation, created_at,
              pdf_page_count, pdf_view_link, llm_call_successful)
    cursor = conn.cursor()
    cursor.execute('''INSERT INTO announcement_insights 
                      (announcement_id, secCode, secName, title, publishTime, 
                       positive_level, positive_reason, positive_keywords,
                       negative_level, negative_reason, negative_keywords,
                       insight_summary, announcement_interpretation, created_at, 
                       pdf_page_count, pdf_view_link, llm_call_successful) 
                      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', values)
    conn.commit()

import concurrent.futures

def analyze_announcement_task(announcement):
    """
    Analyze a single announcement end-to-end (thread-pool worker).

    Pipeline: skip if already analyzed -> rate-limit delay -> download the
    PDF -> extract text -> split into chunks -> run each chunk through the
    LLM -> merge per-chunk results with summarize_all_analyses() -> persist
    via save_analysis_to_db() only when the LLM call succeeded.

    Each task opens its own SQLite connection and always closes it in the
    finally block, so tasks can run concurrently in a thread pool.

    :param announcement: sqlite3.Row-like record with at least the keys
        'id', 'secCode', 'secName', 'title', 'publishTime',
        'pdf_view_link' (and optionally 'pdf_download_link').
    :returns: None; progress and errors are reported via print() and the DB.
    :raises: re-raises any unexpected exception so future.result() sees it.
    """
    import sqlite3
    import config
    import json  # needed below to serialize keyword lists for storage
    conn = sqlite3.connect(config.DATABASE_PATH)
    conn.row_factory = sqlite3.Row  # enable column access by name
    try:
        # Extract the basic fields of the announcement record
        try:
            announcement_id = announcement['id']
            # sqlite3.Row has no .get() method; access directly or test key membership
            pdf_download_link = announcement['pdf_download_link'] if 'pdf_download_link' in announcement.keys() and announcement['pdf_download_link'] else announcement['pdf_view_link']
            pdf_view_link = announcement['pdf_view_link'] if 'pdf_view_link' in announcement.keys() else ''
            secCode = announcement['secCode']
            secName = announcement['secName'] or announcement['secCode'] or '未知公司'  # fall back to secCode when secName is empty
            title = announcement['title']
            publishTime = announcement['publishTime']
            
        except KeyError as ke:
            print(f"[ERROR] 缺少必需的键: {ke}")
            return
        except Exception as e:
            print(f"[ERROR] 提取基本信息时发生错误: {e}")
            return
        
        if is_announcement_analyzed(conn, announcement_id):
            print(f"[跳过] 公告已分析: {secCode} - {secName} - {title}")
            return
        # API rate control: delay before the task starts its work
        time.sleep(config.API_CALL_DELAY_SECONDS if hasattr(config, 'API_CALL_DELAY_SECONDS') else 2) # default delay of 2 seconds
        local_pdf_path = download_pdf(pdf_download_link, secCode, title, PDF_TEMP_FOLDER, announcement_id)
        if not local_pdf_path:
            return
        extracted_text, pdf_page_count = extract_text_from_pdf(local_pdf_path)
        if not extracted_text:
            # Even if text extraction fails, we might have a page count if the PDF was opened
            # For now, if text is None, we assume analysis cannot proceed.
            return
        print(f"  成功提取文本，总长度: {len(extracted_text)} 字。")
        text_chunks = split_text_into_chunks(extracted_text, MAX_TEXT_LENGTH_NO_SPLIT, CHUNK_SIZE)
        if not text_chunks:
            return
        print(f"  文本已准备好进行分析（共 {len(text_chunks)} 块）。")
        block_results = []
        for idx, text_chunk in enumerate(text_chunks):
            print(f"    处理文本块 {idx+1}/{len(text_chunks)}...")
            print(f"  [LLM分析] 正在分析文本块 (长度: {len(text_chunk)})...")
            processed_chunk = text_chunk[:50].replace('\n', ' ')
            print(f"  示例文本块前50字符: {processed_chunk}...")
            prompt = build_prompt(text_chunk)
            llm_reply = call_llm_api(prompt)
            if llm_reply:
                import json  # NOTE(review): redundant — json is already imported at the top of this function
                # Default values for all new fields
                positive_level = "无利好信息"  # default level
                positive_reason = "未能提取利好原因"
                negative_level = "无利空信息"  # default level
                negative_reason = "未能提取利空原因"
                insight_summary = "未能提取摘要"
                announcement_interpretation = "未能生成公告解读"  # default for the interpretation field

                # Allowed level values, used to validate what the LLM returned
                VALID_POSITIVE_LEVELS = ["无利好信息", "轻微利好", "一般利好", "重大利好", "变革性利好"]
                VALID_NEGATIVE_LEVELS = ["无利空信息", "轻微利空", "一般利空", "严重利空", "毁灭性利空"]

                # Initialize keyword lists
                positive_keywords = []
                negative_keywords = []

                try:
                    # Extract the outermost {...} span from the reply and parse it as JSON
                    json_start = llm_reply.find('{')
                    json_end = llm_reply.rfind('}') + 1
                    json_str = llm_reply[json_start:json_end]
                    data = json.loads(json_str)

                    positive_level_from_llm = data.get('positive_level', '无利好信息').strip()
                    positive_reason = data.get('positive_reason', '无利好信息').strip()
                    negative_level_from_llm = data.get('negative_level', '无利空信息').strip()
                    negative_reason = data.get('negative_reason', '无利空信息').strip()
                    insight_summary = data.get('insight_summary', '').strip()
                    announcement_interpretation = data.get('announcement_interpretation', '').strip()  # interpretation field

                    # Extract keywords
                    positive_keywords = data.get('positive_keywords', [])
                    negative_keywords = data.get('negative_keywords', [])

                    # Validate levels
                    positive_level = positive_level_from_llm if positive_level_from_llm in VALID_POSITIVE_LEVELS else "无利好信息"
                    negative_level = negative_level_from_llm if negative_level_from_llm in VALID_NEGATIVE_LEVELS else "无利空信息"

                    print(f"  [DEBUG] 解析JSON成功: P_Level={positive_level}, N_Level={negative_level}, Summary={insight_summary[:30]}")

                except Exception as e:
                    print(f"  [ERROR] JSON解析失败: {e}\n原始返回: {llm_reply}")
                    # Keep default error messages for reasons, levels and summary if parsing fails
                    insight_summary = llm_reply.strip()[:200] # Fallback for summary
                    announcement_interpretation = "JSON解析失败，无法生成公告解读"  # default when parsing fails
                    # Keywords stay as empty lists (initialized above) when parsing fails
                
                block_results.append({
                    'positive_level': positive_level,
                    'positive_reason': positive_reason,
                    'positive_keywords': positive_keywords,
                    'negative_level': negative_level,
                    'negative_reason': negative_reason,
                    'negative_keywords': negative_keywords,
                    'insight_summary': insight_summary,
                    'announcement_interpretation': announcement_interpretation,  # interpretation field
                    'llm_call_successful': True  # NOTE(review): set True whenever the LLM replied, even when JSON parsing above failed — confirm this is intended
                })
            else:
                print("  [LLM分析] 调用失败，使用默认值")
                block_results.append({
                    'positive_level': '无利好信息',
                    'positive_reason': '大模型API调用失败，无利好信息。',
                    'positive_keywords': [],  # keywords empty when the API call fails
                    'negative_level': '无利空信息',
                    'negative_reason': '大模型API调用失败，无利空信息。',
                    'negative_keywords': [],  # keywords empty when the API call fails
                    'insight_summary': '大模型API调用失败，未能获得分析结果。',
                    'announcement_interpretation': '大模型API调用失败，无法生成公告解读。',  # interpretation field
                    'llm_call_successful': False  # API call failed
                })
        if block_results:
            print(f"  所有文本块分析完成，共获得 {len(block_results)} 条分析结果。")
            final_analysis_results = summarize_all_analyses(block_results)
            if final_analysis_results:
                print(f"\n    --- 公告整体分析总结 ---")
                print(f"    利好等级: {final_analysis_results.get('final_positive_level', '无利好信息')}, 原因: {final_analysis_results.get('final_positive_reason', '')[:50]}...")
                print(f"    利好关键词: {final_analysis_results.get('final_positive_keywords', [])}")
                print(f"    利空等级: {final_analysis_results.get('final_negative_level', '无利空信息')}, 原因: {final_analysis_results.get('final_negative_reason', '')[:50]}...")
                print(f"    利空关键词: {final_analysis_results.get('final_negative_keywords', [])}")
                print(f"    整体摘要: {final_analysis_results.get('final_insight_summary', '')[:100]}...")
                print(f"    公告解读: {final_analysis_results.get('announcement_interpretation', '')[:100]}...")
                print(f"    LLM调用状态: {final_analysis_results.get('final_llm_call_successful', False)}")
                

                # Save to the database only when the LLM call succeeded
                if final_analysis_results.get('final_llm_call_successful', False):
                    # Store the keyword lists as JSON strings
                    positive_keywords_json = json.dumps(final_analysis_results.get('final_positive_keywords', []), ensure_ascii=False)
                    negative_keywords_json = json.dumps(final_analysis_results.get('final_negative_keywords', []), ensure_ascii=False)
                    
                    save_analysis_to_db(conn, 
                                         announcement_id, 
                                         secCode, 
                                         secName, 
                                         title, 
                                         publishTime, 
                                         final_analysis_results.get('final_positive_level', '无利好信息'),
                                         final_analysis_results.get('final_positive_reason', '无利好信息'),
                                         positive_keywords_json,
                                         final_analysis_results.get('final_negative_level', '无利空信息'),
                                         final_analysis_results.get('final_negative_reason', '无利空信息'),
                                         negative_keywords_json,
                                         final_analysis_results.get('final_insight_summary', '未能生成摘要'),
                                         final_analysis_results.get('announcement_interpretation', ''), # interpretation parameter
                                         pdf_page_count,
                                         pdf_view_link, # use the view (preview) link, not the download link
                                         final_analysis_results.get('final_llm_call_successful', False)
                                         )
                    print(f"  [SUCCESS] 分析结果已保存到数据库: {announcement_id}")
                    conn.commit() # commit the transaction (save_analysis_to_db also commits internally)
                else:
                    print(f"  [SKIP] LLM调用失败，跳过保存到数据库: {announcement_id}")
    except Exception as e:
        print(f"[ERROR] analyze_announcement_task 全局异常: {e}")
        import traceback
        print(f"[ERROR] 异常堆栈: {traceback.format_exc()}")
        raise e  # re-raise so the caller (future.result()) can observe the failure
    finally:
        if conn:
            conn.close()

if __name__ == '__main__':
    # Script entry point: read every announcement from the database and
    # fan the analysis work out to a small thread pool (each worker opens
    # its own SQLite connection inside analyze_announcement_task).
    conn_main = None
    try:
        conn_main = sqlite3.connect(config.DATABASE_PATH)
        conn_main.row_factory = sqlite3.Row
        ensure_insights_table(conn_main)
        announcements_data = get_announcements_from_db(config.DATABASE_PATH)
        print(f"成功从数据库 {config.DATABASE_PATH} 中读取了 {len(announcements_data)} 条公告。")
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            pending = [
                executor.submit(analyze_announcement_task, record)
                for record in announcements_data
            ]
            # Surface individual task failures without aborting the batch.
            for finished in concurrent.futures.as_completed(pending):
                try:
                    finished.result()
                except Exception as e:
                    print(f"[ERROR] 并发任务异常: {e}")
    finally:
        # Close the main connection even if setup or dispatch failed.
        if conn_main:
            conn_main.close()
            print("主数据库连接已关闭。")
