# /mnt/d/whj_dev/announcements/process_announcements_llm.py
import sqlite3
import requests
import pdfplumber
import os
import time
import json
import datetime
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import wraps
from typing import Optional, Dict, Any, Callable, TypeVar, Tuple
import random
from queue import Queue
from threading import Lock, Semaphore
import urllib.parse
import traceback  # for logging full stack traces on errors
import os.path  # explicit submodule import (os itself is imported above)

# Type variable for generic function signatures (used by retry_on_failure)
T = TypeVar('T')

# Global lock guarding concurrent access to shared resources
# NOTE(review): db_lock is not referenced anywhere in this file's visible code — confirm it is used elsewhere
db_lock = Lock()

# Semaphore capping concurrent LLM requests
# NOTE(review): llm_semaphore is also unreferenced in the visible code — the LLM worker pool relies on MAX_LLM_WORKERS instead
llm_semaphore = Semaphore(3)  # 限制为3个并发

class DownloadError(Exception):
    """Raised when downloading a PDF fails for any reason."""
    pass

class LLMRequestError(Exception):
    """Raised when a request to the LLM API fails."""
    pass

def retry_on_failure(max_retries: int = 3, delay: float = 1.0, exceptions=(Exception,)):
    """
    Decorator that retries a function with exponential backoff.

    Args:
        max_retries: Maximum number of attempts (must be >= 1).
        delay: Base delay in seconds; attempt N waits delay * 2**N.
        exceptions: Exception type(s) that trigger a retry.

    Raises:
        ValueError: If max_retries < 1. (The original fell through the
            loop and executed ``raise last_exception`` with
            last_exception still None, producing a confusing TypeError.)
    """
    if max_retries < 1:
        raise ValueError("max_retries must be at least 1")

    # Annotations are quoted so they are never evaluated at runtime and do
    # not depend on the module-level TypeVar being in scope.
    def decorator(func: "Callable[..., T]") -> "Callable[..., T]":
        @wraps(func)
        def wrapper(*args, **kwargs) -> "T":
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except exceptions as e:
                    if attempt < max_retries - 1:  # not the final attempt
                        wait_time = delay * (2 ** attempt)  # exponential backoff
                        logging.warning(
                            f"{func.__name__} 失败 (尝试 {attempt + 1}/{max_retries}): {str(e)}. "
                            f"{wait_time:.1f}秒后重试..."
                        )
                        time.sleep(wait_time)
                    else:  # final attempt failed: log and propagate
                        logging.error(f"{func.__name__} 在 {max_retries} 次尝试后仍然失败: {str(e)}")
                        raise
        return wrapper
    return decorator

def get_new_proxy() -> Optional[str]:
    """
    Fetch a fresh proxy address from the proxy-pool API.

    Returns:
        The proxy in "http://ip:port" form, or None when the API call
        fails or the response carries no usable proxy entry.
    """
    try:
        from config import PROXY_API_URL
        response = requests.get(PROXY_API_URL, timeout=10)
        payload = response.json()
        body = payload.get("data")
        if payload.get("code") == 200 and body and body.get("proxy_list"):
            chosen = body["proxy_list"][0]
            logging.info(f"获取到新代理: {chosen}")
            return chosen
    except Exception as e:
        # Any failure (config missing, network error, bad JSON) degrades
        # to "no proxy" rather than crashing the caller.
        logging.error(f"获取代理失败: {e}")
    return None

# Try to import configuration from config.py
try:
    import config
    SILICONFLOW_API_KEY = config.SILICONFLOW_API_KEY
    SILICONFLOW_API_URL = config.SILICONFLOW_API_URL
    AI_MODEL_NAME = config.AI_MODEL_NAME
    DATABASE_PATH = config.DATABASE_PATH
    DOWNLOAD_DIR = config.DOWNLOAD_DIR
    # PDF_BASE_URL = config.PDF_BASE_URL  # no longer used: pdf_download_link is already a full URL
    MAX_CHARS_PER_LLM_REQUEST = config.MAX_CHARS_PER_LLM_REQUEST
    CHUNK_SUMMARY_PROMPT_SUFFIX = config.CHUNK_SUMMARY_PROMPT_SUFFIX
    API_CALL_DELAY_SECONDS = config.API_CALL_DELAY_SECONDS
    LOG_LEVEL = getattr(config, 'LOG_LEVEL', 'INFO').upper()
    MAX_DOWNLOAD_WORKERS = getattr(config, 'MAX_DOWNLOAD_WORKERS', 10)
    MAX_LLM_WORKERS = getattr(config, 'MAX_LLM_WORKERS', 3)
    MAX_RETRIES = getattr(config, 'MAX_RETRIES', 3)
except ImportError:
    print("错误：无法导入 config.py 文件。请确保该文件存在于脚本同目录下，并包含必要的配置。")
    # Fallback defaults keep the module importable, but the placeholder API
    # key below intentionally triggers the hard exit further down.
    SILICONFLOW_API_KEY = "YOUR_SILICONFLOW_API_KEY_HERE"  # forces the user to configure a real key
    SILICONFLOW_API_URL = "https://api.siliconflow.cn/v1/chat/completions"
    AI_MODEL_NAME = "Qwen/Qwen-32B-Chat"
    DATABASE_PATH = "announcements.db"
    DOWNLOAD_DIR = "downloads"
    # PDF_BASE_URL = "https://www.bseinfo.net"  # no longer used
    MAX_CHARS_PER_LLM_REQUEST = 10000
    CHUNK_SUMMARY_PROMPT_SUFFIX = "\n\n请总结以上内容的核心要点。"
    API_CALL_DELAY_SECONDS = 2
    LOG_LEVEL = 'INFO'
    MAX_DOWNLOAD_WORKERS = 10
    MAX_LLM_WORKERS = 3
    MAX_RETRIES = 3

# Configure logging to stream handler at the configured level
logging.basicConfig(level=LOG_LEVEL,
                    format='%(asctime)s - %(levelname)s - %(message)s',
                    handlers=[logging.StreamHandler()])

# Fail fast when the API key is missing or still the placeholder value
if SILICONFLOW_API_KEY == "YOUR_SILICONFLOW_API_KEY_HERE" or not SILICONFLOW_API_KEY:
    logging.error("严重错误：SiliconFlow API密钥未在 config.py 中配置。请编辑 config.py 文件并提供有效的API密钥。")
    exit(1)

# Create the download directory on first run
if not os.path.exists(DOWNLOAD_DIR):
    os.makedirs(DOWNLOAD_DIR)
    logging.info(f"下载目录 '{DOWNLOAD_DIR}' 已创建。")

def get_db_connection():
    """Open and return a SQLite connection to the configured database.

    sqlite3.Row is installed as the row factory so callers can access
    columns by name.
    """
    connection = sqlite3.connect(DATABASE_PATH)
    connection.row_factory = sqlite3.Row
    return connection

def create_insights_table_if_not_exists(conn):
    """Ensure the announcement_insights table and its update trigger exist.

    The trigger keeps updated_at current whenever a row is modified.
    """
    table_ddl = """
    CREATE TABLE IF NOT EXISTS announcement_insights (
        insight_id INTEGER PRIMARY KEY AUTOINCREMENT,
        announcement_id INTEGER NOT NULL UNIQUE, -- 确保每个公告只有一条分析记录
        stock_code TEXT,
        title TEXT,
        publish_date TEXT,
        sentiment TEXT, -- '利好', '利空', '中性'
        reasoning TEXT, -- LLM给出的判断理由
        investor_interpretation TEXT, -- LLM从股民角度的深度解读
        llm_input_char_count INTEGER,
        llm_raw_response TEXT, -- 存储LLM原始响应，便于调试
        status TEXT DEFAULT 'pending', -- 'pending', 'processing', 'success', 'error'
        error_message TEXT,
        pdf_local_path TEXT, -- 存储实际分析的PDF本地路径
        created_at TEXT DEFAULT CURRENT_TIMESTAMP,
        updated_at TEXT DEFAULT CURRENT_TIMESTAMP
    )
    """
    trigger_ddl = """
    CREATE TRIGGER IF NOT EXISTS update_announcement_insights_updated_at
    AFTER UPDATE ON announcement_insights
    FOR EACH ROW
    BEGIN
        UPDATE announcement_insights SET updated_at = CURRENT_TIMESTAMP WHERE insight_id = OLD.insight_id;
    END;
    """
    cur = conn.cursor()
    cur.execute(table_ddl)
    cur.execute(trigger_ddl)
    conn.commit()
    logging.info("'announcement_insights' 表已准备就绪。")

def get_pending_announcements(conn):
    """
    Fetch announcements that still need processing.

    Only rows with a non-empty pdf_download_link are returned, newest
    first, currently capped at 10 for testing.

    Args:
        conn: SQLite connection with row_factory = sqlite3.Row.

    Returns:
        list[sqlite3.Row]: rows exposing id, secCode, title, publishTime,
        pdf_local_path and pdf_download_url (aliased from pdf_download_link).
    """
    cursor = conn.cursor()

    cursor.execute("""
    SELECT a.id, a.secCode, a.title, a.publishTime, a.pdf_local_path, 
           a.pdf_download_link as pdf_download_url
    FROM announcements a
    WHERE a.pdf_download_link IS NOT NULL AND a.pdf_download_link != ''
    ORDER BY a.publishTime DESC, a.id DESC
    LIMIT 10  -- 先只取10条进行测试
    """)

    announcements = cursor.fetchall()

    logging.info(f"获取到 {len(announcements)} 条待处理公告")
    # Bug fix: sqlite3.Row does not support attribute access, so the old
    # getattr(ann, 'id', default) calls always logged the default value.
    # Row objects are indexed by column name instead.
    for ann in announcements[:3]:  # only log the first 3 rows for debugging
        logging.info(f"公告ID: {ann['id']}, 标题: {ann['title']}, 下载链接: {ann['pdf_download_url']}")

    return announcements

def ensure_download_dir():
    """Create the download directory if it does not already exist."""
    if os.path.exists(DOWNLOAD_DIR):
        return
    os.makedirs(DOWNLOAD_DIR, exist_ok=True)
    logging.info(f"创建下载目录: {DOWNLOAD_DIR}")

# Bug fix: the retry decorator previously listed only requests.RequestException,
# but this function converts every such error to DownloadError before it
# propagates, so retries never actually fired. DownloadError is now retried too.
@retry_on_failure(max_retries=MAX_RETRIES, delay=2, exceptions=(DownloadError, requests.RequestException))
def download_pdf_with_proxy(announcement_id: int, pdf_download_url: str, proxy: Optional[str] = None) -> str:
    """
    Download a PDF into the local download directory, optionally via a proxy.

    Args:
        announcement_id: announcement ID (used as the fallback filename)
        pdf_download_url: full URL of the PDF
        proxy: proxy address in "http://ip:port" form, or None for a direct connection

    Returns:
        str: local path of the downloaded (or already-present) file

    Raises:
        DownloadError: when the download fails for any reason
    """
    ensure_download_dir()

    if not pdf_download_url:
        raise DownloadError("没有可用的PDF下载链接")

    # Derive the filename from the URL path; fall back to "<id>.pdf".
    parsed_url = urllib.parse.urlparse(pdf_download_url)
    filename = os.path.basename(parsed_url.path)
    if not filename.lower().endswith('.pdf'):
        filename = f"{announcement_id}.pdf"

    local_path = os.path.join(DOWNLOAD_DIR, filename)

    # Reuse an existing non-empty file; remove an existing empty one.
    if os.path.exists(local_path) and os.path.getsize(local_path) > 0:
        logging.info(f"文件已存在且有效: {local_path} (大小: {os.path.getsize(local_path) / 1024:.2f} KB)")
        return local_path
    elif os.path.exists(local_path):
        try:
            os.remove(local_path)
            logging.warning(f"已删除损坏/空文件: {local_path}")
        except OSError as e:
            logging.error(f"删除损坏/空文件 {local_path} 失败: {e}")
            # If removal fails, writing may fail too, but still attempt the download.

    logging.info(f"正在下载 {pdf_download_url} 到 {local_path} (代理: {proxy or '无'})")

    # Stream into a temp file first, then rename, so local_path is never partial.
    temp_path = f"{local_path}.download"
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        with requests.get(pdf_download_url, stream=True, proxies={'http': proxy, 'https': proxy} if proxy else None, headers=headers, timeout=30) as response:
            response.raise_for_status()  # surface HTTP errors as RequestException

            os.makedirs(os.path.dirname(local_path), exist_ok=True)

            with open(temp_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:  # skip keep-alive chunks
                        f.write(chunk)

            # Download complete: atomically move the temp file into place.
            if os.path.exists(local_path):
                os.remove(local_path)
            os.rename(temp_path, local_path)

            logging.info(f"成功下载到: {local_path} (大小: {os.path.getsize(local_path) / 1024:.2f} KB)")
            return local_path

    except requests.exceptions.ProxyError as pe:
        raise DownloadError(f"代理错误: {str(pe)}")
    except requests.exceptions.RequestException as re:
        raise DownloadError(f"下载请求失败: {str(re)}")
    except Exception as e:
        raise DownloadError(f"下载PDF失败: {str(e)}")
    finally:
        # Bug fix: the old code deleted local_path on failure, but partial data
        # lives in temp_path, which leaked to disk. On success temp_path has
        # already been renamed away, so this only fires on failure.
        if os.path.exists(temp_path):
            try:
                os.remove(temp_path)
            except OSError as e_remove:
                logging.error(f"删除不完整文件 {temp_path} 失败: {str(e_remove)}")

def download_pdf(announcement_id: int, pdf_download_url: str, db_path: str) -> Optional[str]:
    """
    Download a PDF and record its local path in the database.

    :param announcement_id: announcement ID
    :param pdf_download_url: PDF download URL
    :param db_path: path to the SQLite database file
    :return: local file path, or None on failure
    """
    # Each worker thread opens its own connection — sqlite3 connection
    # objects must not be shared across threads.
    conn = None
    try:
        # Make sure the download directory exists
        os.makedirs(DOWNLOAD_DIR, exist_ok=True)

        # Grab a proxy from the pool (None means direct connection)
        proxy = get_new_proxy()

        # Download the PDF
        local_path = download_pdf_with_proxy(announcement_id, pdf_download_url, proxy)

        # Record the local path in the database
        conn = sqlite3.connect(db_path)
        with conn:
            cursor = conn.cursor()
            cursor.execute(
                "UPDATE announcements SET pdf_local_path = ? WHERE id = ?",
                (local_path, announcement_id)
            )

        return local_path

    except Exception as e:
        logging.error(f"下载PDF时发生未知错误 (公告ID: {announcement_id}): {str(e)}")
        logging.error(traceback.format_exc())

        # Record the failure status. Bug fix: the old code only wrote this
        # when conn was already open, but conn is opened *after* a successful
        # download — so download failures (the common case) were never
        # recorded. Open a connection here if needed.
        # NOTE(review): the pdf_download_status/pdf_download_error columns are
        # not created in this file — confirm the announcements schema has them;
        # if not, this UPDATE fails and is logged by the inner except.
        try:
            if conn is None:
                conn = sqlite3.connect(db_path)
            with conn:
                cursor = conn.cursor()
                cursor.execute(
                    "UPDATE announcements SET pdf_download_status = 'failed', "
                    "pdf_download_error = ? WHERE id = ?",
                    (str(e), announcement_id)
                )
        except Exception as db_error:
            logging.error(f"更新数据库记录时发生错误: {str(db_error)}")
        return None
    finally:
        if conn is not None:
            conn.close()
    
def download_announcements(announcements, db_path):
    """
    Download announcement PDFs concurrently.

    Args:
        announcements: iterable of rows/dicts carrying 'id' and
            'pdf_download_url' keys
        db_path: path to the SQLite database file

    Returns:
        tuple: (success count, failure count, list of (id, error) pairs)
    """
    if not announcements:
        logging.info("没有需要下载的公告")
        return 0, 0, []

    succeeded = 0
    failed = 0
    failures = []

    # Cap the pool size at 10 workers
    worker_count = min(10, len(announcements))
    logging.info(f"开始并发下载 {len(announcements)} 个PDF文件 (最大并发数: {worker_count})...")

    with ThreadPoolExecutor(max_workers=worker_count) as pool:
        pending = {}
        for item in announcements:
            # Normalize the row object into a plain dict
            record = dict(item)
            logging.info(f"ann 对象完整内容: {record}")

            record_id = record.get('id')
            url = record.get('pdf_download_url')

            logging.info(f"准备下载公告ID: {record_id}, 下载链接: {url}")

            # Skip entries missing either the id or the download URL
            if not url or not record_id:
                message = f"公告ID {record_id} 缺少必要的下载链接或ID"
                logging.warning(message)
                failed += 1
                failures.append((record_id, message))
                continue

            pending[pool.submit(download_pdf, record_id, url, db_path)] = record_id

        # Collect results as workers finish
        for done in as_completed(pending):
            record_id = pending[done]
            try:
                path = done.result()
            except Exception as exc:
                failed += 1
                message = str(exc)
                failures.append((record_id, message))
                logging.error(f"处理公告 {record_id} 时发生错误: {message}")
                logging.error(traceback.format_exc())
                continue
            if path:
                succeeded += 1
                logging.info(f"成功下载公告 {record_id} 的PDF到: {path}")
            else:
                failed += 1
                message = f"下载公告 {record_id} 的PDF失败"
                failures.append((record_id, message))
                logging.error(message)

    logging.info(f"PDF下载完成: 成功 {succeeded}, 失败 {failed}")
    return succeeded, failed, failures

def main():
    """Run the pipeline: ensure the insights table, fetch pending
    announcements, download their PDFs concurrently, then process the
    downloaded ones with an LLM worker pool and print a summary.

    NOTE(review): process_announcement (submitted to the pool below) is not
    defined anywhere in this file's visible code — confirm it is defined or
    imported elsewhere, otherwise executor.submit raises NameError.
    """
    logging.info("--- 开始处理公告LLM分析任务 ---")
    db_path = DATABASE_PATH
    conn = None
    
    try:
        # Open the main-thread DB connection and ensure the table exists
        conn = get_db_connection()
        create_insights_table_if_not_exists(conn)
        
        # Fetch announcements that still need analysis
        pending_announcements = get_pending_announcements(conn)
        if not pending_announcements:
            logging.info("没有待处理的公告。")
            return

        total_announcements = len(pending_announcements)
        logging.info(f"发现 {total_announcements} 条待处理的公告。")
        
        # Convert sqlite3.Row objects to plain dicts so worker threads
        # never touch the main-thread connection's row objects
        announcements_list = []
        for ann in pending_announcements:
            try:
                ann_dict = dict(ann)
                if 'id' in ann_dict and 'pdf_download_url' in ann_dict:
                    announcements_list.append(ann_dict)
                else:
                    logging.warning(f"公告对象缺少必要字段: {ann_dict}")
            except Exception as e:
                logging.error(f"转换公告对象时出错: {str(e)}")
        
        if not announcements_list:
            logging.warning("没有有效的公告可以处理")
            return
            
        logging.info(f"准备处理 {len(announcements_list)} 个有效公告...")
        
        # Step 1: download all PDFs concurrently
        logging.info(f"开始并发下载 {len(announcements_list)} 个PDF文件...")
        success_dl, failed_dl, dl_errors = download_announcements(announcements_list, db_path)
        logging.info(f"PDF下载完成: 成功 {success_dl}, 失败 {failed_dl}")
        
        # Persist download failures into the insights table
        if dl_errors:
            try:
                with conn:
                    cursor = conn.cursor()
                    for ann_id, error_msg in dl_errors:
                        try:
                            cursor.execute("""
                                INSERT OR REPLACE INTO announcement_insights 
                                (announcement_id, status, error_message, updated_at)
                                VALUES (?, 'error', ?, CURRENT_TIMESTAMP)
                            """, (ann_id, error_msg))
                        except Exception as e:
                            logging.error(f"无法更新公告ID {ann_id} 的下载错误状态: {e}")
            except Exception as e:
                logging.error(f"更新下载错误状态时发生错误: {e}")
        
        # Step 2: process the successfully downloaded announcements
        processed_count = 0
        failed_count = 0
        
        # IDs that did not appear in the download-error list are treated as
        # successfully downloaded
        success_ids = {ann['id'] for ann in announcements_list 
                      if ann['id'] not in {err[0] for err in dl_errors}}
        
        # Keep only the announcements whose PDFs were downloaded
        processed_announcements = [ann for ann in announcements_list 
                                 if ann.get('id') in success_ids]
        
        logging.info(f"开始处理 {len(processed_announcements)} 个已下载的公告...")
        
        # Fan the LLM work out over a bounded thread pool
        max_workers = min(MAX_LLM_WORKERS, len(processed_announcements))
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit one task per announcement
            future_to_ann = {}
            for ann in processed_announcements:
                ann_id = ann.get('id')
                if not ann_id:
                    logging.warning("公告缺少ID，跳过处理")
                    continue
                    
                future = executor.submit(process_announcement, ann, db_path)
                future_to_ann[future] = ann_id
            
            # Collect results; each result is expected to be a tuple whose
            # second element signals success and third carries an error message
            for future in as_completed(future_to_ann):
                ann_id = future_to_ann[future]
                try:
                    result = future.result()
                    if result and len(result) >= 3 and result[1]:  # validate the result shape
                        processed_count += 1
                        logging.info(f"公告ID {ann_id} 处理成功")
                    else:
                        failed_count += 1
                        error_msg = result[2] if result and len(result) >= 3 else "未知错误"
                        logging.error(f"公告ID {ann_id} 处理失败: {error_msg}")
                except Exception as e:
                    failed_count += 1
                    error_msg = f"处理公告ID {ann_id} 时发生未捕获的异常: {str(e)}"
                    logging.error(error_msg, exc_info=True)
        
        # Final summary
        logging.info("=" * 50)
        logging.info("任务完成摘要:")
        logging.info(f"- 总公告数: {total_announcements}")
        logging.info(f"- PDF下载: 成功 {success_dl}, 失败 {failed_dl}")
        logging.info(f"- 公告处理: 成功 {processed_count}, 失败 {failed_count}")
        logging.info("=" * 50)

    except sqlite3.Error as e:
        logging.error(f"数据库操作发生错误: {e}", exc_info=True)
    except Exception as e:
        logging.error(f"脚本执行过程中发生未捕获的错误: {e}", exc_info=True)
    finally:
        if conn:
            try:
                conn.close()
            except Exception as e:
                logging.error(f"关闭数据库连接时出错: {e}")
        logging.info("--- 公告LLM分析任务结束 ---")

# Standard script entry point.
if __name__ == "__main__":
    main()
