import datetime
import hashlib
import json
import logging
import os
import random
import re
import sqlite3
import time
import urllib.parse

import requests

# SZSE announcement-list endpoint (the `random` query param mimics the site's
# cache-buster; its exact value is not significant).
API_URL = 'https://www.szse.cn/api/disc/announcement/annList?random=0.5819836900601464'
PDF_VIEW_BASE_URL = 'https://www.szse.cn/disclosure/listed/bulletinDetail/index.html'
PDF_DOWNLOAD_BASE_URL = 'https://disc.static.szse.cn/download'
# TODAY = datetime.datetime.now().strftime("%Y-%m-%d")
# NOTE(review): TODAY is hard-coded (the dynamic line above is commented out),
# so every crawler only ever fetches 2025-06-16 — confirm this debug state is intentional.
TODAY = '2025-06-16'

# Browser-like headers for the SZSE JSON API (captured from a real Edge session).
HEADERS = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/json',
    'Origin': 'https://www.szse.cn',
    'Pragma': 'no-cache',
    'Referer': 'https://www.szse.cn/disclosure/listed/notice/index.html',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0',
    'X-Request-Type': 'ajax',
    'X-Requested-With': 'XMLHttpRequest',
    'sec-ch-ua': '"Microsoft Edge";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"'
}

# POST body template for the SZSE announcement list API; the crawler copies
# this and fills in seDate/pageSize/pageNum per request.
DATA_PAYLOAD = {
    "seDate": ["", ""],
    "channelCode": ["listedNotice_disc"],
    "pageSize": 50,  # fetch a small sample per page; adjust as needed
    "pageNum": 1
}

def init_db(db_path='announcements.db'):
    """Open the announcements SQLite database, creating or migrating its schema.

    When the `announcements` table does not exist it is created with the full
    current schema; when it already exists, the crawl bookkeeping columns
    (`crawl_session_start_time`, `retrieved_at`) are added if an older schema
    is missing them.

    Args:
        db_path: path of the SQLite file (default 'announcements.db').

    Returns:
        An open sqlite3.Connection to the initialized database.
    """
    conn = sqlite3.connect(db_path)
    cur = conn.cursor()
    cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='announcements'")
    if cur.fetchone() is None:
        # Fresh database: create the table with every column, new ones included.
        cur.execute('''
            CREATE TABLE announcements (
                id TEXT PRIMARY KEY,
                title TEXT,
                publishTime TEXT,
                pdf_view_link TEXT,
                pdf_download_link TEXT,
                secCode TEXT,
                secName TEXT,
                attachPath TEXT,
                attachFormat TEXT,
                attachSize INTEGER,
                apiChannelCode TEXT,
                rawData TEXT,
                exchange TEXT,
                category TEXT,
                crawl_session_start_time TEXT,
                retrieved_at TEXT
            )
        ''')
    else:
        # Existing table: backfill columns introduced by later crawler versions.
        cur.execute("PRAGMA table_info(announcements)")
        existing = {row[1] for row in cur.fetchall()}
        if 'crawl_session_start_time' not in existing:
            cur.execute("ALTER TABLE announcements ADD COLUMN crawl_session_start_time TEXT")
        if 'retrieved_at' not in existing:
            cur.execute("ALTER TABLE announcements ADD COLUMN retrieved_at TEXT")
    conn.commit()
    return conn

def insert_announcement(conn, ann, exchange, logger_instance, crawl_session_start_time=None):
    """Insert one announcement row into the `announcements` table.

    Uses INSERT OR IGNORE, so a row whose primary key already exists is
    silently skipped (this is how the crawlers deduplicate).

    Args:
        conn: open sqlite3 connection holding the `announcements` table.
        ann: dict of announcement fields; keys mirror the table columns.
        exchange: exchange tag stored in the `exchange` column, e.g. 'SZSE'.
        logger_instance: logger used to report sqlite errors.
        crawl_session_start_time: optional session timestamp stored with the row.

    Returns:
        True if a new row was inserted; False if it was ignored as a
        duplicate or the insert failed.
    """
    # Defensive: secCode/secName occasionally arrive as lists; flatten to CSV.
    sec_code = ann.get('secCode', '')
    if isinstance(sec_code, list):
        sec_code = ','.join(str(x) for x in sec_code)
    sec_name = ann.get('secName', '')
    if isinstance(sec_name, list):
        sec_name = ','.join(str(x) for x in sec_name)
    # Bug fix: fall back to a hash of the download link when the source gives
    # no usable id — missing key OR empty value. The original only handled the
    # missing-key case, letting '' become the primary key (and computed the
    # hash eagerly even when the id was present).
    ann_id = ann.get('id') or hashlib.sha1(
        ann.get('pdf_download_link', '').encode('utf-8')).hexdigest()
    # Timestamp of when this row was written to the database.
    retrieved_at = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    data_tuple = (
        ann_id,
        ann.get('title', 'N/A'),
        ann.get('publishTime', datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
        ann.get('pdf_view_link', ''),
        ann.get('pdf_download_link', ''),
        sec_code,
        sec_name,
        ann.get('attachPath', ''),
        ann.get('attachFormat', ''),
        ann.get('attachSize', 0),
        ann.get('apiChannelCode', ''),
        ann.get('rawData', ''),
        exchange,
        ann.get('category', ''),
        crawl_session_start_time,
        retrieved_at
    )
    cursor = conn.cursor()
    try:
        cursor.execute('''
            INSERT OR IGNORE INTO announcements 
            (id, title, publishTime, pdf_view_link, pdf_download_link, 
             secCode, secName, attachPath, attachFormat, attachSize, 
             apiChannelCode, rawData, exchange, category, crawl_session_start_time, retrieved_at) 
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ''', data_tuple)
        conn.commit()
        # rowcount is 0 when the OR IGNORE clause suppressed a duplicate.
        return cursor.rowcount > 0
    except sqlite3.Error as e:
        logger_instance.error(f"[{exchange}][DB_ERROR] 插入公告失败 {data_tuple[0]}: {e}")
        return False

def get_new_proxy():
    """Fetch one fresh proxy ("ip:port") from the juliangip proxy API.

    Returns the first proxy string on success, or None when the request
    fails or the response carries no proxy list.

    NOTE(review): the URL embeds account credentials (trade_no/sign) —
    consider moving them to configuration.
    """
    PROXY_API_URL = "http://v2.api.juliangip.com/unlimited/getips?auto_white=1&num=1&pt=1&result_type=json&trade_no=5964580058336129&sign=546f29ff6cf4fe25b70ab1da019bd246"
    try:
        payload = requests.get(PROXY_API_URL, timeout=10).json()
        proxy_data = payload.get("data") if payload.get("code") == 200 else None
        if proxy_data and proxy_data.get("proxy_list"):
            return proxy_data["proxy_list"][0]
    except Exception as e:
        print(f"获取代理失败: {e}")
    return None

def get_latest_announcement_time_for_date(conn, date_str, exchange):
    """Return the newest stored publishTime for `exchange` on `date_str`.

    Args:
        conn: open sqlite3 connection with the `announcements` table.
        date_str: date prefix to match against publishTime, e.g. '2025-06-16'.
        exchange: exchange tag, e.g. 'SZSE'.

    Returns:
        The MAX(publishTime) string, or None when no matching row exists.
    """
    row = conn.cursor().execute(
        """
        SELECT MAX(publishTime) FROM announcements
        WHERE publishTime LIKE ? AND exchange=?
        """, (f"{date_str}%", exchange)
    ).fetchone()
    if not row:
        return None
    return row[0] if row[0] else None

def crawl_szse_announcements(conn, logger, crawl_session_start_time=None):
    """Crawl today's SZSE announcements into the database, incrementally.

    Pages through the SZSE announcement API (newest first) for TODAY and
    stops once it reaches either a non-today announcement or one published
    strictly earlier than the newest record already stored for today
    (equal timestamps are still processed; INSERT OR IGNORE deduplicates).
    Each page is requested through a fresh proxy from get_new_proxy(); a
    failed page is retried up to 3 times with new proxies before the crawl
    aborts.

    Args:
        conn: open sqlite3 connection (see init_db).
        logger: logger used for progress and error reporting.
        crawl_session_start_time: session timestamp persisted with each row.
    """
    today = TODAY
    page_num = 1
    page_size = 50
    total_insert = 0
    exchange = 'SZSE'
    latest_db_time = get_latest_announcement_time_for_date(conn, today, exchange)
    if latest_db_time:
        logger.info(f"[SZSE] 数据库已有 {today} 最新公告时间: {latest_db_time}")
    else:
        logger.info(f"[SZSE] 数据库无 {today} 记录，将全量抓取")
    stop_flag = False
    # Title watched for ad-hoc ID debugging; its API id is logged when seen.
    target_title_debug = "振东制药：北京大成律师事务所关于山西振东制药股份有限公司2024年年度股东大会之法律意见书"

    while not stop_flag:
        payload = DATA_PAYLOAD.copy()
        payload["seDate"] = [today, today]
        payload["pageSize"] = page_size
        payload["pageNum"] = page_num
        proxy = get_new_proxy()
        proxies = None
        if proxy:
            proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"}
            logger.info(f"[SZSE] 使用代理: {proxy}")
        else:
            logger.warning("[SZSE] 未获取到代理，使用直连")

        current_payload_for_retry = payload.copy()  # snapshot of this page's payload for the retry path

        try:
            response = requests.post(API_URL, headers=HEADERS, json=current_payload_for_retry, proxies=proxies, timeout=30)
            response.raise_for_status()
            data = response.json()
            ann_list = data.get('data', [])

            if not ann_list:
                logger.info(f"[SZSE] 第{page_num}页无数据，终止。")
                break

            for announcement in ann_list:
                # Debug hook: log the API id of the watched title when it appears.
                current_title_debug = announcement.get('title', 'N/A')
                if current_title_debug == target_title_debug:
                    ann_id_to_check_debug = announcement.get('id', 'ID_NOT_FOUND')
                    logger.info(f"[DEBUG_ID] 标题匹配: '{target_title_debug}', API返回的ID是: '{ann_id_to_check_debug}'")

                pub_time = announcement.get('publishTime', '')
                pub_date = pub_time.split(' ')[0] if pub_time else ''

                if pub_date != today:
                    stop_flag = True
                    logger.info(f"[SZSE] 遇到非今日公告，终止分页。公告发布日期: {pub_date}")
                    break

                # Incremental stop: strictly-older (<, not <=) than the stored
                # maximum ends the crawl; equal timestamps still get processed.
                if latest_db_time and pub_time < latest_db_time:
                    logger.info(f"[SZSE][SKIP_OLD] 公告时间 ({pub_time}) 早于数据库最新 ({latest_db_time})，终止分页。标题: {announcement.get('title','')}")
                    stop_flag = True
                    break

                ann_id = announcement.get('id', '')
                title = announcement.get('title', 'N/A')
                attach_path = announcement.get('attachPath', '')
                view_link = f"{PDF_VIEW_BASE_URL}?{ann_id}" if ann_id else 'N/A'
                download_link = 'N/A'
                if attach_path and title != 'N/A':
                    encoded_title = urllib.parse.quote(title)
                    download_link = f"{PDF_DOWNLOAD_BASE_URL}{attach_path}?n={encoded_title}.pdf"
                elif attach_path:
                    download_link = f"{PDF_DOWNLOAD_BASE_URL}{attach_path}"

                db_ann = announcement.copy()
                db_ann['pdf_view_link'] = view_link
                db_ann['pdf_download_link'] = download_link
                sec_code = db_ann.get('secCode', '')
                if isinstance(sec_code, list):
                    db_ann['secCode'] = ','.join([str(x) for x in sec_code])
                sec_name = db_ann.get('secName', '')
                if isinstance(sec_name, list):
                    db_ann['secName'] = ','.join([str(x) for x in sec_name])

                if insert_announcement(conn, db_ann, exchange, logger, crawl_session_start_time):
                    total_insert += 1
                    logger.info(f"[SZSE] 插入: {title} {pub_time}")
                else:
                    logger.info(f"[SZSE][SKIP/DUPLICATE_ID_EXISTS] 公告ID已存在或插入失败: {title} ({pub_time}), ID: {ann_id}")

            if stop_flag:
                break
            page_num += 1
            time.sleep(random.uniform(1, 2))

        except requests.exceptions.RequestException as e:
            logger.error(f"[SZSE][ERROR] 第{page_num}页请求异常: {e}")
            retry_count = 1
            max_retries = 3

            while retry_count <= max_retries:
                logger.info(f"[SZSE] 第{page_num}页重试第{retry_count}/{max_retries}次，重新获取代理...")
                time.sleep(random.uniform(3, 5))
                proxy = get_new_proxy()
                proxies = None
                if proxy:
                    proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"}
                    logger.info(f"[SZSE] 使用新代理: {proxy} 进行重试")
                else:
                    logger.warning("[SZSE] 重试时未获取到代理，将使用直连")

                try:
                    response = requests.post(API_URL, headers=HEADERS, json=current_payload_for_retry, proxies=proxies, timeout=30)
                    response.raise_for_status()
                    data = response.json()
                    ann_list_retry = data.get('data', [])

                    logger.info(f"[SZSE] 第{page_num}页重试成功。")
                    temp_stop_flag_after_retry = False
                    for announcement_retry in ann_list_retry:
                        current_title_debug_retry = announcement_retry.get('title', 'N/A')
                        if current_title_debug_retry == target_title_debug:
                            ann_id_to_check_debug_retry = announcement_retry.get('id', 'ID_NOT_FOUND')
                            logger.info(f"[DEBUG_ID_RETRY] 标题匹配: '{target_title_debug}', API返回的ID是: '{ann_id_to_check_debug_retry}'")

                        pub_time_retry = announcement_retry.get('publishTime', '')
                        pub_date_retry = pub_time_retry.split(' ')[0] if pub_time_retry else ''
                        if pub_date_retry != today:
                            temp_stop_flag_after_retry = True
                            logger.info(f"[SZSE] (重试)遇到非今日公告，终止分页。公告发布日期: {pub_date_retry}")
                            break

                        # Same strictly-older incremental stop as the main loop.
                        if latest_db_time and pub_time_retry < latest_db_time:
                            logger.info(f"[SZSE][SKIP_OLD] (重试)公告时间 ({pub_time_retry}) 早于数据库最新 ({latest_db_time})，终止分页。标题: {announcement_retry.get('title','')}")
                            temp_stop_flag_after_retry = True
                            break

                        ann_id_retry = announcement_retry.get('id', '')
                        title_retry = announcement_retry.get('title', 'N/A')
                        attach_path_retry = announcement_retry.get('attachPath', '')
                        view_link_retry = f"{PDF_VIEW_BASE_URL}?{ann_id_retry}" if ann_id_retry else 'N/A'
                        download_link_retry = 'N/A'
                        if attach_path_retry and title_retry != 'N/A':
                            encoded_title_retry = urllib.parse.quote(title_retry)
                            download_link_retry = f"{PDF_DOWNLOAD_BASE_URL}{attach_path_retry}?n={encoded_title_retry}.pdf"
                        elif attach_path_retry:
                            download_link_retry = f"{PDF_DOWNLOAD_BASE_URL}{attach_path_retry}"

                        db_ann_retry = announcement_retry.copy()
                        db_ann_retry['pdf_view_link'] = view_link_retry
                        db_ann_retry['pdf_download_link'] = download_link_retry
                        sec_code_retry = db_ann_retry.get('secCode', '')
                        if isinstance(sec_code_retry, list):
                            db_ann_retry['secCode'] = ','.join([str(x) for x in sec_code_retry])
                        sec_name_retry = db_ann_retry.get('secName', '')
                        if isinstance(sec_name_retry, list):
                            db_ann_retry['secName'] = ','.join([str(x) for x in sec_name_retry])

                        # Bug fix: the retry path previously omitted
                        # crawl_session_start_time, so rows inserted after a
                        # retry lost their session timestamp.
                        if insert_announcement(conn, db_ann_retry, exchange, logger, crawl_session_start_time):
                            total_insert += 1
                            logger.info(f"[SZSE] (重试后)插入: {title_retry} {pub_time_retry}")
                        else:
                            logger.info(f"[SZSE][SKIP/DUPLICATE_ID_EXISTS] (重试后)公告ID已存在或插入失败: {title_retry} ({pub_time_retry}), ID: {ann_id_retry}")

                    if temp_stop_flag_after_retry:
                        stop_flag = True
                    break
                except requests.exceptions.RequestException as retry_e:
                    logger.error(f"[SZSE] 第{page_num}页重试第{retry_count}/{max_retries}次失败: {retry_e}")
                    retry_count += 1
                    if retry_count > max_retries:
                        logger.error(f"[SZSE] 第{page_num}页达到最大重试次数，放弃该页。")
                        stop_flag = True
                        break
                except Exception as general_retry_e:
                    logger.error(f"[SZSE] 第{page_num}页重试第{retry_count}/{max_retries}次时发生一般错误: {general_retry_e}")
                    stop_flag = True
                    break
            if stop_flag:
                break
            page_num += 1
            time.sleep(random.uniform(1, 2))

        except Exception as e:
            logger.error(f"[SZSE][FATAL] 第{page_num}页处理时发生未捕获的严重异常: {e}")
            break

    logger.info(f"[SZSE][DONE] 今日共插入/更新公告: {total_insert}")



# BSE (Beijing Stock Exchange) disclosure endpoints; destFilePath values from
# the API are joined onto BSE_PDF_BASE_URL to form full PDF links.
BSE_API_URL = 'https://www.bseinfo.net/disclosureInfoController/stockInfoResult.do'
BSE_PDF_BASE_URL = 'https://www.bseinfo.net'

def parse_bse_jsonp(jsonp_string):
    """Extract the JSON payload from a BSE JSONP response string.

    e.g. 'jQuery331_1749877507853([...])' -> the parsed [...] value.

    Args:
        jsonp_string: raw JSONP text returned by the BSE API.

    Returns:
        The parsed JSON value, or None when the wrapper or the JSON body is
        malformed (json.JSONDecodeError is a ValueError subclass, so it is
        caught too).
    """
    try:
        # Take everything between the first '(' and the last ')'.
        start_index = jsonp_string.index('(') + 1
        end_index = jsonp_string.rindex(')')
        json_string = jsonp_string[start_index:end_index]
        return json.loads(json_string)
    except (ValueError, IndexError) as e:
        # Bug fix: the original referenced a global `logger` that only exists
        # when this file runs as a script; resolve the 'crawler' logger
        # explicitly so importing this module cannot raise NameError here.
        logging.getLogger('crawler').error(f"[BSE][PARSE_ERROR] 解析JSONP失败: {e}, 原始字符串: {jsonp_string[:200]}")
        return None

def crawl_bse_announcements(conn, logger, crawl_session_start_time=None):
    """Crawl today's BSE (Beijing Stock Exchange) announcements into the database.

    Pages through the BSE JSONP disclosure API (sorted by publishDate desc)
    and stops at the first announcement dated before TODAY, at the API's last
    page, or on a request error. Rows are stored via insert_announcement,
    which deduplicates by primary key.

    Args:
        conn: open sqlite3 connection (see init_db).
        logger: logger used for progress and error reporting.
        crawl_session_start_time: session timestamp persisted with each row.

    Returns:
        Number of newly inserted rows.
    """
    logger.info("[BSE] 开始抓取北交所公告...")
    exchange = 'BSE'
    today_str = TODAY # compared against each announcement's publish date
    
    # TODO: incremental crawl based on the latest stored BSE date, if needed:
    # latest_db_date_bse = get_latest_announcement_date_for_exchange(conn, exchange, logger)
    # logger.info(f"[BSE] 数据库最新公告日期: {latest_db_date_bse}")

    page_num = 0 # the BSE API numbers pages from 0
    total_insert = 0
    stop_flag = False

    # Browser-like headers for the BSE JSONP endpoint.
    bse_headers = {
        'Accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
    }

    # Form payload template; 'page' is overwritten on each iteration.
    bse_payload_template = {
        'disclosureType': '9506',
        'page': page_num,
        'xxfcbj[]': '2',
        'startTime': '',
        'endTime': '',
        'keyword': '',
        'siteId': '6',
        'needFields[]': [
            'companyCd', 'companyName', 'disclosureTitle', 
            'disclosurePostTitle', 'destFilePath', 'publishDate', 
            'fileExt', 'isNewThree', 'xxzrlx', 'infoId'
        ],
        'sortfield': 'publishDate',
        'sorttype': 'desc',
    }

    while not stop_flag:
        current_payload = bse_payload_template.copy()
        current_payload['page'] = page_num

        logger.info(f"[BSE] 正在请求第 {page_num + 1} 页...")
        
        # Each page goes through a freshly fetched proxy (direct on failure).
        proxy = get_new_proxy()
        proxies = None
        if proxy:
            proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"}
            logger.info(f"[BSE] 使用代理: {proxy}")
        else:
            logger.warning("[BSE] 未获取到代理，使用直连")

        try:
            response = requests.post(BSE_API_URL, headers=bse_headers, data=current_payload, proxies=proxies, timeout=30)
            response.raise_for_status()
            
            # Response is JSONP; the payload is a list whose first element
            # carries 'listInfo' with a paged 'content' array.
            parsed_data = parse_bse_jsonp(response.text)
            if not parsed_data or not isinstance(parsed_data, list) or not parsed_data[0].get('listInfo'):
                logger.error(f"[BSE] 第 {page_num + 1} 页返回数据格式错误或为空。")
                break
            
            list_info = parsed_data[0]['listInfo']
            ann_list = list_info.get('content', [])

            if not ann_list:
                logger.info(f"[BSE] 第 {page_num + 1} 页无数据，终止。")
                break
            
            logger.info(f"[BSE] 第 {page_num + 1} 页获取到 {len(ann_list)} 条公告。")

            for announcement_data in ann_list:
                ann_id = str(announcement_data.get('infoId', ''))
                title = announcement_data.get('disclosureTitle', 'N/A')
                publish_date_str = announcement_data.get('publishDate', '')
                
                # Lexicographic compare; assumes publishDate is an ISO-style
                # 'YYYY-MM-DD...' string — TODO confirm against the API.
                if publish_date_str < today_str:
                    logger.info(f"[BSE] 遇到日期为 {publish_date_str} 的公告 (早于今天 {today_str})，终止当前页处理并停止分页。标题: {title}")
                    stop_flag = True
                    break

                # BSE serves the PDF directly; view and download links are the same.
                dest_file_path = announcement_data.get('destFilePath', '')
                pdf_view_link = f"{BSE_PDF_BASE_URL}{dest_file_path}" if dest_file_path else 'N/A'
                pdf_download_link = pdf_view_link

                db_ann = {
                    'id': ann_id,
                    'title': title,
                    'publishTime': publish_date_str,
                    'pdf_view_link': pdf_view_link,
                    'pdf_download_link': pdf_download_link,
                    'exchange': exchange,
                    'secCode': announcement_data.get('companyCd', ''),
                    'secName': announcement_data.get('companyName', ''),
                }
                
                if insert_announcement(conn, db_ann, exchange, logger, crawl_session_start_time):
                    total_insert += 1
                    logger.info(f"[BSE] 插入: {title} ({publish_date_str})")
                else:
                    logger.info(f"[BSE][SKIP/DUPLICATE_ID_EXISTS] 公告ID已存在或插入失败: {title} ({publish_date_str}), ID: {ann_id}")

            if stop_flag:
                break

            # The API reports when the final page has been reached.
            if list_info.get('lastPage', False):
                logger.info("[BSE] 已到达最后一页数据。")
                break
            
            page_num += 1
            time.sleep(random.uniform(1, 3))

        except requests.exceptions.RequestException as e:
            # No retry here (unlike SZSE): a request error ends the BSE crawl.
            logger.error(f"[BSE][REQUEST_ERROR] 第 {page_num + 1} 页请求异常: {e}")
            time.sleep(5)
            logger.warning("[BSE] 由于请求错误，终止本次北交所公告抓取。")
            break 
        except Exception as e:
            logger.error(f"[BSE][FATAL] 第 {page_num + 1} 页处理时发生未捕获的严重异常: {e}")
            break
            
    logger.info(f"[BSE][DONE] 本次共插入/更新北交所公告: {total_insert}")
    return total_insert

def crawl_sse_announcements(conn, logger, crawl_session_start_time=None):
    """Crawl today's SSE company bulletins into the database, incrementally.

    Queries the SSE JSONP bulletin API page by page for TODAY and stops when
    it reaches a bulletin dated before today or earlier than the newest one
    already stored for today. Each page goes through a fresh proxy; request
    errors retry the same page with a new proxy, bounded by a consecutive-
    failure cap (the original retried forever, which could loop indefinitely
    during a persistent outage).

    Args:
        conn: open sqlite3 connection (see init_db).
        logger: logger used for progress and error reporting.
        crawl_session_start_time: session timestamp persisted with each row.
    """
    SSE_API = 'https://query.sse.com.cn/security/stock/queryCompanyBulletin.do'
    SSE_HEADERS = {
        'Referer': 'https://www.sse.com.cn/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    today = TODAY
    page_num = 1
    page_size = 50
    total_insert = 0
    exchange = 'SSE'
    latest_db_time = get_latest_announcement_time_for_date(conn, today, exchange)
    if latest_db_time:
        logger.info(f"[{exchange}] 数据库已有 {today} 最新公告时间: {latest_db_time}")
    else:
        logger.info(f"[{exchange}] 数据库无 {today} 记录，将全量抓取")
    stop_flag = False
    # Bug fix: bound the retry-on-error loop; the original `continue`d forever.
    consecutive_failures = 0
    max_consecutive_failures = 5
    while not stop_flag:
        # Unique callback name per request, as the site's own JS would send.
        jsonp_callback = f"jsonpCallback{int(time.time() * 1000)}{random.randint(100, 999)}"
        params = {
            'isPagination': 'true',
            'pageHelp.pageSize': page_size,
            'pageHelp.pageNo': page_num,
            'pageHelp.beginPage': page_num,
            'pageHelp.cacheSize': 1,
            'pageHelp.endPage': page_num,
            'productId': '',
            'securityType': '0101,120100,020100,020200,120200',
            'reportType2': 'DQBG',
            'reportType': 'ALL',
            'beginDate': today,
            'endDate': today,
            '_': int(datetime.datetime.now().timestamp() * 1000),
            'jsonCallBack': jsonp_callback,
        }
        proxy = get_new_proxy()
        proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"} if proxy else None
        if proxy:
            logger.info(f"[{exchange}] 使用代理: {proxy}")
        else:
            logger.warning(f"[{exchange}] 未获取到代理，使用直连")
        try:
            response = requests.get(SSE_API, headers=SSE_HEADERS, params=params, proxies=proxies, timeout=30)
            response.raise_for_status()
            consecutive_failures = 0  # a successful request resets the failure budget
            text = response.text
            # Unwrap the JSONP envelope: callbackName( ...json... )
            match = re.search(r'^\s*' + re.escape(jsonp_callback) + r'\((.*)\)\s*$', text, re.DOTALL)
            if not match:
                logger.error(f"[{exchange}][ERROR] 第{page_num}页响应非预期JSONP格式: {text[:200]}")
                break
            json_str = match.group(1)
            data = json.loads(json_str)
            announcements = data.get('pageHelp', {}).get('data', [])
            if not announcements:
                logger.info(f"[{exchange}] 第{page_num}页无数据，抓取结束。")
                break
            for ann in announcements:
                # SSEDATE (a plain date string) is used as the publish time for
                # both comparison and storage.
                pub_time = ann.get('SSEDATE', '')
                if not pub_time:
                    logger.error(f"[{exchange}][SKIP] 公告 {ann.get('TITLE')} 缺少SSEDATE，跳过")
                    continue

                # pub_date equals pub_time because SSEDATE is already a date.
                pub_date = pub_time

                # Stop condition 1: the bulletin is not dated today.
                if pub_date != today:
                    logger.info(f"[{exchange}] 遇到非今日公告 (发布于 {pub_date})，终止分页。")
                    stop_flag = True
                    break

                # Incremental stop: only a publish time strictly earlier than
                # the stored daily maximum ends pagination. Note this is a
                # date-level comparison when latest_db_time is also a date.
                if latest_db_time and pub_time < latest_db_time:
                    logger.info(f"[{exchange}][OLD_RECORD_STOP] 公告日期 ({pub_time}) 早于数据库最新 ({latest_db_time})，终止分页。")
                    stop_flag = True
                    break
                url = ann.get('URL', '')
                if not url:
                    logger.warning(f"[{exchange}][SKIP] 公告缺少URL: {ann.get('TITLE')}")
                    continue
                # The API exposes no stable id, so derive one from the URL.
                ann_id = hashlib.sha1(url.encode('utf-8')).hexdigest()
                title = ann.get('TITLE', 'N/A')
                view_link = f"https://www.sse.com.cn{url}"
                db_ann = {
                    'id': ann_id,
                    'title': title,
                    'publishTime': pub_time,
                    'pdf_view_link': view_link,
                    'pdf_download_link': view_link,
                    'secCode': ann.get('SECURITY_CODE', ''),
                    'secName': ann.get('SECURITY_NAME', ''),
                    'attachPath': url,
                    'attachFormat': ann.get('bulletin_Type', ''), 
                    'attachSize': 0, 
                    'apiChannelCode': ann.get('BULLETIN_TYPE', ''), 
                }
                if insert_announcement(conn, db_ann, exchange, logger, crawl_session_start_time):
                    total_insert += 1
                    logger.info(f"[{exchange}] 插入: {title} ({pub_time})")
                else:
                    if latest_db_time and pub_time <= latest_db_time:
                        logger.info(f"[{exchange}][SKIP/DUPLICATE] 公告已存在或时间不新: {title} ({pub_time})")
            if not stop_flag:
                page_num += 1
                time.sleep(random.uniform(1.5, 3)) 
        except requests.exceptions.RequestException as e:
            logger.error(f"[{exchange}][ERROR] 第{page_num}页请求异常: {e}")
            consecutive_failures += 1
            if consecutive_failures >= max_consecutive_failures:
                logger.error(f"[{exchange}][ERROR] 连续{consecutive_failures}次请求失败，终止抓取。")
                break
            time.sleep(5)  # wait a bit longer after a request error
            continue  # retry the same page with a new proxy
        except Exception as e:
            logger.error(f"[{exchange}][FATAL] 第{page_num}页处理异常: {e}")
            break  # any other serious error aborts the crawl
            
    logger.info(f"[{exchange}][DONE] 今日共插入/更新公告: {total_insert}")

if __name__ == "__main__":
    import logging
    import os
    # Log to both a dated file under logs/ and the console.
    log_dir = 'logs'
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, f"crawler_{datetime.datetime.now().strftime('%Y-%m-%d')}.log")
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s %(message)s',
        handlers=[
            logging.FileHandler(log_file, encoding='utf-8'),
            logging.StreamHandler()
        ]
    )
    logger = logging.getLogger('crawler')
    # Record the session start time; it is stored with every row inserted in this run.
    script_start_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logger.info(f"[SYSTEM] 脚本开始执行，抓取会话开始时间: {script_start_time}")
    conn = init_db()
    try:
        # Crawl the three exchanges sequentially over one shared DB connection.
        crawl_szse_announcements(conn, logger, script_start_time)
        logger.info(f"深交所公告处理完毕。")
        crawl_sse_announcements(conn, logger, script_start_time)
        logger.info(f"上交所公告处理完毕。")
        total_inserted_bse = crawl_bse_announcements(conn, logger, script_start_time)
        logger.info(f"北交所公告处理完毕，共插入 {total_inserted_bse} 条。")
    except Exception as e:
        logger.error(f"[SYSTEM] 脚本执行出错: {e}", exc_info=True)
    finally:
        # Always close the connection, even if a crawler raised.
        conn.close()
        logger.info(f"[SYSTEM] 脚本执行完成，数据库连接已关闭。")
