import os
import time

import pandas as pd
import requests
from bs4 import BeautifulSoup


def fetch_announcements(page_num, headers, params):
    """
    Fetch the announcement entries from one page of the listing endpoint.

    :param page_num: 1-based page number to request
    :param headers: HTTP request headers (dict) passed through to requests
    :param params: query-string parameters (dict) passed through to requests
    :return: list of dicts, each with keys 'title', 'time', 'link'
    :raises requests.RequestException: on connection failure or timeout
    """
    url = f'https://www.wxtrust.com/index.php/message/message_ajax/24/{page_num}'
    # timeout keeps one stalled connection from hanging the whole 400+ page crawl
    response = requests.get(url, params=params, headers=headers, timeout=10)
    response.encoding = 'utf-8'

    soup = BeautifulSoup(response.text, 'html.parser')
    announcements = []

    for item in soup.select('.protects-item-wrap .protects-item'):
        title = item.select_one('.protects-h5').get_text(strip=True)
        # renamed from `time`: the original name shadowed the imported time module
        pub_time = item.select_one('.protects-time0').get_text(strip=True)
        # assumes each matched item is itself an <a> tag carrying href — TODO confirm
        link = item.attrs['href']
        announcements.append({
            'title': title,
            'time': pub_time,
            'link': link,
        })

    return announcements


def write_to_csv(announcements, filename='announcements.csv'):
    """
    Append announcement records to a CSV file, creating it (with a header
    row) on first use.

    :param announcements: list of dicts to write; an empty list is a no-op
    :param filename: path of the target CSV file
    """
    # Guard: an empty list would produce a column-less DataFrame; on the first
    # call that writes a blank file whose missing header corrupts later appends.
    if not announcements:
        return
    df = pd.DataFrame(announcements)
    # utf-8-sig BOM so Excel opens the Chinese text correctly
    if not os.path.isfile(filename):
        df.to_csv(filename, index=False, mode='w', encoding='utf-8-sig')  # new file, include header
    else:
        df.to_csv(filename, index=False, mode='a', header=False, encoding='utf-8-sig')  # append rows only


def main():
    """
    Crawl announcement pages 1..427, appending each page's entries to
    announcements.csv and echoing them to stdout, with a 3s delay per page.
    """
    headers = {
        'Accept': 'text/html, */*; q=0.01',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        # 'Cookie': 'Hm_lvt_c3f009f814f701e8fad8a17f9682ec79=1740045271; HMACCOUNT=A93D1D4ED57DF201; mIsWzaTool=true; mColorType=0; mIsSound=false; mZoomV=16; _scfc=ce9eaedd8b90e1da366f62d1448dc7d8; boc_session_site=dky7y6rNiYsOIc7t27s5VYTkj%2FEz2reDqmPWqz7uzAw1WR1YO%2BgikD2zQEr8ixkx5ukkWmkJ1eep5ugO4HBjF2JOa2QFBNN1gSvlpONClWR28fK%2BgQzYiPZiVfqUYhFmKyD4PNHzQj0W0N1dDccVNdbk3Z4KIHUTa81sPU4iV%2FzoV3AVC7ydR2genkgvqUAlOBL2gJnn5NpLX6F5XBZUVA%2BlN3UmQXYc8BUVvh2i6u1LL%2Fa0HcKWn1hdIBAEGp%2BBFrFwYK3y4iCCLxSv4ID7xL5VR8O%2FL5Y%2BjZOUB3Th4eyUfFxKTHBWc3yFFl0QGLfzkEbXlhV5aoDBvkjEX3v9OUbFl5nbBZRmjytxPaGGLQvoW5k1DqKsMNOT9JGSt3s9CCDGdchZarntQPVrXAyYV6%2FGaHu7nCs7UwCxki6zVlkpCIhkdPdWmXesrXP5Vx8RN2YcOeLCmBZcg3UvbOpkug%3D%3D4e42a83f48de604e65ad603fe6d421245c64d908; Hm_lpvt_c3f009f814f701e8fad8a17f9682ec79=1740053378',
        'Pragma': 'no-cache',
        'Referer': 'https://www.wxtrust.com/index.php/message/index/21',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
        'sec-ch-ua': '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
    }
    params = {'id': '0', 'secondtype': '24'}
    for i in range(1, 428):
        print(f"正在处理第 {i} 页...")
        try:
            announcements = fetch_announcements(i, headers, params)
        except Exception as e:
            print(f'链接https://www.wxtrust.com/index.php/message/message_ajax/24/{i}请求失败，原因：{e}')
            # Bug fix: without this `continue` the loop fell through and wrote
            # stale data from the previous page (or raised NameError on page 1).
            continue
        # Persist this page's results to the CSV file
        write_to_csv(announcements)
        # Echo the extracted entries (optional)
        for announcement in announcements:
            print(f"标题: {announcement['title']}")
            print(f"时间: {announcement['time']}")
            print(f"链接: {announcement['link']}")
            print("-" * 40)
        # Polite crawl delay between page requests
        time.sleep(3)
