import requests
from bs4 import BeautifulSoup
import datetime
import time

def get_news_list(base_url):
    """Fetch the raw HTML of a news list page.

    Args:
        base_url: Full list-page URL including its query parameters
            (fid / cid / page) — the URL is requested exactly as given.

    Returns:
        The decoded HTML text on success, or None on a network error,
        a non-200 response, or undecodable content.
    """
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36',
    }
    # BUG FIX: the old hard-coded params dict ({'page': '1', ...}) was merged
    # onto URLs that already carried a page=N query string, so every request
    # silently re-fetched page 1.  The caller's URL is now used as-is.
    try:
        response = requests.get(base_url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.content.decode()
        return None
    except (requests.RequestException, UnicodeDecodeError) as e:
        print(f"列表页请求异常: {str(e)}")
        return None

def parse_news_list(html):
    """Parse a news list page into link/title records.

    Args:
        html: HTML text of a list page.

    Returns:
        A list of dicts, each with keys "link" (the anchor's href, or
        None if the attribute is absent) and "title" (the anchor text).
        <li> items without an <a> tag are skipped — the old code called
        select_one('a') twice and raised AttributeError on such items.
    """
    soup = BeautifulSoup(html, 'html.parser')
    page_list = []
    for item in soup.select('ul#listcontent li'):
        anchor = item.select_one('a')
        if anchor is None:
            # Malformed entry (e.g. a separator <li>) — skip instead of crashing.
            continue
        page_list.append({
            "link": anchor.get('href'),
            "title": anchor.text,
        })
    return page_list

def get_news_detail(detail_url):
    """Fetch the HTML of a single news detail page.

    Args:
        detail_url: Absolute URL of the article page.

    Returns:
        The response text (decoded as UTF-8) on HTTP 200, or None on a
        non-200 status or any network failure.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    try:
        # timeout added: without one a stalled server hangs the crawler forever.
        response = requests.get(detail_url, headers=headers, timeout=10)
    except requests.RequestException as e:
        # Narrowed from a bare `except Exception` — only network/HTTP-layer
        # errors are expected here; anything else should surface as a bug.
        print(f"详情页请求异常: {str(e)}")
        return None
    response.encoding = 'utf-8'
    if response.status_code == 200:
        return response.text
    return None

def parse_news_detail(detail_html):
    """Extract publication date, source, and body text from a detail page.

    Args:
        detail_html: HTML text of an article page.

    Returns:
        A dict with keys "date", "source", "article"; each value falls
        back to a descriptive placeholder string when the corresponding
        tag is missing or empty.
    """
    soup = BeautifulSoup(detail_html, 'html.parser')

    source_tag = soup.select_one('a.source.ent-source')
    source = source_tag.text.strip() if source_tag else "网站未标明来源"

    # Two known layouts for the date element; try both.
    date_tag = soup.select_one('span.date') or soup.select_one('div.news-date')
    # BUG FIX: .split() on whitespace-only text returns [], so indexing [0]
    # unguarded raised IndexError on pages with an empty date element.
    date_parts = date_tag.text.split() if date_tag else []
    date = date_parts[0] if date_parts else "网站未标明日期"

    article_tag = soup.select_one('div#article_content') or soup.select_one('div#artibody')
    article = article_tag.text.strip() if article_tag else "无正文内容"

    return {
        "date": date,
        "source": source,
        "article": article,
    }

def main():
    """Crawl pages 1-3 of the Sina Finance roll list, fetch each
    article's detail page, and write all results to a text file."""
    collected = []  # one record dict per news item

    for page_no in range(1, 4):
        print(f"正在抓取第 {page_no} 页...")
        list_url = (
            'https://finance.sina.com.cn/roll/index.d.html'
            f'?fid=&cid=51894&page={page_no}'
        )
        listing_html = get_news_list(list_url)
        if not listing_html:
            continue
        for entry in parse_news_list(listing_html):
            # Start from placeholder values; detail parsing overwrites them.
            record = {
                "title": entry['title'],
                "link": entry['link'],
                "date": "无日期",
                "source": "无来源",
                "article": "无正文",
            }
            detail_html = get_news_detail(entry['link'])
            if detail_html:
                parsed = parse_news_detail(detail_html)
                if parsed:
                    record.update(parsed)
            collected.append(record)
            time.sleep(1)  # throttle requests to be polite to the server

    # Dump everything in one pass, UTF-8 encoded.
    with open('新浪财经新闻爬取.txt', 'w', encoding='utf-8') as out:
        for news in collected:
            out.write("=" * 60 + "\n")
            out.write(f"标题: {news['title']}\n")
            out.write(f"链接: {news['link']}\n")
            out.write(f"日期: {news['date']}\n")
            out.write(f"来源: {news['source']}\n")
            out.write(f"内容:\n{news['article']}\n")

    print("新闻抓取完成，结果已写入 新浪财经新闻爬取.txt")

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()