import requests
from bs4 import BeautifulSoup
import time
import random
from urllib.parse import urljoin
import json


class SinaFinanceSpider:
    """Crawler for the Sina Finance rolling-news listing.

    Two-phase workflow:
      1. crawl_all_links()    -- walk the paginated listing, collect article
                                 URLs, save them to sina_finance_links.json.
      2. crawl_all_articles() -- load that file, fetch every article, save
                                 title/date/content to sina_finance_articles.json.
    """

    def __init__(self):
        # Listing URL template; {} is filled with the 1-based page number.
        self.base_url = "https://finance.sina.com.cn/roll/index.d.html?cid=56689&page={}"
        # Browser-like headers to reduce the chance of being blocked.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Referer': 'https://finance.sina.com.cn/'
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.article_links = []
        # Set mirror of article_links for O(1) duplicate checks; the original
        # `url not in list` test made link collection quadratic overall.
        self._seen_links = set()
        self.max_pages = 1  # maximum number of listing pages to crawl; adjust as needed
        self.timeout = 10  # seconds per request; without it a dead connection hangs forever

    def get_page_links(self, page_num):
        """Collect article links from one listing page.

        Returns True when the page advertises a next page, False otherwise
        (including on any request failure). Discovered links are appended
        to self.article_links, skipping duplicates.
        """
        url = self.base_url.format(page_num)
        try:
            response = self.session.get(url, timeout=self.timeout)
            response.encoding = 'utf-8'
            if response.status_code == 200:
                soup = BeautifulSoup(response.text, 'html.parser')
                # The news list lives in <ul id="listcontent"> on this layout.
                news_list = soup.find('ul', {'id': 'listcontent'})
                if news_list:
                    for item in news_list.find_all('li'):
                        link = item.find('a')
                        if link and link.get('href'):
                            # Resolve relative hrefs against the page URL.
                            full_url = urljoin(url, link['href'])
                            if full_url not in self._seen_links:
                                self._seen_links.add(full_url)
                                self.article_links.append(full_url)
                                print(f"发现新链接: {full_url}")
                # A "next" control with an <a> inside means more pages exist.
                next_page = soup.find('span', class_='pagebox_next')
                if next_page and next_page.find('a'):
                    return True  # more pages available
            return False  # no next page, non-200 response, or list not found
        except Exception as e:
            # Best-effort crawl: log and treat any failure as "no next page".
            print(f"获取第{page_num}页链接时出错: {e}")
            return False

    def crawl_all_links(self):
        """Crawl listing pages up to self.max_pages and persist the links."""
        current_page = 1
        has_next = True

        while has_next and current_page <= self.max_pages:
            print(f"正在爬取第 {current_page} 页...")
            has_next = self.get_page_links(current_page)
            current_page += 1
            # Random delay to avoid triggering anti-scraping defenses.
            time.sleep(random.uniform(1, 3))

        print(f"共收集到 {len(self.article_links)} 篇新闻链接")
        # Persist links so the article phase can run independently later.
        with open('sina_finance_links.json', 'w', encoding='utf-8') as f:
            json.dump(self.article_links, f, ensure_ascii=False, indent=2)

    def crawl_article_content(self, url):
        """Fetch one article and return {'title', 'date', 'content', 'url'}.

        Returns None on any failure (non-200 status or exception).
        """
        try:
            response = self.session.get(url, timeout=self.timeout)
            response.encoding = 'utf-8'
            if response.status_code == 200:
                soup = BeautifulSoup(response.text, 'html.parser')

                # Title: first <h1> on the page.
                title = soup.find('h1')
                title = title.text.strip() if title else '无标题'

                # Publication time: <span class="date">.
                date = soup.find('span', class_='date')
                date = date.text.strip() if date else '无日期'

                # Body container varies between page templates.
                content_div = soup.find('div', class_='article')
                if not content_div:
                    content_div = soup.find('div', class_='content')

                content = ''
                if content_div:
                    # Drop non-text elements outright...
                    for elem in content_div(['script', 'style', 'iframe', 'img']):
                        elem.decompose()
                    # ...but unwrap <a> tags instead of decomposing them:
                    # decompose() would silently delete the anchor text of
                    # inline links from the article body.
                    for anchor in content_div.find_all('a'):
                        anchor.unwrap()
                    content = '\n'.join(p.text.strip() for p in content_div.find_all('p') if p.text.strip())

                return {
                    'title': title,
                    'date': date,
                    'content': content,
                    'url': url
                }
            return None
        except Exception as e:
            print(f"爬取文章 {url} 时出错: {e}")
            return None

    def crawl_all_articles(self):
        """Fetch every collected article and persist them as JSON."""
        articles = []
        # Load links produced by crawl_all_links().
        try:
            with open('sina_finance_links.json', 'r', encoding='utf-8') as f:
                self.article_links = json.load(f)
        except FileNotFoundError:
            print("未找到链接文件，请先运行 crawl_all_links()")
            return

        print(f"开始爬取 {len(self.article_links)} 篇文章...")

        for i, url in enumerate(self.article_links, 1):
            print(f"正在处理第 {i} 篇: {url}")
            article = self.crawl_article_content(url)
            if article:
                articles.append(article)
            # Random delay between article fetches.
            time.sleep(random.uniform(0.5, 2))

        # Persist all article data in one file.
        with open('sina_finance_articles.json', 'w', encoding='utf-8') as f:
            json.dump(articles, f, ensure_ascii=False, indent=2)

        print(f"已完成 {len(articles)} 篇文章的爬取")


def main():
    """Usage example: run the two-phase crawl end to end."""
    spider = SinaFinanceSpider()
    # Phase 1: walk the listing pages and persist every article URL.
    spider.crawl_all_links()
    # Phase 2: download each collected article and persist its content.
    spider.crawl_all_articles()


if __name__ == '__main__':
    main()