#coding: utf-8
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Tracks URLs that have already been fetched, so the crawler never
# requests the same page twice (prevents infinite crawl loops).
# Shared by get_page_content() and crawl_news(); grows for the process lifetime.
visited_urls = set()

def get_sina_news():
    """Fetch the Sina news front page and collect on-site article links.

    Returns:
        list[dict]: one dict per anchor with keys 'title' and 'link',
        keeping only links on the news.sina.com.cn domain with non-empty
        text. Returns an empty list on any request failure or non-200
        response.
    """
    url = 'https://news.sina.com.cn/'
    try:
        # timeout keeps a stalled connection from hanging the crawler forever
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            # guess the charset from the body; Sina pages are not always UTF-8
            response.encoding = response.apparent_encoding
            soup = BeautifulSoup(response.text, 'html.parser')
            news_list = []
            for a in soup.find_all('a'):
                link = a.get('href')
                # keep only same-domain links (http or https)
                if link and link.startswith(('https://news.sina.com.cn',
                                             'http://news.sina.com.cn')):
                    # the original .encode('UTF-8').decode('UTF-8') was a
                    # no-op round-trip; stripping whitespace is all that's needed
                    title = a.get_text().strip()
                    if title:
                        news_list.append({
                            'title': title,
                            'link': link
                        })
            return news_list
        print(f"请求失败，状态码: {response.status_code}")
    except requests.RequestException as e:
        print(f"请求发生错误: {e}")
    return []

def get_page_content(url):
    """Fetch *url* once and return its visible text content.

    Deduplicates via the module-level ``visited_urls`` set: each URL is
    requested at most once per process; repeat calls return None
    immediately without a network request.

    Returns:
        str | None: the page's extracted text (all tags stripped), or
        None if the URL was already visited, the response is non-200,
        or the request fails.
    """
    if url in visited_urls:
        return None
    visited_urls.add(url)
    try:
        # timeout keeps a stalled connection from hanging the crawler forever
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            # guess the charset from the body; Sina pages are not always UTF-8
            response.encoding = response.apparent_encoding
            soup = BeautifulSoup(response.text, 'html.parser')
            # crude extraction: all text on the page, tags stripped
            return soup.get_text().strip()
        print(f"请求 {url} 失败，状态码: {response.status_code}")
    except requests.RequestException as e:
        print(f"请求 {url} 发生错误: {e}")
    return None

def crawl_news():
    """Crawl the Sina news front page and one level of sub-links.

    For each front-page article: print its title, link, and the first
    200 characters of its text; then follow on-domain links found on
    that page and print a preview of each.

    BUG FIX: the original parsed the *extracted text* returned by
    get_page_content() with BeautifulSoup, where every tag has already
    been stripped, so find_all('a') never matched and no sub-links were
    ever crawled. We now fetch and parse the page's real HTML here.
    """
    news = get_sina_news()
    for item in news:
        link = item['link']
        print(f"标题: {item['title']}")
        print(f"链接: {link}")
        # respect the shared dedup set so this page isn't fetched twice
        if link in visited_urls:
            print("-" * 50)
            continue
        visited_urls.add(link)
        try:
            response = requests.get(link, timeout=10)
            response.encoding = response.apparent_encoding
        except requests.RequestException as e:
            print(f"请求 {link} 发生错误: {e}")
            print("-" * 50)
            continue
        if response.status_code != 200:
            print(f"请求 {link} 失败，状态码: {response.status_code}")
            print("-" * 50)
            continue
        # Parse the raw HTML (not the stripped text) so anchors survive.
        soup = BeautifulSoup(response.text, 'html.parser')
        print("网页内容:")
        print(soup.get_text().strip()[:200])  # 只打印前 200 个字符
        # Follow on-domain links found on this page (one level deep).
        for a in soup.find_all('a'):
            sub_link = a.get('href')
            if not sub_link:
                continue
            # resolve relative URLs against the current page
            sub_link = urljoin(link, sub_link)
            if sub_link.startswith(('https://news.sina.com.cn',
                                    'http://news.sina.com.cn')):
                sub_content = get_page_content(sub_link)
                if sub_content:
                    print(f"子链接: {sub_link}")
                    print("子链接网页内容:")
                    print(sub_content[:200])
        print("-" * 50)
# Script entry point: kick off the crawl when run directly.
if __name__ == "__main__":
    crawl_news()