import sys
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import networkx as nx
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans

# DFS stack of (url, depth) pairs and the set of links already crawled
stack = []
visited = set()
# Pre-seed with junk href values so anchors whose href is literally
# 'javascript' or 'None' are skipped by the `link in visited` test.
# NOTE(review): this only catches these exact strings, not e.g.
# 'javascript:void(0)' — confirm intent.
visited.add('javascript')
visited.add('None')
max_depth = 10  # maximum crawl depth from the start URL
# Directed graph of (crawled page -> linked page) edges
G = nx.DiGraph()

def get_page_content(url):
    """Fetch *url* and parse it with BeautifulSoup.

    Returns a ``(soup, text)`` tuple — the parsed document and the page's
    stripped plain text — or ``(None, None)`` on a non-200 status or any
    network error (the error is printed, not raised).
    """
    try:
        # A timeout keeps the crawler from hanging forever on a stalled host.
        response = requests.get(url, timeout=10)
        # Guess the real encoding from the body; Sina pages are often GBK/GB2312.
        response.encoding = response.apparent_encoding
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            content = soup.get_text().strip()
            return soup, content
        print(f"请求 {url} 失败，状态码: {response.status_code}")
    except requests.RequestException as e:
        print(f"请求 {url} 发生错误: {e}")
    return None, None

def cluster_texts(texts, n_clusters=5):
    """Cluster *texts* using TF-IDF vectors and K-Means.

    Parameters
    ----------
    texts : list of str
        Documents to cluster.
    n_clusters : int, optional
        Requested number of clusters; clamped to the number of documents
        so K-Means cannot raise ``ValueError`` on small crawls.

    Returns
    -------
    The per-document cluster label array from the fitted K-Means model.
    """
    # TF-IDF vectorization; very common (>50% of docs) and very rare
    # (<2 docs) terms are dropped.
    vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, stop_words='english')
    X = vectorizer.fit_transform(texts)

    # K-Means requires n_clusters <= n_samples; clamp to stay valid.
    kmeans = KMeans(n_clusters=min(n_clusters, X.shape[0]), random_state=42)
    kmeans.fit(X)

    return kmeans.labels_

def crawl_sina_news():
    """Depth-first crawl of Sina news pages.

    Pops (url, depth) pairs from the module-level ``stack``, harvests every
    non-empty ``<p>`` paragraph, records page->page edges in the graph ``G``,
    and follows in-site links up to ``max_depth``. After the crawl, the
    collected paragraphs are clustered and each paragraph's cluster label
    is printed.
    """
    # Make sure Chinese titles print cleanly regardless of the console
    # default encoding. Hoisted here: it only needs to run once, not once
    # per discovered link.
    sys.stdout.reconfigure(encoding='utf-8')

    start_url = 'https://news.sina.com.cn/w/2025-02-24/doc-inempsmt0339892.shtml'
    stack.append((start_url, 0))
    texts = []  # all harvested paragraph texts, in crawl order
    while stack:
        current_url, current_depth = stack.pop()
        if current_url in visited:
            continue
        visited.add(current_url)
        soup, _content = get_page_content(current_url)
        if not soup:
            continue
        # Collect the page's non-empty paragraphs.
        for paragraph in soup.find_all('p'):
            text = paragraph.get_text().strip()
            if text:
                texts.append(text)
        # Follow links that stay inside news.sina.com.cn.
        for a in soup.find_all('a'):
            link = a.get('href')
            if not link or link in visited:
                continue
            if link.startswith(('https://news.sina.com.cn',
                                'http://news.sina.com.cn')):
                title = a.get_text().strip()
                print(f"标题: {title}, 链接: {link}")
                full_link = urljoin(current_url, link)
                if full_link.startswith(('https://news.sina.com.cn',
                                         'http://news.sina.com.cn')) and full_link not in visited:
                    # Only descend while within the depth budget; the edge is
                    # recorded either way so the graph stays complete.
                    if current_depth < max_depth:
                        stack.append((full_link, current_depth + 1))
                    G.add_edge(current_url, full_link)

    # Guard: if nothing was crawled (e.g. network failure), clustering an
    # empty corpus would raise inside scikit-learn.
    if not texts:
        print("未抓取到任何文本，跳过聚类")
        return
    labels = cluster_texts(texts)
    for i, label in enumerate(labels):
        print(f"文本 {i} 属于聚类 {label}")

if __name__ == "__main__":
    crawl_sina_news()
    # Draw the link graph collected during the crawl
    plt.figure(figsize=(12, 8))
    pos = nx.spring_layout(G, k=0.5)  # force-directed layout; k spaces the nodes
    nx.draw(G, pos, with_labels=True, node_size=2000, node_color='skyblue', font_size=8, font_weight='bold', arrows=True)
    plt.title("Sina News Crawl Graph")
    plt.show()
    # Keep the console window open until the user presses a key
    ss = input("任意键结束")
