import requests
from bs4 import BeautifulSoup
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# 1. Scrape data
def fetch_comments(url, headers, timeout=10):
    """Fetch short-review texts from a Douban comments page.

    Args:
        url: The comments-page URL to request.
        headers: HTTP headers dict (Douban requires a browser-like User-Agent).
        timeout: Seconds to wait for the server response. Without a timeout,
            requests.get can block indefinitely on a stalled connection.

    Returns:
        A list of review strings; empty on a non-200 response or network error.
    """
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
    except requests.RequestException as exc:
        # Network failure (DNS, timeout, refused connection, ...) —
        # report and degrade to an empty result instead of crashing the script.
        print(f"Failed to fetch data from {url}: {exc}")
        return []
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        # Douban renders each short review inside <span class="short">.
        comments = soup.find_all('span', class_='short')
        return [comment.get_text() for comment in comments]
    else:
        print(f"Failed to fetch data from {url}")
        return []

# Example: scrape page 1 of short reviews for "The Shawshank Redemption" on Douban
url = 'https://movie.douban.com/subject/1292052/comments?status=P'
# Douban rejects requests without a browser-like User-Agent header.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
}
# NOTE(review): this performs a live network request when the script runs;
# on failure `comments` is an empty list and the word cloud below will be empty.
comments = fetch_comments(url, headers)

# 2. Data preprocessing
def preprocess_text(texts):
    """Segment each input string with jieba, joining tokens by single spaces.

    Returns one space-delimited token string per input text, in order.
    """
    segmented = []
    for raw in texts:
        tokens = jieba.cut(raw)
        segmented.append(' '.join(tokens))
    return segmented

# Example: preprocess the scraped comments (yields space-joined token strings)
processed_comments = preprocess_text(comments)

# 3. Generate the word cloud
# Join every preprocessed comment into one space-separated string —
# the flat-text input format WordCloud.generate expects.
all_comments_text = ' '.join(processed_comments)

# Build the cloud image; a CJK-capable font file is required so that
# Chinese tokens render instead of appearing as empty boxes.
cloud_settings = {
    'width': 800,
    'height': 400,
    'background_color': 'white',
    'font_path': 'simhei.ttf',  # path to a Chinese font file
}
wordcloud = WordCloud(**cloud_settings).generate(all_comments_text)

# Render the image with matplotlib and open an interactive window.
plt.figure(figsize=(10, 5))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.title("Word Cloud of Processed Comments")
plt.show()
