import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random

# Browser-like request headers to reduce the chance of being blocked
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
    'Connection': 'keep-alive',
}

# Pool of free proxies (needs periodic refresh). Each entry should be a
# requests-style proxies dict, e.g. {'http': '...', 'https': '...'} —
# random.choice below picks one element per request.
proxies_list = []  # add working proxies here for real runs

# Accumulates one dict per movie across all scraped pages
all_movies = []

# Scrape Douban Movie Top 250: 10 pages, 25 entries per page
for start in range(0, 250, 25):
    url = f'https://movie.douban.com/top250?start={start}'

    # Pick a random proxy if any are configured; otherwise connect directly
    proxies = random.choice(proxies_list) if proxies_list else None

    try:
        # Fetch the listing page
        response = requests.get(url, headers=headers, timeout=15, proxies=proxies)
        response.raise_for_status()  # raise HTTPError on 4xx/5xx status codes

        # Debug aid: keep a local snapshot of every fetched page
        with open(f'page_{start}.html', 'w', encoding='utf-8') as f:
            f.write(response.text)

        # Parse the HTML content
        soup = BeautifulSoup(response.text, 'html.parser')
        # NOTE(review): if the layout changes and 'grid_view' is missing,
        # find() returns None and this raises AttributeError (caught below)
        movie_list = soup.find('ol', class_='grid_view').find_all('li')

        # Extract the fields of each movie entry
        for movie in movie_list:
            movie_info = {}

            # Title — first span.title (the Chinese title on Douban)
            title_span = movie.find('span', class_='title')
            title = title_span.text if title_span else "未知名称"
            movie_info['title'] = title

            # Detail-page link from the first <a> tag
            link_tag = movie.find('a')
            link = link_tag['href'] if link_tag and 'href' in link_tag.attrs else "#"
            movie_info['link'] = link

            # Rating; falls back to 0.0 when the span is missing
            rating_span = movie.find('span', class_='rating_num')
            rating = float(rating_span.text) if rating_span else 0.0
            movie_info['rating'] = rating

            # Vote count — last <span> inside div.star holds text like "N人评价";
            # int() may still fail on unexpected text, hence the broad catch
            try:
                star_div = movie.find('div', class_='star')
                if star_div:
                    votes_span = star_div.find_all('span')[-1]
                    votes_text = votes_span.text.strip()
                    votes = int(votes_text.replace('人评价', ''))
                else:
                    votes = 0
            except Exception as e:
                print(f"解析电影 '{title}' 的评价人数失败: {e}")
                votes = 0
            movie_info['votes'] = votes

            # One-line tagline (span.inq inside p.quote); optional on the page
            quote_p = movie.find('p', class_='quote')
            intro = quote_p.find('span', class_='inq').text if quote_p and quote_p.find('span',
                                                                                        class_='inq') else "暂无简介"
            movie_info['intro'] = intro

            # Director / cast / year block — presumably the <p> whose class
            # attribute is empty on Douban's markup; verify if layout changes
            info_p = movie.find('p', class_='')
            info = info_p.text.strip() if info_p else "未知信息"
            movie_info['info'] = info

            all_movies.append(movie_info)

        # Random delay between pages to stay under rate limits
        time.sleep(random.uniform(3, 8))
        print(f"已成功爬取第{start // 25 + 1}页数据")

    except requests.exceptions.HTTPError as http_err:
        print(f"爬取第{start // 25 + 1}页时HTTP错误: {http_err}")
        time.sleep(10)  # back off longer after an error
    except requests.exceptions.RequestException as req_err:
        print(f"爬取第{start // 25 + 1}页时请求异常: {req_err}")
        time.sleep(10)
    except AttributeError as attr_err:
        print(f"爬取第{start // 25 + 1}页时解析错误（可能页面结构变化）: {attr_err}")
        break  # stop entirely: the page structure has likely changed
    except Exception as e:
        print(f"爬取第{start // 25 + 1}页时未知错误: {e}")
        time.sleep(10)

# Collect the scraped records into a DataFrame
df = pd.DataFrame(all_movies)

# Save raw data; utf-8-sig writes a BOM so Excel opens the Chinese text correctly
df.to_csv('douban_movies_top250.csv', index=False, encoding='utf-8-sig')
print(f"数据爬取完成，共{len(df)}部电影")


# 数据清洗与预处理
def extract_director_and_actors(info_text):
    """Extract director and leading-actor names from a raw Douban info string.

    Douban lists both on one line, e.g.
    "导演: 弗兰克·德拉邦特 Frank Darabont   主演: 蒂姆·罗宾斯 Tim Robbins",
    so the line must be split on the "主演" marker. The previous version only
    stripped the "导演: " / "主演: " labels, which left each value
    contaminated with the other field's text.

    Args:
        info_text: Raw text of the movie's info paragraph; may be None/empty.

    Returns:
        (director, actors) tuple of stripped strings; either value falls
        back to "未知" when its marker is absent.
    """
    director = "未知"
    actors = "未知"

    lines = [p.strip() for p in info_text.split('\n')] if info_text else []
    lines = [p for p in lines if p]

    for line in lines:
        if "导演" in line:
            # Keep only the text before any "主演" marker, then drop the label
            director = line.split("主演")[0].replace("导演:", "").strip()
            break

    for line in lines:
        if "主演" in line:
            # Everything after the "主演" marker, minus the colon separator
            actors = line.split("主演", 1)[1].lstrip(":： ").strip()
            break

    return director, actors


def extract_year_and_country(title):
    """Derive (year, country) from a movie title string.

    The year is the content of the last "(...)" group, accepted only when it
    is purely digits. The country is decided by the first keyword group below
    that matches the title; "其他" is the fallback when none match.
    """
    year = "未知"
    if "(" in title and ")" in title:
        candidate = title.rpartition("(")[2].partition(")")[0]
        if candidate.isdigit():
            year = candidate

    # Evaluated in order — the first group with any keyword present wins
    keyword_groups = (
        (("美国", "USA", "US"), "美国"),
        (("中国", "大陆", "香港", "台湾"), "中国"),
        (("日本",), "日本"),
        (("韩国",), "韩国"),
        (("英国", "UK"), "英国"),
        (("法国",), "法国"),
        (("德国",), "德国"),
        (("印度",), "印度"),
    )
    for keywords, name in keyword_groups:
        if any(k in title for k in keywords):
            country = name
            break
    else:
        country = "其他"

    return year, country


# Split the combined info field into separate director / actors columns
df['director'], df['actors'] = zip(*df['info'].apply(extract_director_and_actors))

# Derive year and country/region columns from the title text
df['year'], df['country'] = zip(*df['title'].apply(extract_year_and_country))

# Coerce year to numeric; non-numeric values (e.g. "未知") become NaN
df['year'] = pd.to_numeric(df['year'], errors='coerce')

# Persist the cleaned dataset (BOM-prefixed UTF-8 for Excel compatibility)
df.to_csv('douban_movies_cleaned.csv', index=False, encoding='utf-8-sig')
print("数据清洗完成")

# Data visualization / analysis
import matplotlib.pyplot as plt
import seaborn as sns

# Render figures at high resolution
plt.rcParams['figure.dpi'] = 300

# Configure a CJK-capable font so Chinese labels render
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly

# 1. Histogram of the rating distribution
plt.figure(figsize=(10, 6))
sns.histplot(df['rating'], bins=10, kde=True, color='skyblue')
plt.title('豆瓣电影TOP250评分分布')
plt.xlabel('评分')
plt.ylabel('电影数量')
plt.grid(True, alpha=0.3)
plt.savefig('rating_distribution.png')
plt.show()

# 2. Movie counts per country/region
country_counts = df['country'].value_counts()
plt.figure(figsize=(12, 6))
sns.barplot(x=country_counts.index, y=country_counts.values, palette='viridis')
plt.title('不同国家/地区的电影数量')
plt.xlabel('国家/地区')
plt.ylabel('电影数量')
plt.xticks(rotation=45)
plt.grid(True, axis='y', alpha=0.3)
plt.savefig('country_movie_counts.png')
plt.show()

# 3. Rating vs. number of votes
plt.figure(figsize=(10, 6))
sns.scatterplot(x='rating', y='votes', data=df, color='red', alpha=0.7)
plt.title('电影评分与评价人数的关系')
plt.xlabel('评分')
plt.ylabel('评价人数')
plt.grid(True, alpha=0.3)
plt.savefig('rating_votes_relationship.png')
plt.show()

# 4. Movie counts per decade
# Bucket years into decades; NaN years stay NaN and are filtered out below
df['decade'] = (df['year'] // 10) * 10
decade_counts = df[df['year'].notna()]['decade'].value_counts().sort_index()

plt.figure(figsize=(12, 6))
sns.barplot(x=decade_counts.index.astype(int), y=decade_counts.values, palette='plasma')
plt.title('各年代电影数量统计')
plt.xlabel('年代')
plt.ylabel('电影数量')
plt.grid(True, axis='y', alpha=0.3)
plt.savefig('decade_movie_counts.png')
plt.show()

# 5. Word cloud of high-rated movies' taglines
from wordcloud import WordCloud

# Join the taglines of movies rated 8.5 or higher into one text blob
high_rating_intro = ' '.join(df[df['rating'] >= 8.5]['intro'].dropna())

# Generate the word cloud
try:
    wordcloud = WordCloud(
        font_path='simhei.ttf',  # a CJK font file is required to render Chinese
        width=800,
        height=600,
        background_color='white',
        max_words=100
    ).generate(high_rating_intro)

    # Render the word-cloud image
    plt.figure(figsize=(10, 8))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    plt.title('高评分电影简介词云')
    plt.savefig('high_rating_wordcloud.png', bbox_inches='tight')
    plt.show()
except Exception as e:
    print(f"生成词云失败: {e}")
    print("提示: 可能需要安装中文字体或指定正确的字体路径")