from bs4 import BeautifulSoup
import requests
import csv
import time
import random
import pymysql
from pymysql import OperationalError
import re  # 新增导入正则模块


def get_movie_links():
    """Collect the detail-page URLs of all movies on Douban Top250.

    Walks the 10 list pages (25 entries each, `start` = 0, 25, ... 225),
    extracts each entry's detail link, and sleeps a random 2-8 s between
    pages to avoid anti-crawler throttling.

    Returns:
        list[str]: detail-page URLs in list order (expected length 250).

    Raises:
        Exception: when a list page does not answer with HTTP 200.
        requests.exceptions.RequestException: on network failure/timeout.
    """
    # The headers are loop-invariant — build them once, not per page.
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0'
    }

    movie_links = []
    for start in range(0, 250, 25):
        url = f"https://movie.douban.com/top250?start={start}&filter="
        print(f"正在获取页面: {url}")

        # Fix: requests has no default timeout — without one a stalled
        # connection hangs the whole crawl indefinitely.
        res = requests.get(url=url, headers=headers, timeout=15)
        if res.status_code != 200:
            raise Exception(f"请求失败，状态码：{res.status_code}")
        print(f"成功获取页面，状态码: {res.status_code}")

        html = BeautifulSoup(res.text, 'html.parser')
        lis = html.select('#content > div > div.article > ol > li')
        print(f"从页面中提取到 {len(lis)} 个电影链接")

        for li in lis:
            link = li.select_one('div.item > div.pic > a')['href']
            movie_links.append(link)

        # Random pause of 2-8 seconds after every page.
        wait_time = random.uniform(2, 8)
        print(f"页面处理完成，等待 {wait_time:.2f} 秒后继续...")
        time.sleep(wait_time)

    print(f"共获取到 {len(movie_links)} 个电影链接")
    return movie_links



def get_movie_info(url):
    """Scrape one Douban movie detail page into a flat dict.

    Every field is pre-filled with a Chinese "not found" placeholder so
    the returned dict always carries the full key set even when a
    selector fails on a partial or unusual page.

    Args:
        url: movie detail-page URL from :func:`get_movie_links`.

    Returns:
        dict with keys: movie_name, movie_type, release_year, rating,
        rater_count, director, movie_duration, actors, country, language
        (all values are strings; numeric conversion happens later in
        :func:`save_to_db`).

    Raises:
        Exception: when the page does not answer with HTTP 200.
        requests.exceptions.RequestException: on network failure/timeout.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0"
    }

    print(f"正在请求电影页面: {url}")
    # Fix: add a timeout — requests has no default one, so a stalled
    # connection would otherwise hang the entire crawl.
    response = requests.get(url, headers=headers, timeout=15)
    if response.status_code != 200:
        raise Exception(f"请求失败，状态码：{response.status_code}")
    print(f"成功获取电影页面，状态码: {response.status_code}")

    soup = BeautifulSoup(response.content, 'html.parser')

    # Placeholder defaults keep the result schema stable on partial pages.
    movie_name = "未找到电影名称"
    movie_type = "未找到电影类型"
    release_year = "未找到上映年份"
    rating = "未找到评分"
    rater_count = "未找到评分人数"
    director = "未找到导演"
    movie_duration = "未找到片长"
    actors = "未找到主演"
    country = "未找到制片国家"
    language = "未找到语言"

    try:
        # <h1> holds "<title> <original title>"; collapse whitespace.
        movie_name = soup.find('h1').text.strip().replace('\n', '').replace('  ', ' ')
    except AttributeError:
        print("警告: 未找到电影名称")

    try:
        # Genres are marked with the microformat property v:genre.
        movie_type = "/".join([genre.text for genre in soup.find_all('span', property="v:genre")])
    except AttributeError:
        print("警告: 未找到电影类型")

    try:
        # The 'content' attribute is an ISO date ("YYYY-MM-DD..."); keep year.
        release_year = soup.find('span', property="v:initialReleaseDate")['content'].split('-')[0]
    except (AttributeError, KeyError, TypeError):
        print("警告: 未找到上映年份")

    try:
        rating = soup.find('strong', property="v:average").text
    except AttributeError:
        print("警告: 未找到评分")

    try:
        rater_count = soup.find('span', property="v:votes").text
    except AttributeError:
        print("警告: 未找到评分人数")

    try:
        directors = [a.text for a in soup.find_all('a', rel="v:directedBy")]
        director = "/".join(directors)
    except AttributeError:
        print("警告: 未找到导演")

    # Extract the runtime, keeping only the leading digits ("142分钟" -> "142").
    try:
        runtime_span = soup.find('span', property="v:runtime")
        if runtime_span:
            duration_text = runtime_span.text
            movie_duration = re.search(r'\d+', duration_text).group()
        else:
            # Fallback: scan the info labels for a "片长" entry.
            for span in soup.select('div#info span.pl'):
                if '片长' in span.text or '片長' in span.text:
                    duration_text = span.next_sibling.strip()
                    movie_duration = re.search(r'\d+', duration_text).group()
                    break
    except (AttributeError, TypeError):
        print("警告: 未找到片长")

    # Cast: the "主演" label is followed by a <span class="attrs"> of links.
    # NOTE(review): bs4's string= match requires the label span to contain
    # exactly the text '主演' with no child tags — confirm against live pages.
    try:
        actors_span = soup.find('span', string='主演')
        if actors_span:
            actors_container = actors_span.find_next_sibling('span', class_='attrs')
            if actors_container:
                actors = "/".join([a.text for a in actors_container.find_all('a')])
    except Exception as e:
        print(f"警告: 提取主演时出错 - {str(e)}")

    # Country/region: the value is plain text after the label span.
    try:
        country_span = soup.find('span', string=re.compile(r'制片国家/地区'))
        if country_span:
            country = country_span.next_sibling.strip()
        else:
            # Fallback with a looser label pattern.
            country_span = soup.find('span', string=re.compile(r'制片国家|地区'))
            if country_span:
                country = country_span.next_sibling.strip()
    except Exception as e:
        print(f"警告: 提取制片国家时出错 - {str(e)}")

    # Language: same label-then-text layout as country.
    try:
        language_span = soup.find('span', string=re.compile(r'语言'))
        if language_span:
            language = language_span.next_sibling.strip()
    except Exception as e:
        print(f"警告: 提取语言时出错 - {str(e)}")

    # Keep only the first entry of each slash-separated list
    # (lead actor, primary country, primary language).
    actors = actors.split('/')[0].strip()
    country = country.split('/')[0].strip()
    language = language.split('/')[0].strip()

    print(f"成功提取电影信息: {movie_name}")
    return {
        "movie_name": movie_name,
        "movie_type": movie_type,
        "release_year": release_year,
        "rating": rating,
        "rater_count": rater_count,
        "director": director,
        "movie_duration": movie_duration,
        "actors": actors,
        "country": country,
        "language": language
    }


def save_to_csv(data, filename="movie_info.csv"):
    """Write the scraped movie dicts to a UTF-8 CSV file with a header row.

    Args:
        data: list of dicts whose keys match the header fields produced
            by get_movie_info().
        filename: output path; the file is overwritten if it exists.
    """
    # Column order for the CSV header.
    headers = ["movie_name", "movie_type", "release_year", "rating", "rater_count",
               "director", "movie_duration", "actors", "country", "language"]

    # Fix: these progress messages printed the literal text "(unknown)"
    # instead of interpolating the target filename.
    print(f"准备将 {len(data)} 条电影数据保存到 {filename}")
    with open(filename, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=headers)
        writer.writeheader()
        writer.writerows(data)

    print(f"数据已成功保存至 {filename}")


def init_db():
    """(Re)create the `douban_plat` database and its `movies` table.

    WARNING: drops any existing `douban_plat` database first, so all
    previously stored rows are lost on every run.

    Raises:
        OperationalError: when the MySQL server cannot be reached
            (logged, then re-raised).
    """
    print("开始初始化数据库...")
    conn = None
    try:
        # Connect to the MySQL server without selecting a database,
        # since the database is created below.
        conn = pymysql.connect(
            host='localhost',
            user='root',
            password='123456',
            port=3306,
            charset='utf8mb4'
        )
        cursor = conn.cursor()

        # Drop and recreate to guarantee a clean schema.
        cursor.execute("DROP DATABASE IF EXISTS douban_plat")
        cursor.execute("CREATE DATABASE douban_plat CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
        print("数据库 'douban_plat' 创建成功")

        cursor.execute("USE douban_plat")

        # Schema mirrors the dict produced by get_movie_info(); numeric
        # columns are populated by save_to_db()'s conversions.
        create_table_sql = """
        CREATE TABLE movies (
            id INT PRIMARY KEY AUTO_INCREMENT,
            movie_name VARCHAR(255) NOT NULL,
            movie_type VARCHAR(255),
            release_year VARCHAR(10),
            rating DECIMAL(3,1),
            rater_count INT,
            director VARCHAR(100),
            movie_duration INT,  
            actors VARCHAR(500),  
            country VARCHAR(100), 
            language VARCHAR(100) 
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
        """
        cursor.execute(create_table_sql)
        print("表 'movies' 创建成功")
        print("数据库初始化完成")

    except OperationalError as e:
        print(f"数据库连接错误: {e}")
        raise
    finally:
        # Fix: the connection was previously leaked when any statement
        # after connect() raised; always close it.
        if conn is not None:
            conn.close()



def save_to_db(movie_data):
    """Insert the scraped movie records into MySQL `douban_plat.movies`.

    Numeric-looking string fields (rating, rater_count, movie_duration)
    are converted to numbers before insertion; unparseable values (the
    Chinese "not found" placeholders) are stored as NULL. On any error
    the transaction is rolled back; errors are logged, not re-raised.

    Args:
        movie_data: list of dicts as returned by get_movie_info().
    """
    # Fix: initialise conn before the try block — the old code raised
    # NameError in except/finally when pymysql.connect() itself failed,
    # masking the real connection error.
    conn = None
    try:
        conn = pymysql.connect(
            host='localhost',
            user='root',
            password='123456',
            port=3306,
            database='douban_plat',
            charset='utf8mb4'
        )
        cursor = conn.cursor()

        # Parameterised insert — never build SQL from scraped strings.
        insert_sql = """
        INSERT INTO movies (movie_name, movie_type, release_year, rating, rater_count, 
                            director, movie_duration, actors, country, language)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """

        for movie in movie_data:
            # Rating: "9.7" -> 9.7; anything non-numeric -> NULL.
            rating = float(movie['rating']) if movie['rating'].replace('.', '', 1).isdigit() else None

            # Fix: strip ALL thousands separators before the digit check.
            # The old check removed only the first comma, so counts like
            # "1,234,567" failed isdigit() and were silently stored NULL.
            raw_count = movie['rater_count'].replace(',', '')
            rater_count = int(raw_count) if raw_count.isdigit() else None

            # Runtime minutes as an integer, or NULL.
            duration = int(movie['movie_duration']) if movie['movie_duration'].isdigit() else None

            cursor.execute(insert_sql, (
                movie['movie_name'],
                movie['movie_type'],
                movie['release_year'],
                rating,
                rater_count,
                movie['director'],
                duration,
                movie['actors'],
                movie['country'],
                movie['language']
            ))

        conn.commit()
        print(f"成功将 {len(movie_data)} 条数据保存到数据库")

    except Exception as e:
        print(f"数据库保存错误: {e}")
        if conn is not None:
            conn.rollback()
    finally:
        if conn is not None:
            conn.close()


def main():
    """Entry point: initialise storage, crawl Douban Top250, persist results."""
    print("开始爬取豆瓣电影Top250数据...")

    # Step 1: recreate the MySQL database and table.
    print("\n第一步: 初始化数据库")
    init_db()

    # Step 2: gather the 250 detail-page URLs.
    print("\n第二步: 获取所有电影链接")
    links = get_movie_links()

    # Step 3: scrape each detail page; a failure skips that movie only.
    print("\n第三步: 抓取每部电影的详细信息")
    collected = []
    total = len(links)
    for index, movie_url in enumerate(links, start=1):
        print(f"\n正在处理第 {index}/{total} 部电影")
        try:
            collected.append(get_movie_info(movie_url))
        except Exception as e:
            print(f"处理失败: {e}")
            print(f"跳过当前电影，继续处理下一部...")
        else:
            # Random 1-3 s pause between detail requests.
            pause = random.uniform(1, 3)
            print(f"电影信息提取完成，等待 {pause:.2f} 秒后继续...")
            time.sleep(pause)

    # Step 4: CSV export.
    print("\n第四步: 保存数据到CSV文件")
    save_to_csv(collected)

    # Step 5: database export.
    print("\n第五步: 保存数据到MySQL数据库")
    save_to_db(collected)

    print("\n数据爬取任务全部完成!")


if __name__ == "__main__":
    main()