import requests
from lxml import etree
import time
import pymysql

# Browser-like request headers sent with every HTTP request.
# The user-agent makes the scraper look like a desktop Chrome browser;
# the cookie carries a logged-in Douban session (dbcl2/ck) — without it
# Douban may rate-limit or block the requests.
# NOTE(review): the cookie is session-bound and will expire; refresh it
# from a real browser session when requests start failing.
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
    "cookie": 'bid=j_95V11oMkE; douban-fav-remind=1; ll="118183"; __utmc=30149280; __utmz=30149280.1733904538.2.2.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmc=223695111; __utmz=223695111.1733904542.1.1.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; _pk_id.100001.4cf6=b5f2d88df7a0b91b.1733904542.; _vwo_uuid_v2=D6197BD9AFFD6A6026FEA7A509970F9D2|0cd57782d676e09b52b9ed8c38a3f1fb; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1733966104%2C%22https%3A%2F%2Fcn.bing.com%2F%22%5D; _pk_ses.100001.4cf6=1; __utma=30149280.126929866.1724983487.1733904538.1733966104.3; __utmb=30149280.0.10.1733966104; __utma=223695111.1754835362.1733904542.1733904542.1733966104.2; __utmb=223695111.0.10.1733966104; ap_v=0,6.0; dbcl2="267574031:BcVxW5/MvRw"; ck=2duE; push_noty_num=0; push_doumail_num=0'
}

def _first(nodes, default=""):
    """Return the first xpath match, or `default` when nothing matched.

    Guards against IndexError when Douban serves a blocked/empty page.
    """
    return nodes[0] if nodes else default


# Scrape the Douban Top250 listing (10 pages x 25 movies), fetch each
# movie's detail page, and upsert the parsed fields into the `movies`
# MySQL table. Movies whose id is already stored are skipped.
#
# Open the database connection ONCE for the whole run — the original
# reconnected on every page, wasting round-trips and connection slots.
with pymysql.connect(host="master", port=3306, database="bigdata31",
                     user="root", password="123456") as con:
    # The cursor is a resource too — close it deterministically.
    with con.cursor() as cursor:
        for page in range(1, 11):
            print(f"正在爬取第{page}页")
            start = 25 * (page - 1)
            # Fetch the listing page. `timeout` prevents the script from
            # hanging forever on a stalled connection.
            response = requests.get(
                url=f"https://movie.douban.com/top250?start={start}&filter=",
                headers=headers,
                timeout=10,
            )
            print(response.status_code)
            # Fail fast on 4xx/5xx (e.g. anti-bot block) instead of
            # silently parsing an error page.
            response.raise_for_status()

            # Parse the listing page and locate every movie card.
            html = etree.HTML(response.text)
            items = html.xpath("//div[@class='item']")

            for item in items:
                # Detail-page URL, e.g. https://movie.douban.com/subject/1292052/
                href = item.xpath("div[@class='info']/div[1]/a/@href")[0]
                # Numeric subject id is the second-to-last path segment.
                # (Renamed from `id` — don't shadow the builtin.)
                movie_id = href.split("/")[-2]

                # Skip movies already present in the database.
                # execute() returns the number of matched rows; params are
                # passed as a tuple per the DB-API convention.
                if cursor.execute("select id from movies where id=%s", (movie_id,)):
                    print(f"{movie_id}:在数据库中已存在")
                    continue
                print(f"正在爬取:{movie_id}")

                # Fetch and parse the movie detail page.
                content = requests.get(url=href, headers=headers, timeout=10)
                content.raise_for_status()
                content_html = etree.HTML(content.text)

                # Movie title
                name = _first(content_html.xpath("//span[@property='v:itemreviewed']/text()"))
                # Director
                director = _first(content_html.xpath("//a[@rel='v:directedBy']/text()"))
                # Cast, genres — multi-valued fields joined with '|'
                starring = "|".join(content_html.xpath("//a[@rel='v:starring']/text()"))
                genre = "|".join(content_html.xpath("//span[@property='v:genre']/text()"))

                # Free-text lines of the info panel: strip whitespace and
                # normalize the '/' separators, then drop empty/1-char noise.
                info = content_html.xpath("//div[@id='info']/text()")
                info = [line.strip().replace(" ", "").replace("/", "|") for line in info]
                info = [line for line in info if len(line) > 1]
                # Country/region and language are positional in the panel;
                # guard against a short parse instead of raising IndexError.
                country = info[0] if len(info) > 0 else ""
                language = info[1] if len(info) > 1 else ""

                # Release dates, runtime, rating, vote count
                rel_date = "|".join(content_html.xpath("//span[@property='v:initialReleaseDate']/text()"))
                runtime = "|".join(content_html.xpath("//span[@property='v:runtime']/text()"))
                score = "|".join(content_html.xpath("//strong[@property='v:average']/text()"))
                votes = "|".join(content_html.xpath("//span[@property='v:votes']/text()"))

                # Upsert the row (REPLACE keeps re-runs idempotent).
                cursor.execute(
                    """
                    replace into movies values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
                    """,
                    [movie_id,
                     name,
                     director,
                     starring,
                     genre,
                     country,
                     language,
                     rel_date,
                     runtime,
                     score,
                     votes],
                )
                con.commit()
                # Be polite: pause before the next detail-page request.
                time.sleep(1)