import re
import requests

# Base URL of the movie-index site (pages are served GBK-encoded).
url = "https://www.dy2018.com/"

# Browser-like User-Agent; the site rejects the default requests UA.
headers = {

    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/119.0.0.0 Safari/537.36"
}

# Pattern 1: extract the <ul> HTML of the "2023 must-watch hits" section.
# re.S makes '.' match newlines so the pattern can span multiple lines.
obj1 = re.compile(r"2023必看热片.*?<ul>(?P<html>.*?)</ul>", re.S)
# Pattern 2: pull each child-page path from the <li><a href='...'> entries.
obj2 = re.compile(r"<li><a href='(?P<href>.*?)' title=")
# Pattern 3: on a child page, capture the movie title and download link.
obj3 = re.compile(r'<div id="Zoom">.*?◎片　　名　(?P<movie>.*?)<br />.*?'
                  r'<td style="WORD-WRAP: break-word" bgcolor="#fdfddf"><a href="'
                  r'(?P<download>.*?)">', re.S)

with open("../movie2023.csv", mode="w", encoding="utf-8") as f:
    # timeout so a stalled connection cannot hang the script forever
    resp = requests.get(url, headers=headers, timeout=10)
    resp.encoding = "gbk"

    # Only one such section exists on the page, so search() (not finditer).
    res1 = obj1.search(resp.text)
    if res1 is None:
        # Fail loudly instead of an AttributeError on None below.
        raise SystemExit("could not locate the 2023 section on the index page")
    html = res1.group("html")

    # Many <li> entries to extract, so finditer.
    for item in obj2.finditer(html):
        # item.group("href") looks like /i/109087.html; drop the base URL's
        # trailing '/' before joining to avoid a double slash.
        child_url = url.rstrip("/") + item.group("href")
        # BUG FIX: send the same browser headers on child requests too
        # (they previously went out with the default requests User-Agent).
        child_resp = requests.get(child_url, headers=headers, timeout=10)
        child_resp.encoding = "gbk"
        res3 = obj3.search(child_resp.text)
        if res3 is None:
            # Some child pages use a different layout; skip instead of crashing.
            continue
        movie = res3.group("movie")
        download = res3.group("download")
        print(movie, download)
        # BUG FIX: this write was commented out, so the CSV stayed empty
        # while the final message claimed the data had been saved.
        f.write(f"{movie},{download}\n")

print("电影天堂2023必看热片数据提取到movie2023.csv成功")
