# 定位主页面
# 从主页面提取到子页面的链接
# 再从子页面的链接中拿到想要的下载地址

import requests
import re

# Base URL of the index page; child-page links found there are joined onto it.
domain = "https://www.dytt89.com/"

# Present a real-browser User-Agent so the site serves the normal page.
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
}

# NOTE(review): verify=False disables TLS certificate validation — presumably
# because the site's certificate chain does not validate; confirm and drop if
# possible.  timeout added so a stalled server cannot hang the script forever.
resp = requests.get(domain, verify=False, headers=headers, timeout=10)
# GBK is a backward-compatible superset of GB2312: it decodes every GB2312
# byte sequence identically and additionally handles the rarer Hanzi (common
# in movie titles) that plain gb2312 would turn into replacement characters.
resp.encoding = 'gbk'
# print(resp.text)
# print(resp.text)

# Patterns are compiled once at module level and reused across all pages.
# obj1: isolate the <ul> block that follows the "2024必看热片" section header.
obj1 = re.compile(r"2024必看热片.*?<ul>(?P<ul>.*?)</ul>", re.S)
# obj2: pull each child-page href out of that <ul> (single-quoted attributes).
obj2 = re.compile(r"<a href='(?P<href>.*?)'", re.S)
# obj3: on a child page, capture the movie name from the 《...》 brackets in
# <title>, and the download link from the specifically-styled <td>.
obj3 = re.compile(
    r'<title>.*?《(?P<movieName>.*?)》.*?'
    r'<td style="WORD-WRAP: break-word" bgcolor="#fdfddf"><a href="(?P<download>.*?)">',
    re.S,
)

# Collect the absolute URL of every per-movie detail page linked from the
# "must-watch" <ul> section(s) of the index page.
child_href_list = []

for section in obj1.finditer(resp.text):
    ul_html = section.group("ul").strip()  # trim surrounding blank lines
    # print(ul_html)
    # Each <a href='...'> inside the <ul> is one detail page; strip the
    # slashes from the href so joining with `domain` (which ends in "/")
    # does not double them up.
    child_href_list.extend(
        domain + link.group("href").strip("/")
        for link in obj2.finditer(ul_html)
    )

# 提取子页面内容
# Visit each child page and print the movie name plus its download link.
for href in child_href_list:
    # NOTE(review): verify=False kept to match the index request; timeout
    # added so one dead page cannot hang the whole run.
    child_resp = requests.get(href, verify=False, headers=headers, timeout=10)
    # GBK supersets gb2312; decodes the occasional extra Hanzi correctly.
    child_resp.encoding = 'gbk'
    # print(child_resp.text)
    try:
        result3 = obj3.search(child_resp.text)
        if result3 is None:
            # Page layout differs (or the request was redirected): skip it
            # instead of crashing with AttributeError on None, which is what
            # the original did on any non-matching page.
            continue
        print(result3.group("movieName"))
        print(result3.group("download"))
    finally:
        # Always release the connection — previously a mid-body exception
        # skipped close() and leaked it.
        child_resp.close()

resp.close()
