# Crawler for dytt8.net movie listings
# 2021/9/7
# Purpose: fetch the homepage "2021 new releases" table, follow each movie's
# detail page, and print its translated name, title and download link.
import requests
import re

# Base URL of the movie site; relative child-page links are joined onto it below.
domain = "https://www.dytt8.net/"
resp = requests.get(domain) # verify=False would skip TLS verification (removed here)
# The site serves GB2312-encoded pages; decode accordingly.
resp.encoding = 'gb2312'

# obj1: grab the inner HTML of the table that follows the "2021 new releases" heading.
obj1 = re.compile(r"2021新片精品.*?<table width=\"100%\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\">(?P<url>.*?)</table>",re.S)
# obj2: extract each movie's relative detail-page href from a listing row.
obj2 = re.compile(r"最新电影下载</a>]<a href='(?P<href>.*?)'>",re.S)
# obj3: on a detail page, capture translated name, original title, and the
# first download link. NOTE: the pattern contains full-width ideographic
# spaces (U+3000) that must match the page markup exactly.
obj3 = re.compile(r"◎译　　名　(?P<yname>.*?)<br />◎片　　名　(?P<name>.*?)<br />.*?<br /><a"
                  r" target=\"_blank\" href=\"(?P<download>.*?)\">",re.S)

# Locate the "2021 new releases" table section(s) on the home page.
result1 = obj1.finditer(resp.text)

child_href_list = []
for it in result1:
    table_html = it.group("url")
    # FIX: search the table HTML just extracted, not the whole page again
    # (the original re-ran obj2 over resp.text, discarding obj1's narrowing),
    # and do not rebind the iterator that the outer loop is consuming
    # (the original reassigned result1 inside its own loop).
    for itt in obj2.finditer(table_html):
        # Join domain + relative child path; strip '/' so that the base URL's
        # trailing slash and the href's leading slash don't double up.
        child_href = domain + itt.group("href").strip("/")
        child_href_list.append(child_href)  # keep every detail-page URL

# Visit each detail page and print the movie's names and download link.
for href in child_href_list:
    print(href)
    child_resp = requests.get(href)
    child_resp.encoding = 'gb2312'  # detail pages are GB2312-encoded too
    # FIX: the original ran this identical search twice in a row.
    match = obj3.search(child_resp.text)
    if match is None:
        # Some detail pages don't follow the expected layout; skip them
        # instead of crashing with AttributeError on a None match.
        continue
    print('list地址：', href)
    print('译名：', match.group("yname"))
    print('片名：', match.group("name"))
    print(match.group("download"))

    # break  # uncomment to stop after the first page while testing