# 定位到 2023新片精品
# 获取到精品栏中所有电影的子页面的链接
# 获取电影名以及下载链接
import csv
import re
from urllib.parse import urljoin

import requests

# Entry URL: the dytt8 index page whose "2023新片精品" section we scrape.
domain = "http://dytt.dytt8.net/index.htm"

# Browser-like User-Agent so the site does not reject the request.
head = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
}

# Fetch the index page. verify=False skips TLS certificate verification
# (the site's certificate is broken); pass headers=head here too so the
# index request matches the child-page requests below.
resp = requests.get(domain, verify=False, headers=head)
# Use 'gbk' (a superset of 'gb2312') — it matches the child-page decoding
# below and avoids mis-decoding characters that fall outside gb2312.
resp.encoding = 'gbk'
# Decoded HTML of the index page.
content = resp.text

# Compiled patterns (re.S lets '.' span newlines):
#   obj1      — isolates the <ul> block following the "2023新片精品" heading
#   obj2      — pulls each movie's href out of that <ul>
#   child_obj — extracts the film title and download link from a child page
obj1 = re.compile(r'2023新片精品.*?<ul>(?P<ul>.*?)</ul>', re.S)
obj2 = re.compile(r"<a href='(?P<href>.*?)'>", re.S)
child_obj = re.compile(r'◎片　　名(?P<name>.*?)<br />.*?'
                       r'target="_blank" href="(?P<link>.*?)">', re.S)

# Build the list of absolute URLs for every movie's detail page found in
# the featured section of the index page.
reslut1 = obj1.finditer(content)
child_href_list = []

for it in reslut1:
    ul = it.group("ul")

    # Pull each movie link out of the <ul> markup.
    for itt in obj2.finditer(ul):
        href = itt.group("href")
        # Skip the app-download promo links that also appear in the list.
        if '/app' not in href:
            # urljoin resolves the (possibly relative) href against the
            # index URL. This replaces the fragile domain[0:-10] slice,
            # which assumed the URL ends with exactly "/index.htm" and
            # produced broken URLs for hrefs without a leading slash.
            child_href_list.append(urljoin(domain, href))

# Write one "name, download link" row per movie to a CSV file.
# newline='' is required by the csv module — without it every row is
# followed by a blank line on Windows. The with-block guarantees the
# file is closed even if a request raises.
with open('movie_download.csv', mode='w', encoding='utf-8', newline='') as f:
    csv_writer = csv.writer(f)

    # Visit each detail page and extract its title and download link.
    for url in child_href_list:
        child_resp = requests.get(url, verify=False, headers=head)
        child_resp.encoding = "gbk"  # detail pages are gbk-encoded
        child_content = child_resp.text
        # Release the connection promptly — previously the responses
        # were never closed, leaking one connection per iteration.
        child_resp.close()

        # Each match yields {'name': ..., 'link': ...}; write the values
        # in declaration order (name first, then link).
        for child_it in child_obj.finditer(child_content):
            csv_writer.writerow(child_it.groupdict().values())

resp.close()
print("over")

