"""
1、先定位到主页面元素，拿到想要的子页面链接地址。
2、然后再请求子页面，获取需要的内容。
"""
import csv
import os

import regex
import requests

from utils import os_utils

url = 'https://www.dytt8899.com'

# Compile the patterns once, before any network work.
# Pattern 1: isolate the <ul> that follows the "2025必看热片" heading.
regex_compile = regex.compile(r'2025必看热片.*?<ul>(?P<ul>.*?)</ul>', regex.S)
# Pattern 2: extract each movie's relative detail-page link from that <ul>.
regex_compile_2 = regex.compile(r"<li><a href='(?P<href>.*?)'")
# Pattern 3: on a detail page, capture the film title and the download link.
regex_compile_3 = regex.compile(
    r'片　　名　(?P<name>.*?)<br />.*?<td style="WORD-WRAP: break-word" bgcolor="#fdfddf"><a href="(?P<download>.*?)"',
    regex.S)

with requests.get(url, verify=False) as response:  # verify=False: skip SSL cert validation for this site
    # The site serves legacy Chinese encoding; decode accordingly.
    response.encoding = 'gb2312'

    # Start from a clean output file, then append rows as they are found.
    os_utils.remove('电影天堂.csv')
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open('电影天堂.csv', 'a', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)  # create the writer once, not per row
        for item in regex_compile.finditer(response.text):
            item_group = item.group('ul')
            for item_2 in regex_compile_2.finditer(item_group):
                # Detail-page links are site-relative; prefix the base URL.
                url_child = url + item_2.group('href')
                # Close each child response promptly instead of leaking connections.
                with requests.get(url_child, verify=False) as response_child:
                    response_child.encoding = 'gb2312'
                    result_3 = regex_compile_3.search(response_child.text)
                    if result_3 is None:
                        # Detail page doesn't match the expected layout — skip
                        # instead of crashing on .groupdict() of None.
                        continue
                    # groupdict() preserves pattern order: name, then download link.
                    writer.writerow(result_3.groupdict().values())
