import requests
import logging
import re
from urllib.parse import urljoin

# Logging setup: emit "timestamp - LEVEL: message" lines at INFO and above
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s: %(message)s')

# Root URL of the site being scraped
BASE_URL = 'https://ssr1.scrape.center'
# Number of index (listing) pages to crawl
TOTAL_PAGE = 10

"""  
遍历页码构造 10 页的索引页 URL。
从每个索引页分析提取出每个电影的详情页 URL。
"""


def request_scrape(url):
    """
    Fetch a page and return its HTML source.

    :param url: absolute URL to request
    :return: the response body as text on HTTP 200, otherwise None
             (non-200 statuses and request errors are logged, not raised)
    """
    logging.info('开始请求获取html源码,url: %s', url)
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
        }
        # timeout keeps the crawl from hanging forever on a dead/slow host;
        # a timeout raises requests.RequestException and is handled below
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        logging.error('请求状态码错误, code: %s', response.status_code)
    except requests.RequestException:
        # logging.exception records the traceback (same as exc_info=True)
        logging.exception('请求报错，url: %s', url)
    # explicit: every failure path hands back None
    return None


def scrape_index(page):
    """
    Build the index-page URL for a given page number.

    :param page: 1-based page number
    :return: absolute URL of that index page
    """
    # urljoin resolves the absolute path /page/<n> against the site root
    return urljoin(BASE_URL, f'/page/{page}')


def parse_index_get_detailurl(html):
    """
    Parse one index page's HTML and yield each movie's detail-page URL.

    :param html: HTML source of a single index page
    :return: generator of absolute detail-page URLs (empty when no match)
    """
    # Target anchors like: <a data-v-7f856186="" href="/detail/1" class="name">
    # Raw string for the regex; re.S lets .*? cross line breaks in case the
    # tag's attributes wrap onto several lines in the served HTML.
    pattern = re.compile(r'<a.*?href="(.*?)".*?class="name">', re.S)
    # No empty-check needed: iterating zero matches simply yields nothing.
    # (The old `return []` in a generator discarded the list anyway.)
    for href in pattern.findall(html):
        # hrefs are site-relative (e.g. /detail/1); resolve against BASE_URL
        yield urljoin(BASE_URL, href)


def run():
    """
    Crawl every index page and log the detail-page URLs found on each.

    :return: None (results are only logged)
    """
    for page in range(1, TOTAL_PAGE + 1):
        # build the index-page URL, then download its HTML
        url = scrape_index(page)
        html = request_scrape(url)
        if html is None:
            # request failed (already logged inside request_scrape);
            # passing None to the parser would crash re.findall — skip page
            continue
        # extract every detail-page URL on this index page
        detail_urls = parse_index_get_detailurl(html)
        logging.info("详情页url %s", list(detail_urls))




if __name__ == '__main__':
    # Ad-hoc single-page smoke test (kept for manual debugging):
    # url = scrape_index(1)
    # html = request_scrape(url)
    # datail_urls = parse_index_get_detailurl(html)
    # print(list(datail_urls))
    run()
