#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
Scraper practice site: https://scrape.center
'''
import os

import requests
from bs4 import BeautifulSoup


def get_img_url(url_main, page_num, out_dir="img"):
    """Download every movie's cover image from pages 1..page_num of the site.

    Args:
        url_main: Base site URL, e.g. "https://ssr1.scrape.center".
        page_num: Number of listing pages to crawl (pages are 1-indexed).
        out_dir: Directory the .jpg files are written into; created if missing.
            Defaults to "img" for backward compatibility.

    Raises:
        requests.RequestException: on network failure or non-2xx HTTP status.
    """
    import os  # local import keeps the function self-contained

    # The original code crashed with FileNotFoundError if img/ did not exist.
    os.makedirs(out_dir, exist_ok=True)

    for page in range(1, page_num + 1):
        # page already runs 1..page_num; the old enumerate() counter was redundant.
        print(f"第{page}页")
        url = f"{url_main}/page/{page}"
        rep = requests.get(url, timeout=10)  # timeout: never hang forever
        rep.raise_for_status()  # fail loudly on HTTP errors instead of parsing junk
        rep.encoding = rep.apparent_encoding
        soup = BeautifulSoup(rep.text, "html.parser")

        movies = soup.select("div[class='el-card__body']")  # one card per movie
        for movie in movies:
            name = movie.select_one("h2[class='m-b-sm']").text.strip()
            covers = movie.select("img[class='cover']")  # all cover images
            for idx, img in enumerate(covers):
                img_url = img.get("src")
                if not img_url:
                    continue  # skip <img> tags without a src attribute
                img_response = requests.get(img_url, timeout=10)
                img_response.raise_for_status()
                # Suffix prevents multiple covers of one movie from
                # overwriting each other; first cover keeps the old name.
                suffix = f"_{idx}" if idx else ""
                path = os.path.join(out_dir, f"{page}_{name}{suffix}.jpg")
                with open(path, "wb") as f:
                    f.write(img_response.content)


if __name__ == "__main__":
    # Crawl the first 12 listing pages of the demo scraping site.
    # The guard lets this module be imported without triggering the crawl.
    get_img_url("https://ssr1.scrape.center", 12)