#!/usr/bin/python3
# -*- coding: utf-8 -*-

import json
import random

import httpx
import time
import os


pic_save_path = "D:\\image\\bilibili\\"


def save_image(url):
    """Download one image into ``pic_save_path``, skipping files that already exist.

    Args:
        url: direct image URL; everything after the last '/' becomes the filename.
    """
    # Filename is the URL tail after the last slash.
    img = url[url.rfind('/') + 1:]
    # exist_ok=True creates missing parents and is race-free, unlike the
    # isdir()+mkdir() pair, which raises if another process creates the
    # directory first or if the parent directory does not exist yet.
    os.makedirs(pic_save_path, exist_ok=True)

    file = pic_save_path + img
    if not os.path.isfile(file):
        print('file= ', file)
        try:
            # Use the client as a context manager so its connection pool is
            # closed after the download (the original leaked a client per call).
            # NOTE(review): verify=False disables TLS certificate checking.
            with httpx.Client(http2=True, verify=False) as client:
                r = client.get(url)
            with open(file, 'wb') as f:
                f.write(r.content)
            # Small random delay to avoid hammering the server.
            time.sleep(random.randint(1, 2))
        except Exception as e:
            print("请求图片超时 ", e)
            time.sleep(5)


def get_items(content):
    """Walk the photo-list API response and download every listed image.

    Args:
        content: raw response body (bytes) of the list endpoint; expected to
            be UTF-8 JSON with data.items[].item.pictures[].img_src entries.
    """
    payload = json.loads(content.decode('utf-8'))
    for entry in payload['data']['items']:
        for pic in entry['item']['pictures']:
            save_image(pic['img_src'])


class BilibiliSpider:
    """Spider for Bilibili's link_draw photo-list API.

    Iterates page numbers ``page`` (inclusive) up to ``max_page`` (exclusive),
    fetching each page's JSON and handing it to ``get_items`` for download.
    """

    def __init__(self, page, max_page):
        # First page to fetch (inclusive) and stop page (exclusive).
        self.page = page
        self.max_page = max_page
        # {} is filled with the page number in get_url_list().
        self.base_url = ("https://api.vc.bilibili.com/link_draw/v2/Photo/list"
                         "?category=sifu&type=new&page_num={}&page_size=20")
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/70.0.3538.110 Safari/537.36 "
        }

    def run(self):
        """Crawl every configured page: build URLs, fetch, extract, throttle."""
        # 1. Build the list of page URLs.
        url_list = self.get_url_list()
        for url in url_list:
            print('url= ', url)
            # 2. Fetch the page's JSON body.
            content = self.get_content(url)
            # 3. Extract image URLs and download them.
            get_items(content)
            # Random pause between pages to stay polite.
            time.sleep(random.randint(3, 5))

    def get_url_list(self):
        """Return one formatted API URL per page in [page, max_page)."""
        return [self.base_url.format(pn)
                for pn in range(self.page, self.max_page)]

    def get_content(self, url):
        """GET *url* and return the raw response body (bytes).

        The client is used as a context manager so its connection pool is
        closed after the request — the original created a client per call
        and never closed it, leaking connections.
        NOTE(review): verify=False disables TLS certificate checking.
        """
        with httpx.Client(http2=True, verify=False) as client:
            response = client.get(url=url, headers=self.headers)
        return response.content


if __name__ == '__main__':
    # Crawl pages 0 through 9 of the photo-list API.
    BilibiliSpider(0, 10).run()

