'''
Python crawler example.
Test target site:
https://spa5.scrape.center/

1. Crawl the book data for the whole site and save it to a database.
2. TODO: second goal not yet specified.
'''
from ast import parse
import requests
from pathlib import Path
import pymysql
from parsel import Selector

url = 'https://spa5.scrape.center/'

# API endpoint template, e.g.:
# https://spa5.scrape.center/api/book/?limit=18&offset=0
page_url = 'https://spa5.scrape.center/api/book/?limit={limit}&offset={offset}'

# NOTE(review): unused below — kept only for backward compatibility.
req_page_url = page_url.format(limit=18, offset=0)

# Books returned per API page.
LIMIT = 18
def get_req_page_url(page, limit=LIMIT):
    """Build the paginated API URL for a 1-based page number.

    Args:
        page: 1-based page index.
        limit: Page size; new keyword with a default of ``LIMIT`` so
            existing single-argument calls behave exactly as before.

    Returns:
        The fully formatted API URL string.
    """
    return page_url.format(limit=limit, offset=(page - 1) * limit)

# Fetch a URL and return its decoded JSON body.
def get_url(url, timeout=10):
    """Issue a GET request and return the parsed JSON payload.

    Args:
        url: Absolute URL to request.
        timeout: Seconds to wait for the server; new keyword with a
            default, so existing single-argument calls still work.
            (The original call had no timeout and could hang forever.)

    Returns:
        The decoded JSON response as a Python object.

    Raises:
        requests.HTTPError: if the server answers with a 4xx/5xx status.
    """
    print("request_url", url)
    response = requests.get(url, timeout=timeout)
    # Fail loudly on HTTP errors instead of crashing later inside .json().
    response.raise_for_status()
    return response.json()

# Parse book info from raw HTML — unused now that the JSON API supplies the data.
def parse_idx(html):
    """Print the raw HTML, then the count of book-card <div> elements in it."""
    print(html)
    page = Selector(text=html)
    cards = page.xpath("//div[@class='el-col el-col-24']")
    print(len(cards))

def get_data(json):
    """Flatten an API response into a list of book records.

    Args:
        json: Decoded API response dict containing a 'results' list whose
            items carry 'name' and 'authors' keys. (NOTE(review): the
            parameter name would shadow the stdlib ``json`` module if it
            were imported here — kept for call compatibility.)

    Returns:
        A list of ``{'bookname': ..., 'author': ...}`` dicts, one per result.
    """
    # Comprehension instead of the manual append loop (ruff PERF401).
    return [
        {"bookname": item["name"], "author": item["authors"]}
        for item in json["results"]
    ]

# Crawl page after page, printing each book title, until the API runs dry.
for page in range(1, 1000):
    req_url = get_req_page_url(page)
    response = get_url(req_url)
    book_info_list = get_data(response)
    if not book_info_list:
        # Empty page: the site has no more books — stop instead of
        # needlessly requesting every remaining page up to 999.
        break
    for book in book_info_list:
        print("《", book['bookname'], "》")