# 爬取在线书店http://books.toscrape.com/的所有信息，包括图书的名字、封面图片、价格、评分、库存、产品介绍、所属分类。
import requests
from bs4 import BeautifulSoup
import time

# 爬取的网址
url = 'http://books.toscrape.com/'
books = []


# 获取请求地址的html
def getUrlRequest(url):
    """Fetch *url* and return the parsed BeautifulSoup document.

    Raises requests.HTTPError (via raise_for_status) on a non-2xx response,
    and requests.Timeout if the server does not respond in time.
    """
    # A timeout keeps the crawler from hanging forever on a stalled connection.
    r = requests.get(url, timeout=10)
    r.raise_for_status()
    # The site serves UTF-8; set it explicitly so requests does not fall back
    # to ISO-8859-1 when the server omits the charset header (avoids mojibake).
    r.encoding = 'utf-8'
    bs = BeautifulSoup(r.text, 'html.parser')
    return bs

def completionLink(url):
    """Return *url* prefixed with 'catalogue/' when that segment is missing.

    Links found on the front page already contain 'catalogue/'; links found on
    inner listing pages are relative to the catalogue/ directory and lack it.
    """
    # 'in' is the idiomatic membership test; str.find(...) == -1 is C-style.
    if 'catalogue' not in url:
        url = 'catalogue/' + url
    return url
# Scrape the detail page of a single book.
def getBookDetail(url):
    """Fetch a book's detail page and return its attributes as a dict.

    Keys: name, src (cover image path), star-rating, product_description,
    class (product-type row), not_tax_price, tax_price, stock.
    """
    bs = getUrlRequest(url)
    book = {}
    product_main = bs.find('div', class_='product_main')
    book['name'] = product_main.h1.text  # title
    book['src'] = bs.find(id='product_gallery').div.div.div.img['src']  # cover image
    # The second CSS class of the rating <p> encodes the score (One..Five).
    book['star-rating'] = product_main.find('p', class_='star-rating')['class'][1]
    # Some books on the site have no description at all; guard against the
    # missing anchor so a single such page does not abort the whole crawl.
    description_anchor = bs.find(id='product_description')
    if description_anchor is not None:
        book['product_description'] = description_anchor.next_sibling.next_sibling.text
    else:
        book['product_description'] = ''
    # Parse the product-information table once instead of four times.
    # <table> children alternate text nodes and <tr> tags, hence odd indices.
    table = bs.find('table', class_='table')
    book['class'] = table.contents[3].td.text          # product-type row
    book['not_tax_price'] = table.contents[5].td.text  # price excl. tax
    book['tax_price'] = table.contents[7].td.text      # price incl. tax
    book['stock'] = table.contents[11].td.text         # availability
    return book


# Main driver: walk every catalogue page and scrape each book along the way.
page_no = 1
book_no = 1
next_page_url = url
while next_page_url:
    soup = getUrlRequest(next_page_url)
    print('第%d页' % page_no)
    # Every book on a listing page sits inside an image_container div.
    for container in soup.find_all('div', class_='image_container'):
        print('第%d本书' % book_no)
        detail_url = url + completionLink(container.a['href'])
        books.append(getBookDetail(detail_url))
        book_no += 1
        time.sleep(2)  # be polite to the server between requests
    # Stop once the listing has no "next" button left.
    next_button = soup.find('li', class_='next')
    if next_button is None:
        break
    # Normalise the relative href and glue it onto the site root.
    next_page_url = url + completionLink(next_button.a['href'])
    page_no += 1
print(books)
