# -*- coding: utf-8 -*-
"""
Created on Sat Mar 27 12:26:41 2021

@author: Apple
"""
import requests
from bs4 import BeautifulSoup
import re
from lxml import etree

# url = 'https://www.goodreads.cc/sci-fi/lastupdate_1.html'
# headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36'}
# data = requests.get(url,headers=headers).text
# f = etree.HTML(data)
# books = f.xpath('//*[@id="diyax3"]/article/header/h2/a/text()') # text() extracts the text content of the node
# # for title in book:
# #     print('title:',title.text)
# authors = f.xpath('//*[@id="diyax3"]/article/p[1]/a/text()')
# notes = f.xpath('//*[@id="diyax3"]/article/p[3]/text()')
# for i in range(20):
#     print ("{}--->{}".format(books[i],authors[i]))
#     print(notes[i])
# soup = BeautifulSoup(html,'lxml')
# # print(soup)
# pattern = soup.find_all('a')
# for item in pattern:
#     print(item.text)

def crow(fitiontype: str, page: int):
    """Scrape one listing page of goodreads.cc for the given fiction type.

    For every book found on the page, prints a running number
    (continuing across pages at 20 items per page), the
    "title--->author" pair, and the book's note/description line.

    :param fitiontype: category slug used in the URL, e.g. 'horror' or 'sci-fi'
    :param page: 1-based page number of the category listing
    :raises requests.HTTPError: if the server returns an error status
    """
    # NOTE(review): the working sample URL in the comment block above is
    # '.../lastupdate_1.html'; the original code omitted the underscore,
    # producing e.g. 'lastupdate2.html'. Fixed here — confirm against the site.
    url = 'https://www.goodreads.cc/' + fitiontype + '/lastupdate_' + str(page) + '.html'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36'}
    # Bounded timeout so a stalled server cannot hang the script forever;
    # fail loudly on HTTP errors instead of silently parsing an error page.
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    f = etree.HTML(response.text)
    books = f.xpath('//*[@id="diyax3"]/article/header/h2/a/text()')
    authors = f.xpath('//*[@id="diyax3"]/article/p[1]/a/text()')
    notes = f.xpath('//*[@id="diyax3"]/article/p[3]/text()')
    # zip() stops at the shortest list, so a page with fewer than 20 entries
    # no longer raises IndexError (the original hard-coded range(20)).
    for j, (book, author, note) in enumerate(zip(books, authors, notes)):
        print("No:" + str((page - 1) * 20 + j + 1))
        print("{}--->{}".format(book, author))
        print(note)
if __name__ == '__main__':
    # Guard the scrape behind a main-check so importing this module
    # (e.g. to reuse crow()) does not fire a network request.
    crow('horror', 2)