import requests
from lxml import etree
import json

class BookSpdier(object):
    """Scraper for allitebooks.org book listings.

    Crawls the paginated listing pages, extracts per-book metadata
    (title, cover image URL, author, summary) and saves it to book.json.

    NOTE: the class name keeps the original (misspelled) public name so
    existing callers are not broken.
    """

    def __init__(self):
        # Template for the paginated listing URLs; format() fills the page number.
        self.base_url = 'http://www.allitebooks.org/page/{}/'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
        }
        # Accumulates one dict per scraped book across all pages.
        self.data_list = []

    # 1. Build all listing-page URLs.
    def get_url_list(self, page_count=10):
        """Return the listing-page URLs for pages 1..page_count.

        page_count defaults to 10, matching the original hard-coded range,
        so existing callers see identical behavior.
        """
        return [self.base_url.format(i) for i in range(1, page_count + 1)]

    # 2. Send the request.
    def send_requset(self, url):
        """Fetch *url* and return the decoded response body as text.

        (Method name keeps the original typo for backward compatibility.)
        """
        # timeout prevents the scraper from hanging forever on a dead host.
        response = requests.get(url, headers=self.headers, timeout=10)
        print(url)
        return response.content.decode()

    # 3. Parse the data.
    def parse_xpath(self, data):
        """Parse one listing page's HTML and append a dict per book to data_list."""
        parse_data = etree.HTML(data)

        # Each <article> under the main content area is one book entry.
        book_list = parse_data.xpath('//div[@class="main-content-inner clearfix"]/article')

        for book in book_list:
            # Join the returned text/attribute nodes and strip surrounding
            # whitespace instead of storing raw node lists.
            book_dict = {
                'book_name': ''.join(book.xpath('.//h2[@class="entry-title"]//text()')).strip(),
                'book_img': ''.join(book.xpath('.//div[@class="entry-thumbnail hover-thumb"]/a/img/@src')),
                'book_author': ''.join(book.xpath('.//h5[@class="entry-author"]/a/text()')).strip(),
                'book_info': ''.join(book.xpath('.//div[@class="entry-summary"]/p/text()')).strip(),
            }
            self.data_list.append(book_dict)

    # Save the data.
    def save_data(self):
        """Write all collected book records to book.json.

        Uses a context manager so the file handle is closed even on error
        (the original leaked it); ensure_ascii=False keeps non-ASCII text
        readable in the output file.
        """
        with open('book.json', 'w', encoding='utf-8') as f:
            json.dump(self.data_list, f, ensure_ascii=False, indent=2)

    # Orchestrate the whole crawl.
    def start(self):
        """Fetch every listing page, parse it, then save the results."""
        for url in self.get_url_list():
            data = self.send_requset(url)
            self.parse_xpath(data)
        self.save_data()

if __name__ == '__main__':
    # Run the crawl only when executed as a script, not when imported.
    BookSpdier().start()