import requests
from lxml import etree
import json



class BookSpider():
    """Scrape book listings from allitebooks.org and save them as JSON.

    Workflow (see ``start``): build listing-page URLs, fetch each page,
    extract per-book fields with XPath, then dump everything to
    ``itBooks.json``.
    """

    def __init__(self, num_pages=1):
        """
        :param num_pages: how many listing pages to crawl, starting at page 1.
                          The site has roughly 831 pages; defaults to 1 to
                          preserve the original behavior.
        """
        self.base_url = 'http://www.allitebooks.org/page/{}'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36"
        }
        self.num_pages = num_pages
        # Accumulates one dict per scraped book across all pages.
        self.data_list = []

    # 1. Build the list of listing-page URLs.
    def get_url_list(self):
        """Return the URLs of pages 1..num_pages."""
        return [self.base_url.format(i) for i in range(1, self.num_pages + 1)]

    # 2. Send the HTTP request.
    def send_request(self, url):
        """Fetch ``url`` and return the response body decoded as UTF-8."""
        return requests.get(url, headers=self.headers).content.decode('utf-8')

    # 3. Parse one listing page with XPath.
    def parse_xpath_data(self, data):
        """Extract every book on the page and append a dict per book
        to ``self.data_list``.

        XPath notes: expressions below start with ``./`` so they are
        evaluated relative to each ``<article>`` node — without the
        leading dot, ``//`` would search the whole document again.
        """
        parse_data = etree.HTML(data)
        # Each book is one <article> inside the main content area.
        book_list = parse_data.xpath('//div[@class="main-content-inner clearfix"]/article')
        for book in book_list:
            book_dict = {}
            # Title: the <h2> contains a nested node, so '//text()' is
            # needed to reach the text; join the fragments into one string.
            book_name = book.xpath('.//h2[@class="entry-title"]//text()')
            book_dict['book_name'] = ''.join(book_name).strip()
            # Cover image URL ('@src' reads the attribute); guard against
            # articles that have no thumbnail so one bad entry can't
            # abort the whole crawl.
            img_src = book.xpath('./div[@class="entry-thumbnail hover-thumb"]/a/img/@src')
            book_dict['book_img_url'] = img_src[0] if img_src else ''
            # Author(s) — kept as a list, a book may have several.
            book_dict['book_author'] = book.xpath('.//h5[@class="entry-author"]/a/text()')
            # Summary: text node [0] is whitespace between tags, [1] is the
            # actual paragraph; guard against a missing summary block.
            info = book.xpath('.//div[@class="entry-summary"]//text()')
            book_dict['book_info'] = info[1] if len(info) > 1 else ''
            self.data_list.append(book_dict)

    # 4. Save the collected data.
    def save_data(self):
        """Write ``self.data_list`` to itBooks.json as UTF-8 JSON."""
        # 'with' guarantees the file handle is closed even on error;
        # ensure_ascii=False keeps non-ASCII characters human-readable.
        with open("itBooks.json", 'w', encoding='utf-8') as f:
            json.dump(self.data_list, f, ensure_ascii=False)

    # Entry point: crawl every page, then persist the results.
    def start(self):
        for url in self.get_url_list():
            print(url)
            data = self.send_request(url)
            self.parse_xpath_data(data)
        self.save_data()


if __name__ == "__main__":
    # Guard the crawl behind __main__ so importing this module does not
    # trigger network requests and file writes as a side effect.
    BookSpider().start()