import requests
from lxml import etree
from bs4 import BeautifulSoup
import json


class BookSpider(object):
    """Scrape book listings (name, URL, author, summary) from allitebooks.com.

    Parsed records accumulate in ``self.data`` (one dict per book) and are
    written out as JSON by :meth:`save_data`.
    """

    def __init__(self):
        # Single URL template so pagination is just formatting in the page number.
        self.url = "http://www.allitebooks.com/page/{}"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
        }
        # Accumulates one dict per scraped book.
        self.data = []

    # 1. Build the full list of page URLs to crawl.
    def get_url_list(self):
        """Return the page URLs to crawl (currently only page 1)."""
        return [self.url.format(i) for i in range(1, 2)]

    # 2. Fetch one page.
    def get_data(self, url):
        """GET *url* with the browser-like headers and return the body as text."""
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    @staticmethod
    def _first(results, default=""):
        """Return the first item of an xpath/CSS result list, or *default*.

        Guards against pages where a selector matches nothing, which would
        otherwise raise IndexError on the bare ``[0]`` index.
        """
        return results[0] if results else default

    # 3a. Parse the page with lxml / XPath.
    def parse_xpath_data(self, data):
        """Extract name, URL, author and summary of every book on the page.

        Appends one dict per book to ``self.data`` so that either parser
        (XPath or BeautifulSoup) feeds :meth:`save_data` the same way.
        """
        xpath_data = etree.HTML(data)
        # Each book on the page is one <article> inside the main content div.
        book_list = xpath_data.xpath('//div[@class="main-content-inner clearfix"]/article')
        for book in book_list:
            book_dict = {
                'book_name': self._first(
                    book.xpath('./div[@class="entry-body"]//h2[@class="entry-title"]/a/text()')),
                'book_url': self._first(
                    book.xpath('./div[@class="entry-body"]//h2[@class="entry-title"]/a/@href')),
                'book_author': self._first(
                    book.xpath('.//h5[@class="entry-author"]/a/text()')),
                'book_info': self._first(
                    book.xpath('.//div[@class="entry-summary"]/p/text()')),
            }
            self.data.append(book_dict)

    # 3b. Parse the page with BeautifulSoup / CSS selectors.
    def parse_bs_data(self, data):
        """Extract name, URL, author and summary of every book on the page.

        Same output contract as :meth:`parse_xpath_data`: appends one dict
        per book to ``self.data``.
        """
        bs_data = BeautifulSoup(data, "lxml")
        # Each book on the page is one <article> inside the main content div.
        book_list = bs_data.select('.main-content-inner article')
        for book in book_list:
            title_links = book.select('.entry-title a')
            book_dict = {
                'book_name': title_links[0].get_text() if title_links else "",
                'book_url': title_links[0].get('href') if title_links else "",
                'book_author': self._first(
                    [a.get_text() for a in book.select('.entry-author a')]),
                'book_info': self._first(
                    [p.get_text() for p in book.select('.entry-summary p')]),
            }
            self.data.append(book_dict)

    # 4. Persist the results.
    def save_data(self, data):
        """Serialize *data* to ``book.json`` as UTF-8 JSON.

        Uses a context manager so the file handle is always closed (the
        original left it dangling); ``ensure_ascii=False`` keeps non-ASCII
        titles human-readable in the output file.
        """
        with open('book.json', 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False)

    # Orchestrate: build URLs -> fetch -> parse -> save.
    def run(self):
        """Crawl every page, parse it, and persist the accumulated results."""
        for url in self.get_url_list():
            data = self.get_data(url)
            # parse_xpath_data(data) is an equivalent alternative; both
            # parsers append into self.data.
            self.parse_bs_data(data)
        self.save_data(self.data)


if __name__ == "__main__":
    # Guard the crawl behind the main-module check so importing this file
    # (e.g. from tests) does not trigger network requests.
    BookSpider().run()