import requests
import time
import json
from lxml import etree
from bs4 import BeautifulSoup as bs
from pyquery import PyQuery as pq


def getPage(url):
    """Fetch the HTML of *url* and return it as text.

    Returns:
        The response body on HTTP 200, otherwise None (including on
        network errors such as DNS failure, timeout or connection reset).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}
    try:
        # timeout prevents a stalled connection from hanging the crawl forever.
        res = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        # Only network-level errors are swallowed; a bare except here would
        # also hide KeyboardInterrupt and programming errors.
        return None
    return res.text if res.status_code == 200 else None


def parsePage_Xpath(content):
    """Yield one dict per book parsed from *content* using XPath.

    Each dict carries 'title', 'author', 'score' and 'image'. A field
    whose node is missing yields '' instead of raising IndexError, so a
    single malformed entry no longer aborts the whole page.
    """
    def _first(node, path, default=''):
        # xpath() always returns a list; guard the empty case.
        found = node.xpath(path)
        return found[0] if found else default

    html = etree.HTML(content)
    for item in html.xpath("//table[@width='100%']"):
        yield {
            'title': _first(item, ".//div/a/@title"),
            'author': _first(item, ".//p[@class='pl']/text()"),
            'score': _first(item, ".//span[@class='rating_nums']/text()"),
            'image': _first(item, ".//img/@src"),
        }


def parsePage_bs(content):
    """Yield one dict per book parsed from *content* with BeautifulSoup.

    Each yielded dict holds the book's title, author line, rating score
    and cover-image URL, taken from the first matching element inside
    every <table width="100%"> on the page.
    """
    page = bs(content, 'lxml')
    tables = page.find_all(name='table', attrs={'width': '100%'})
    for table in tables:
        link = table.select('div a')[0]
        author_tag = table.select('p.pl')[0]
        score_tag = table.select('span.rating_nums')[0]
        cover = table.select('img')[0]
        yield {
            'title': link.attrs['title'],
            'author': author_tag.string,
            'score': score_tag.string,
            'image': cover.attrs['src'],
        }


def parsePage_pq(content):
    """Yield one dict per book parsed from *content* with PyQuery.

    Each dict carries 'title', 'author', 'score' and 'image' for every
    <table width="100%"> element on the page.
    """
    doc = pq(content)
    for item in doc("table[width='100%']").items():
        yield {
            # Use the documented callable form .attr('name'); the original
            # .attr['name'] subscript is not PyQuery's getter interface.
            'title': item.find('div a').attr('title'),
            'author': item.find('p.pl').text(),
            'score': item.find('span.rating_nums').text(),
            'image': item.find('img').attr('src'),
        }


def writeFile(content, path='./result.txt'):
    """Append *content* as a single JSON line to *path*.

    Args:
        content: any JSON-serializable object (one scraped book entry).
        path: output file to append to; defaults to './result.txt' so
            existing callers keep their behavior.
    """
    # ensure_ascii=False keeps Chinese titles human-readable in the file.
    with open(path, 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


def main(offset, key):
    """Fetch one result page and persist every parsed entry.

    Args:
        offset: pagination start index appended to the list URL.
        key: parser selector — 'a' XPath, 'b' BeautifulSoup, 'c' PyQuery;
            any other value writes nothing.
    """
    dispatch = {
        'a': parsePage_Xpath,
        'b': parsePage_bs,
        'c': parsePage_pq,
    }
    url = 'https://book.douban.com/top250?start=' + str(offset)
    html = getPage(url)
    if not html:
        return
    parser = dispatch.get(key)
    if parser is None:
        return
    for entry in parser(html):
        writeFile(entry)


if __name__ == '__main__':
    key = input('请选择解析方法 a.Xpath ; b.BeautifulSoup ; c.PyQuery :')
    # The Top 250 list is paginated at 25 books per page, 10 pages total.
    for i in range(10):
        offset = i * 25
        print('loading...')  # fixed typo: previously printed 'lodding...'
        main(offset, key)
        print('end...' + str(i + 1))
        # Throttle requests to avoid hammering the server.
        time.sleep(1)
