"""爬取豆瓣图书 TOP 250"""
# -*- coding: utf-8 -*-
# @Author: yqbao
# @GiteeURL: https://gitee.com/yqbao
# @Date: 2019/8/26 12:33
# @Version: v.0.0

import requests
import json
import time


def query(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Sends a browser-like User-Agent (plus a session Cookie) so Douban
    serves the normal page instead of blocking the request.

    Returns:
        str | None: the HTML text on HTTP 200, otherwise None
        (non-200 status or any network error).
    """
    headers = {
        'Cookie': 'bid=c6nb91rAbeg; __utma=30149280.1830282378.1566792021.1566792021.1566792021.1; __utmc=30149280; __utmz=30149280.1566792021.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utma=81379588.903464223.1566792021.1566792021.1566792021.1; __utmc=81379588; __utmz=81379588.1566792021.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); ap_v=0,6.0; _pk_id.100001.3ac3=2143d50d552ee8c2.1566792020.1.1566792090.1566792020.',
        'Host': 'book.douban.com',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
    }
    try:
        # timeout added: without it a stalled connection would hang the crawl forever
        req = requests.get(url=url, headers=headers, timeout=10)
        if req.status_code == 200:  # success: decode and return the HTML body
            return req.content.decode('utf-8')
        return None
    except requests.exceptions.RequestException:
        # connection/timeout/protocol failures are treated as "no data"
        return None


def analysis(content):
    """Parse one TOP-250 listing page and yield a dict per book.

    Each yielded dict has the keys: 'title', 'image' (cover URL),
    'writer' (author/publisher line), 'evaluate' (rating text) and
    'quote' (one-line blurb); values may be empty strings when the
    corresponding element is missing from the page.
    """

    # ########################### Parse with PyQuery ##################################
    from pyquery import PyQuery as Pq
    doc = Pq(content)  # initialize PyQuery with the raw HTML
    items = doc('table tr.item')  # each <tr class="item"> is one book entry
    for item in items.items():  # extract the wanted fields from each entry
        yield {
            'title': item.find('td div.pl2 a').text(),
            'image': item.find('td a.nbg img').attr('src'),
            'writer': item.find('td p.pl').text(),
            # key was 'evaluate ' (trailing space) — fixed to match the
            # Xpath variant below and avoid surprising dict consumers
            'evaluate': item.find('div.star span.pl').text(),
            'quote': item.find('p.quote span.inq').text(),
        }

    """
    # ########################### Parse with BeautifulSoup ##################################
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(content, 'lxml')  # initialize BeautifulSoup
    items = soup.find_all(name='tr', attrs={'class': 'item'})  # each <tr class="item"> is one book entry
    for item in items:  # extract the wanted fields from each entry
        yield {
            'title': item.select('div.pl2 a')[0].string,
            'image': item.select('a.nbg img')[0].attrs['src'],
            'writer': item.select('p.pl')[0].string,
            'evaluate': item.select('span.pl')[0].string,
            'quote': item.select('span.inq')[0].string,
        }
    """
    """
    # ########################### Parse with Xpath ##################################
    from lxml import etree
    html = etree.HTML(content)  # initialize the lxml element tree
    items = html.xpath('//tr[@class="item"]')  # each <tr class="item"> is one book entry
    for item in items:  # extract the wanted fields from each entry
        yield {
            'title': item.xpath('.//div[@class="pl2"]/a/text()')[0],
            'image': item.xpath('.//a[@class="nbg"]/img/@src')[0],
            'writer': item.xpath('.//p[@class="pl"]/text()')[0],
            'evaluate': item.xpath('.//span[@class="pl"]/text()')[0],
            'quote': item.xpath('.//span[@class="inq"]/text()')[0],
        }
    """


def save(content):
    """Append *content* to result.txt as one JSON line (JSON Lines format).

    Non-ASCII characters (e.g. Chinese titles) are written verbatim
    rather than escaped, so the file stays human-readable.
    """
    line = json.dumps(content, ensure_ascii=False)
    with open("result.txt", 'a', encoding="utf-8") as out:
        out.write(line + "\n")


def main(start):
    """Fetch one listing page (result offset *start*) and print each parsed book."""
    url = 'https://book.douban.com/top250?start={}'.format(start)
    html = query(url)
    if not html:  # fetch failed — nothing to parse
        return
    for record in analysis(html):
        print(record)
        # save(record)


if __name__ == '__main__':
    # Interactive entry point: either crawl one chosen page or all 10 pages.
    starts = input('获取指定页,请输入指定页页码（1-10），否则按 "a" 获取全部页：')
    if starts == 'a':
        # TOP 250 spans 10 pages of 25 books; range(0, 9) skipped the
        # 10th page even though the prompt promises pages 1-10.
        for i in range(10):
            print('#' * 50 + ' 正在爬取第 {} 页 '.format(i + 1) + '#' * 50)
            main(str(i * 25))
            time.sleep(25)  # throttle between pages to avoid anti-scraping bans
    else:
        print('#' * 50 + ' 正在爬取第 {} 页 '.format(starts) + '#' * 50)
        s = int(starts)  # page number -> result offset (page 1 starts at 0)
        main(str((s - 1) * 25))
