# -*- coding: utf-8 -*-
# @Time    : 18-6-28 下午4:21
# @Author  : hp
# @Email   : shaoeric@foxmail.com

import requests
from lxml import etree
import json
from time import sleep

# Default request headers, used by getBookInfo() for product-page requests.
# (getUrls() defines its own headers with the bang.dangdang.com Host.)
headers = {
    'Host': 'book.dangdang.com',
    'Referer': 'http://www.dangdang.com/',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'
}

# Sample product URL — appears to be a leftover from testing; the crawl loop
# below rebinds `url` before every use. TODO(review): confirm it can be removed.
url = "http://product.dangdang.com/23368089.html"


def getUrls(url):
    """Fetch one best-seller list page and return the product-page URLs on it.

    :param url: URL of a bang.dangdang.com best-seller list page.
    :return: list of product detail-page hrefs (strings), possibly empty.
    :raises IndexError: if the expected best-seller <ul> is not on the page.
    """
    # List pages live on a different host than product pages, so this
    # function carries its own headers instead of the module-level ones.
    headers = {
        'Host': 'bang.dangdang.com',
        'Referer': 'http://www.dangdang.com/',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'
    }
    # Timeout so a stalled connection cannot hang the whole crawl.
    html = requests.get(url=url, headers=headers, timeout=30).text
    selector = etree.HTML(html)
    ul = selector.xpath("//ul[@class='bang_list clearfix bang_list_mode']")[0]
    # BUG FIX: the original used an absolute path ("//div...") which lxml
    # evaluates against the whole document, ignoring the `ul` context.
    # ".//" restricts the search to the best-seller list itself.
    return ul.xpath(".//div[@class='pic']/a/@href")

def getBookInfo(url):
    """Scrape one product detail page and return the book's metadata.

    :param url: product.dangdang.com detail-page URL.
    :return: dict with Chinese keys: ISBN, title, publisher, selling price,
             list price, author (all strings).
    :raises IndexError: if any expected element is missing from the page.
    """
    # Uses the module-level product-page headers; timeout added so a stalled
    # connection cannot hang the crawl.
    html = requests.get(url=url, headers=headers, timeout=30).text
    selector = etree.HTML(html)

    # NOTE: the original looked up a show_info <div> and then queried it with
    # absolute ("//") XPaths, which lxml evaluates against the whole document
    # anyway — query the document selector directly to make that explicit.
    title = selector.xpath("//h1/@title")[0]
    # Publisher
    publish = selector.xpath("//span[@dd_name='出版社']/a/text()")[0]
    # Selling price (Dangdang price)
    sell_price = selector.xpath("//p[@id='dd-price']/text()")[1].strip()
    # List price
    price = selector.xpath("//div[@id='original-price']/text()")[1].strip()

    # BUG FIX: the original used .strip("国际标准书号ISBN："), but str.strip
    # treats its argument as a character *set* and can eat legitimate leading
    # or trailing characters. Split on the full-width colon instead and take
    # everything after the "国际标准书号ISBN" label.
    raw_isbn = selector.xpath("//ul[@class='key clearfix']/li/text()")[4]
    book_id = raw_isbn.split('：')[-1].strip()

    author = selector.xpath("//span[@id='author']/a[@dd_name='作者']/text()")[0]
    return {
        '图书编号': book_id,
        '书名': title,
        '出版社': publish,
        '售价': sell_price,
        '定价': price,
        '作者': author
    }



# ---- Crawl driver (runs at import time, as in the original script) ----
# NOTE(review): the original also fetched the 2014 list page here and
# discarded the result — removed as a dead network request; restore with
# hrefs.extend(getUrls(...)) if scraping 2014 was actually intended.

hrefs = []  # queue of product-page URLs still to be scraped
count = 0   # number of book records written so far
url = None  # last product URL attempted, for the failure message below

for year in range(2015, 2018):
    for page in range(1, 26):
        try:
            list_url = ("http://bang.dangdang.com/books/bestsellers/"
                        "01.00.00.00.00.00-year-{}-0-1-{}".format(year, page))
            hrefs.extend(getUrls(list_url))
            # Drain the queue: scrape every product page collected so far.
            while hrefs:
                url = hrefs.pop()
                info = getBookInfo(url)
                with open("books.json", 'a', encoding='utf-8') as f:
                    json.dump(info, f, ensure_ascii=False)
                    # BUG FIX: the original wrote records back-to-back with no
                    # separator, producing an unparseable file. One record per
                    # line (JSON Lines) keeps the output machine-readable.
                    f.write('\n')
                count += 1
                if count % 100 == 0:
                    print("已完成{}条\n".format(count))
                sleep(0.4)  # throttle requests to be polite to the server
        except IndexError:
            # Page layout did not match the XPaths — skip and keep crawling.
            pass
        except Exception as exc:
            print("在{}年{}页{}停下".format(year, page, url))
            # BUG FIX: chain the original exception instead of discarding its
            # traceback, so the real cause of the stop is visible.
            raise ConnectionError from exc

print("运行结束")

